[ { "id": 167770, "commit_id": "f65417656ba8c59438d832b6e2a431f78d40c21c", "repo": "pandas", "path": "pandas/core/groupby/groupby.py", "file_name": "groupby.py", "fun_name": "rolling", "commit_message": "TYP: more return annotations in core/ (#47618)\n\n* TYP: more return annotations in core/\r\n\r\n* from __future__ import annotations\r\n\r\n* more __future__", "code": "def rolling(self, *args, **kwargs) -> RollingGroupby:\n \n from pandas.core.window import RollingGroupby\n\n return RollingGroupby(\n self._selected_obj,\n *args,\n _grouper=self.grouper,\n _as_index=self.as_index,\n **kwargs,\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 101, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 12, "token_counts": 48, "n_ast_nodes": 71, "n_identifiers": 13, "random_cut": "def rolling(self, *args, **kwargs) -> RollingGroupby:\n \n from pandas.core.window import RollingGroupby\n\n", "d_id": 40113, "documentation": { "docstring": "\n Return a rolling grouper, providing rolling functionality per group.\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 176730, "commit_id": "2a05ccdb07cff88e56661dee8a9271859354027f", "repo": "networkx", "path": "networkx/generators/degree_seq.py", "file_name": "degree_seq.py", "fun_name": "expected_degree_graph", "commit_message": "Remove redundant py2 numeric conversions (#5661)\n\n* Remove redundant float conversion\r\n\r\n* Remove redundant int conversion\r\n\r\n* Use integer division\r\n\r\nCo-authored-by: Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>", "code": "def expected_degree_graph(w, seed=None, selfloops=True):\n r\n n = len(w)\n G = nx.empty_graph(n)\n\n # If there are no nodes are no edges in the graph, return the empty graph.\n if n == 0 or max(w) == 0:\n return G\n\n rho = 1 / sum(w)\n # Sort the weights in decreasing order. The original order of the\n # weights dictates the order of the (integer) node labels, so we\n # need to remember the permutation applied in the sorting.\n order = sorted(enumerate(w), key=itemgetter(1), reverse=True)\n mapping = {c: u for c, (u, v) in enumerate(order)}\n seq = [v for u, v in order]\n last = n\n if not selfloops:\n last -= 1\n for u in range(last):\n v = u\n if not selfloops:\n v += 1\n factor = seq[u] * rho\n p = min(seq[v] * factor, 1)\n while v < n and p > 0:\n if p != 1:\n r = seed.random()\n v += math.floor(math.log(r, 1 - p))\n if v < n:\n q = min(seq[v] * factor, 1)\n if seed.random() < q / p:\n G.add_edge(mapping[u], mapping[v])\n v += 1\n p = q\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 417, "n_words": 179, "vocab_size": 97, "complexity": 13, "nloc": 100, "token_counts": 240, "n_ast_nodes": 375, "n_identifiers": 35, "random_cut": "def expected_degree_graph(w, seed=None, selfloops=True):\n r\n n = len(w)\n G = nx.empty_graph(n)\n\n # If there are no nodes are no edges in the graph, return the empty graph.\n if n == 0 or max(w) == 0:\n return G\n\n rho = 1 / sum(w)\n # Sort the weights in decreasing order. 
The original order of the\n # weights dictates the order of the (integer) node labels, so we\n # need to remember the permutation applied in the sorting.\n order = sorted(enumerate(w), key=itemgetter(1), reverse=True)\n mapping = {c: u for c, (u, v) in enumerate(order)}\n seq = [v for u, v in order]\n last = n\n if not selfloops:\n last -= 1\n for u in range(last):\n v = u\n if not selfloops:\n v += 1\n factor = seq[u] * rho\n p = min(seq[v] * factor, 1)\n while v < n and p > 0:\n if p != 1:\n r = seed.random()\n v += math.floor(math.log(r, 1 - p))\n if v < n:\n q = min(seq[v] * factor, 1)\n if seed.random() < q / p:\n G.add_edge(mapping[u", "d_id": 42064, "documentation": { "docstring": "Returns a random graph with given expected degrees.\n\n Given a sequence of expected degrees $W=(w_0,w_1,\\ldots,w_{n-1})$\n of length $n$ this algorithm assigns an edge between node $u$ and\n node $v$ with probability\n\n .. math::\n\n p_{uv} = \\frac{w_u w_v}{\\sum_k w_k} .\n\n Parameters\n ----------\n w : list\n The list of expected degrees.\n selfloops: bool (default=True)\n Set to False to remove the possibility of self-loop edges.\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness`.\n\n Returns\n -------\n Graph\n\n Examples\n --------\n >>> z = [10 for i in range(100)]\n >>> G = nx.expected_degree_graph(z)\n\n Notes\n -----\n The nodes have integer labels corresponding to index of expected degrees\n input sequence.\n\n The complexity of this algorithm is $\\mathcal{O}(n+m)$ where $n$ is the\n number of nodes and $m$ is the expected number of edges.\n\n The model in [1]_ includes the possibility of self-loop edges.\n Set selfloops=False to produce a graph without self loops.\n\n For finite graphs this model doesn't produce exactly the given\n expected degree sequence. Instead the expected degrees are as\n follows.\n\n For the case without self loops (selfloops=False),\n\n .. math::\n\n E[deg(u)] = \\sum_{v \\ne u} p_{uv}\n = w_u \\left( 1 - \\frac{w_u}{\\sum_k w_k} \\right) .\n\n\n NetworkX uses the standard convention that a self-loop edge counts 2\n in the degree of a node, so with self loops (selfloops=True),\n\n .. math::\n\n E[deg(u)] = \\sum_{v \\ne u} p_{uv} + 2 p_{uu}\n = w_u \\left( 1 + \\frac{w_u}{\\sum_k w_k} \\right) .\n\n References\n ----------\n .. [1] Fan Chung and L. Lu, Connected components in random graphs with\n given expected degree sequences, Ann. Combinatorics, 6,\n pp. 125-145, 2002.\n .. [2] Joel Miller and Aric Hagberg,\n Efficient generation of networks with given expected degrees,\n in Algorithms and Models for the Web-Graph (WAW 2011),\n Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732,\n pp. 
115-126, 2011.\n ", "n_words": 298, "vocab_size": 173, "n_whitespaces": 524, "language": "en" } }, { "id": 19151, "commit_id": "4c58179509e6f6047789efb0a95c2b0e20cb6c8f", "repo": "mlflow", "path": "mlflow/models/evaluation/base.py", "file_name": "base.py", "fun_name": "save", "commit_message": "Improve evaluation api (#5256)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap limitation on value type\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix format\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def save(self, path):\n \n os.makedirs(path, exist_ok=True)\n with open(os.path.join(path, \"metrics.json\"), \"w\") as fp:\n json.dump(self.metrics, fp)\n\n artifacts_metadata = {\n artifact_name: {\n \"uri\": artifact.uri,\n \"class_name\": _get_fully_qualified_class_name(artifact),\n }\n for artifact_name, artifact in self.artifacts.items()\n }\n with open(os.path.join(path, \"artifacts_metadata.json\"), \"w\") as fp:\n json.dump(artifacts_metadata, fp)\n\n artifacts_dir = os.path.join(path, \"artifacts\")\n os.mkdir(artifacts_dir)\n\n for artifact_name, artifact in self.artifacts.items():\n artifact._save(os.path.join(artifacts_dir, artifact_name))\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 208, "n_words": 49, "vocab_size": 36, "complexity": 3, "nloc": 17, "token_counts": 153, "n_ast_nodes": 253, "n_identifiers": 22, "random_cut": "def save(self, path):\n \n os.makedirs(path,", "d_id": 2897, "documentation": { "docstring": "Write the evaluation results to the specified local filesystem path", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 89933, "commit_id": "3255fa4ebb9fbc1df6bb063c0eb77a0298ca8f72", "repo": "sentry", "path": "tests/sentry/integrations/slack/test_message_builder.py", "file_name": "test_message_builder.py", "fun_name": "test_build_group_generic_issue_attachment", "commit_message": "feat(integrations): Support generic issue type alerts (#42110)\n\nAdd support for issue alerting integrations that use the message builder\r\n(Slack and MSTeams) for generic issue types.\r\n\r\n\r\nPreview text for Slack alert:\r\n\"Screen\r\n\r\nSlack generic issue alert shows the `occurrence.issue_title` and the\r\n\"important\" evidence value\r\n\"Screen\r\n\r\nMSTeams generic issue alert shows the `occurrence.issue_title` and the\r\n\"important\" evidence value\r\n\"Screen\r\n\r\n\r\nFixes #42047", "code": "def test_build_group_generic_issue_attachment(self):\n \n event = self.store_event(\n data={\"message\": \"Hello world\", \"level\": \"error\"}, project_id=self.project.id\n )\n event = event.for_group(event.groups[0])\n occurrence = self.build_occurrence(level=\"info\")\n occurrence.save(project_id=self.project.id)\n event.occurrence = occurrence\n\n event.group.type = GroupType.PROFILE_BLOCKED_THREAD\n\n attachments = SlackIssuesMessageBuilder(group=event.group, 
event=event).build()\n\n assert attachments[\"title\"] == occurrence.issue_title\n assert attachments[\"text\"] == occurrence.evidence_display[0].value\n assert attachments[\"fallback\"] == f\"[{self.project.slug}] {occurrence.issue_title}\"\n assert attachments[\"color\"] == \"#2788CE\" # blue for info level\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 154, "n_words": 51, "vocab_size": 38, "complexity": 1, "nloc": 14, "token_counts": 137, "n_ast_nodes": 249, "n_identifiers": 25, "random_cut": "def test_build_group_generic_issue_attachment(self):\n \n event = self.store_event(\n data={\"message\": \"Hello world\", \"level\": \"error\"}, project_id=self.project.id\n )\n event = event.for_group(event.groups[0])\n occurrence = self.build_occurrence(level=\"info\")\n occurrence.save(project_id=self.project.id)\n event.occurrence = occurrence\n\n event.group.type = GroupType.PROFILE_BLOCKED_THREAD\n\n attachments = SlackIssuesMessageBuilder(group=event.group, event=event).build()\n\n assert attachments[\"title\"] == occurrence.issue_title\n assert attachments[\"text\"] == occurrence.evidence_display[0].value\n assert attachments[\"fallback\"] == f\"[{self.project.slug}] {occurrence.issue_title}\"\n assert attachments[\"color\"] =", "d_id": 18592, "documentation": { "docstring": "Test that a generic issue type's Slack alert contains the expected values", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 179114, "commit_id": "b3bc4e734528d3b186c3a38a6e73e106c3555cc7", "repo": "DeepFaceLive", "path": "xlib/image/ImageProcessor.py", "file_name": "ImageProcessor.py", "fun_name": "apply", "commit_message": "ImageProcessor.py refactoring", "code": "def apply(self, func, mask=None) -> 'ImageProcessor':\n \n img = orig_img = self._img\n img = func(img).astype(orig_img.dtype)\n if img.ndim != 4:\n raise Exception('func used in ImageProcessor.apply changed format of image')\n\n if mask is not None:\n mask = self._check_normalize_mask(mask)\n img = ne.evaluate('orig_img*(1-mask) + img*mask').astype(orig_img.dtype)\n\n self._img = img\n return self\n", "url": "https://github.com/iperov/DeepFaceLive.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 127, "n_words": 45, "vocab_size": 34, "complexity": 3, "nloc": 21, "token_counts": 82, "n_ast_nodes": 137, "n_identifiers": 14, "random_cut": "def apply(self, func, mask=None) -> 'ImageProcessor':\n \n img = orig_img = self._img\n img = func(img).astype(orig_img.dtype)\n if img.ndim != 4:\n raise Exception('func used in ImageProcessor.apply changed format of image')\n\n if mask is not None:\n ", "d_id": 42906, "documentation": { "docstring": "\n apply your own function on internal image\n\n image has NHWC format. 
Do not change format, but dims can be changed.\n\n func callable (img) -> img\n\n example:\n\n .apply( lambda img: img-[102,127,63] )\n ", "n_words": 31, "vocab_size": 30, "n_whitespaces": 79, "language": "en" } }, { "id": 39007, "commit_id": "843dba903757d592f7703a83ebd75eb3ffb46f6f", "repo": "recommenders", "path": "recommenders/models/rbm/rbm.py", "file_name": "rbm.py", "fun_name": "predict", "commit_message": "removed time from returning args", "code": "def predict(self, x):\n \n\n # start the timer\n self.timer.start()\n\n v_, _ = self.eval_out() # evaluate the ratings and the associated probabilities\n vp = self.sess.run(v_, feed_dict={self.vu: x})\n \n # stop the timer\n self.timer.stop()\n\n log.info(\"Done inference, time %f2\" % self.timer.interval)\n\n return vp\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 38, "vocab_size": 30, "complexity": 1, "nloc": 7, "token_counts": 65, "n_ast_nodes": 111, "n_identifiers": 17, "random_cut": "def predict(self, x):\n \n\n # start the timer\n self.timer.start()\n\n v_, _ = self", "d_id": 7073, "documentation": { "docstring": "Returns the inferred ratings. This method is similar to recommend_k_items() with the\n exceptions that it returns all the inferred ratings\n\n Basic mechanics:\n\n The method samples new ratings from the learned joint distribution, together with\n their probabilities. The input x must have the same number of columns as the one used\n for training the model, i.e. the same number of items, but it can have an arbitrary number\n of rows (users).\n\n Args:\n x (numpy.ndarray, int32): Input user/affinity matrix. Note that this can be a single vector, i.e.\n the ratings of a single user.\n\n Returns:\n numpy.ndarray, float:\n - A matrix with the inferred ratings.\n - The elapsed time for predediction.\n ", "n_words": 108, "vocab_size": 73, "n_whitespaces": 226, "language": "en" } }, { "id": 218569, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/json/decoder.py", "file_name": "decoder.py", "fun_name": "raw_decode", "commit_message": "add python 3.10.4 for windows", "code": "def raw_decode(self, s, idx=0):\n \n try:\n obj, end = self.scan_once(s, idx)\n except StopIteration as err:\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\n return obj, end\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 24, "vocab_size": 21, "complexity": 2, "nloc": 6, "token_counts": 48, "n_ast_nodes": 76, "n_identifiers": 11, "random_cut": "def raw_decode(self, s, idx=0):\n \n try:\n obj, end = self.scan_once(s, idx)\n except StopIteration as err:\n raise JSONDecodeError(\"Expecting value\", s, err.val", "d_id": 55394, "documentation": { "docstring": "Decode a JSON document from ``s`` (a ``str`` beginning with\n a JSON document) and return a 2-tuple of the Python\n representation and the index in ``s`` where the document ended.\n\n This can be used to decode a JSON document from a string that may\n have extraneous data at the end.\n\n ", "n_words": 50, "vocab_size": 36, "n_whitespaces": 85, "language": "en" } }, { "id": 176561, "commit_id": "aa1f40a93a882db304e9a06c2a11d93b2532d80a", "repo": "networkx", "path": "networkx/algorithms/bridges.py", "file_name": "bridges.py", "fun_name": "has_bridges", "commit_message": "Improve bridges documentation 
(#5519)\n\n* Fix bridges documentation\r\n\r\n* Revert source code modification\r\n\r\n* Revert raise errors for multigraphs", "code": "def has_bridges(G, root=None):\n \n try:\n next(bridges(G))\n except StopIteration:\n return False\n else:\n return True\n\n\n@not_implemented_for(\"multigraph\")\n@not_implemented_for(\"directed\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"multigraph\")\n@not_implemented_for(\"directed\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 45, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 7, "token_counts": 28, "n_ast_nodes": 70, "n_identifiers": 7, "random_cut": "def has_bridges(G, root=None):\n \n try:\n next(bridges", "d_id": 41965, "documentation": { "docstring": "Decide whether a graph has any bridges.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. If specified, only the bridges in the\n connected component containing this node will be considered.\n\n Returns\n -------\n bool\n Whether the graph (or the connected component containing `root`)\n has any bridges.\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n NetworkXNotImplemented\n If `G` is a directed graph.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> nx.has_bridges(G)\n True\n\n On the other hand, the cycle graph has no bridges::\n\n >>> G = nx.cycle_graph(5)\n >>> nx.has_bridges(G)\n False\n\n Notes\n -----\n This implementation uses the :func:`networkx.bridges` function, so\n it shares its worst-case time complexity, $O(m + n)$, ignoring\n polylogarithmic factors, where $n$ is the number of nodes in the\n graph and $m$ is the number of edges.\n\n ", "n_words": 167, "vocab_size": 106, "n_whitespaces": 318, "language": "en" } }, { "id": 61338, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py", "file_name": "wheel.py", "fun_name": "wheel_metadata", "commit_message": "upd; format", "code": "def wheel_metadata(source, dist_info_dir):\n # type: (ZipFile, str) -> Message\n \n path = f\"{dist_info_dir}/WHEEL\"\n # Zip file path separators must be /\n wheel_contents = read_wheel_metadata_file(source, path)\n\n try:\n wheel_text = wheel_contents.decode()\n except UnicodeDecodeError as e:\n raise UnsupportedWheel(f\"error decoding {path!r}: {e!r}\")\n\n # FeedParser (used by Parser) does not raise any exceptions. 
The returned\n # message may have .defects populated, but for backwards-compatibility we\n # currently ignore them.\n return Parser().parsestr(wheel_text)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 112, "n_words": 65, "vocab_size": 57, "complexity": 2, "nloc": 8, "token_counts": 49, "n_ast_nodes": 103, "n_identifiers": 13, "random_cut": "def wheel_metadata(source, dist_info_dir):\n # type: (ZipFile, str) -> Message\n \n path = f\"{dist_info_dir}/WHEEL\"\n # Zip file path separators must be /\n wheel_contents = read_wheel_metadata_file(source, path)\n\n try:\n wheel_text = wheel_contents.decode()\n except UnicodeDecodeError as e:\n raise UnsupportedWheel(f\"error decoding {path!r}: {e!r}\")\n\n # FeedParser (used by Parser) does not raise any exceptions. The returned\n # message may have .defects populated, but for backwards-compatibility", "d_id": 12520, "documentation": { "docstring": "Return the WHEEL metadata of an extracted wheel, if possible.\n Otherwise, raise UnsupportedWheel.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 104416, "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "remove_column", "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: 
Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", "code": "def remove_column(self, i, *args, **kwargs):\n \n table = self.table.remove_column(i, *args, **kwargs)\n name = self.table.column_names[i]\n blocks = []\n for tables in self.blocks:\n blocks.append(\n [\n t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t\n for t in tables\n ]\n )\n return ConcatenationTable(table, blocks)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 172, "n_words": 40, "vocab_size": 29, "complexity": 4, "nloc": 12, "token_counts": 96, "n_ast_nodes": 145, "n_identifiers": 14, "random_cut": "def remove_column(self, i, *args, **kwargs):\n \n table = self.table.remove_column(i, *args, **kwargs)\n name = self.table.column_names[i]\n blocks = []\n for tables in self.blocks:\n blocks.append(\n [\n t.remove_colu", "d_id": 21852, "documentation": { "docstring": "\n Create new Table with the indicated column removed.\n\n Args:\n i (:obj:`int`):\n Index of column to remove.\n\n Returns:\n :class:`datasets.table.Table`:\n New table without the column.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 104, "language": "en" } }, { "id": 264886, "commit_id": "3a461d02793e6f9d41c2b1a92647e691de1abaac", "repo": "netbox", "path": "netbox/dcim/tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_cable_cannot_terminate_to_a_wireless_interface", "commit_message": "Update Cable instantiations to match new signature", "code": "def test_cable_cannot_terminate_to_a_wireless_interface(self):\n \n wireless_interface = Interface(device=self.device1, name=\"W1\", type=InterfaceTypeChoices.TYPE_80211A)\n cable = Cable(a_terminations=[self.interface2], b_terminations=[wireless_interface])\n with self.assertRaises(ValidationError):\n cable.clean()\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 57, "n_ast_nodes": 95, "n_identifiers": 18, "random_cut": "def test_cable_cannot_terminate_to_a_wireless_interface(self):\n \n wireless_interface = Interface(device=self.device1, name=\"W1\", type=InterfaceTypeChoices.TYPE_80211A)\n cable = Cable(a_terminations=[self.interface2], b_terminations=[wireless_interface])\n with self.assertRaises(ValidationError):\n cable.clean()\n", "d_id": 77897, "documentation": { "docstring": "\n A cable cannot terminate to a wireless interface\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 204838, "commit_id": 
"9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/creation.py", "file_name": "creation.py", "fun_name": "get_test_db_clone_settings", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_test_db_clone_settings(self, suffix):\n \n # When this function is called, the test database has been created\n # already and its name has been copied to settings_dict['NAME'] so\n # we don't need to call _get_test_db_name.\n orig_settings_dict = self.connection.settings_dict\n return {\n **orig_settings_dict,\n \"NAME\": \"{}_{}\".format(orig_settings_dict[\"NAME\"], suffix),\n }\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 114, "n_words": 43, "vocab_size": 38, "complexity": 1, "nloc": 6, "token_counts": 35, "n_ast_nodes": 63, "n_identifiers": 7, "random_cut": "def get_test_db_clone_settings(self, suffix):\n \n # When this function is called, the test database has been created\n # already and its name has been copied to", "d_id": 50917, "documentation": { "docstring": "\n Return a modified connection settings dict for the n-th clone of a DB.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 217907, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/imaplib.py", "file_name": "imaplib.py", "fun_name": "open", "commit_message": "add python 3.10.4 for windows", "code": "def open(self, host='', port=IMAP4_PORT, timeout=None):\n \n self.host = host\n self.port = port\n self.sock = self._create_socket(timeout)\n self.file = self.sock.makefile('rb')\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 52, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 50, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def open(self, host='', port=IMAP4_PORT, timeout=None):\n \n self.host = host\n self.port = port\n self.sock = self._create_socket(timeout)\n self.file = self.sock.makefile('rb')\n\n", "d_id": 55005, "documentation": { "docstring": "Setup connection to remote server on \"host:port\"\n (default: localhost:standard IMAP4 port).\n This connection will be used by the routines:\n read, readline, send, shutdown.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 59, "language": "en" } }, { "id": 183574, "commit_id": "7f27e70440c177b2a047b7f74a78ed5cd5b4b596", "repo": "textual", "path": "src/textual/_terminal_features.py", "file_name": "_terminal_features.py", "fun_name": "synchronized_output_end_sequence", "commit_message": "[terminal buffering] Address PR feedback", "code": "def synchronized_output_end_sequence(self) -> str:\n \n if self.synchronised_output:\n return TERMINAL_MODES_ANSI_SEQUENCES[Mode.SynchronizedOutput][\"end_sync\"]\n return \"\"\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 42, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 13, "token_counts": 25, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def synchronized_output_end_sequence(self) -> str:\n \n if self.synchronised_output:\n return", "d_id": 44257, "documentation": { "docstring": "\n Returns the ANSI sequence that we should send to the terminal to tell it that\n it should stop buffering the content we're about to send.\n If the terminal 
doesn't seem to support synchronised updates the string will be empty.\n\n Returns:\n str: the \"synchronised output stop\" ANSI sequence. It will be ab empty string\n if the terminal emulator doesn't seem to support the \"synchronised updates\" mode.\n ", "n_words": 65, "vocab_size": 41, "n_whitespaces": 127, "language": "en" } }, { "id": 162681, "commit_id": "f6021faf2a8e62f88a8d6979ce812dcb71133a8f", "repo": "AutoEq", "path": "frequency_response.py", "file_name": "frequency_response.py", "fun_name": "_band_penalty_coefficients", "commit_message": "Improved quality regularization to a point where it works well. 10 kHz to 20 kHz is RMSE is calculated from the average levels. Split neo PEQ notebook by band and Q.", "code": "def _band_penalty_coefficients(self, fc, q, gain, filter_frs):\n \n ref_frs = biquad.digital_coeffs(self.frequency, 192e3, *biquad.peaking(fc, q, gain, fs=192e3))\n est_sums = np.sum(filter_frs, axis=1)\n ref_sums = np.sum(ref_frs, axis=1)\n penalties = np.zeros((len(fc),))\n mask = np.squeeze(ref_sums) != 0.0\n penalties[mask] = est_sums[mask] / ref_sums[mask]\n return 10 * (1 - np.expand_dims(penalties, 1))\n", "url": "https://github.com/jaakkopasanen/AutoEq.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 98, "n_words": 42, "vocab_size": 34, "complexity": 1, "nloc": 8, "token_counts": 121, "n_ast_nodes": 176, "n_identifiers": 23, "random_cut": "def _band_penalty_coefficients(self, fc, q, gain, filter_frs):\n \n ref_frs = biquad.digital_coeffs(self.frequenc", "d_id": 39253, "documentation": { "docstring": "Calculates penalty coefficients for filters if their transition bands extend beyond Nyquist frequency\n\n The calculation is based on ratio of frequency response integrals between 44.1 kHz and 192 kHz\n\n Args:\n fc: Filter center frequencies, 1-D array\n q: Filter qualities, 1-D array\n gain: Filter gains, 1-D array\n filter_frs: Filter frequency responses, 2-D array, one fr per row\n\n Returns:\n Column array of penalty coefficients, one per filter\n ", "n_words": 65, "vocab_size": 50, "n_whitespaces": 148, "language": "en" } }, { "id": 261153, "commit_id": "02b04cb3ecfc5fce1f627281c312753f3b4b8494", "repo": "scikit-learn", "path": "sklearn/ensemble/tests/test_voting.py", "file_name": "test_voting.py", "fun_name": "test_predict_on_toy_problem", "commit_message": "TST use global_random_seed in sklearn/ensemble/tests/test_voting.py (#24282)\n\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def test_predict_on_toy_problem(global_random_seed):\n \n clf1 = LogisticRegression(random_state=global_random_seed)\n clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)\n clf3 = GaussianNB()\n\n X = np.array(\n [[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]]\n )\n\n y = np.array([1, 1, 1, 2, 2, 2])\n\n assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])\n assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])\n assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])\n\n eclf = VotingClassifier(\n estimators=[(\"lr\", clf1), (\"rf\", clf2), (\"gnb\", clf3)],\n voting=\"hard\",\n weights=[1, 1, 1],\n )\n assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])\n\n eclf = VotingClassifier(\n estimators=[(\"lr\", clf1), (\"rf\", clf2), (\"gnb\", clf3)],\n voting=\"soft\",\n weights=[1, 1, 1],\n )\n assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])\n\n", "url": 
"https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 201, "n_words": 104, "vocab_size": 48, "complexity": 1, "nloc": 23, "token_counts": 357, "n_ast_nodes": 469, "n_identifiers": 22, "random_cut": "def test_predict_on_toy_problem(global_random_seed):\n \n clf1 = LogisticRegression(random_state=global_random_seed)\n clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)\n clf3 = GaussianNB()\n\n X = np.array(\n [[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]]\n )\n\n y = np.array([1, 1, 1, 2, 2, 2])\n\n assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])\n assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])\n assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])\n\n eclf = VotingClassifier(\n estimators=[(\"lr\", clf1), (\"rf\", clf2), (\"gnb\", clf3)],\n voting=\"hard\",\n weights=[1, 1, 1],\n )\n assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])\n\n eclf = VotingClassifier(\n estimators=[(\"lr\", clf1), (\"rf\", clf2), (\"gnb\", clf3)],\n voting=\"soft\",\n weights=[1, 1, 1],\n )\n assert_array", "d_id": 76664, "documentation": { "docstring": "Manually check predicted class labels for toy dataset.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 260448, "commit_id": "5a850eb044ca07f1f3bcb1b284116d6f2d37df1b", "repo": "scikit-learn", "path": "sklearn/feature_extraction/_dict_vectorizer.py", "file_name": "_dict_vectorizer.py", "fun_name": "fit_transform", "commit_message": "MAINT Param validation for Dictvectorizer (#23820)", "code": "def fit_transform(self, X, y=None):\n \n self._validate_params()\n return self._transform(X, fitting=True)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def fit_transform(self, X, y=None):\n \n self._validate_params()\n return self._tran", "d_id": 76257, "documentation": { "docstring": "Learn a list of feature name -> indices mappings and transform X.\n\n Like fit(X) followed by transform(X), but does not require\n materializing X in memory.\n\n Parameters\n ----------\n X : Mapping or iterable over Mappings\n Dict(s) or Mapping(s) from feature names (arbitrary Python\n objects) to feature values (strings or convertible to dtype).\n\n .. 
versionchanged:: 0.24\n Accepts multiple string values for one categorical feature.\n\n y : (ignored)\n Ignored parameter.\n\n Returns\n -------\n Xa : {array, sparse matrix}\n Feature vectors; always 2-d.\n ", "n_words": 78, "vocab_size": 69, "n_whitespaces": 217, "language": "en" } }, { "id": 321150, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/webengine/webenginetab.py", "file_name": "webenginetab.py", "fun_name": "_on_feature_permission_requested", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def _on_feature_permission_requested(self, url, feature):\n \n page = self._widget.page()\n grant_permission = functools.partial(\n page.setFeaturePermission, url, feature,\n QWebEnginePage.PermissionPolicy.PermissionGrantedByUser)\n deny_permission = functools.partial(\n page.setFeaturePermission, url, feature,\n QWebEnginePage.PermissionPolicy.PermissionDeniedByUser)\n\n permission_str = debug.qenum_key(QWebEnginePage, feature)\n\n if not url.isValid():\n # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-85116\n is_qtbug = (qtutils.version_check('5.15.0',\n compiled=False,\n exact=True) and\n self._tab.is_private and\n feature == QWebEnginePage.Feature.Notifications)\n logger = log.webview.debug if is_qtbug else log.webview.warning\n logger(\"Ignoring feature permission {} for invalid URL {}\".format(\n permission_str, url))\n deny_permission()\n return\n\n if feature not in self._options:\n log.webview.error(\"Unhandled feature permission {}\".format(\n permission_str))\n deny_permission()\n return\n\n if (\n feature in [QWebEnginePage.Feature.DesktopVideoCapture,\n QWebEnginePage.Feature.DesktopAudioVideoCapture] and\n qtutils.version_check('5.13', compiled=False) and\n not qtutils.version_check('5.13.2', compiled=False)\n ):\n # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-78016\n log.webview.warning(\"Ignoring desktop sharing request due to \"\n \"crashes in Qt < 5.13.2\")\n deny_permission()\n return\n\n question = shared.feature_permission(\n url=url.adjusted(QUrl.UrlFormattingOption.RemovePath),\n option=self._options[feature], msg=self._messages[feature],\n yes_action=grant_permission, no_action=deny_permission,\n abort_on=[self._tab.abort_questions])\n\n if question is not None:\n page.featurePermissionRequestCanceled.connect(\n functools.partial(self._on_feature_permission_cancelled,\n question, url, feature))\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 761, "n_words": 125, "vocab_size": 84, "complexity": 10, "nloc": 44, "token_counts": 301, "n_ast_nodes": 470, "n_identifiers": 54, "random_cut": "def _on_feature_permission_requested(self, url, feature):\n \n page = self._widget.page()\n grant_permission = functools.partial(\n page.setFeaturePermission, url, feature,\n QWebEnginePage.PermissionPolicy.PermissionGrantedByUser)\n deny_permission = functools.partial(\n page.setFeaturePermission, url, feature,\n QWebEnginePage.PermissionPolicy.PermissionDeniedByUser)\n\n permission_str = debug.qenum_key(QWebEnginePage, feature)\n\n if not url.isValid():\n # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-85116\n is_qtbug = (qtutils.version_check('5.15.0',\n compiled=False,\n exact=True) and\n self._tab.is_private and\n feature == QWebEnginePage.Feature.Notifications)\n logger = log.webview.debug if is_qtbug else log.webview.warning\n logger(\"Ignoring feature permission {} for invalid 
URL {}\".format(\n permission_str, url))\n deny_permission()\n return\n\n if feature not in self._options:\n log.webview.error(\"Unhandled feature permission {}\".format(\n permission_str))\n deny_permission()\n return\n\n if (\n feature in [QWebEnginePage.Feature.DesktopVideoCapture,\n QWebEnginePage.Feature.DesktopAudioVideoCapture] and\n qtutils.version_check('5.13', compiled=", "d_id": 117565, "documentation": { "docstring": "Ask the user for approval for geolocation/media/etc..", "n_words": 7, "vocab_size": 6, "n_whitespaces": 6, "language": "en" } }, { "id": 222643, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/bdist_msi.py", "file_name": "bdist_msi.py", "fun_name": "add_find_python", "commit_message": "add python 3.10.4 for windows", "code": "def add_find_python(self):\n \n\n start = 402\n for ver in self.versions:\n install_path = r\"SOFTWARE\\Python\\PythonCore\\%s\\InstallPath\" % ver\n machine_reg = \"python.machine.\" + ver\n user_reg = \"python.user.\" + ver\n machine_prop = \"PYTHON.MACHINE.\" + ver\n user_prop = \"PYTHON.USER.\" + ver\n machine_action = \"PythonFromMachine\" + ver\n user_action = \"PythonFromUser\" + ver\n exe_action = \"PythonExe\" + ver\n target_dir_prop = \"TARGETDIR\" + ver\n exe_prop = \"PYTHON\" + ver\n if msilib.Win64:\n # type: msidbLocatorTypeRawValue + msidbLocatorType64bit\n Type = 2+16\n else:\n Type = 2\n add_data(self.db, \"RegLocator\",\n [(machine_reg, 2, install_path, None, Type),\n (user_reg, 1, install_path, None, Type)])\n add_data(self.db, \"AppSearch\",\n [(machine_prop, machine_reg),\n (user_prop, user_reg)])\n add_data(self.db, \"CustomAction\",\n [(machine_action, 51+256, target_dir_prop, \"[\" + machine_prop + \"]\"),\n (user_action, 51+256, target_dir_prop, \"[\" + user_prop + \"]\"),\n (exe_action, 51+256, exe_prop, \"[\" + target_dir_prop + \"]\\\\python.exe\"),\n ])\n add_data(self.db, \"InstallExecuteSequence\",\n [(machine_action, machine_prop, start),\n (user_action, user_prop, start + 1),\n (exe_action, None, start + 2),\n ])\n add_data(self.db, \"InstallUISequence\",\n [(machine_action, machine_prop, start),\n (user_action, user_prop, start + 1),\n (exe_action, None, start + 2),\n ])\n add_data(self.db, \"Condition\",\n [(\"Python\" + ver, 0, \"NOT TARGETDIR\" + ver)])\n start += 4\n assert start < 500\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 784, "n_words": 167, "vocab_size": 86, "complexity": 3, "nloc": 42, "token_counts": 304, "n_ast_nodes": 469, "n_identifiers": 20, "random_cut": "def add_find_python(self):\n \n\n start = 402\n for ver in self.versions:\n install_path = r\"SOFTWARE\\Python\\PythonCore\\%s\\InstallPath\" % ver\n machine_reg = \"python.machine.\" + ver\n user_reg = \"python.user.\" + ver\n machine_prop = \"PYTHON.MACHINE.\" + ver\n user_prop = \"PYTHON.USER.\" + ver\n machine_action = \"Pyth", "d_id": 56684, "documentation": { "docstring": "Adds code to the installer to compute the location of Python.\n\n Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the\n registry for each version of Python.\n\n Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,\n else from PYTHON.MACHINE.X.Y.\n\n Properties PYTHONX.Y will be set to TARGETDIRX.Y\\\\python.exe", "n_words": 45, "vocab_size": 28, "n_whitespaces": 79, "language": "en" } }, { "id": 61961, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", 
"repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "write_exports", "commit_message": "upd; format", "code": "def write_exports(self, exports):\n \n rf = self.get_distinfo_file(EXPORTS_FILENAME)\n with open(rf, 'w') as f:\n write_exports(exports, f)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 57, "n_identifiers": 8, "random_cut": "def write_exports(self, exports):\n \n rf = self", "d_id": 12781, "documentation": { "docstring": "\n Write a dictionary of exports to a file in .ini format.\n :param exports: A dictionary of exports, mapping an export category to\n a list of :class:`ExportEntry` instances describing the\n individual export entries.\n ", "n_words": 32, "vocab_size": 25, "n_whitespaces": 100, "language": "en" } }, { "id": 267337, "commit_id": "621e782ed0c119d2c84124d006fdf253c082449a", "repo": "ansible", "path": "lib/ansible/executor/task_executor.py", "file_name": "task_executor.py", "fun_name": "_get_action_handler_with_module_context", "commit_message": "Add toggle to fix module_defaults with module-as-redirected-action on a per-module basis (#77265)\n\n* If there is a platform specific handler, prefer the resolved module over the resolved action when loading module_defaults\r\n\r\nAdd a toggle for action plugins to prefer the resolved module when loading module_defaults\r\n\r\nAllow moving away from modules intercepted as actions pattern\r\n\r\nFixes #77059", "code": "def _get_action_handler_with_module_context(self, connection, templar):\n \n module_collection, separator, module_name = self._task.action.rpartition(\".\")\n module_prefix = module_name.split('_')[0]\n if module_collection:\n # For network modules, which look for one action plugin per platform, look for the\n # action plugin in the same collection as the module by prefixing the action plugin\n # with the same collection.\n network_action = \"{0}.{1}\".format(module_collection, module_prefix)\n else:\n network_action = module_prefix\n\n collections = self._task.collections\n\n # Check if the module has specified an action handler\n module = self._shared_loader_obj.module_loader.find_plugin_with_context(\n self._task.action, collection_list=collections\n )\n if not module.resolved or not module.action_plugin:\n module = None\n if module is not None:\n handler_name = module.action_plugin\n # let action plugin override module, fallback to 'normal' action plugin otherwise\n elif self._shared_loader_obj.action_loader.has_plugin(self._task.action, collection_list=collections):\n handler_name = self._task.action\n elif all((module_prefix in C.NETWORK_GROUP_MODULES, self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections))):\n handler_name = network_action\n display.vvvv(\"Using network group action {handler} for {action}\".format(handler=handler_name,\n action=self._task.action),\n host=self._play_context.remote_addr)\n else:\n # use ansible.legacy.normal to allow (historic) local action_plugins/ override without collections search\n handler_name = 'ansible.legacy.normal'\n collections = None # until then, we don't want the task's collection list to be consulted; use the builtin\n\n handler = self._shared_loader_obj.action_loader.get(\n handler_name,\n task=self._task,\n 
connection=connection,\n play_context=self._play_context,\n loader=self._loader,\n templar=templar,\n shared_loader_obj=self._shared_loader_obj,\n collection_list=collections\n )\n\n if not handler:\n raise AnsibleError(\"the handler '%s' was not found\" % handler_name)\n\n return handler, module\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 685, "n_words": 191, "vocab_size": 117, "complexity": 8, "nloc": 38, "token_counts": 264, "n_ast_nodes": 420, "n_identifiers": 41, "random_cut": "def _get_action_handler_with_module_context(self, connection, templar):\n \n module_collection, separator, module_name = self._task.action.rpartition(\".\")\n module_prefix = module_name.split('_')[0]\n if module_collection:\n # For network modules, which look for one action plugin per platform, look for the\n # action plugin in the same collection as the module by prefixing the action plugin\n # with the same collecti", "d_id": 78856, "documentation": { "docstring": "\n Returns the correct action plugin to handle the requestion task action and the module context\n ", "n_words": 15, "vocab_size": 12, "n_whitespaces": 30, "language": "en" } }, { "id": 262500, "commit_id": "c17ff17a18f21be60c6916714ac8afd87d4441df", "repo": "TTS", "path": "TTS/tts/layers/losses.py", "file_name": "losses.py", "fun_name": "forward", "commit_message": "Fix SSIM loss", "code": "def forward(self, y_hat, y, length):\n \n mask = sequence_mask(sequence_length=length, max_len=y.size(1)).unsqueeze(2)\n y_norm = sample_wise_min_max(y, mask)\n y_hat_norm = sample_wise_min_max(y_hat, mask)\n ssim_loss = self.loss_func((y_norm * mask).unsqueeze(1), (y_hat_norm * mask).unsqueeze(1))\n\n if ssim_loss.item() > 1.0:\n print(f\" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 1.0\")\n ssim_loss == 1.0\n\n if ssim_loss.item() < 0.0:\n print(f\" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 0.0\")\n ssim_loss == 0.0\n\n return ssim_loss\n\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 161, "n_words": 61, "vocab_size": 40, "complexity": 3, "nloc": 12, "token_counts": 122, "n_ast_nodes": 203, "n_identifiers": 18, "random_cut": "def forward(self, y_hat, y, length):\n \n mask = sequence_mask(sequence_length=length, max_len=y.size(1)).unsqueeze(2)\n y_norm = sample_wise_min_max(y, mask)\n y_hat_norm = sample_wise_min_max(y_hat, mask)\n ssim_loss = self.loss_func((y_norm * mask).unsqueeze(1), (y_hat_norm * mask).unsqueeze(1))\n\n if ssim_loss.item() > 1.0:\n print(f\" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 1.0\")\n ssim_loss == 1.0\n\n if ssim_loss.item() < 0.0:\n print(f\" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 0.0\")\n", "d_id": 77241, "documentation": { "docstring": "\n Args:\n y_hat (tensor): model prediction values.\n y (tensor): target values.\n length (tensor): length of each sample in a batch for masking.\n\n Shapes:\n y_hat: B x T X D\n y: B x T x D\n length: B\n\n Returns:\n loss: An average loss value in range [0, 1] masked by the length.\n ", "n_words": 50, "vocab_size": 39, "n_whitespaces": 157, "language": "en" } }, { "id": 202989, "commit_id": "7346c288e307e1821e3ceb757d686c9bd879389c", "repo": "django", "path": "django/core/management/__init__.py", "file_name": "__init__.py", "fun_name": "get_commands", "commit_message": "Refs #32355 -- Removed unnecessary list() calls before 
reversed() on dictviews.\n\nDict and dictviews are iterable in reversed insertion order using\r\nreversed() in Python 3.8+.", "code": "def get_commands():\n \n commands = {name: 'django.core' for name in find_commands(__path__[0])}\n\n if not settings.configured:\n return commands\n\n for app_config in reversed(apps.get_app_configs()):\n path = os.path.join(app_config.path, 'management')\n commands.update({name: app_config.name for name in find_commands(path)})\n\n return commands\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 67, "n_words": 31, "vocab_size": 22, "complexity": 5, "nloc": 8, "token_counts": 77, "n_ast_nodes": 126, "n_identifiers": 15, "random_cut": "def get_commands():\n \n commands = {name: 'django.core' for name in find_commands(__path__[0])}\n\n if not settings.configured:\n return commands\n\n for app_config in reversed(apps.get_app_configs()):\n path = os.path.join(app_config.path, 'management')\n commands.update({n", "d_id": 50200, "documentation": { "docstring": "\n Return a dictionary mapping command names to their callback applications.\n\n Look for a management.commands package in django.core, and in each\n installed application -- if a commands package exists, register all\n commands in that package.\n\n Core commands are always included. If a settings module has been\n specified, also include user-defined commands.\n\n The dictionary is in the format {command_name: app_name}. Key-value\n pairs from this dictionary can then be used in calls to\n load_command_class(app_name, command_name)\n\n If a specific version of a command must be loaded (e.g., with the\n startapp command), the instantiated module can be placed in the\n dictionary in place of the application name.\n\n The dictionary is cached on the first call and reused on subsequent\n calls.\n ", "n_words": 115, "vocab_size": 79, "n_whitespaces": 161, "language": "en" } }, { "id": 223611, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_parseaddr.py", "file_name": "_parseaddr.py", "fun_name": "getphraselist", "commit_message": "add python 3.10.4 for windows", "code": "def getphraselist(self):\n \n plist = []\n\n while self.pos < len(self.field):\n if self.field[self.pos] in self.FWS:\n self.pos += 1\n elif self.field[self.pos] == '\"':\n plist.append(self.getquote())\n elif self.field[self.pos] == '(':\n self.commentlist.append(self.getcomment())\n elif self.field[self.pos] in self.phraseends:\n break\n else:\n plist.append(self.getatom(self.phraseends))\n\n return plist\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 193, "n_words": 35, "vocab_size": 26, "complexity": 6, "nloc": 14, "token_counts": 119, "n_ast_nodes": 196, "n_identifiers": 13, "random_cut": "def getphraselist(self):\n \n plist = []\n\n while self.pos < len(self.field):\n if self.field[self.pos] in self.FWS:\n self.pos += 1\n elif self.field[self.pos] == '\"':\n plist.append(self.getquote())\n elif self.field[self.pos] == '(':\n s", "d_id": 57004, "documentation": { "docstring": "Parse a sequence of RFC 2822 phrases.\n\n A phrase is a sequence of words, which are in turn either RFC 2822\n atoms or quoted-strings. 
Phrases are canonicalized by squeezing all\n runs of continuous whitespace into one space.\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 66, "language": "en" } }, { "id": 109724, "commit_id": "8387676bc049d7b3e071846730c632e6ced137ed", "repo": "matplotlib", "path": "lib/matplotlib/axes/_secondary_axes.py", "file_name": "_secondary_axes.py", "fun_name": "set_location", "commit_message": "Clean up code in SecondaryAxis", "code": "def set_location(self, location):\n \n\n # This puts the rectangle into figure-relative coordinates.\n if isinstance(location, str):\n _api.check_in_list(self._locstrings, location=location)\n self._pos = 1. if location in ('top', 'right') else 0.\n elif isinstance(location, numbers.Real):\n self._pos = location\n else:\n raise ValueError(\n f\"location must be {self._locstrings[0]!r}, \"\n f\"{self._locstrings[1]!r}, or a float, not {location!r}\")\n\n self._loc = location\n\n if self._orientation == 'x':\n # An x-secondary axes is like an inset axes from x = 0 to x = 1 and\n # from y = pos to y = pos + eps, in the parent's transAxes coords.\n bounds = [0, self._pos, 1., 1e-10]\n else: # 'y'\n bounds = [self._pos, 0, 1e-10, 1]\n\n # this locator lets the axes move in the parent axes coordinates.\n # so it never needs to know where the parent is explicitly in\n # figure coordinates.\n # it gets called in ax.apply_aspect() (of all places)\n self.set_axes_locator(\n _TransformedBoundsLocator(bounds, self._parent.transAxes))\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 363, "n_words": 142, "vocab_size": 97, "complexity": 5, "nloc": 17, "token_counts": 130, "n_ast_nodes": 230, "n_identifiers": 19, "random_cut": "def set_location(self, location):\n \n\n # This puts the rectangle ", "d_id": 23720, "documentation": { "docstring": "\n Set the vertical or horizontal location of the axes in\n parent-normalized coordinates.\n\n Parameters\n ----------\n location : {'top', 'bottom', 'left', 'right'} or float\n The position to put the secondary axis. Strings can be 'top' or\n 'bottom' for orientation='x' and 'right' or 'left' for\n orientation='y'. 
A float indicates the relative position on the\n parent axes to put the new axes, 0.0 being the bottom (or left)\n and 1.0 being the top (or right).\n ", "n_words": 71, "vocab_size": 51, "n_whitespaces": 170, "language": "en" } }, { "id": 153347, "commit_id": "e7cb2e82f8b9c7a68f82abdd3b6011d661230b7e", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py", "file_name": "partition.py", "fun_name": "length", "commit_message": "REFACTOR-#4251: define public interfaces in `modin.core.execution.ray` module (#3868)\n\nSigned-off-by: Anatoly Myachev ", "code": "def length(self):\n \n if self._length_cache is None:\n if len(self.call_queue):\n self.drain_call_queue()\n else:\n self._length_cache, self._width_cache = _get_index_and_columns.remote(\n self.oid\n )\n if isinstance(self._length_cache, ObjectIDType):\n self._length_cache = ray.get(self._length_cache)\n return self._length_cache\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 149, "n_words": 24, "vocab_size": 19, "complexity": 4, "nloc": 11, "token_counts": 70, "n_ast_nodes": 115, "n_identifiers": 14, "random_cut": "def length(self):\n \n if self._length_cache is None:\n if len(self.call_queue):\n self.drain_call_queue()\n else:\n self._length_cache, self._width_cache = _get_index_and_columns.remote(\n self.oid\n ", "d_id": 35383, "documentation": { "docstring": "\n Get the length of the object wrapped by this partition.\n\n Returns\n -------\n int\n The length of the object.\n ", "n_words": 18, "vocab_size": 14, "n_whitespaces": 65, "language": "en" } }, { "id": 195939, "commit_id": "0f6dde45a1c75b02c208323574bdb09b8536e3e4", "repo": "sympy", "path": "sympy/polys/densearith.py", "file_name": "densearith.py", "fun_name": "dmp_l2_norm_squared", "commit_message": "Add `l2_norm_squared` methods.", "code": "def dmp_l2_norm_squared(f, u, K):\n \n if not u:\n return dup_l2_norm_squared(f, K)\n\n v = u - 1\n\n return sum([ dmp_l2_norm_squared(c, v, K) for c in f ])\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 25, "vocab_size": 23, "complexity": 3, "nloc": 5, "token_counts": 44, "n_ast_nodes": 67, "n_identifiers": 8, "random_cut": "def dmp_l2_norm_squared(f, u, K):\n \n if not u:\n return dup_l2_norm_squared(f, K)\n\n v = u - 1\n\n return s", "d_id": 47480, "documentation": { "docstring": "\n Returns squared l2 norm of a polynomial in ``K[X]``.\n\n Examples\n ========\n\n >>> from sympy.polys import ring, ZZ\n >>> R, x,y = ring(\"x,y\", ZZ)\n\n >>> R.dmp_l2_norm_squared(2*x*y - x - 3)\n 14\n\n ", "n_words": 30, "vocab_size": 27, "n_whitespaces": 55, "language": "en" } }, { "id": 266740, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py", "file_name": "__init__.py", "fun_name": "cloud_filter", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def cloud_filter(args, targets): # type: (IntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> t.List[str]\n \n if args.metadata.cloud_config is not None:\n return [] # cloud filter already performed prior to delegation\n\n exclude = [] # type: t.List[str]\n\n for provider in get_cloud_providers(args, targets):\n provider.filter(targets, exclude)\n\n return exclude\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 72, "n_words": 40, "vocab_size": 32, "complexity": 3, "nloc": 7, "token_counts": 45, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def cloud_filter(args, targets): # type: (IntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> t.List[str]\n \n if args.metadata.cloud_config is not None:\n return [] # cloud filter already performed prior to delegation\n\n exclude = [] # type: t.List[str]\n\n for provider in get_cloud_providers(", "d_id": 78551, "documentation": { "docstring": "Return a list of target names to exclude based on the given targets.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 215087, "commit_id": "f1c37893caf90738288e789c3233ab934630254f", "repo": "salt", "path": "tests/pytests/unit/modules/test_aixpkg.py", "file_name": "test_aixpkg.py", "fun_name": "test_upgrade_available_none", "commit_message": "Working tests for install", "code": "def test_upgrade_available_none():\n \n\n chk_upgrade_out = (\n \"Last metadata expiration check: 22:5:48 ago on Mon Dec 6 19:26:36 EST 2021.\"\n )\n\n dnf_call = MagicMock(return_value={\"retcode\": 100, \"stdout\": chk_upgrade_out})\n version_mock = MagicMock(return_value=\"6.6-2\")\n with patch(\"pathlib.Path.is_file\", return_value=True):\n with patch.dict(\n aixpkg.__salt__,\n {\"cmd.run_all\": dnf_call, \"config.get\": MagicMock(return_value=False)},\n ), patch.object(aixpkg, \"version\", version_mock):\n result = aixpkg.upgrade_available(\"info\")\n assert dnf_call.call_count == 1\n libpath_env = {\"LIBPATH\": \"/opt/freeware/lib:/usr/lib\"}\n dnf_call.assert_any_call(\n \"/opt/freeware/bin/dnf check-update info\",\n env=libpath_env,\n ignore_retcode=True,\n python_shell=False,\n )\n assert not result\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 252, "n_words": 64, "vocab_size": 56, "complexity": 1, "nloc": 21, "token_counts": 124, "n_ast_nodes": 217, "n_identifiers": 19, "random_cut": "def test_upgrade_available_none():\n \n\n chk_upgrade_out = (\n \"Last metadata ex", "d_id": 53805, "documentation": { "docstring": "\n test upgrade available where a valid upgrade is not available\n ", "n_words": 10, "vocab_size": 8, "n_whitespaces": 17, "language": "en" } }, { "id": 87293, "commit_id": "361b7f444a53cc34cad8ddc378d125b7027d96df", "repo": "sentry", "path": 
"tests/sentry/event_manager/test_event_manager.py", "file_name": "test_event_manager.py", "fun_name": "test_too_many_boosted_releases_do_not_boost_anymore", "commit_message": "feat(ds): Limit the amount of boosted releases to 10 (#40501)\n\nLimits amount of boosted releases to 10 releases\r\notherwise do not add any more releases to hash set of listed releases", "code": "def test_too_many_boosted_releases_do_not_boost_anymore(self):\n \n release_2 = Release.get_or_create(self.project, \"2.0\")\n release_3 = Release.get_or_create(self.project, \"3.0\")\n\n for release_id in (self.release.id, release_2.id):\n self.redis_client.set(f\"ds::p:{self.project.id}:r:{release_id}\", 1, 60 * 60 * 24)\n self.redis_client.hset(\n f\"ds::p:{self.project.id}:boosted_releases\",\n release_id,\n time(),\n )\n\n with self.options(\n {\n \"dynamic-sampling:boost-latest-release\": True,\n }\n ):\n self.make_release_transaction(\n release_version=release_3.version,\n environment_name=self.environment1.name,\n project_id=self.project.id,\n checksum=\"b\" * 32,\n timestamp=self.timestamp,\n )\n assert self.redis_client.hgetall(f\"ds::p:{self.project.id}:boosted_releases\") == {\n str(self.release.id): str(time()),\n str(release_2.id): str(time()),\n }\n assert self.redis_client.get(f\"ds::p:{self.project.id}:r:{release_3.id}\") is None\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 373, "n_words": 56, "vocab_size": 46, "complexity": 2, "nloc": 27, "token_counts": 185, "n_ast_nodes": 342, "n_identifiers": 27, "random_cut": "def test_too_many_boosted_releases_do_not_boost_anymore(self):\n \n release_2 = Release.get_or_create(", "d_id": 18273, "documentation": { "docstring": "\n This test tests the case when we have already too many boosted releases, in this case we want to skip the\n boosting of anymore releases\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 47, "language": "en" } }, { "id": 176175, "commit_id": "5dfd57af2a141a013ae3753e160180b82bec9469", "repo": "networkx", "path": "networkx/algorithms/link_analysis/hits_alg.py", "file_name": "hits_alg.py", "fun_name": "hits", "commit_message": "Use scipy.sparse array datastructure (#5139)\n\n* Step 1: use sparse arrays in nx.to_scipy_sparse_matrix.\r\n\r\nSeems like a reasonable place to start.\r\nnx.to_scipy_sparse_matrix is one of the primary interfaces to\r\nscipy.sparse from within NetworkX.\r\n\r\n* 1: Use np.outer instead of mult col/row vectors\r\n\r\nFix two instances in modularitymatrix where a new 2D array was being\r\ncreated via an outer product of two \\\"vectors\\\".\r\n\r\nIn the matrix case, this was a row vector \\* a column vector. In the\r\narray case this can be disambiguated by being explicit with np.outer.\r\n\r\n* Update _transition_matrix in laplacianmatrix module\r\n\r\n - A few instances of matrix multiplication operator\r\n - Add np.newaxis + transpose to get shape right for broadcasting\r\n - Explicitly convert e.g. 
sp.sparse.spdiags to a csr_array.\r\n\r\n* Update directed_combinitorial_laplacian w/ sparse array.\r\n\r\n - Wrap spdiags in csr_array and update matmul operators.\r\n\r\n* Rm matrix-specific code from lgc and hmn modules\r\n\r\n - Replace .A call with appropriate array semantics\r\n - wrap sparse.diags in csr_array.\r\n\r\n* Change hits to use sparse array semantics.\r\n\r\n - Replace * with @\r\n - Remove superfluous calls to flatten.\r\n\r\n* Update sparse matrix usage in layout module.\r\n - Simplify lil.getrowview call\r\n - Wrap spdiags in csr_array.\r\n\r\n* lil_matrix -> lil_array in graphmatrix.py.\r\n\r\n* WIP: Start working on algebraic connectivity module.\r\n\r\n* Incorporate auth mat varname feedback.\r\n\r\n* Revert 1D slice and comment for 1D sparse future.\r\n\r\n* Add TODOs: rm csr_array wrapper around spdiags etc.\r\n\r\n* WIP: cleanup algebraicconn: tracemin_fiedler.\r\n\r\n* Typo.\r\n\r\n* Finish reviewing algebraicconnectivity.\r\n\r\n* Convert bethe_hessian matrix to use sparse arrays.\r\n\r\n* WIP: update laplacian.\r\n\r\nUpdate undirected laplacian functions.\r\n\r\n* WIP: laplacian - add comment about _transition_matrix return types.\r\n\r\n* Finish laplacianmatrix review.\r\n\r\n* Update attrmatrix.\r\n\r\n* Switch to official laplacian function.\r\n\r\n* Update pagerank to use sparse array.\r\n\r\n* Switch bipartite matrix to sparse arrays.\r\n\r\n* Check from_scipy_sparse_matrix works with arrays.\r\n\r\nModifies test suite.\r\n\r\n* Apply changes from review.\r\n\r\n* Fix failing docstring tests.\r\n\r\n* Fix missing axis for in-place multiplication.\r\n\r\n* Use scipy==1.8rc2\r\n\r\n* Use matrix multiplication\r\n\r\n* Fix PyPy CI\r\n\r\n* [MRG] Create plot_subgraphs.py example (#5165)\r\n\r\n* Create plot_subgraphs.py\r\n\r\nhttps://github.com/networkx/networkx/issues/4220\r\n\r\n* Update plot_subgraphs.py\r\n\r\nblack\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint plus font_size\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded more plots\r\n\r\n* Update plot_subgraphs.py\r\n\r\nremoved plots from the unit test and added comments\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint\r\n\r\n* Update plot_subgraphs.py\r\n\r\ntypos fixed\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded nodes to the plot of the edges removed that was commented out for whatever reason\r\n\r\n* Update plot_subgraphs.py\r\n\r\nrevert the latest commit - the line was commented out for a reason - it's broken\r\n\r\n* Update plot_subgraphs.py\r\n\r\nfixed node color issue\r\n\r\n* Update plot_subgraphs.py\r\n\r\nformat fix\r\n\r\n* Update plot_subgraphs.py\r\n\r\nforgot to draw the nodes... 
now fixed\r\n\r\n* Fix sphinx warnings about heading length.\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult \r\n\r\n* Add traveling salesman problem to example gallery (#4874)\r\n\r\nAdds an example of the using Christofides to solve the TSP problem to the example galery.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037)\r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges()\r\n\r\n* Resolved Requested Changes\r\n\r\n* Revert changes to degree docstrings.\r\n\r\n* Update comments in example.\r\n\r\n* Apply wording to edges method in all graph classes.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Compatibility updates from testing with numpy/scipy/pytest rc's (#5226)\r\n\r\n* Rm deprecated scipy subpkg access.\r\n\r\n* Use recwarn fixture in place of deprecated pytest pattern.\r\n\r\n* Rm unnecessary try/except from tests.\r\n\r\n* Replace internal `close` fn with `math.isclose`. (#5224)\r\n\r\n* Replace internal close fn with math.isclose.\r\n\r\n* Fix lines in docstring examples.\r\n\r\n* Fix Python 3.10 deprecation warning w/ int div. (#5231)\r\n\r\n* Touchups and suggestions for subgraph gallery example (#5225)\r\n\r\n* Simplify construction of G with edges rm'd\r\n\r\n* Rm unused graph attribute.\r\n\r\n* Shorten categorization by node type.\r\n\r\n* Simplify node coloring.\r\n\r\n* Simplify isomorphism check.\r\n\r\n* Rm unit test.\r\n\r\n* Rm redundant plotting of each subgraph.\r\n\r\n* Use new package name (#5234)\r\n\r\n* Allowing None edges in weight function of bidirectional Dijkstra (#5232)\r\n\r\n* added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None.\r\n\r\n* changed syntax for better readability and code duplicate avoidance\r\n\r\nCo-authored-by: Hohmann, Nikolas \r\n\r\n* Add an FAQ about assigning issues. (#5182)\r\n\r\n* Add FAQ about assigning issues.\r\n\r\n* Add note about linking issues from new PRs.\r\n\r\n* Update dev deps (#5243)\r\n\r\n* Update minor doc issues with tex notation (#5244)\r\n\r\n* Add FutureWarnings to fns that return sparse matrices\r\n\r\n - biadjacency_matrix.\r\n - bethe_hessian_matrix.\r\n - incidence_matrix.\r\n - laplacian functions.\r\n - modularity_matrix functions.\r\n - adjacency_matrix.\r\n\r\n* Add to_scipy_sparse_array and use it everywhere.\r\n\r\nAdd a new conversion function to preserve array semantics internally\r\nwhile not altering behavior for users.\r\n\r\nAlso adds FutureWarning to to_scipy_sparse_matrix.\r\n\r\n* Add from_scipy_sparse_array. 
Supercedes from_scipy_sparse_matrix.\r\n\r\n* Handle deprecations in separate PR.\r\n\r\n* Fix docstring examples.\r\n\r\nCo-authored-by: Mridul Seth \r\n\r\nCo-authored-by: Jarrod Millman \r\nCo-authored-by: Andrew Knyazev \r\nCo-authored-by: Dan Schult \r\nCo-authored-by: eskountis <56514439+eskountis@users.noreply.github.com>\r\nCo-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com>\r\nCo-authored-by: NikHoh \r\nCo-authored-by: Hohmann, Nikolas \r\nCo-authored-by: Sultan Orazbayev \r\nCo-authored-by: Mridul Seth ", "code": "def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):\n \n import numpy as np\n import scipy as sp\n import scipy.sparse.linalg # call as sp.sparse.linalg\n\n if len(G) == 0:\n return {}, {}\n A = nx.adjacency_matrix(G, nodelist=list(G), dtype=float)\n\n if nstart is None:\n u, s, vt = sp.sparse.linalg.svds(A, k=1, maxiter=max_iter, tol=tol)\n else:\n nstart = np.array(list(nstart.values()))\n u, s, vt = sp.sparse.linalg.svds(A, k=1, v0=nstart, maxiter=max_iter, tol=tol)\n\n a = vt.flatten().real\n h = A @ a\n if normalized:\n h = h / h.sum()\n a = a / a.sum()\n hubs = dict(zip(G, map(float, h)))\n authorities = dict(zip(G, map(float, a)))\n return hubs, authorities\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 175, "n_words": 90, "vocab_size": 56, "complexity": 4, "nloc": 20, "token_counts": 226, "n_ast_nodes": 339, "n_identifiers": 39, "random_cut": "def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):\n \n import numpy as np\n import scipy as sp\n imp", "d_id": 41745, "documentation": { "docstring": "Returns HITS hubs and authorities values for nodes.\n\n The HITS algorithm computes two numbers for a node.\n Authorities estimates the node value based on the incoming links.\n Hubs estimates the node value based on outgoing links.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n max_iter : integer, optional\n Maximum number of iterations in power method.\n\n tol : float, optional\n Error tolerance used to check convergence in power method iteration.\n\n nstart : dictionary, optional\n Starting value of each node for power method iteration.\n\n normalized : bool (default=True)\n Normalize results by the sum of all of the values.\n\n Returns\n -------\n (hubs,authorities) : two-tuple of dictionaries\n Two dictionaries keyed by node containing the hub and authority\n values.\n\n Raises\n ------\n PowerIterationFailedConvergence\n If the algorithm fails to converge to the specified tolerance\n within the specified number of iterations of the power iteration\n method.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> h, a = nx.hits(G)\n\n Notes\n -----\n The eigenvector calculation is done by the power iteration method\n and has no guarantee of convergence. The iteration will stop\n after max_iter iterations or an error tolerance of\n number_of_nodes(G)*tol has been reached.\n\n The HITS algorithm was designed for directed graphs but this\n algorithm does not check if the input graph is directed and will\n execute on undirected graphs.\n\n References\n ----------\n .. [1] A. Langville and C. Meyer,\n \"A survey of eigenvector methods of web information retrieval.\"\n http://citeseer.ist.psu.edu/713792.html\n .. 
[2] Jon Kleinberg,\n Authoritative sources in a hyperlinked environment\n Journal of the ACM 46 (5): 604-32, 1999.\n doi:10.1145/324133.324140.\n http://www.cs.cornell.edu/home/kleinber/auth.pdf.\n ", "n_words": 248, "vocab_size": 152, "n_whitespaces": 446, "language": "en" } }, { "id": 45823, "commit_id": "26e8d6d7664bbaae717438bdb41766550ff57e4f", "repo": "airflow", "path": "airflow/providers/ftp/hooks/ftp.py", "file_name": "ftp.py", "fun_name": "test_connection", "commit_message": "Updates FTPHook provider to have test_connection (#21997)\n\n* Updates FTP provider to have test_connection\r\n\r\nCo-authored-by: eladkal <45845474+eladkal@users.noreply.github.com>", "code": "def test_connection(self) -> Tuple[bool, str]:\n \n try:\n conn = self.get_conn()\n conn.pwd\n return True, \"Connection successfully tested\"\n except Exception as e:\n return False, str(e)\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 87, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 8, "token_counts": 41, "n_ast_nodes": 71, "n_identifiers": 10, "random_cut": "def test_connection(self) -> Tuple[bool, str]:\n \n try:\n conn = se", "d_id": 8731, "documentation": { "docstring": "Test the FTP connection by calling path with directory", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 285727, "commit_id": "1661ddd44044c637526e9a1e812e7c1863be35fc", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/crypto_controller.py", "file_name": "crypto_controller.py", "fun_name": "call_price", "commit_message": "Integrate live feeds from Pyth (#2178)\n\n* added dependency\r\n\r\n* added pyth models\r\n\r\n* dependencies\r\n\r\n* docs\r\n\r\n* some improvements to this pyth command (#2433)\r\n\r\n* some improvements to this pyth command\r\n\r\n* minor improv\r\n\r\n* dependencies\r\n\r\n* tests\r\n\r\nCo-authored-by: DidierRLopes ; COlin", "code": "def call_price(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"price\",\n description=,\n )\n parser.add_argument(\n \"-s\",\n \"--symbol\",\n required=\"-h\" not in other_args,\n type=str,\n dest=\"symbol\",\n help=\"Symbol of coin to load data for, ~100 symbols are available\",\n )\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-s\")\n\n ns_parser = self.parse_known_args_and_warn(parser, other_args)\n\n if ns_parser:\n\n if ns_parser.symbol in pyth_model.ASSETS.keys():\n console.print(\n \"[param]If it takes too long, you can use 'Ctrl + C' to cancel.\\n[/param]\"\n )\n pyth_view.display_price(ns_parser.symbol)\n else:\n console.print(\"[red]The symbol selected does not exist.[/red]\\n\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 352, "n_words": 74, "vocab_size": 64, "complexity": 5, "nloc": 26, "token_counts": 131, "n_ast_nodes": 221, "n_identifiers": 28, "random_cut": "def call_price(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"price\",\n description=,\n )\n parser.add_argument(\n \"-s\",\n \"--symbol\",\n required=\"-h\" not in other_args,\n type=str,\n dest=\"symbol\",\n help=\"Symbol of coin to load data for, ~100 symbols are availa", "d_id": 85397, "documentation": { "docstring": "Process 
price commandDisplay price and interval of confidence in real-time. [Source: Pyth]", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 104238, "commit_id": "6ed6ac9448311930557810383d2cfd4fe6aae269", "repo": "datasets", "path": "src/datasets/utils/py_utils.py", "file_name": "py_utils.py", "fun_name": "_single_map_nested", "commit_message": "Better TQDM output (#3654)\n\n* Show progress bar when generating examples\r\n\r\n* Consistent utils.is_progress_bar_enabled calls\r\n\r\n* Fix tqdm in notebook\r\n\r\n* Add missing params to DatasetDict.map\r\n\r\n* Specify total in tqdm progress bar in map\r\n\r\n* Fix total computation\r\n\r\n* Small fix\r\n\r\n* Add desc to map_nested\r\n\r\n* Add more precise descriptions to download\r\n\r\n* Address comments\r\n\r\n* Fix docstring\r\n\r\n* Final changes\r\n\r\n* Minor change", "code": "def _single_map_nested(args):\n \n function, data_struct, types, rank, disable_tqdm, desc = args\n\n # Singleton first to spare some computation\n if not isinstance(data_struct, dict) and not isinstance(data_struct, types):\n return function(data_struct)\n\n # Reduce logging to keep things readable in multiprocessing with tqdm\n if rank is not None and logging.get_verbosity() < logging.WARNING:\n logging.set_verbosity_warning()\n # Print at least one thing to fix tqdm in notebooks in multiprocessing\n # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308\n if rank is not None and not disable_tqdm and any(\"notebook\" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__):\n print(\" \", end=\"\", flush=True)\n\n # Loop over single examples or batches and write to buffer/file if examples are to be updated\n pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct\n pbar_desc = (desc + \" \" if desc is not None else \"\") + \"#\" + str(rank) if rank is not None else desc\n pbar = utils.tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit=\"obj\", desc=pbar_desc)\n\n if isinstance(data_struct, dict):\n return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar}\n else:\n mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar]\n if isinstance(data_struct, list):\n return mapped\n elif isinstance(data_struct, tuple):\n return tuple(mapped)\n else:\n return np.array(mapped)\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 316, "n_words": 182, "vocab_size": 107, "complexity": 17, "nloc": 21, "token_counts": 259, "n_ast_nodes": 398, "n_identifiers": 38, "random_cut": "def _single_map_nested(args):\n \n function, data_struct, types, rank, disable_tqdm, desc = args\n\n # Singleton first to spare some computation\n if not isinstance(data_struct, dict) and not isinstance(data_struct, types):\n return function(data_struct)\n\n # Reduce logging to keep things readable in multiprocessing with tqdm\n if rank is not None and logging.get_verbosity() < logging.WARNING:\n logging.set_verbosity_warning()\n # Print at least one thing to fix tqdm in notebooks in multiprocessing\n # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308\n if rank is not None and not disable_tqdm and any(\"notebook\" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__):\n print(\" \", end=\"\", flush=True)\n\n # Loop over single examples or batches and write to buffer/file if examples are to be updated\n pbar_iterable = data_struct.items() if 
isinstance(data_struct, dict) else data_struct\n pbar_desc = (desc + \" \" if desc is not None else \"\") + \"#\" + str(rank) if rank is not None else desc\n pbar = utils.tqdm(pbar_iterable, dis", "d_id": 21793, "documentation": { "docstring": "Apply a function recursively to each element of a nested data struct.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 207334, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_scripts/tests.py", "file_name": "tests.py", "fun_name": "test_unified", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_unified(self):\n \n self.write_settings(\"settings_to_diff.py\", sdict={\"FOO\": '\"bar\"'})\n args = [\"diffsettings\", \"--settings=settings_to_diff\", \"--output=unified\"]\n out, err = self.run_manage(args)\n self.assertNoOutput(err)\n self.assertOutput(out, \"+ FOO = 'bar'\")\n self.assertOutput(out, \"- SECRET_KEY = ''\")\n self.assertOutput(out, \"+ SECRET_KEY = 'django_tests_secret_key'\")\n self.assertNotInOutput(out, \" APPEND_SLASH = True\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 99, "n_words": 35, "vocab_size": 26, "complexity": 1, "nloc": 9, "token_counts": 77, "n_ast_nodes": 140, "n_identifiers": 11, "random_cut": "def test_unified(self):\n \n ", "d_id": 51930, "documentation": { "docstring": "--output=unified emits settings diff in unified mode.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 139848, "commit_id": "eb2692cb32bb1747e312d5b20e976d7a879c9588", "repo": "ray", "path": "python/ray/runtime_context.py", "file_name": "runtime_context.py", "fun_name": "runtime_env", "commit_message": "[runtime env] runtime env inheritance refactor (#24538)\n\n* [runtime env] runtime env inheritance refactor (#22244)\r\n\r\nRuntime Environments is already GA in Ray 1.6.0. The latest doc is [here](https://docs.ray.io/en/master/ray-core/handling-dependencies.html#runtime-environments). And now, we already supported a [inheritance](https://docs.ray.io/en/master/ray-core/handling-dependencies.html#inheritance) behavior as follows (copied from the doc):\r\n- The runtime_env[\"env_vars\"] field will be merged with the runtime_env[\"env_vars\"] field of the parent. This allows for environment variables set in the parent’s runtime environment to be automatically propagated to the child, even if new environment variables are set in the child’s runtime environment.\r\n- Every other field in the runtime_env will be overridden by the child, not merged. For example, if runtime_env[\"py_modules\"] is specified, it will replace the runtime_env[\"py_modules\"] field of the parent.\r\n\r\nWe think this runtime env merging logic is so complex and confusing to users because users can't know the final runtime env before the jobs are run.\r\n\r\nCurrent PR tries to do a refactor and change the behavior of Runtime Environments inheritance. Here is the new behavior:\r\n- **If there is no runtime env option when we create actor, inherit the parent runtime env.**\r\n- **Otherwise, use the optional runtime env directly and don't do the merging.**\r\n\r\nAdd a new API named `ray.runtime_env.get_current_runtime_env()` to get the parent runtime env and modify this dict by yourself. 
Like:\r\n```Actor.options(runtime_env=ray.runtime_env.get_current_runtime_env().update({\"X\": \"Y\"}))```\r\nThis new API also can be used in ray client.", "code": "def runtime_env(self):\n \n\n return RuntimeEnv.deserialize(self._get_runtime_env_string())\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def runtime_env(self):\n \n", "d_id": 31793, "documentation": { "docstring": "Get the runtime env of the current job/worker.\n\n If this API is called in driver or ray client, returns the job level runtime\n env.\n If this API is called in workers/actors, returns the worker level runtime env.\n\n Returns:\n A new ray.runtime_env.RuntimeEnv instance.\n\n To merge from the current runtime env in some specific cases, you can get the\n current runtime env by this API and modify it by yourself.\n\n Example:\n >>> # Inherit current runtime env, except `env_vars`\n >>> Actor.options( # doctest: +SKIP\n ... runtime_env=ray.get_runtime_context().runtime_env.update(\n ... {\"env_vars\": {\"A\": \"a\", \"B\": \"b\"}})\n ... ) # doctest: +SKIP\n ", "n_words": 95, "vocab_size": 60, "n_whitespaces": 205, "language": "en" } }, { "id": 187407, "commit_id": "d1a8d1597d4fe9f129a72fe94c1508304b7eae0f", "repo": "streamlink", "path": "src/streamlink/stream/dash.py", "file_name": "dash.py", "fun_name": "sleeper", "commit_message": "stream.dash: update DASHStreamWorker.iter_segments\n\n- Refactor DASHStreamWorker.iter_segments()\n- Replace dash_manifest.sleeper() with SegmentedStreamWorker.wait(),\n and make the worker thread shut down immediately on close().\n- Prevent unnecessary wait times for static manifest types by calling\n close() after all segments were put into the writer's queue.", "code": "def sleeper(self, duration):\n \n s = time()\n yield\n time_to_sleep = duration - (time() - s)\n if time_to_sleep > 0:\n self.wait(time_to_sleep)\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 36, "n_ast_nodes": 63, "n_identifiers": 7, "random_cut": "def sleeper(self, duration):\n \n s = time()\n yield\n time_to_sleep = duration - (time() - s)\n if time_to_sleep > 0:\n s", "d_id": 45770, "documentation": { "docstring": "\n Do something and then wait for a given duration minus the time it took doing something\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 109399, "commit_id": "a17f4f3bd63e3ca3754f96d7db4ce5197720589b", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_colors.py", "file_name": "test_colors.py", "fun_name": "test_BoundaryNorm", "commit_message": "MNT: convert tests and internal usage way from using mpl.cm.get_cmap", "code": "def test_BoundaryNorm():\n \n\n boundaries = [0, 1.1, 2.2]\n vals = [-1, 0, 1, 2, 2.2, 4]\n\n # Without interpolation\n expected = [-1, 0, 0, 1, 2, 2]\n ncolors = len(boundaries) - 1\n bn = mcolors.BoundaryNorm(boundaries, ncolors)\n assert_array_equal(bn(vals), expected)\n\n # ncolors != len(boundaries) - 1 triggers interpolation\n expected = [-1, 0, 0, 2, 3, 3]\n ncolors = len(boundaries)\n bn = mcolors.BoundaryNorm(boundaries, ncolors)\n assert_array_equal(bn(vals), expected)\n\n # with a single region and 
interpolation\n expected = [-1, 1, 1, 1, 3, 3]\n bn = mcolors.BoundaryNorm([0, 2.2], ncolors)\n assert_array_equal(bn(vals), expected)\n\n # more boundaries for a third color\n boundaries = [0, 1, 2, 3]\n vals = [-1, 0.1, 1.1, 2.2, 4]\n ncolors = 5\n expected = [-1, 0, 2, 4, 5]\n bn = mcolors.BoundaryNorm(boundaries, ncolors)\n assert_array_equal(bn(vals), expected)\n\n # a scalar as input should not trigger an error and should return a scalar\n boundaries = [0, 1, 2]\n vals = [-1, 0.1, 1.1, 2.2]\n bn = mcolors.BoundaryNorm(boundaries, 2)\n expected = [-1, 0, 1, 2]\n for v, ex in zip(vals, expected):\n ret = bn(v)\n assert isinstance(ret, int)\n assert_array_equal(ret, ex)\n assert_array_equal(bn([v]), ex)\n\n # same with interp\n bn = mcolors.BoundaryNorm(boundaries, 3)\n expected = [-1, 0, 2, 3]\n for v, ex in zip(vals, expected):\n ret = bn(v)\n assert isinstance(ret, int)\n assert_array_equal(ret, ex)\n assert_array_equal(bn([v]), ex)\n\n # Clipping\n bn = mcolors.BoundaryNorm(boundaries, 3, clip=True)\n expected = [0, 0, 2, 2]\n for v, ex in zip(vals, expected):\n ret = bn(v)\n assert isinstance(ret, int)\n assert_array_equal(ret, ex)\n assert_array_equal(bn([v]), ex)\n\n # Masked arrays\n boundaries = [0, 1.1, 2.2]\n vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9])\n\n # Without interpolation\n ncolors = len(boundaries) - 1\n bn = mcolors.BoundaryNorm(boundaries, ncolors)\n expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0])\n assert_array_equal(bn(vals), expected)\n\n # With interpolation\n bn = mcolors.BoundaryNorm(boundaries, len(boundaries))\n expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0])\n assert_array_equal(bn(vals), expected)\n\n # Non-trivial masked arrays\n vals = np.ma.masked_invalid([np.Inf, np.NaN])\n assert np.all(bn(vals).mask)\n vals = np.ma.masked_invalid([np.Inf])\n assert np.all(bn(vals).mask)\n\n # Incompatible extend and clip\n with pytest.raises(ValueError, match=\"not compatible\"):\n mcolors.BoundaryNorm(np.arange(4), 5, extend='both', clip=True)\n\n # Too small ncolors argument\n with pytest.raises(ValueError, match=\"ncolors must equal or exceed\"):\n mcolors.BoundaryNorm(np.arange(4), 2)\n\n with pytest.raises(ValueError, match=\"ncolors must equal or exceed\"):\n mcolors.BoundaryNorm(np.arange(4), 3, extend='min')\n\n with pytest.raises(ValueError, match=\"ncolors must equal or exceed\"):\n mcolors.BoundaryNorm(np.arange(4), 4, extend='both')\n\n # Testing extend keyword, with interpolation (large cmap)\n bounds = [1, 2, 3]\n cmap = mpl.colormaps['viridis']\n mynorm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both')\n refnorm = mcolors.BoundaryNorm([0] + bounds + [4], cmap.N)\n x = np.random.randn(100) * 10 + 2\n ref = refnorm(x)\n ref[ref == 0] = -1\n ref[ref == cmap.N - 1] = cmap.N\n assert_array_equal(mynorm(x), ref)\n\n # Without interpolation\n cmref = mcolors.ListedColormap(['blue', 'red'])\n cmref.set_over('black')\n cmref.set_under('white')\n cmshould = mcolors.ListedColormap(['white', 'blue', 'red', 'black'])\n\n assert mcolors.same_color(cmref.get_over(), 'black')\n assert mcolors.same_color(cmref.get_under(), 'white')\n\n refnorm = mcolors.BoundaryNorm(bounds, cmref.N)\n mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='both')\n assert mynorm.vmin == refnorm.vmin\n assert mynorm.vmax == refnorm.vmax\n\n assert mynorm(bounds[0] - 0.1) == -1 # under\n assert mynorm(bounds[0] + 0.1) == 1 # first bin -> second color\n assert mynorm(bounds[-1] - 0.1) == cmshould.N - 2 # next-to-last color\n 
assert mynorm(bounds[-1] + 0.1) == cmshould.N # over\n\n x = [-1, 1.2, 2.3, 9.6]\n assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2, 3]))\n x = np.random.randn(100) * 10 + 2\n assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))\n\n # Just min\n cmref = mcolors.ListedColormap(['blue', 'red'])\n cmref.set_under('white')\n cmshould = mcolors.ListedColormap(['white', 'blue', 'red'])\n\n assert mcolors.same_color(cmref.get_under(), 'white')\n\n assert cmref.N == 2\n assert cmshould.N == 3\n refnorm = mcolors.BoundaryNorm(bounds, cmref.N)\n mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='min')\n assert mynorm.vmin == refnorm.vmin\n assert mynorm.vmax == refnorm.vmax\n x = [-1, 1.2, 2.3]\n assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2]))\n x = np.random.randn(100) * 10 + 2\n assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))\n\n # Just max\n cmref = mcolors.ListedColormap(['blue', 'red'])\n cmref.set_over('black')\n cmshould = mcolors.ListedColormap(['blue', 'red', 'black'])\n\n assert mcolors.same_color(cmref.get_over(), 'black')\n\n assert cmref.N == 2\n assert cmshould.N == 3\n refnorm = mcolors.BoundaryNorm(bounds, cmref.N)\n mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='max')\n assert mynorm.vmin == refnorm.vmin\n assert mynorm.vmax == refnorm.vmax\n x = [1.2, 2.3, 4]\n assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2]))\n x = np.random.randn(100) * 10 + 2\n assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 1100, "n_words": 623, "vocab_size": 192, "complexity": 4, "nloc": 119, "token_counts": 1470, "n_ast_nodes": 2192, "n_identifiers": 52, "random_cut": "def test_BoundaryNorm():\n \n\n boundaries = [0, 1.1, 2.2]\n vals = [-1, 0, 1, 2, 2.2, 4]\n\n # Without interpolation\n expected = [-1, 0, 0, 1, 2, 2]\n ncolors = len(boundaries) - 1\n bn = mcolors.BoundaryNorm(boundaries, ncolors)\n assert_array_equal(bn(vals), expected)\n\n # ncolors != len(boundaries) - 1 triggers interpolation\n expected = [-1, 0, 0, 2, 3, 3]\n ncolors = len(boundaries)\n bn = mcolors.BoundaryNorm(boundaries, ncolors)\n assert_array_equal(bn(vals), expected)\n\n # with a single region and interpolation\n expected = [-1, 1, 1, 1, 3, 3]\n bn = mcolors.BoundaryNorm([0, 2.2], ncolors)\n assert_array_equal(bn(vals), expected)\n\n # more boundaries for a third color\n boundaries = [0, 1, 2, 3]\n vals = [-1, 0.1, 1.1, 2.2, 4]\n ncolors = 5\n expected = [-1, 0, 2, 4, 5]\n bn = mcolors.BoundaryNorm(boundaries, ncolors)\n assert_array_equal(bn(vals), expected)\n\n # a scalar as input should not trigger an error and should return a scalar\n boundaries = [0, 1, 2]\n vals = [-1, 0.1, 1.1, 2.2]\n bn = mcolors.BoundaryNorm(boundaries, 2)\n expected = [-1, 0, 1, 2]\n for v, ex in zip(vals, expected):\n ret = bn(v)\n assert isinstance(ret, int)\n assert_array_equal(ret, ex)\n assert_array_equal(bn([v]), ex)\n\n # same with interp\n bn = mcolors.BoundaryNorm(boundaries, 3)\n expected = [-1, 0, 2, 3]\n for v, ex in zip(vals, expected):\n ret = bn(v)\n assert isinstance(ret, int)\n assert_array_equal(ret, ex)\n assert_array_equal(bn([v]), ex)\n\n # Clipping\n bn = mcolors.BoundaryNorm(boundaries, 3, clip=True)\n expected = [0, 0, 2, 2]\n for v, ex in zip(vals, expected):\n ret = bn(v)\n assert isinstance(ret, int)\n assert_array_equal(ret, ex)\n assert_array_equal(bn([v]), ex)\n\n # Masked arrays\n boundaries = 
[0, 1.1, 2.2]\n vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9])\n\n # Without interpolation\n ncolors = len(boundaries) - 1\n bn = mcolors.BoundaryNorm(boundaries, ncolors)\n expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0])\n assert_array_equal(bn(vals), expected)\n\n # With interpolation\n bn = mcolors.BoundaryNorm(boundaries, len(boundaries))\n expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0])\n assert_array_equal(bn(vals), expected)\n\n # Non-trivial masked arrays\n vals = np.ma.masked_invalid([np.Inf, np.NaN])\n assert np.all(bn(vals).mask)\n vals = np.ma.masked_invalid([np.Inf])\n assert np.all(bn(vals).mask)\n\n # Incompatible extend and clip\n with pytest.raises(ValueError, match=\"not compatible\"):\n mcolors.BoundaryNorm(np.arange(4), 5, extend='both', clip=True)\n\n # Too small ncolors argument\n with pytest.raises(ValueError, match=\"ncolors must equal or exceed\"):\n mcolors.BoundaryNorm(np.arange(4), 2)\n\n with pytest.raises(ValueError, match=\"ncolors must equal or exceed\"):\n mcolors.BoundaryNorm(np.arange(4), 3, extend='min')\n\n with pytest.raises(ValueError, match=\"ncolors must equal or exceed\"):\n mcolors.BoundaryNorm(np.arange(4), 4, extend='both')\n\n # Testing extend keyword, with interpolation (large cmap)\n bounds = [1, 2, 3]\n cmap = mpl.colormaps['viridis']\n mynorm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both')\n refnorm = mcolors.BoundaryNorm([0] + bounds + [4], cmap.N)\n x = np.random.randn(100) * 10 + 2\n ref = refnorm(x)\n ref[ref == 0] = -1\n ref[ref == cmap.N - 1] = cmap.N\n assert_array_equal(mynorm(x), ref)\n\n # Without interpolation\n cmref = mcolors.ListedColormap(['blue', 'red'])\n cmref.set_over('black')\n cmref.set_under('white')\n cmshould = mcolors.ListedColormap(['white', 'blue', 'red', 'black'])\n\n assert mcolors.same_color(cmref.get_over(), 'black')\n assert mcolors.same_color(cmref.get_under(), 'white')\n\n refnorm = mcolors.BoundaryNorm(bounds, cmref.N)\n mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='both')\n assert mynorm.vmin == refnorm.vmin\n assert mynorm.vmax == refnorm.vmax\n\n assert mynorm(bounds[0] - 0.1) == -1 # under\n assert mynorm(bounds[0] + 0.1) == 1 # first bin -> second color\n assert mynorm(bounds[-1] - 0.1) == cmshould.N - 2 # next-to-last color\n assert mynorm(bounds[-1] + 0.1) == cmshould.N # over\n\n x = [-1, 1.2, 2.3, 9.6]\n assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2, 3]))\n x = np.random.randn(100) * 10 + 2\n assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))\n\n # Just min\n cmref = mcolors.ListedColormap(['blue', 'red'])\n cmref.set_under('white')\n cmshould = mcolors.ListedColormap(['white', 'blue', 'red'])\n\n assert mcolors.same_color(cmref.get_under(), 'white')\n\n assert cmref.N == 2\n assert cmshould.N == 3\n refnorm = mcolors.BoundaryNorm(bounds, cmref.N)\n mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='min')\n assert mynorm.vmin == refnorm.vmin\n assert mynorm.vmax == refnorm.vmax\n x = [-1, 1.2, 2.3]\n assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2]))\n x = np.random.randn(100) * 10 + 2\n assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))\n\n # Just max\n cmref = mcolors.Lis", "d_id": 23566, "documentation": { "docstring": "\n GitHub issue #1258: interpolation was failing with numpy\n 1.7 pre-release.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 20, "language": "en" } }, { "id": 154556, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": 
"modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_join_by_index", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def _join_by_index(self, other_modin_frames, how, sort, ignore_index):\n \n if how == \"outer\":\n raise NotImplementedError(\"outer join is not supported in HDK engine\")\n\n lhs = self._maybe_materialize_rowid()\n reset_index_names = False\n for rhs in other_modin_frames:\n rhs = rhs._maybe_materialize_rowid()\n if len(lhs._index_cols) != len(rhs._index_cols):\n raise NotImplementedError(\n \"join by indexes with different sizes is not supported\"\n )\n\n reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols\n\n condition = lhs._build_equi_join_condition(\n rhs, lhs._index_cols, rhs._index_cols\n )\n\n exprs = lhs._index_exprs()\n new_columns = lhs.columns.to_list()\n for col in lhs.columns:\n exprs[col] = lhs.ref(col)\n for col in rhs.columns:\n # Handle duplicating column names here. When user specifies\n # suffixes to make a join, actual renaming is done in front-end.\n new_col_name = col\n rename_idx = 0\n while new_col_name in exprs:\n new_col_name = f\"{col}{rename_idx}\"\n rename_idx += 1\n exprs[new_col_name] = rhs.ref(col)\n new_columns.append(new_col_name)\n\n op = JoinNode(\n lhs,\n rhs,\n how=how,\n exprs=exprs,\n condition=condition,\n )\n\n new_columns = Index.__new__(\n Index, data=new_columns, dtype=self.columns.dtype\n )\n lhs = lhs.__constructor__(\n dtypes=lhs._dtypes_for_exprs(exprs),\n columns=new_columns,\n index_cols=lhs._index_cols,\n op=op,\n force_execution_mode=self._force_execution_mode,\n )\n\n if sort:\n lhs = lhs.sort_rows(\n lhs._index_cols,\n ascending=True,\n ignore_index=False,\n na_position=\"last\",\n )\n\n if reset_index_names:\n lhs = lhs._reset_index_names()\n\n if ignore_index:\n new_columns = Index.__new__(RangeIndex, data=range(len(lhs.columns)))\n lhs = lhs._set_columns(new_columns)\n\n return lhs\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 912, "n_words": 171, "vocab_size": 113, "complexity": 11, "nloc": 57, "token_counts": 315, "n_ast_nodes": 498, "n_identifiers": 44, "random_cut": "def _join_by_index(self, other_modin_frames, how, sort, ignore_index):\n \n if how == \"outer\":\n raise NotImplementedError(\"outer join is not supported in HDK engine\")\n\n lhs = self._maybe_materialize_rowid()\n reset_index_names = False\n for rhs in other_modin_frames:\n rhs = rhs._maybe_materialize_rowid()\n if len(lhs._index_cols) != len(rhs._index_cols):\n raise NotImplementedError(\n \"join by indexes with different sizes is not supported\"\n )\n\n reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols\n\n condition = lhs._build_equi_join_condition(\n rhs, lhs._index_cols, rhs._index_cols\n )\n\n exprs = lhs._index_exprs()\n new_columns = lhs.columns.to_list()\n for col in lhs.columns:\n e", "d_id": 36066, "documentation": { "docstring": "\n Perform equi-join operation for multiple frames by index columns.\n\n Parameters\n ----------\n other_modin_frames : list of HdkOnNativeDataframe\n Frames to join with.\n how : str\n A type of join.\n sort : bool\n Sort the result by join keys.\n ignore_index : bool\n If True then reset column index for the resulting frame.\n\n Returns\n -------\n HdkOnNativeDataframe\n The 
new frame.\n ", "n_words": 55, "vocab_size": 43, "n_whitespaces": 188, "language": "en" } }, { "id": 2710, "commit_id": "e272ed2fa4c58e0a89e273a3e85da7d13a85e04c", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/action/function_or_constructor_action.py", "file_name": "function_or_constructor_action.py", "fun_name": "_object2proto", "commit_message": "[syft.core.node.common.action] Change syft import absolute -> relative", "code": "def _object2proto(self) -> RunFunctionOrConstructorAction_PB:\n \n return RunFunctionOrConstructorAction_PB(\n path=self.path,\n args=[serialize(x, to_bytes=True) for x in self.args],\n kwargs={k: serialize(v, to_bytes=True) for k, v in self.kwargs.items()},\n id_at_location=serialize(self.id_at_location),\n address=serialize(self.address),\n msg_id=serialize(self.id),\n )\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 112, "n_words": 25, "vocab_size": 22, "complexity": 3, "nloc": 23, "token_counts": 91, "n_ast_nodes": 135, "n_identifiers": 16, "random_cut": "def _object2proto(self) -> RunFunctionOrConstructorAction_PB:\n \n return RunFunctionOrConstructorAction_PB(\n path=self.path,\n args=[serialize(x, to_bytes=True) for x in self.args],\n kwargs={k: serialize(v, to_bytes=True) for k, v in self.kwargs.items()},\n id_at_location=serialize(self.id_at_location),\n address=serialize(self.address),\n msg_id=serialize(s", "d_id": 342, "documentation": { "docstring": "Returns a protobuf serialization of self.\n\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n\n :return: returns a protobuf object\n :rtype: RunFunctionOrConstructorAction_PB\n\n .. note::\n This method is purely an internal method. 
Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "n_words": 68, "vocab_size": 56, "n_whitespaces": 150, "language": "en" } }, { "id": 176171, "commit_id": "dec723f072eb997a497a159dbe8674cd39999ee9", "repo": "networkx", "path": "networkx/generators/small.py", "file_name": "small.py", "fun_name": "truncated_cube_graph", "commit_message": "Docstrings for the small.py module (#5240)\n\n* added description for the first 5 small graphs\r\n\r\n* modified descriptions based on comment and added description for two more functions\r\n\r\n* added doctrings to all the functions\r\n\r\n* Minor touchups.\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def truncated_cube_graph(create_using=None):\n \n description = [\n \"adjacencylist\",\n \"Truncated Cube Graph\",\n 24,\n [\n [2, 3, 5],\n [12, 15],\n [4, 5],\n [7, 9],\n [6],\n [17, 19],\n [8, 9],\n [11, 13],\n [10],\n [18, 21],\n [12, 13],\n [15],\n [14],\n [22, 23],\n [16],\n [20, 24],\n [18, 19],\n [21],\n [20],\n [24],\n [22],\n [23],\n [24],\n [],\n ],\n ]\n G = make_small_undirected_graph(description, create_using)\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 370, "n_words": 56, "vocab_size": 46, "complexity": 1, "nloc": 34, "token_counts": 152, "n_ast_nodes": 197, "n_identifiers": 5, "random_cut": "def truncated_cube_graph(create_using=None):\n \n description = [\n \"adjacencylist\",\n \"Truncated Cube Graph\",\n 24,\n [\n [2, 3, 5],\n [12, 15],\n [4, 5],\n [7, 9],\n [6],\n [17, 19],\n [8, 9],\n [11, 13],\n [10],\n [18, 21],\n [12, 13],\n [15],\n [14],\n [22, 23],\n [16],\n [20, 24],\n [18, 19],\n [21],\n [20],\n [24],\n [22],\n [23],\n [24],\n [],\n ],\n ", "d_id": 41741, "documentation": { "docstring": "\n Returns the skeleton of the truncated cube.\n\n The truncated cube is an Archimedean solid with 14 regular\n faces (6 octagonal and 8 triangular), 36 edges and 24 nodes [1]_.\n The truncated cube is created by truncating (cutting off) the tips\n of the cube one third of the way into each edge [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Skeleton of the truncated cube\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Truncated_cube\n .. 
[2] https://www.coolmath.com/reference/polyhedra-truncated-cube\n\n ", "n_words": 91, "vocab_size": 68, "n_whitespaces": 153, "language": "en" } }, { "id": 70994, "commit_id": "de3fcba9e95818e9634ab7de6bfcb1f4221f2775", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/options.py", "file_name": "options.py", "fun_name": "get_admin_urls_for_registration", "commit_message": "Fix warnings from flake8-comprehensions.", "code": "def get_admin_urls_for_registration(self):\n \n urls = ()\n for instance in self.modeladmin_instances:\n urls += instance.get_admin_urls_for_registration()\n return urls\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 45, "n_identifiers": 5, "random_cut": "def get_admin_urls_for_registration(self):\n \n urls = ()\n for instance in self.modeladmin_instances:\n urls += instance.get_admin_urls_for_registration()\n return urls\n", "d_id": 15593, "documentation": { "docstring": "\n Utilised by Wagtail's 'register_admin_urls' hook to register urls for\n used by any associated ModelAdmin instances\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 63304, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "setName", "commit_message": "upd; format", "code": "def setName(self, name):\n \n self.name = name\n self.errmsg = \"Expected \" + self.name\n if __diag__.enable_debug_on_named_expressions:\n self.setDebug()\n return self\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 63, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 34, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "def setName(self, name):\n \n self.name = name\n self.errmsg = \"Expected \" + self.name\n if __diag__.enable_debug_on_named_expressions:\n self.setDebug()\n return self\n", "d_id": 13241, "documentation": { "docstring": "\n Define name for this expression, makes debugging and exception messages clearer.\n\n Example::\n\n Word(nums).parseString(\"ABC\") # -> Exception: Expected W:(0123...) 
(at char 0), (line:1, col:1)\n Word(nums).setName(\"integer\").parseString(\"ABC\") # -> Exception: Expected integer (at char 0), (line:1, col:1)\n ", "n_words": 34, "vocab_size": 25, "n_whitespaces": 80, "language": "en" } }, { "id": 153097, "commit_id": "1e65a4afd191cf61ba05b80545d23f9b88962f41", "repo": "modin", "path": "modin/core/dataframe/algebra/default2pandas/groupby.py", "file_name": "groupby.py", "fun_name": "get_func", "commit_message": "FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373)\n\nSigned-off-by: Dmitry Chigarev ", "code": "def get_func(cls, key, **kwargs):\n \n if \"agg_func\" in kwargs:\n return cls.inplace_applyier_builder(key, kwargs[\"agg_func\"])\n elif \"func_dict\" in kwargs:\n return cls.inplace_applyier_builder(key, kwargs[\"func_dict\"])\n else:\n return cls.inplace_applyier_builder(key)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 82, "n_words": 21, "vocab_size": 16, "complexity": 3, "nloc": 7, "token_counts": 54, "n_ast_nodes": 92, "n_identifiers": 5, "random_cut": "def get_func(cls, key, **kwargs):\n \n if \"agg_func\" in kwargs:\n return cls.inplace_applyier_builder(key, kwargs[\"agg_func\"])\n elif \"func_dict\" in kwargs:\n return cls.inplace_applyier_builder(key, kwargs[\"func_dict\"])\n else:\n return cls.inplace_applyier_builder(key)\n", "d_id": 35257, "documentation": { "docstring": "\n Extract aggregation function from groupby arguments.\n\n Parameters\n ----------\n key : callable or str\n Default aggregation function. If aggregation function is not specified\n via groupby arguments, then `key` function is used.\n **kwargs : dict\n GroupBy arguments that may contain aggregation function.\n\n Returns\n -------\n callable\n Aggregation function.\n\n Notes\n -----\n There are two ways of how groupby aggregation can be invoked:\n 1. Explicitly with query compiler method: `qc.groupby_sum()`.\n 2. 
By passing aggregation function as an argument: `qc.groupby_agg(\"sum\")`.\n Both are going to produce the same result, however in the first case actual aggregation\n function can be extracted from the method name, while for the second only from the method arguments.\n ", "n_words": 106, "vocab_size": 78, "n_whitespaces": 271, "language": "en" } }, { "id": 133351, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/torch/torch_trainer.py", "file_name": "torch_trainer.py", "fun_name": "update_scheduler", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def update_scheduler(self, metric):\n \n self.worker_group.apply_all_operators(\n lambda op: [sched.step(metric) for sched in op._schedulers]\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 44, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 32, "n_ast_nodes": 52, "n_identifiers": 9, "random_cut": "def update_scheduler(self, metric):\n \n self.worker_group.apply_all_operators(\n lambda op: [sched.step(m", "d_id": 29983, "documentation": { "docstring": "Calls ``scheduler.step(metric)`` on all registered schedulers.\n\n This is useful for lr_schedulers such as ``ReduceLROnPlateau``.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 28, "language": "en" } }, { "id": 258521, "commit_id": "a5b70b3132467b5e3616178d9ecca6cb7316c400", "repo": "scikit-learn", "path": "sklearn/metrics/pairwise.py", "file_name": "pairwise.py", "fun_name": "paired_cosine_distances", "commit_message": "DOC Ensures that sklearn.metrics.pairwise.paired_cosine_distances passes numpydoc validation (#22141)\n\nCo-authored-by: Thomas J. 
Fan ", "code": "def paired_cosine_distances(X, Y):\n \n X, Y = check_paired_arrays(X, Y)\n return 0.5 * row_norms(normalize(X) - normalize(Y), squared=True)\n\n\nPAIRED_DISTANCES = {\n \"cosine\": paired_cosine_distances,\n \"euclidean\": paired_euclidean_distances,\n \"l2\": paired_euclidean_distances,\n \"l1\": paired_manhattan_distances,\n \"manhattan\": paired_manhattan_distances,\n \"cityblock\": paired_manhattan_distances,\n}\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 56, "n_words": 31, "vocab_size": 27, "complexity": 1, "nloc": 3, "token_counts": 39, "n_ast_nodes": 108, "n_identifiers": 10, "random_cut": "def paired_cosine_distances(X, Y):\n \n X, Y = c", "d_id": 75273, "documentation": { "docstring": "\n Compute the paired cosine distances between X and Y.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n An array where each row is a sample and each column is a feature.\n\n Y : array-like of shape (n_samples, n_features)\n An array where each row is a sample and each column is a feature.\n\n Returns\n -------\n distances : ndarray of shape (n_samples,)\n Returns the distances between the row vectors of `X`\n and the row vectors of `Y`, where `distances[i]` is the\n distance between `X[i]` and `Y[i]`.\n\n Notes\n -----\n The cosine distance is equivalent to the half the squared\n euclidean distance if each sample is normalized to unit norm.\n ", "n_words": 114, "vocab_size": 57, "n_whitespaces": 192, "language": "en" } }, { "id": 30695, "commit_id": "897a8dd89f40817201bc4aebe532a096405bdeb1", "repo": "transformers", "path": "src/transformers/trainer.py", "file_name": "trainer.py", "fun_name": "torchdynamo_smart_context_manager", "commit_message": "Support compilation via Torchdynamo, AOT Autograd, NVFuser (#17308)\n\n* Support compilation via Torchdynamo, AOT Autograd, NVFuser\r\n\r\n* Address comments\r\n\r\n* Lint\r\n\r\n* Stas comments - missing quality test\r\n\r\n* Lintere\r\n\r\n* Quality test\r\n\r\n* Doc lint\r\n\r\n* Reset CUDA peak mem\r\n\r\n* Add CustomTrainer\r\n\r\n* require a single gpu\r\n\r\nCo-authored-by: Stas Bekman ", "code": "def torchdynamo_smart_context_manager(self):\n \n ctx_manager = contextlib.nullcontext()\n if is_torchdynamo_available():\n import torchdynamo\n from torchdynamo.optimizations.training import aot_autograd_speedup_strategy\n\n if self.args.torchdynamo == \"eager\":\n ctx_manager = torchdynamo.optimize(\"eager\")\n elif self.args.torchdynamo == \"nvfuser\":\n ctx_manager = torchdynamo.optimize(aot_autograd_speedup_strategy)\n return ctx_manager\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 131, "n_words": 29, "vocab_size": 20, "complexity": 4, "nloc": 10, "token_counts": 64, "n_ast_nodes": 112, "n_identifiers": 12, "random_cut": "def torchdynamo_smart_context_manager(self):\n \n ctx_manager = contextlib.nullcontext()\n if is_torchdynamo_available():\n import torchdynamo\n from torchdy", "d_id": 5648, "documentation": { "docstring": "\n A helper wrapper that creates an appropriate context manager for `torchdynamo`.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 186677, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/parser.py", 
"file_name": "parser.py", "fun_name": "check_aug_version", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def check_aug_version(self) -> bool:\n \n\n self.aug.set(\"/test/path/testing/arg\", \"aRgUMeNT\")\n try:\n matches = self.aug.match(\n \"/test//*[self::arg=~regexp('argument', 'i')]\")\n except RuntimeError:\n self.aug.remove(\"/test/path\")\n return False\n self.aug.remove(\"/test/path\")\n return matches\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 110, "n_words": 20, "vocab_size": 17, "complexity": 2, "nloc": 13, "token_counts": 53, "n_ast_nodes": 98, "n_identifiers": 9, "random_cut": "def check_aug_version(self) -> bool:\n \n\n self.aug.set(\"/test/path/testing/arg\", \"aRgUMeNT\")\n try:\n matches = self.aug.match(\n \"/test//*[self::arg=~regexp('argument', 'i')]\")\n except RuntimeError:\n self.aug.remove(\"/test/path\")\n return False\n self.aug.remove(\"/test/path\")\n return matches\n", "d_id": 45584, "documentation": { "docstring": " Checks that we have recent enough version of libaugeas.\n If augeas version is recent enough, it will support case insensitive\n regexp matching", "n_words": 22, "vocab_size": 20, "n_whitespaces": 36, "language": "en" } }, { "id": 157635, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/modules/midas/utils.py", "file_name": "utils.py", "fun_name": "resize_depth", "commit_message": "release more models", "code": "def resize_depth(depth, width, height):\n \n depth = torch.squeeze(depth[0, :, :, :]).to(\"cpu\")\n\n depth_resized = cv2.resize(\n depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC\n )\n\n return depth_resized\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 42, "n_words": 20, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 58, "n_ast_nodes": 91, "n_identifiers": 13, "random_cut": "def resize_depth(depth, width, height):\n \n depth = torch.squeeze(depth[0, :, :, :]).to(\"cpu\")\n\n depth_resized = cv2.resize(\n depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC\n )\n\n return depth_resized\n", "d_id": 37003, "documentation": { "docstring": "Resize depth map and bring to CPU (numpy).\n\n Args:\n depth (tensor): depth\n width (int): image width\n height (int): image height\n\n Returns:\n array: processed depth\n ", "n_words": 24, "vocab_size": 17, "n_whitespaces": 61, "language": "en" } }, { "id": 195853, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/core/numbers.py", "file_name": "numbers.py", "fun_name": "comp", "commit_message": "Improved documentation formatting", "code": "def comp(z1, z2, tol=None):\n r\n if type(z2) is str:\n if not pure_complex(z1, or_real=True):\n raise ValueError('when z2 is a str z1 must be a Number')\n return str(z1) == z2\n if not z1:\n z1, z2 = z2, z1\n if not z1:\n return True\n if not tol:\n a, b = z1, z2\n if tol == '':\n return str(a) == str(b)\n if tol is None:\n a, b = sympify(a), sympify(b)\n if not all(i.is_number for i in (a, b)):\n raise ValueError('expecting 2 numbers')\n fa = a.atoms(Float)\n fb = b.atoms(Float)\n if not fa and not fb:\n # no floats -- compare exactly\n return a == b\n # get a to be pure_complex\n for _ in range(2):\n ca = pure_complex(a, or_real=True)\n 
if not ca:\n if fa:\n a = a.n(prec_to_dps(min([i._prec for i in fa])))\n ca = pure_complex(a, or_real=True)\n break\n else:\n fa, fb = fb, fa\n a, b = b, a\n cb = pure_complex(b)\n if not cb and fb:\n b = b.n(prec_to_dps(min([i._prec for i in fb])))\n cb = pure_complex(b, or_real=True)\n if ca and cb and (ca[1] or cb[1]):\n return all(comp(i, j) for i, j in zip(ca, cb))\n tol = 10**prec_to_dps(min(a._prec, getattr(b, '_prec', a._prec)))\n return int(abs(a - b)*tol) <= 5\n diff = abs(z1 - z2)\n az1 = abs(z1)\n if z2 and az1 > 1:\n return diff/az1 <= tol\n else:\n return diff <= tol\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 729, "n_words": 213, "vocab_size": 107, "complexity": 26, "nloc": 105, "token_counts": 381, "n_ast_nodes": 605, "n_identifiers": 34, "random_cut": "def comp(z1, z2, tol=None):\n r\n if type(z2) is str:\n if not ", "d_id": 47440, "documentation": { "docstring": "Return a bool indicating whether the error between z1 and z2\n is $\\le$ ``tol``.\n\n Examples\n ========\n\n If ``tol`` is ``None`` then ``True`` will be returned if\n :math:`|z1 - z2|\\times 10^p \\le 5` where $p$ is minimum value of the\n decimal precision of each value.\n\n >>> from sympy import comp, pi\n >>> pi4 = pi.n(4); pi4\n 3.142\n >>> comp(_, 3.142)\n True\n >>> comp(pi4, 3.141)\n False\n >>> comp(pi4, 3.143)\n False\n\n A comparison of strings will be made\n if ``z1`` is a Number and ``z2`` is a string or ``tol`` is ''.\n\n >>> comp(pi4, 3.1415)\n True\n >>> comp(pi4, 3.1415, '')\n False\n\n When ``tol`` is provided and $z2$ is non-zero and\n :math:`|z1| > 1` the error is normalized by :math:`|z1|`:\n\n >>> abs(pi4 - 3.14)/pi4\n 0.000509791731426756\n >>> comp(pi4, 3.14, .001) # difference less than 0.1%\n True\n >>> comp(pi4, 3.14, .0005) # difference less than 0.1%\n False\n\n When :math:`|z1| \\le 1` the absolute error is used:\n\n >>> 1/pi4\n 0.3183\n >>> abs(1/pi4 - 0.3183)/(1/pi4)\n 3.07371499106316e-5\n >>> abs(1/pi4 - 0.3183)\n 9.78393554684764e-6\n >>> comp(1/pi4, 0.3183, 1e-5)\n True\n\n To see if the absolute error between ``z1`` and ``z2`` is less\n than or equal to ``tol``, call this as ``comp(z1 - z2, 0, tol)``\n or ``comp(z1 - z2, tol=tol)``:\n\n >>> abs(pi4 - 3.14)\n 0.00160156249999988\n >>> comp(pi4 - 3.14, 0, .002)\n True\n >>> comp(pi4 - 3.14, 0, .001)\n False\n ", "n_words": 217, "vocab_size": 120, "n_whitespaces": 363, "language": "en" } }, { "id": 245152, "commit_id": "36c1f477b273cb2fb0dea3c921ec267db877c039", "repo": "mmdetection", "path": "mmdet/datasets/openimages.py", "file_name": "openimages.py", "fun_name": "_parse_img_level_ann", "commit_message": "Refactor OpenImages.", "code": "def _parse_img_level_ann(self, image_level_ann_file):\n \n\n item_lists = defaultdict(list)\n with self.file_client.get_local_path(\n image_level_ann_file) as local_path:\n with open(local_path, 'r') as f:\n reader = csv.reader(f)\n i = -1\n for line in reader:\n i += 1\n if i == 0:\n continue\n else:\n img_id = line[0]\n label_id = line[1]\n assert label_id in self.label_id_mapping\n image_level_label = int(\n self.label_id_mapping[label_id])\n confidence = float(line[2])\n item_lists[img_id].append(\n dict(\n image_level_label=image_level_label,\n confidence=confidence))\n return item_lists\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 491, "n_words": 58, "vocab_size": 45, "complexity": 
3, "nloc": 23, "token_counts": 122, "n_ast_nodes": 201, "n_identifiers": 24, "random_cut": "def _parse_img_level_ann(self, image_level_ann_file):\n \n\n item_lists = defaultdict(list)\n with self.file_client.get_local_path(\n image_level_ann_file) as local_path:\n with open(local_path, 'r') as f:\n reader = csv.reader(f)\n i = -1\n for line ", "d_id": 70677, "documentation": { "docstring": "Parse image level annotations from csv style ann_file.\n\n Args:\n image_level_ann_file (str): CSV style image level annotation\n file path.\n\n Returns:\n defaultdict[list[dict]]: Annotations where item of the defaultdict\n indicates an image, each of which has (n) dicts.\n Keys of dicts are:\n\n - `image_level_label` (int): of shape 1.\n - `confidence` (float): of shape 1.\n ", "n_words": 51, "vocab_size": 41, "n_whitespaces": 161, "language": "en" } }, { "id": 219771, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "logical_and", "commit_message": "add python 3.10.4 for windows", "code": "def logical_and(self, a, b):\n \n a = _convert_other(a, raiseit=True)\n return a.logical_and(b, context=self)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 48, "n_identifiers": 7, "random_cut": "def logical_and(self, a, b):\n \n a = _convert", "d_id": 55789, "documentation": { "docstring": "Applies the logical operation 'and' between each operand's digits.\n\n The operands must be both logical numbers.\n\n >>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))\n Decimal('0')\n >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))\n Decimal('0')\n >>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))\n Decimal('0')\n >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))\n Decimal('1')\n >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))\n Decimal('1000')\n >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))\n Decimal('10')\n >>> ExtendedContext.logical_and(110, 1101)\n Decimal('100')\n >>> ExtendedContext.logical_and(Decimal(110), 1101)\n Decimal('100')\n >>> ExtendedContext.logical_and(110, Decimal(1101))\n Decimal('100')\n ", "n_words": 52, "vocab_size": 33, "n_whitespaces": 192, "language": "en" } }, { "id": 197371, "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", "repo": "sympy", "path": "sympy/utilities/enumerative.py", "file_name": "enumerative.py", "fun_name": "decrement_part_small", "commit_message": "Remove abbreviations in documentation", "code": "def decrement_part_small(self, part, ub):\n \n if self.lpart >= ub - 1:\n self.p1 += 1 # increment to keep track of usefulness of tests\n return False\n plen = len(part)\n for j in range(plen - 1, -1, -1):\n # Knuth's mod, (answer to problem 7.2.1.5.69)\n if j == 0 and (part[0].v - 1)*(ub - self.lpart) < part[0].u:\n self.k1 += 1\n return False\n\n if j == 0 and part[j].v > 1 or j > 0 and part[j].v > 0:\n # found val to decrement\n part[j].v -= 1\n # Reset trailing parts back to maximum\n for k in range(j + 1, plen):\n part[k].v = part[k].u\n\n # Have now decremented part, but are we doomed to\n # failure when it is expanded? 
Check one oddball case\n # that turns out to be surprisingly common - exactly\n # enough room to expand the leading component, but no\n # room for the second component, which has v=0.\n if (plen > 1 and part[1].v == 0 and\n (part[0].u - part[0].v) ==\n ((ub - self.lpart - 1) * part[0].v)):\n self.k2 += 1\n self.db_trace(\"Decrement fails test 3\")\n return False\n return True\n return False\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 587, "n_words": 182, "vocab_size": 114, "complexity": 13, "nloc": 21, "token_counts": 214, "n_ast_nodes": 333, "n_identifiers": 16, "random_cut": "def decrement_part_small(self, part, ub):\n \n if self.lpart >= ub - 1:\n self.p1 += 1 # increment to keep track of usefulness of tests\n return False\n plen = len(part)\n for j in range(plen - 1, -1, -1):\n # Knuth's mod, (answer to problem 7.2.1.5.69)\n if j == 0 and (part[0].v - 1)*(ub - self.lpart) < part[0].u:\n self.k1 += 1\n return False\n\n if j == 0 and part[j].v > 1 or ", "d_id": 48514, "documentation": { "docstring": "Decrements part (a subrange of pstack), if possible, returning\n True iff the part was successfully decremented.\n\n Parameters\n ==========\n\n part\n part to be decremented (topmost part on the stack)\n\n ub\n the maximum number of parts allowed in a partition\n returned by the calling traversal.\n\n Notes\n =====\n\n The goal of this modification of the ordinary decrement method\n is to fail (meaning that the subtree rooted at this part is to\n be skipped) when it can be proved that this part can only have\n child partitions which are larger than allowed by ``ub``. If a\n decision is made to fail, it must be accurate, otherwise the\n enumeration will miss some partitions. But, it is OK not to\n capture all the possible failures -- if a part is passed that\n should not be, the resulting too-large partitions are filtered\n by the enumeration one level up. However, as is usual in\n constrained enumerations, failing early is advantageous.\n\n The tests used by this method catch the most common cases,\n although this implementation is by no means the last word on\n this problem. The tests include:\n\n 1) ``lpart`` must be less than ``ub`` by at least 2. This is because\n once a part has been decremented, the partition\n will gain at least one child in the spread step.\n\n 2) If the leading component of the part is about to be\n decremented, check for how many parts will be added in\n order to use up the unallocated multiplicity in that\n leading component, and fail if this number is greater than\n allowed by ``ub``. (See code for the exact expression.) This\n test is given in the answer to Knuth's problem 7.2.1.5.69.\n\n 3) If there is *exactly* enough room to expand the leading\n component by the above test, check the next component (if\n it exists) once decrementing has finished. 
If this has\n ``v == 0``, this next component will push the expansion over the\n limit by 1, so fail.\n ", "n_words": 319, "vocab_size": 181, "n_whitespaces": 637, "language": "en" } }, { "id": 125638, "commit_id": "90cea203befa8f2e86e9c1c18bb3972296358e7b", "repo": "ray", "path": "python/ray/runtime_context.py", "file_name": "runtime_context.py", "fun_name": "get_node_id", "commit_message": "Ray 2.0 API deprecation (#26116)\n\nRay 2.0 API deprecation for:\r\n\r\n ray.remote(): placement_group\r\n ray.remote(): placement_group_bundle_index\r\n ray.remote(): placement_group_capture_child_tasks\r\n ray.get_dashboard_url()\r\n ray.get_resource_ids()\r\n ray.disconnect()\r\n ray.connect()\r\n ray.util.ActorGroup\r\n ray.util.ActorPool\r\n Add get_xx_id() to return hex (rather than object), and then deprecate the xx_id() (which returns Cython object): the xx here can be node, task etc.\r\n ray start: --plasma-store-socket-name\r\n ray start: --raylet-socket-name", "code": "def get_node_id(self) -> str:\n \n node_id = self.worker.current_node_id\n assert not node_id.is_nil()\n return node_id.hex()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 12, "token_counts": 28, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "def get_node_id(self) -> str:\n \n node_id = self.worker.current_node_id\n assert not node_id.is_nil()\n return node_i", "d_id": 27935, "documentation": { "docstring": "Get current node ID for this worker or driver.\n\n Node ID is the id of a node that your driver, task, or actor runs.\n The ID will be in hex format.\n\n Returns:\n A node id in hex format for this worker or driver.\n ", "n_words": 43, "vocab_size": 30, "n_whitespaces": 82, "language": "en" } }, { "id": 320849, "commit_id": "6c4e2810285af0698538aed9d46a99de085eb310", "repo": "qutebrowser", "path": "qutebrowser/completion/models/configmodel.py", "file_name": "configmodel.py", "fun_name": "list_option", "commit_message": "pylint: Fix new unnecessary-lambda-assignment", "code": "def list_option(*, info):\n \n return _option(\n info,\n \"List options\",\n lambda opt: (isinstance(info.config.get_obj(opt.name), list) and\n not opt.no_autoconfig)\n )\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 57, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 7, "token_counts": 41, "n_ast_nodes": 67, "n_identifiers": 10, "random_cut": "def list_option(*, info):\n \n return _option(\n info,\n \"List options\",\n lambda opt: (isinstance(info.config.get_obj(op", "d_id": 117392, "documentation": { "docstring": "A CompletionModel filled with settings whose values are lists.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 3546, "commit_id": "2e7ee756eb1d55080d707cef63454788a7abb6be", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-google-ads/unit_tests/test_source.py", "file_name": "test_source.py", "fun_name": "get_instance_from_config_with_end_date", "commit_message": "Source GoogleAds: add end_date to config (#8669)\n\n* GoogleAds add end_date to config\r\n\r\n* Update script following review comments\r\n\r\n* Add unit test\r\n\r\n* Solve conflicts\r\n\r\n* Solve conflicts in MR\r\n\r\n* Update test_google_ads.py\r\n\r\nInstanciate IncrementalGoogleAdsStream in tests + add missing line between 
functions\r\n\r\n* Update test_source.py\r\n\r\nremove extra hashtag\r\n\r\n* Update tests with missing params\r\n\r\n* Add missing time_zone param\r\n\r\n* merge user code\r\n\r\n* run format\r\n\r\n* revert unit test stream count\r\n\r\n* remove error log file\r\n\r\n* bump connector version\r\n\r\n* run seed file\r\n\r\nCo-authored-by: Marcos Marx ", "code": "def get_instance_from_config_with_end_date(config, query):\n start_date = \"2021-03-04\"\n end_date = \"2021-04-04\"\n conversion_window_days = 14\n google_api = GoogleAds(credentials=config[\"credentials\"], customer_id=config[\"customer_id\"])\n\n instance = CustomQuery(\n api=google_api,\n conversion_window_days=conversion_window_days,\n start_date=start_date,\n end_date=end_date,\n time_zone=\"local\",\n custom_query_config={\"query\": query, \"table_name\": \"whatever_table\"},\n )\n return instance\n\n\n@pytest.mark.parametrize(\n \"query, fields\",\n [\n (\n ,\n [\"campaign.id\", \"campaign.name\", \"campaign.status\", \"metrics.impressions\"],\n ),\n (\n ,\n [\"campaign.accessible_bidding_strategy\", \"segments.ad_destination_type\", \"campaign.start_date\", \"campaign.end_date\"],\n ),\n (, []),\n ],\n)", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"query, fields\",\n [\n (\n \"\"\"\n SELecT\n campaign.id,\n campaign.name,\n campaign.status,\n metrics.impressions FROM campaign\nwheRe campaign.status = 'PAUSED'\nAND metrics.impressions > 100\norder by campaign.status\n \"\"\",\n [\"campaign.id\", \"campaign.name\", \"campaign.status\", \"metrics.impressions\"],\n ),\n (\n \"\"\"\n SELECT\n campaign.accessible_bidding_strategy,\n segments.ad_destination_type,\n campaign.start_date,\n campaign.end_date\n FROM campaign\n \"\"\",\n [\"campaign.accessible_bidding_strategy\", \"segments.ad_destination_type\", \"campaign.start_date\", \"campaign.end_date\"],\n ),\n (\"\"\"selet aasdasd from aaa\"\"\", []),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 201, "n_words": 53, "vocab_size": 44, "complexity": 1, "nloc": 14, "token_counts": 73, "n_ast_nodes": 208, "n_identifiers": 18, "random_cut": "def get_instance_from_config_with_end_date(config, query):\n start_date = \"2021-03-04\"\n end_date = \"2021-04-04\"\n conversion_window_days = 14\n google_api = GoogleAds(credentials=config[\"credentials\"], customer_id=config[\"customer_id\"])\n\n instance = CustomQuery(\n api=google_api,\n conversion_window_days=conversion_window_days,\n start_date=start_date,\n end_date=end_date,\n time_zone=\"local\",\n custom_query_config={\"query\": query, \"table_name\": \"whatever_table\"},\n )\n return instance\n\n\n@pytest.mark.parametrize(\n \"query, ", "d_id": 480, "documentation": { "docstring": "\n SELecT\n campaign.id,\n campaign.name,\n campaign.status,\n metrics.impressions FROM campaign\nwheRe campaign.status = 'PAUSED'\nAND metrics.impressions > 100\norder by campaign.status\n \n SELECT\n campaign.accessible_bidding_strategy,\n segments.ad_destination_type,\n campaign.start_date,\n campaign.end_date\n FROM campaign\n selet aasdasd from aaa", "n_words": 29, "vocab_size": 25, "n_whitespaces": 98, "language": "en" } }, { "id": 176324, "commit_id": "34d9d630bb02426d297d3e20fedb7da8c3ced03a", "repo": "networkx", "path": "networkx/algorithms/assortativity/pairs.py", "file_name": "pairs.py", "fun_name": "node_degree_xy", "commit_message": "MAINT: Cleanup assortativity module, remove unused variables (#5301)\n\nRemove unused variables, sort imports,\r\nraise 
errors instead of accepting invalid arguments silently\r\n\r\nCo-authored-by: Dan Schult ", "code": "def node_degree_xy(G, x=\"out\", y=\"in\", weight=None, nodes=None):\n \n nodes = set(G) if nodes is None else set(nodes)\n if G.is_directed():\n direction = {\"out\": G.out_degree, \"in\": G.in_degree}\n xdeg = direction[x]\n ydeg = direction[y]\n else:\n xdeg = ydeg = G.degree\n\n for u, degu in xdeg(nodes, weight=weight):\n # use G.edges to treat multigraphs correctly\n neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes)\n for _, degv in ydeg(neighbors, weight=weight):\n yield degu, degv\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 144, "n_words": 69, "vocab_size": 49, "complexity": 7, "nloc": 12, "token_counts": 132, "n_ast_nodes": 209, "n_identifiers": 21, "random_cut": "def node_degree_xy(G, x=\"out\", y=\"in\", weight=None, nodes=None):\n \n nodes = set(G) if nodes is None else set(nodes)\n if G.is_directed():\n direction = {\"out\": G.out_degree, \"in\": G.in_degree}\n xdeg = direction[x]\n ydeg = direction[y]\n else:\n xdeg = ydeg = G.degree\n\n for u, degu in xdeg(nodes, weight=weight):\n # use G.edges to treat multigraphs correctly\n neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes)\n fo", "d_id": 41838, "documentation": { "docstring": "Generate node degree-degree pairs for edges in G.\n\n Parameters\n ----------\n G: NetworkX graph\n\n x: string ('in','out')\n The degree type for source node (directed graphs only).\n\n y: string ('in','out')\n The degree type for target node (directed graphs only).\n\n weight: string or None, optional (default=None)\n The edge attribute that holds the numerical value used\n as a weight. 
If None, then each edge has weight 1.\n The degree is the sum of the edge weights adjacent to the node.\n\n nodes: list or iterable (optional)\n Use only edges that are adjacency to specified nodes.\n The default is all nodes.\n\n Returns\n -------\n (x, y): 2-tuple\n Generates 2-tuple of (degree, degree) values.\n\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_edge(1, 2)\n >>> list(nx.node_degree_xy(G, x=\"out\", y=\"in\"))\n [(1, 1)]\n >>> list(nx.node_degree_xy(G, x=\"in\", y=\"out\"))\n [(0, 0)]\n\n Notes\n -----\n For undirected graphs each edge is produced twice, once for each edge\n representation (u, v) and (v, u), with the exception of self-loop edges\n which only appear once.\n ", "n_words": 157, "vocab_size": 111, "n_whitespaces": 281, "language": "en" } }, { "id": 133353, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/torch/torch_trainer.py", "file_name": "torch_trainer.py", "fun_name": "validate", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def validate(self, num_steps=None, profile=False, reduce_results=True, info=None):\n \n worker_stats = self.worker_group.validate(\n num_steps=num_steps, profile=profile, info=info\n )\n\n if reduce_results:\n return self._process_stats(worker_stats)\n else:\n return worker_stats\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 88, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 8, "token_counts": 56, "n_ast_nodes": 85, "n_identifiers": 9, "random_cut": "def validate(self, num_steps=None, profile=False, reduce_results=True, info=None):\n \n worker_stats = self.worker_group.validate(\n ", "d_id": 29985, "documentation": { "docstring": "Evaluates the model on the validation data set.\n\n Args:\n num_steps (int): Number of batches to compute update steps on\n per worker. This corresponds also to the number of times\n ``TrainingOperator.validate_batch`` is called per worker.\n profile (bool): Returns time stats for the evaluation procedure.\n reduce_results (bool): Whether to average all metrics across\n all workers into one dict. If a metric is a non-numerical\n value (or nested dictionaries), one value will be randomly\n selected among the workers. 
If False, returns a list of dicts.\n info (dict): Optional dictionary passed to the training\n operator for `validate` and `validate_batch`.\n\n Returns:\n A dictionary of metrics for validation.\n You can provide custom metrics by passing in a custom\n ``training_operator_cls``.\n ", "n_words": 113, "vocab_size": 84, "n_whitespaces": 309, "language": "en" } }, { "id": 60255, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/io.py", "file_name": "io.py", "fun_name": "set_raw_scale", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def set_raw_scale(self, in_, scale):\n \n self.__check_input(in_)\n self.raw_scale[in_] = scale\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 39, "n_identifiers": 6, "random_cut": "def set_raw_scale(self, in_, scale):\n \n self.__check_input(in_)\n self.raw_scale[in_] = scale\n", "d_id": 12047, "documentation": { "docstring": "\n Set the scale of raw features s.t. the input blob = input * scale.\n While Python represents images in [0, 1], certain Caffe models\n like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale\n of these models must be 255.\n\n Parameters\n ----------\n in_ : which input to assign this scale factor\n scale : scale coefficient\n ", "n_words": 57, "vocab_size": 44, "n_whitespaces": 121, "language": "en" } }, { "id": 134046, "commit_id": "d1aa5608979891e3dd859c07fa919fa01cfead5f", "repo": "ray", "path": "ci/run/bazel_sharding/tests/test_bazel_sharding.py", "file_name": "test_bazel_sharding.py", "fun_name": "test_add_rule_to_best_shard", "commit_message": "[CI] Make bazel sharding for parallel buildkite more intelligent (#29221)\n\nThis PR implements two changes to our `bazel-sharding.py` script, used for determining which bazel tests to run on each instance when buildkite parallelism is used:\r\n* An ability to filter tests before they are sharded, using the same logic as `bazel test`. This is done by specifying the `--tag_filters` argument, eg. `--tag_filters=air,-gpu`. If we filter tests with `bazel test` *after* they are sharded, we can end up with imbalanced shards as eg. all tests we want to filter out are assigned to one shard. This feature is enabled for Serve tests and it will be required for the changes I want to make to AIR CI.\r\n* A new algorithm to balance the shards, finally implementing what that comment was asking for all this time. Instead of trying to assign the same number of tests (which have variable timeouts) to each shard, the new algorithm tries to make sure each shard will finish in more or less the same time. This is achieved through a simple but good enough heuristic. The old algorithm can still be accessed through the `--sharding_strategy` argument.\r\n\r\nThose two changes do cause the complexity of the script to increase, necessitating proper testing. 
In order to facilitate that, this PR also adds a basic buildkite test harness for CI tools/scripts.\r\n\r\nAfter this PR is merged, the next step will be to move most of our manually parallelized jobs to use buildkite parallelism with the new logic here.\r\n\r\nSigned-off-by: Antoni Baum ", "code": "def test_add_rule_to_best_shard():\n \n\n # If we start with an empty list, then add to first shard\n shards: List[List[bazel_sharding.BazelRule]] = [list() for _ in range(4)]\n optimum = 600\n\n rule = bazel_sharding.BazelRule(\"mock\", \"medium\")\n bazel_sharding.add_rule_to_best_shard(rule, shards, optimum)\n assert shards[0][0] == rule\n assert all(not shard for shard in shards[1:])\n\n # Add to first shard below optimum\n old_rule = bazel_sharding.BazelRule(\"mock\", \"medium\")\n shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)]\n shards[3] = []\n optimum = old_rule.actual_timeout_s\n\n rule = bazel_sharding.BazelRule(\"mock\", \"small\")\n bazel_sharding.add_rule_to_best_shard(rule, shards, optimum)\n assert shards[3][0] == rule\n assert all(shard[-1] == old_rule for shard in shards[0:3])\n\n # If all shards are above or equal optimum, add to the one with the smallest\n # difference\n old_rule = bazel_sharding.BazelRule(\"mock\", \"large\")\n shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)]\n optimum = old_rule.actual_timeout_s\n old_rule_medium = bazel_sharding.BazelRule(\"mock\", \"medium\")\n shards[3][0] = old_rule_medium\n\n rule = bazel_sharding.BazelRule(\"mock\", \"small\")\n bazel_sharding.add_rule_to_best_shard(rule, shards, optimum)\n assert shards[3][0] == old_rule_medium\n assert shards[3][-1] == rule\n assert all(shard[-1] == old_rule for shard in shards[0:3])\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 238, "n_words": 151, "vocab_size": 61, "complexity": 7, "nloc": 25, "token_counts": 291, "n_ast_nodes": 460, "n_identifiers": 16, "random_cut": "def test_add_rule_to_best_shard():\n \n\n # If we start with an empty list, then add to first shard\n shards: List[List[bazel_sharding.BazelRule]] = [list() for _ in range(4)]\n optimum = 600\n\n rule = bazel_sharding.BazelRule(\"mock\", \"medium\")\n bazel_sharding.add_rule_to_best_shard(rule, shards, optimum)\n assert shards[0][0] == rule\n assert all(not shard for shard in shards[1:])\n\n # Add to first shard below optimum\n old_rule = bazel_sharding.BazelRule(\"mock\", \"medium\")\n shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)]\n shards[3] = []\n optimum = old_rule.actual_timeout_s\n\n rule = bazel_sharding.BazelRule(\"mock\", \"small\")\n bazel_sharding.add_rule_to_best_shard(rule, shards, optimum)\n assert shards[3][0] == rule\n assert all(shard[-1] == old_rule for shard in shards[0:3])\n\n # If all shards are above or equal optimum, add to the one with the smallest\n # difference\n old_rule = bazel_sharding.BazelRule(\"mock\", \"large\")\n shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)]\n optimum = old_rule.actual_timeout_s\n old_rule_medium = bazel_sharding.BazelRule(\"mock\", \"medium\")\n shards[3][0] = old_rule_medium\n\n rule = bazel_sharding.BazelRule(\"mock\", \"small\")\n bazel_sharding.add_rule_to_best_shard(rule, shards, optimum)\n assert shards[3][0] == old_rule_medium\n assert shards[3][-1] == rule\n assert all(shard[-1] == old_rule for shard in shards[0:3])\n\n", "d_id": 30178, 
"documentation": { "docstring": "Test that the best shard in optimal strategy is chosen correctly.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 312146, "commit_id": "6c38a6b5697bcf4587e00101771001bf596974f9", "repo": "core", "path": "homeassistant/components/isy994/binary_sensor.py", "file_name": "binary_sensor.py", "fun_name": "async_heartbeat", "commit_message": "Enable strict typing for isy994 (#65439)\n\nCo-authored-by: Martin Hjelmare ", "code": "def async_heartbeat(self) -> None:\n \n self._computed_state = False\n self._restart_timer()\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 37, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 11, "token_counts": 23, "n_ast_nodes": 42, "n_identifiers": 5, "random_cut": "def async_heartbeat(self) -> None:\n \n self._computed_state = False\n self._restart_timer()\n self.async_write_ha_stat", "d_id": 110798, "documentation": { "docstring": "Mark the device as online, and restart the 25 hour timer.\n\n This gets called when the heartbeat node beats, but also when the\n parent sensor sends any events, as we can trust that to mean the device\n is online. This mitigates the risk of false positives due to a single\n missed heartbeat event.\n ", "n_words": 53, "vocab_size": 42, "n_whitespaces": 88, "language": "en" } }, { "id": 166848, "commit_id": "7e23a37e1c5bda81234801a6584563e2880769eb", "repo": "pandas", "path": "pandas/tests/util/test_assert_series_equal.py", "file_name": "test_assert_series_equal.py", "fun_name": "test_assert_series_equal_interval_dtype_mismatch", "commit_message": "ENH: consistency of input args for boundaries - Interval (#46522)", "code": "def test_assert_series_equal_interval_dtype_mismatch():\n # https://github.com/pandas-dev/pandas/issues/32747\n left = Series([pd.Interval(0, 1, \"right\")], dtype=\"interval\")\n right = left.astype(object)\n\n msg = \n\n tm.assert_series_equal(left, right, check_dtype=False)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(left, right, check_dtype=True)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 49, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 11, "token_counts": 72, "n_ast_nodes": 123, "n_identifiers": 17, "random_cut": "def test_assert_series_equal_interval_dtype_mismatch():\n # https://github.com/pandas-dev/pandas/issues/32747\n left = Series([pd.Interval(0, 1, \"right\")], dtype=\"interval\")\n right = left.astype(object)\n\n msg = \n\n tm.assert_series_equal(left, right, check_dtype=False)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(left, right, check_dtype=True)\n\n", "d_id": 39861, "documentation": { "docstring": "Attributes of Series are different\n\nAttribute \"dtype\" are different\n\\\\[left\\\\]: interval\\\\[int64, right\\\\]\n\\\\[right\\\\]: object", "n_words": 14, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 108225, "commit_id": "7c6c5f6215b40a27cfefb7bf21246299fd9b3a1e", "repo": "matplotlib", "path": "lib/matplotlib/__init__.py", "file_name": "__init__.py", "fun_name": "rc_file_defaults", "commit_message": "Fix removed cross-references", "code": "def rc_file_defaults():\n \n # Deprecation warnings were already handled when creating rcParamsOrig, no\n # need to reemit them here.\n with 
_api.suppress_matplotlib_deprecation_warning():\n from .style.core import STYLE_BLACKLIST\n rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig\n if k not in STYLE_BLACKLIST})\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 85, "n_words": 35, "vocab_size": 32, "complexity": 3, "nloc": 5, "token_counts": 41, "n_ast_nodes": 72, "n_identifiers": 10, "random_cut": "def rc_file_defaults():\n \n # ", "d_id": 23106, "documentation": { "docstring": "\n Restore the `.rcParams` from the original rc file loaded by Matplotlib.\n\n Style-blacklisted `.rcParams` (defined in\n ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 32, "language": "en" } }, { "id": 20845, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/rich/syntax.py", "file_name": "syntax.py", "fun_name": "lexer", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def lexer(self) -> Optional[Lexer]:\n \n\n if isinstance(self._lexer, Lexer):\n return self._lexer\n try:\n return get_lexer_by_name(\n self._lexer,\n stripnl=False,\n ensurenl=True,\n tabsize=self.tab_size,\n )\n except ClassNotFound:\n return None\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 153, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 16, "token_counts": 54, "n_ast_nodes": 83, "n_identifiers": 12, "random_cut": "def lexer(self) -> Optional[Lexer]:\n \n\n if isinstance(self._lexer, Lexer):\n return self._lexer\n try:\n return get_lexer_by_name(\n self._lexer,\n stripnl=False,\n ensurenl=True,\n tabsize=self.tab_size,\n )\n except ClassNotFound:\n ", "d_id": 3587, "documentation": { "docstring": "The lexer for this syntax, or None if no lexer was found.\n\n Tries to find the lexer by name if a string was passed to the constructor.\n ", "n_words": 27, "vocab_size": 21, "n_whitespaces": 41, "language": "en" } }, { "id": 215808, "commit_id": "a35b29b2651bf33c5d5b45e64bc7765ffde4aff4", "repo": "salt", "path": "tests/pytests/functional/modules/file/test_replace.py", "file_name": "test_replace.py", "fun_name": "test_numeric_repl", "commit_message": "Add some funtional tests\n\nAdd functional tests for the following:\n- file.readlink\n- file.replace\n- file.symlink\n\nRemove unit tests for file.replace as they are duplicated in the added\nfunctional test", "code": "def test_numeric_repl(file, multiline_file):\n \n file.replace(multiline_file, r\"Etiam\", 123)\n assert \"123\" in multiline_file.read_text()\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 46, "n_identifiers": 5, 
"random_cut": "def test_numeric_repl(file, multiline_file):\n \n file.replace(multiline_fi", "d_id": 54182, "documentation": { "docstring": "\n This test covers cases where the replacement string is numeric. The CLI\n parser yaml-fies it into a numeric type. If not converted back to a string\n type in file.replace, a TypeError occurs when the replace is attempted. See\n https://github.com/saltstack/salt/issues/9097 for more information.\n ", "n_words": 42, "vocab_size": 37, "n_whitespaces": 58, "language": "en" } }, { "id": 179715, "commit_id": "7fa8e45b6782d545fa0ead112d92d13bdad7417c", "repo": "gradio", "path": "gradio/components.py", "file_name": "components.py", "fun_name": "set_interpret_parameters", "commit_message": "Blocks-Components\n- fixes\n- format", "code": "def set_interpret_parameters(self, segments=16):\n \n self.interpretation_segments = segments\n return self\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 4, "random_cut": "def set_interpret_parameters(self, segments=16):\n \n self.interpretation_segments = segments\n retu", "d_id": 43005, "documentation": { "docstring": "\n Calculates interpretation score of image subsections by splitting the image into subsections, then using a \"leave one out\" method to calculate the score of each subsection by whiting out the subsection and measuring the delta of the output value.\n Parameters:\n segments (int): Number of interpretation segments to split image into.\n ", "n_words": 50, "vocab_size": 35, "n_whitespaces": 79, "language": "en" } }, { "id": 248309, "commit_id": "1fe202a1a3343fad77da270ffe0923a46f1944dd", "repo": "synapse", "path": "synapse/storage/engines/sqlite.py", "file_name": "sqlite.py", "fun_name": "can_native_upsert", "commit_message": "Tidy up and type-hint the database engine modules (#12734)\n\nCo-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com>", "code": "def can_native_upsert(self) -> bool:\n \n return sqlite3.sqlite_version_info >= (3, 24, 0)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 20, "n_ast_nodes": 32, "n_identifiers": 5, "random_cut": "def can_native_upsert(self) -> bool:\n \n return sqlite3.sqlite_version_info >= (3, 2", "d_id": 72207, "documentation": { "docstring": "\n Do we support native UPSERTs? This requires SQLite3 3.24+, plus some\n more work we haven't done yet to tell what was inserted vs updated.\n ", "n_words": 24, "vocab_size": 23, "n_whitespaces": 46, "language": "en" } }, { "id": 138397, "commit_id": "30ab5458a7e4ba2351d5e1beef8c8797b5946493", "repo": "ray", "path": "dashboard/state_aggregator.py", "file_name": "state_aggregator.py", "fun_name": "get_actors", "commit_message": "[State Observability] Tasks and Objects API (#23912)\n\nThis PR implements ray list tasks and ray list objects APIs.\r\n\r\nNOTE: You can ignore the merge conflict for now. It is because the first PR was reverted. 
There's a fix PR open now.", "code": "async def get_actors(self) -> dict:\n \n reply = await self._client.get_all_actor_info(timeout=DEFAULT_RPC_TIMEOUT)\n result = {}\n for message in reply.actor_table_data:\n data = self._message_to_dict(message=message, fields_to_decode=[\"actor_id\"])\n data = filter_fields(data, ActorState)\n result[data[\"actor_id\"]] = data\n return result\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 97, "n_words": 29, "vocab_size": 22, "complexity": 2, "nloc": 14, "token_counts": 67, "n_ast_nodes": 111, "n_identifiers": 16, "random_cut": "async def get_actors(self) -> dict:\n \n reply = await self._client.get_all_actor_info(timeout=DEFAULT_RPC_TIMEOUT)\n result = {}\n for message in rep", "d_id": 31405, "documentation": { "docstring": "List all actor information from the cluster.\n\n Returns:\n {actor_id -> actor_data_in_dict}\n actor_data_in_dict's schema is in ActorState\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 52, "language": "en" } }, { "id": 113876, "commit_id": "551205a18ac2ac19626f4e4ffb2ed88fcad705b9", "repo": "mindsdb", "path": "mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", "file_name": "mysql_proxy.py", "fun_name": "insert_predictor_answer", "commit_message": "fix", "code": "def insert_predictor_answer(self, insert):\n \n model_interface = self.session.model_interface\n data_store = self.session.data_store\n\n select_data_query = insert.get('select_data_query')\n if isinstance(select_data_query, str) is False or len(select_data_query) == 0:\n self.packet(\n ErrPacket,\n err_code=ERR.ER_WRONG_ARGUMENTS,\n msg=\"'select_data_query' should not be empty\"\n ).send()\n return\n\n models = model_interface.get_models()\n if insert['name'] in [x['name'] for x in models]:\n self.packet(\n ErrPacket,\n err_code=ERR.ER_WRONG_ARGUMENTS,\n msg=f\"predictor with name '{insert['name']}'' already exists\"\n ).send()\n return\n\n kwargs = {}\n if isinstance(insert.get('training_options'), str) \\\n and len(insert['training_options']) > 0:\n try:\n kwargs = json.loads(insert['training_options'])\n except Exception:\n self.packet(\n ErrPacket,\n err_code=ERR.ER_WRONG_ARGUMENTS,\n msg='training_options should be in valid JSON string'\n ).send()\n return\n\n integration = self.session.integration\n if isinstance(integration, str) is False or len(integration) == 0:\n self.packet(\n ErrPacket,\n err_code=ERR.ER_WRONG_ARGUMENTS,\n msg='select_data_query can be used only in query from database'\n ).send()\n return\n insert['select_data_query'] = insert['select_data_query'].replace(r\"\\'\", \"'\")\n ds_name = data_store.get_vacant_name(insert['name'])\n ds = data_store.save_datasource(ds_name, integration, {'query': insert['select_data_query']})\n\n insert['predict'] = [x.strip() for x in insert['predict'].split(',')]\n\n ds_data = data_store.get_datasource(ds_name)\n if ds_data is None:\n raise Exception(f\"DataSource '{ds_name}' does not exists\")\n ds_columns = [x['name'] for x in ds_data['columns']]\n for col in insert['predict']:\n if col not in ds_columns:\n data_store.delete_datasource(ds_name)\n raise Exception(f\"Column '{col}' not exists\")\n\n try:\n insert['predict'] = self._check_predict_columns(insert['predict'], ds_columns)\n except Exception:\n data_store.delete_datasource(ds_name)\n raise\n\n model_interface.learn(\n insert['name'], ds, insert['predict'], ds_data['id'], kwargs=kwargs, delete_ds_on_fail=True\n )\n\n self.packet(OkPacket).send()\n", 
"url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 833, "n_words": 181, "vocab_size": 109, "complexity": 18, "nloc": 63, "token_counts": 445, "n_ast_nodes": 713, "n_identifiers": 42, "random_cut": "def insert_predictor_answer(self, insert):\n \n model_interface = self.session.model_interface\n data_store = self.session.data_store\n\n select_data_query = insert.get('select_data_query')\n if isinstance(select_data_query, str) is False or len(select_data_query) == 0:\n self.packet(\n ErrPacket,\n err_code=ERR.ER_WRONG_ARGUMENTS,\n msg=\"'select_data_query' should not be empty\"\n ).send()\n return\n\n models = model_interface.get_models()\n if insert['name'] in [x['name'] for x in models]:\n self.packet(\n ErrPacket,\n err_code=ERR.ER_WRONG_ARGUMENTS,\n msg=f\"predictor with name '{insert['name']}'' already exists\"\n ).send()\n return\n\n kwargs = {}\n if isinstance(insert.get('training_options'), str) \\\n and len(insert['training_options']) > 0:\n try:\n kwargs = json.loads(insert['training_options'])\n except Exception:\n self.packet(\n ErrPacket,\n err_code=ERR.ER_WRONG_ARGUMENTS,\n msg='training_options should be in valid JSON string'\n ).send()\n return\n\n integration = self.session.integration\n if isinstance(integration, str) is False or len(integration) == 0:\n self.packet(\n ErrPacket,\n err_code=E", "d_id": 25051, "documentation": { "docstring": " Start learn new predictor.\n Parameters:\n - insert - dict with keys as columns of mindsb.predictors table.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 47, "language": "en" } }, { "id": 285032, "commit_id": "2c3e10a128fa0ce4e937d8d50dc0cd6d7cd11485", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/portfolio_model.py", "file_name": "portfolio_model.py", "fun_name": "populate_historical_trade_data", "commit_message": "Overhaul Portfolio class (#2021)\n\n* adds pythonic portfolio class\r\n\r\n* start calculate trades refactoring\r\n\r\n* adds comments to portfolio model - delete afterwards\r\n\r\n* finish calculate trades refactoring\r\n\r\n* restore original portfolio_model.py\r\n\r\n* implement calculate_allocations\r\n\r\n* adapt and test controller load, show, bench, alloc and perf\r\n\r\n* add old code that was ok\r\n\r\n* adapt controller\r\n\r\n* adapt portfolio_view\r\n\r\n* run black on pythonic_portfolio.py\r\n\r\n* fix crypto bug\r\n\r\n* change column name in example datasets\r\n\r\n* substitute portfolio_model.py\r\n\r\n* update show command\r\n\r\n* push cumulative returns calculation to model\r\n\r\n* fix last change in cumulative returns\r\n\r\n* add comments on possibly unused code\r\n\r\n* run black on changes\r\n\r\n* bring metrics from helper to model\r\n\r\n* push rolling metrics from view to model\r\n\r\n* Details and linting\r\n\r\n* Fix tests\r\n\r\n* remove empty attribute and rename class\r\n\r\n* fix view and controller rf\r\n\r\n* change returns calculation method\r\n\r\n* remove CASH from code\r\n\r\n* remove cash from tickers_list\r\n\r\n* run black on changes\r\n\r\n* change function name\r\n\r\n* adapt to PortfolioModel\r\n\r\n* fix tests\r\n\r\n* fix tests on help\r\n\r\n* fix linting\r\n\r\n* call metrics from PortfolioModel\r\n\r\n* call drawdown from model\r\n\r\n* fix some mypy issues\r\n\r\n* fix remaining mypy issues\r\n\r\n* fix test\r\n\r\n* Fix linting\r\n\r\n* Remove unused function\r\n\r\n* Small fixes\r\n\r\n* Remove old code and adjust summary to simply work\r\n\r\n* Update the 
Excel since CASH is no longer a thing\r\n\r\n* Fix tests\r\n\r\n* Update the csvs\r\n\r\n* Updates to usage of full_shares and more details\r\n\r\n* Fix -t flag for perf\r\n\r\nCo-authored-by: Jeroen Bouma ", "code": "def populate_historical_trade_data(self):\n \n trade_data = self.__orderbook.pivot(\n index=\"Date\",\n columns=\"Ticker\",\n values=[\n \"Type\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Price\",\n \"Quantity\",\n \"Fees\",\n \"Premium\",\n \"Investment\",\n \"Side\",\n \"Currency\",\n ],\n )\n\n # Make historical prices columns a multi-index. This helps the merging.\n self.portfolio_historical_prices.columns = pd.MultiIndex.from_product(\n [[\"Close\"], self.portfolio_historical_prices.columns]\n )\n\n # Merge with historical close prices (and fillna)\n trade_data = pd.merge(\n trade_data,\n self.portfolio_historical_prices,\n how=\"right\",\n left_index=True,\n right_index=True,\n ).fillna(0)\n\n # Accumulate quantity held by trade date\n trade_data[\"Quantity\"] = trade_data[\"Quantity\"].cumsum()\n\n trade_data[\"Investment\"] = trade_data[\"Investment\"].cumsum()\n\n trade_data.loc[:, (\"Investment\", \"Total\")] = trade_data[\"Investment\"][\n self.tickers_list\n ].sum(axis=1)\n\n self.historical_trade_data = trade_data\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 469, "n_words": 78, "vocab_size": 65, "complexity": 1, "nloc": 34, "token_counts": 164, "n_ast_nodes": 282, "n_identifiers": 23, "random_cut": "def populate_historical_trade_data(self):\n \n trade_data = self.__orderbook.pivot(\n index=\"Date\",\n columns=\"Ticker\",\n values=[\n \"Type\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Price\",\n \"Quantity\",\n ", "d_id": 85115, "documentation": { "docstring": "Create a new dataframe to store historical prices by ticker", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 259898, "commit_id": "a47d569e670fd4102af37c3165c9b1ddf6fd3005", "repo": "scikit-learn", "path": "sklearn/datasets/tests/test_openml.py", "file_name": "test_openml.py", "fun_name": "test_fetch_openml_equivalence_array_dataframe", "commit_message": "ENH improve ARFF parser using pandas (#21938)\n\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Adrin Jalali ", "code": "def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser):\n \n pytest.importorskip(\"pandas\")\n\n data_id = 61\n _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)\n bunch_as_frame_true = fetch_openml(\n data_id=data_id,\n as_frame=True,\n cache=False,\n parser=parser,\n )\n\n bunch_as_frame_false = fetch_openml(\n data_id=data_id,\n as_frame=False,\n cache=False,\n parser=parser,\n )\n\n assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data)\n assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target)\n\n\n# Known failure of PyPy for OpenML. 
See the following issue:\n# https://github.com/scikit-learn/scikit-learn/issues/18906\n@fails_if_pypy\n@pytest.mark.parametrize(\"parser\", [\"liac-arff\", \"pandas\"])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@fails_if_pypy\n@pytest.mark.parametrize(\"parser\", [\"liac-arff\", \"pandas\"])", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 129, "n_words": 47, "vocab_size": 39, "complexity": 1, "nloc": 18, "token_counts": 89, "n_ast_nodes": 167, "n_identifiers": 20, "random_cut": "def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser):\n \n pytest.importorskip(\"pandas\")\n\n data_id = 61\n _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)\n bunch_as_frame_true = fetch_openml(\n data_id=data_id,\n as_frame=True,\n cache=False,\n parser=parser,\n )\n\n bunch_as_frame_false = fetch_openml(\n data_id=data_id,\n as_frame=False,\n cache=False,\n parser=parser,\n )\n\n assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data)\n assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target)\n\n\n# Known failure of PyPy for OpenML. See the following issue:\n# https://github.com/scikit-learn/scikit-learn/issues/18906\n@fails_if_pypy\n@pytest.mark.parametrize(\"parser\", [\"liac-arff\", \"pandas\"])", "d_id": 75979, "documentation": { "docstring": "Check the equivalence of the dataset when using `as_frame=False` and\n `as_frame=True`.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 252396, "commit_id": "002f919dda5f01d067c2e786426c68751551d15c", "repo": "mitmproxy", "path": "mitmproxy/contrib/kaitaistruct/google_protobuf.py", "file_name": "google_protobuf.py", "fun_name": "wire_type", "commit_message": "update kaitai definitions", "code": "def wire_type(self):\n \n if hasattr(self, '_m_wire_type'):\n return self._m_wire_type\n\n self._m_wire_type = KaitaiStream.resolve_enum(GoogleProtobuf.Pair.WireTypes, (self.key.value & 7))\n return getattr(self, '_m_wire_type', None)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 76, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 51, "n_ast_nodes": 83, "n_identifiers": 12, "random_cut": "def wire_type(self):\n \n if hasattr(self, '_m_wire_type'):\n return self._m_wire_type\n\n self._m_wire_type = Kaita", "d_id": 73944, "documentation": { "docstring": "\"Wire type\" is a part of the \"key\" that carries enough\n information to parse value from the wire, i.e. read correct\n amount of bytes, but there's not enough informaton to\n interprete in unambiguously. 
For example, one can't clearly\n distinguish 64-bit fixed-sized integers from 64-bit floats,\n signed zigzag-encoded varints from regular unsigned varints,\n arbitrary bytes from UTF-8 encoded strings, etc.\n ", "n_words": 59, "vocab_size": 51, "n_whitespaces": 136, "language": "en" } }, { "id": 251333, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/connection.py", "file_name": "connection.py", "fun_name": "address", "commit_message": "make it black!", "code": "def address(self): # pragma: no cover\n \n warnings.warn(\n \"Client.address is deprecated, use Client.peername instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self.peername\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 80, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 23, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def address(self): # pragma: no cover\n \n warnings.warn(\n \"Client.address is deprecated, use Client.peername instead.\",\n D", "d_id": 73687, "documentation": { "docstring": "*Deprecated:* An outdated alias for Client.peername.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 196907, "commit_id": "a4fdabab38def4bf6b4007f8cd67d6944740b303", "repo": "sympy", "path": "sympy/matrices/common.py", "file_name": "common.py", "fun_name": "jordan_block", "commit_message": "Update the Matrix.jordan_block() rows and cols kwargs deprecation", "code": "def jordan_block(kls, size=None, eigenvalue=None, *, band='upper', **kwargs):\n \n if 'rows' in kwargs or 'cols' in kwargs:\n msg = \n if 'rows' in kwargs and 'cols' in kwargs:\n msg += f", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "f\"\"\"\\\n To get asquare Jordan block matrix use a morebanded matrix", "n_ast_errors": 3, "ast_levels": 12, "n_whitespaces": 80, "n_words": 28, "vocab_size": 19, "complexity": 16, "nloc": 45, "token_counts": 239, "n_ast_nodes": 109, "n_identifiers": 21, "random_cut": "def jordan_block(kls, size=None, eigenvalue=None, *, band='upper', **kwargs):\n \n if 'r", "d_id": 48242, "documentation": { "docstring": "Returns a Jordan block\n\n Parameters\n ==========\n\n size : Integer, optional\n Specifies the shape of the Jordan block matrix.\n\n eigenvalue : Number or Symbol\n Specifies the value for the main diagonal of the matrix.\n\n .. note::\n The keyword ``eigenval`` is also specified as an alias\n of this keyword, but it is not recommended to use.\n\n We may deprecate the alias in later release.\n\n band : 'upper' or 'lower', optional\n Specifies the position of the off-diagonal to put `1` s on.\n\n cls : Matrix, optional\n Specifies the matrix class of the output form.\n\n If it is not specified, the class type where the method is\n being executed on will be returned.\n\n rows, cols : Integer, optional\n Specifies the shape of the Jordan block matrix. See Notes\n section for the details of how these key works.\n\n .. 
deprecated:: 1.4\n The rows and cols parameters are deprecated and will be\n removed in a future version.\n\n\n Returns\n =======\n\n Matrix\n A Jordan block matrix.\n\n Raises\n ======\n\n ValueError\n If insufficient arguments are given for matrix size\n specification, or no eigenvalue is given.\n\n Examples\n ========\n\n Creating a default Jordan block:\n\n >>> from sympy import Matrix\n >>> from sympy.abc import x\n >>> Matrix.jordan_block(4, x)\n Matrix([\n [x, 1, 0, 0],\n [0, x, 1, 0],\n [0, 0, x, 1],\n [0, 0, 0, x]])\n\n Creating an alternative Jordan block matrix where `1` is on\n lower off-diagonal:\n\n >>> Matrix.jordan_block(4, x, band='lower')\n Matrix([\n [x, 0, 0, 0],\n [1, x, 0, 0],\n [0, 1, x, 0],\n [0, 0, 1, x]])\n\n Creating a Jordan block with keyword arguments\n\n >>> Matrix.jordan_block(size=4, eigenvalue=x)\n Matrix([\n [x, 1, 0, 0],\n [0, x, 1, 0],\n [0, 0, x, 1],\n [0, 0, 0, x]])\n\n Notes\n =====\n\n .. deprecated:: 1.4\n This feature is deprecated and will be removed in a future\n version.\n\n The keyword arguments ``size``, ``rows``, ``cols`` relates to\n the Jordan block size specifications.\n\n If you want to create a square Jordan block, specify either\n one of the three arguments.\n\n If you want to create a rectangular Jordan block, specify\n ``rows`` and ``cols`` individually.\n\n +--------------------------------+---------------------+\n | Arguments Given | Matrix Shape |\n +----------+----------+----------+----------+----------+\n | size | rows | cols | rows | cols |\n +==========+==========+==========+==========+==========+\n | size | Any | size | size |\n +----------+----------+----------+----------+----------+\n | | None | ValueError |\n | +----------+----------+----------+----------+\n | None | rows | None | rows | rows |\n | +----------+----------+----------+----------+\n | | None | cols | cols | cols |\n + +----------+----------+----------+----------+\n | | rows | cols | rows | cols |\n +----------+----------+----------+----------+----------+\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Jordan_matrix\n \n The 'rows' and 'cols' keywords to Matrix.jordan_block() are\n deprecated. 
Use the 'size' parameter instead.\n \\\n To get a non-square Jordan block matrix use a more generic\n banded matrix constructor, like\n", "n_words": 442, "vocab_size": 190, "n_whitespaces": 1426, "language": "en" } }, { "id": 258441, "commit_id": "86ade4817eda3142d2ddef65a0b1e29ffee770e3", "repo": "haystack", "path": "rest_api/rest_api/utils.py", "file_name": "utils.py", "fun_name": "get_openapi_specs", "commit_message": "bug: fix the docs rest api reference url (#3775)\n\n* bug: fix the docs rest api reference url\r\n\r\n* revert openapi json changes\r\n\r\n* remove last line on json files\r\n\r\n* Add explanation about `servers` and remove `servers` parameter from FastAPI\r\n\r\n* generate openapi schema without empty end line", "code": "def get_openapi_specs() -> dict:\n \n\n app = get_app()\n return get_openapi(\n title=app.title,\n version=app.version,\n openapi_version=app.openapi_version,\n description=app.description,\n routes=app.routes,\n servers=[{\"url\": \"http://localhost:8000\"}],\n )\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 71, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 17, "token_counts": 56, "n_ast_nodes": 89, "n_identifiers": 11, "random_cut": "def get_openapi_specs() -> dict:\n \n\n app = get_app()", "d_id": 75236, "documentation": { "docstring": "\n Used to autogenerate OpenAPI specs file to use in the documentation.\n\n Returns `servers` to specify base URL for OpenAPI Playground (see https://swagger.io/docs/specification/api-host-and-base-path/)\n\n See `.github/utils/generate_openapi_specs.py`\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 37, "language": "en" } }, { "id": 66068, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/employee/employee.py", "file_name": "employee.py", "fun_name": "get_all_employee_emails", "commit_message": "style: format code with black", "code": "def get_all_employee_emails(company):\n\t\n\temployee_list = frappe.get_all(\n\t\t\"Employee\", fields=[\"name\", \"employee_name\"], filters={\"status\": \"Active\", \"company\": company}\n\t)\n\temployee_emails = []\n\tfor employee in employee_list:\n\t\tif not employee:\n\t\t\tcontinue\n\t\tuser, company_email, personal_email = frappe.db.get_value(\n\t\t\t\"Employee\", employee, [\"user_id\", \"company_email\", \"personal_email\"]\n\t\t)\n\t\temail = user or company_email or personal_email\n\t\tif email:\n\t\t\temployee_emails.append(email)\n\treturn employee_emails\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 32, "n_words": 47, "vocab_size": 38, "complexity": 6, "nloc": 15, "token_counts": 90, "n_ast_nodes": 156, "n_identifiers": 16, "random_cut": "def get_all_employee_emails(company):\n\t\n\temployee_list = frappe.get_all(\n\t\t\"Employee\", fields=[\"name\", \"employee_name\"], filters={\"status\": \"Active\", \"company\": company}\n\t)\n\temployee_emails = []\n\tfor", "d_id": 14099, "documentation": { "docstring": "Returns list of employee emails either based on user_id or company_email", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 90257, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_group_index.py", "file_name": "test_organization_group_index.py", "fun_name": 
"test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release(self):\n \n release_1 = self.create_release(\n date_added=timezone.now() - timedelta(minutes=45), version=\"foobar 1\"\n )\n release_2 = self.create_release(version=\"foobar 2\")\n self.create_release(version=\"foobar 3\")\n\n group = self.store_event(\n data={\n \"timestamp\": iso_format(before_now(seconds=12)),\n \"fingerprint\": [\"group-1\"],\n \"release\": release_1.version,\n },\n project_id=self.project.id,\n ).group\n\n self.login_as(user=self.user)\n\n response = self.get_success_response(\n qs_params={\"id\": group.id}, status=\"resolvedInNextRelease\"\n )\n assert response.data[\"status\"] == \"resolved\"\n assert response.data[\"statusDetails\"][\"inNextRelease\"]\n\n grp_resolution = GroupResolution.objects.filter(group=group)\n\n assert len(grp_resolution) == 1\n grp_resolution = grp_resolution[0]\n\n assert grp_resolution.current_release_version == release_1.version\n assert grp_resolution.release.id == release_2.id\n assert grp_resolution.type == GroupResolution.Type.in_release\n assert grp_resolution.status == GroupResolution.Status.resolved\n\n activity = Activity.objects.filter(\n group=grp_resolution.group,\n type=Activity.SET_RESOLVED_IN_RELEASE,\n ident=grp_resolution.id,\n ).first()\n assert activity.data[\"version\"] == release_2.version\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 368, "n_words": 81, "vocab_size": 59, "complexity": 1, "nloc": 33, "token_counts": 249, "n_ast_nodes": 407, "n_identifiers": 43, "random_cut": "def test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release(self):\n \n release_1 = self.create_release(\n date_added=timezon", "d_id": 18657, "documentation": { "docstring": "\n Test that ensures that if we basically know the next release when clicking on Resolved\n In Next Release because that release exists, then we can short circuit setting\n GroupResolution to type \"inNextRelease\", and then having `clear_exrired_resolutions` run\n once a new release is created to convert GroupResolution to in_release and set Activity.\n Basically we treat \"ResolvedInNextRelease\" as \"ResolvedInRelease\" when there is a release\n that was created after the last release associated with the group being resolved\n ", "n_words": 75, "vocab_size": 55, "n_whitespaces": 125, "language": "en" } }, { "id": 26436, "commit_id": "aca6418d6c36956bc1ab530e6ef7e146ec9df90c", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py", "file_name": "fixtures.py", "fun_name": "subscription_order_updated_webhook", "commit_message": "Add Webhook payload via graphql subscriptions (#9394)\n\n* Add PoC of webhook subscriptions\r\n\r\n* add async webhooks subscription payloads feature\r\n\r\n* remove unneeded file\r\n\r\n* add translations subscription handling, fixes after review\r\n\r\n* remove todo\r\n\r\n* add descriptions\r\n\r\n* add descriptions, move subsrciption_payloads.py\r\n\r\n* refactor\r\n\r\n* fix imports, add changelog\r\n\r\n* check_document_is_single_subscription refactor\r\n\r\nCo-authored-by: Maciej Korycinski \r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", "code": "def subscription_order_updated_webhook(subscription_webhook):\n 
return subscription_webhook(\n ORDER_UPDATED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.ORDER_UPDATED\n )\n\n\nORDER_CONFIRMED_SUBSCRIPTION_QUERY = \n\n\n@pytest.fixture", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 21, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 14, "n_ast_nodes": 36, "n_identifiers": 8, "random_cut": "def subscription_order_updated_webhook(subscription_webhook):\n return subscription_webhook(\n ORDER_UPDATED_SUBSCRIPTION_QUERY, Webhook", "d_id": 4995, "documentation": { "docstring": "\n subscription{\n event{\n ...on OrderConfirmed{\n order{\n id\n }\n }\n }\n }\n", "n_words": 10, "vocab_size": 7, "n_whitespaces": 69, "language": "en" } }, { "id": 153357, "commit_id": "241a46dd5f4dce7bc7f630b58c80d15222d6bde7", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/omnisci_on_native/omnisci_worker.py", "file_name": "omnisci_worker.py", "fun_name": "cast_to_compatible_types", "commit_message": "FIX-#3368: support unsigned integers in OmniSci backend (#4256)\n\nSigned-off-by: Dmitry Chigarev \r\nCo-authored-by: Yaroslav Igoshev ", "code": "def cast_to_compatible_types(table):\n \n schema = table.schema\n new_schema = schema\n need_cast = False\n uint_to_int_cast = False\n new_cols = {}\n uint_to_int_map = {\n pa.uint8(): pa.int16(),\n pa.uint16(): pa.int32(),\n pa.uint32(): pa.int64(),\n pa.uint64(): pa.int64(), # May cause overflow\n }\n for i, field in enumerate(schema):\n # Currently OmniSci doesn't support Arrow table import with\n # dictionary columns. Here we cast dictionaries until support\n # is in place.\n # https://github.com/modin-project/modin/issues/1738\n if pa.types.is_dictionary(field.type):\n # Conversion for dictionary of null type to string is not supported\n # in Arrow. 
Build new column for this case for now.\n if pa.types.is_null(field.type.value_type):\n mask = np.full(table.num_rows, True, dtype=bool)\n new_col_data = np.empty(table.num_rows, dtype=str)\n new_col = pa.array(new_col_data, pa.string(), mask)\n new_cols[i] = new_col\n else:\n need_cast = True\n new_field = pa.field(\n field.name, pa.string(), field.nullable, field.metadata\n )\n new_schema = new_schema.set(i, new_field)\n # OmniSci doesn't support importing Arrow's date type:\n # https://github.com/omnisci/omniscidb/issues/678\n elif pa.types.is_date(field.type):\n # Arrow's date is the number of days since the UNIX-epoch, so we can convert it\n # to a timestamp[s] (number of seconds since the UNIX-epoch) without losing precision\n new_field = pa.field(\n field.name, pa.timestamp(\"s\"), field.nullable, field.metadata\n )\n new_schema = new_schema.set(i, new_field)\n need_cast = True\n # OmniSci doesn't support unsigned types\n elif pa.types.is_unsigned_integer(field.type):\n new_field = pa.field(\n field.name,\n uint_to_int_map[field.type],\n field.nullable,\n field.metadata,\n )\n new_schema = new_schema.set(i, new_field)\n need_cast = True\n uint_to_int_cast = True\n\n # Such cast may affect the data, so we have to raise a warning about it\n if uint_to_int_cast:\n ErrorMessage.single_warning(\n \"OmniSci does not support unsigned integer types, such types will be rounded up to the signed equivalent.\"\n )\n\n for i, col in new_cols.items():\n table = table.set_column(i, new_schema[i], col)\n\n if need_cast:\n try:\n table = table.cast(new_schema)\n except pa.lib.ArrowInvalid as e:\n raise (OverflowError if uint_to_int_cast else RuntimeError)(\n \"An error occurred when trying to convert unsupported by OmniSci 'dtypes' \"\n + f\"to the supported ones, the schema to cast was: \\n{new_schema}.\"\n ) from e\n\n return table\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1180, "n_words": 295, "vocab_size": 171, "complexity": 11, "nloc": 56, "token_counts": 382, "n_ast_nodes": 613, "n_identifiers": 55, "random_cut": "def cast_to_compatible_types(table):\n \n schema = table.schema\n new_schema = schema\n need_cast = False\n uint_to_int_cast = False\n new_cols = {}\n uint_to_int_map = {\n pa.uint8(): pa.int16(),\n pa.uint16(): pa.int32(),\n pa.uint32(): pa.int64(),\n pa.uint64(): pa.int64(), # May cause overflow\n }\n for i, field in enumerate(schema):\n # Currently OmniSci doesn't support Arrow table import with\n # dictionary columns. 
Here we cast dictionaries until support\n # is in place.\n # https://github.com/modin-project/modin/issues/1738\n if pa.types.is_dictionary(field.type):\n ", "d_id": 35391, "documentation": { "docstring": "\n Cast PyArrow table to be fully compatible with OmniSci.\n\n Parameters\n ----------\n table : pyarrow.Table\n Source table.\n\n Returns\n -------\n pyarrow.Table\n Table with fully compatible types with OmniSci.\n ", "n_words": 26, "vocab_size": 19, "n_whitespaces": 105, "language": "en" } }, { "id": 9959, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/types/request/data.py", "file_name": "data.py", "fun_name": "data", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary 
tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* 
feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no 
reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint 
configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def data(self) -> 'DataRequest._DataContent':\n \n return DataRequest._DataContent(self.proto.data)\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 6, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def data(self) -> 'DataRequest._DataContent':\n \n return DataRequest._DataCon", "d_id": 1807, "documentation": { "docstring": "Get the data contaned in this data request\n\n :return: the data content as an instance of _DataContent wrapping docs and groundtruths\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 275883, "commit_id": 
"84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/model_config.py", "file_name": "model_config.py", "fun_name": "model_from_json", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def model_from_json(json_string, custom_objects=None):\n \n from keras.layers import (\n deserialize_from_json,\n ) # pylint: disable=g-import-not-at-top\n\n return deserialize_from_json(json_string, custom_objects=custom_objects)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 28, "n_ast_nodes": 44, "n_identifiers": 6, "random_cut": "def model_from_json(json_string, custom_objects=None):\n \n from keras.layers import (\n deserialize_from_json,\n ) # pylint: disable=g-import-not", "d_id": 81498, "documentation": { "docstring": "Parses a JSON model configuration string and returns a model instance.\n\n Usage:\n\n >>> model = tf.keras.Sequential([\n ... tf.keras.layers.Dense(5, input_shape=(3,)),\n ... tf.keras.layers.Softmax()])\n >>> config = model.to_json()\n >>> loaded_model = tf.keras.models.model_from_json(config)\n\n Args:\n json_string: JSON string encoding a model configuration.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n Returns:\n A Keras model instance (uncompiled).\n ", "n_words": 59, "vocab_size": 45, "n_whitespaces": 137, "language": "en" } }, { "id": 319727, "commit_id": "08c3d6e84b17da2acfb10250438fe357398e5e0e", "repo": "paperless-ngx", "path": "src/documents/tests/test_management_convert_thumbnail.py", "file_name": "test_management_convert_thumbnail.py", "fun_name": "test_do_nothing_if_converted", "commit_message": "Fixes existing testing, adds test coverage of new command", "code": "def test_do_nothing_if_converted(self, run_convert_mock):\n \n\n stdout, _ = self.call_command()\n run_convert_mock.assert_not_called()\n self.assertIn(\"Converting all PNG thumbnails to WebP\", stdout)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 43, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 53, "n_identifiers": 8, "random_cut": "def test_do_nothing_if_converted(self, run_convert_mock):\n \n\n stdout, _ = self.call_command()\n run_convert_mock.assert_not_called()\n self.assertIn(\"Converting all PNG thumbnails to WebP\", stdout)\n", "d_id": 116987, "documentation": { "docstring": "\n GIVEN:\n - Document exists with default WebP thumbnail path\n WHEN:\n - Thumbnail conversion is attempted\n THEN:\n - Nothing is converted\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 82, "language": "en" } }, { "id": 20578, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/core.py", "file_name": "core.py", "fun_name": "__ror__", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude 
pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def __ror__(self, other):\n \n if isinstance(other, str_type):\n other = self._literalStringClass(other)\n if not isinstance(other, ParserElement):\n raise TypeError(\n \"Cannot combine element of type {} with ParserElement\".format(\n type(other).__name__\n )\n )\n return other | self\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 140, "n_words": 30, "vocab_size": 26, "complexity": 3, "nloc": 10, "token_counts": 52, "n_ast_nodes": 86, "n_identifiers": 11, "random_cut": "def __ror__(self, other):\n \n if isinstance(other, str_type):\n other = self._literalStringClass(other)\n if not isinstance(other, ParserElement):\n raise TypeError(\n \"Cannot combine element of type {} with ParserElement\".format(\n ", "d_id": 3433, "documentation": { "docstring": "\n Implementation of ``|`` operator when left operand is not a :class:`ParserElement`\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 213062, "commit_id": "a5db070f446b7cfebdaa6ad2e3dcf78f6105a272", "repo": "serverless-application-model", "path": "samtranslator/third_party/py27hash/hash.py", "file_name": "hash.py", "fun_name": "shash", "commit_message": "fix: Py27hash fix (#2182)\n\n* Add third party py27hash code\r\n\r\n* Add Py27UniStr and unit tests\r\n\r\n* Add py27hash_fix utils and tests\r\n\r\n* Add to_py27_compatible_template and tests\r\n\r\n* Apply py27hash fix to wherever it is needed\r\n\r\n* Apply py27hash fix, all tests pass except api_with_any_method_in_swagger\r\n\r\n* apply py27hash fix in openapi + run black\r\n\r\n* remove py27 testing\r\n\r\n* remove other py27 references\r\n\r\n* black fixes\r\n\r\n* fixes/typos\r\n\r\n* remove py27 from tox.ini\r\n\r\n* refactoring\r\n\r\n* third party notice\r\n\r\n* black\r\n\r\n* Fix py27hash fix to deal with null events\r\n\r\n* Fix Py27UniStr repr for unicode literals\r\n\r\n* black reformat\r\n\r\n* Update _template_has_api_resource to check data type more defensively\r\n\r\n* Apply py27Dict in _get_authorizers\r\n\r\n* Apply Py27Dict to authorizers and gateway responses which will go into swagger\r\n\r\n* Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class\r\n\r\n* Rename _convert_to_py27_dict to _convert_to_py27_type\r\n\r\n* Apply Py27UniStr to path param name\r\n\r\n* Handle HttpApi resource under to_py27_compatible_template\r\n\r\n* Fix InvalidDocumentException to not sort different exceptions\r\n\r\n* black reformat\r\n\r\n* Remove unnecessary test files\r\n\r\nCo-authored-by: Wing Fung Lau <4760060+hawflau@users.noreply.github.com>", "code": "def shash(value):\n \n\n length = len(value)\n\n if length == 0:\n return 0\n\n x = Hash.ordinal(value[0]) << 7\n for c in value:\n x = (1000003 * x) ^ Hash.ordinal(c)\n\n x ^= length\n x &= 0xFFFFFFFFFFFFFFFF\n if x == -1:\n x = -2\n\n # Convert to C long type\n return ctypes.c_long(x).value\n", "url": "https://github.com/aws/serverless-application-model.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 151, "n_words": 48, "vocab_size": 35, "complexity": 4, "nloc": 12, "token_counts": 77, "n_ast_nodes": 125, "n_identifiers": 10, "random_cut": "def shash(value):\n \n\n length = len(value)\n\n if length == 0:\n 
return 0\n\n x = Hash.ordinal(value[0]) << 7\n for c in value:\n x = (1000003 * x) ^ Hash.ordinal(c)\n\n x ^= length\n x &= 0xFFFFFFFFFFFFFFFF\n if x == -1:\n x = -2\n\n # Convert to C long type\n ", "d_id": 53616, "documentation": { "docstring": "\n Returns a Python 2.7 hash for a string.\n\n Logic ported from the 2.7 Python branch: cpython/Objects/stringobject.c\n Method: static long string_hash(PyStringObject *a)\n\n Args:\n value: input string\n\n Returns:\n Python 2.7 hash\n ", "n_words": 29, "vocab_size": 23, "n_whitespaces": 94, "language": "en" } }, { "id": 105508, "commit_id": "6ea46d88c6a09244d785e55e2681bc4033740442", "repo": "datasets", "path": "tests/packaged_modules/test_folder_based_builder.py", "file_name": "test_folder_based_builder.py", "fun_name": "data_files_with_one_split_and_metadata", "commit_message": "Add AudioFolder packaged loader (#4530)\n\n* add audiofolder loader (almost identical to imagefolder except for inferring labels is not default)\r\n\r\n* add instruction on how to obtain list of audio extensions\r\n\r\n* add a generic loader\r\n\r\n* patch autofolder for streaming manually\r\n\r\n* align autofolder with the latest imagefolder implementation\r\n\r\n* update tests\r\n\r\n* add test for duplicate label col\r\n\r\n* add tests for autofolder (+copied from imagefolder)\r\n\r\n* add missed audio_file fixture\r\n\r\n* add documentation\r\n\r\n* remove boilerplate, make base feature builder's class arg instead of a config's one\r\n\r\n* remove self.config.label_name, use hardcoded 'label'\r\n\r\n* patch parents that inherit from DatasetBuilder, revert get_imports\r\n\r\n* rename autofolder -> folder_builder\r\n\r\n* make base column name an abstract attr of FolderBuilder instead of config's parameter\r\n\r\n* Update src/datasets/streaming.py\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\n* rename FolderBuilder -> FolderBasedBuilder\r\n\r\n* set drop_labels to None by default for AudioFolder\r\n\r\n* update documentation\r\n\r\n* check if builder extending for streaming is not in datasets.builder module\r\n\r\nCo-authored-by: Mario Šaško \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def data_files_with_one_split_and_metadata(tmp_path, auto_text_file):\n data_dir = tmp_path / \"autofolder_data_dir_with_metadata_one_split\"\n data_dir.mkdir(parents=True, exist_ok=True)\n subdir = data_dir / \"subdir\"\n subdir.mkdir(parents=True, exist_ok=True)\n\n filename = data_dir / \"file.txt\"\n shutil.copyfile(auto_text_file, filename)\n filename2 = data_dir / \"file2.txt\"\n shutil.copyfile(auto_text_file, filename2)\n filename3 = subdir / \"file3.txt\" # in subdir\n shutil.copyfile(auto_text_file, filename3)\n\n metadata_filename = data_dir / \"metadata.jsonl\"\n metadata = textwrap.dedent(\n \n )\n with open(metadata_filename, \"w\", encoding=\"utf-8\") as f:\n f.write(metadata)\n data_files_with_one_split_and_metadata = DataFilesDict.from_local_or_remote(\n get_data_patterns_locally(data_dir), data_dir\n )\n assert len(data_files_with_one_split_and_metadata) == 1\n assert len(data_files_with_one_split_and_metadata[\"train\"]) == 4\n return data_files_with_one_split_and_metadata\n\n\n@pytest.fixture", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 152, "n_words": 74, "vocab_size": 48, "complexity": 1, "nloc": 27, "token_counts": 145, "n_ast_nodes": 255, "n_identifiers": 27, "random_cut": "def 
data_files_with_one_split_and_metadata(tmp_path, auto_text_file):\n data_dir = tmp_path / \"autofolder_data_dir_with_metadata_one_split\"\n data_dir.mkdir(parents=True, exist_ok=True)\n subdir = data_dir / \"subdir\"\n subdir.mkdir(parents=True, exist_ok=True)\n\n filename = data_dir / \"file.txt\"\n shutil.copyfile(auto_text_file, filename)\n filename2 = data_dir / \"file2.txt\"\n shutil.copyfile(auto_text_file, filename2)\n filename3 = subdir / \"file3.txt\" # in subdir\n shutil.copyfile(auto_text_file, filename3)\n\n metadata_filename = data_dir / \"metadata.jsonl\"\n metadata = textwrap.dedent(\n \n )\n with open(metadata_filename, \"w\", encoding=\"utf-8\") as f:\n f.write(metadata)\n data_files_with_one_split_and_metadata = DataFilesDict.from_local_or_remote(\n get_data_patterns_locally(data_dir), data_dir\n )\n assert len(data_files_with_one_split_and_metadata) == 1\n assert len(data_files_with_one_split_and_metadata[\"train\"]) == 4\n return data_files_with_one_split_and_metadata\n\n\n@pytest.fixture", "d_id": 22138, "documentation": { "docstring": "\\\n {\"file_name\": \"file.txt\", \"additional_feature\": \"Dummy file\"}\n {\"file_name\": \"file2.txt\", \"additional_feature\": \"Second dummy file\"}\n {\"file_name\": \"subdir/file3.txt\", \"additional_feature\": \"Third dummy file\"}\n ", "n_words": 18, "vocab_size": 11, "n_whitespaces": 46, "language": "en" } }, { "id": 250293, "commit_id": "652d1669c5a103b1c20478770c4aaf18849c09a3", "repo": "synapse", "path": "tests/handlers/test_e2e_room_keys.py", "file_name": "test_e2e_room_keys.py", "fun_name": "test_upload_room_keys_wrong_version", "commit_message": "Add missing type hints to tests.handlers. (#14680)\n\nAnd do not allow untyped defs in tests.handlers.", "code": "def test_upload_room_keys_wrong_version(self) -> None:\n \n version = self.get_success(\n self.handler.create_version(\n self.local_user,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"first_version_auth_data\",\n },\n )\n )\n self.assertEqual(version, \"1\")\n\n version = self.get_success(\n self.handler.create_version(\n self.local_user,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"second_version_auth_data\",\n },\n )\n )\n self.assertEqual(version, \"2\")\n\n e = self.get_failure(\n self.handler.upload_room_keys(self.local_user, \"1\", room_keys), SynapseError\n )\n res = e.value.code\n self.assertEqual(res, 403)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 345, "n_words": 47, "vocab_size": 30, "complexity": 1, "nloc": 27, "token_counts": 120, "n_ast_nodes": 202, "n_identifiers": 16, "random_cut": "def test_upload_room_keys_wrong_version(self) -> None:\n \n version = self.get_success(\n self.handler.create_version(\n self.local_user,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"first_version_auth_data\",\n },\n )\n )\n self.assertEqual(version, \"1\")\n\n version = self.get_success(\n self.handler.create_version(\n self.local_user,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"second_version_auth_data\",\n },\n )\n )\n self.assertEqual", "d_id": 73371, "documentation": { "docstring": "Check that we get a 403 on uploading keys for an old version", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 101501, "commit_id": "dc18c74eea0c7837a820d27628cb12b0824fa30e", "repo": "faceswap", "path": "lib/gui/utils.py", "file_name": "utils.py", "fun_name": 
"previewtrain", "commit_message": "Bugfix: Preview for extract in batch mode", "code": "def previewtrain(self) -> Dict[str, List[Union[Image.Image, ImageTk.PhotoImage, None, float]]]:\n \n return self._previewtrain\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 10, "token_counts": 33, "n_ast_nodes": 48, "n_identifiers": 11, "random_cut": "def previewtrain(self) -> Dict[str, List[Union[Image.Image, ImageTk.PhotoImage, None, float]]]:\n \n return self._previewtrain\n", "d_id": 20913, "documentation": { "docstring": " dict or ``None``: The training preview images. Dictionary key is the image name\n (`str`). Dictionary values are a `list` of the training image (:class:`PIL.Image`), the\n image formatted for tkinter display (:class:`PIL.ImageTK.PhotoImage`), the last\n modification time of the image (`float`).\n\n The value of this property is ``None`` if training is not running or there are no preview\n images available.\n ", "n_words": 58, "vocab_size": 40, "n_whitespaces": 101, "language": "en" } }, { "id": 60305, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py", "file_name": "test_coord_map.py", "fun_name": "test_padding", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def test_padding(self):\n \n n = coord_net_spec()\n ax, a, b = coord_map_from_to(n.deconv, n.data)\n pad = random.randint(0, 10)\n # conv padding\n n = coord_net_spec(pad=pad)\n _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)\n self.assertEquals(a, a_pad)\n self.assertEquals(b - pad, b_pad)\n # deconv padding\n n = coord_net_spec(dpad=pad)\n _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)\n self.assertEquals(a, a_pad)\n self.assertEquals(b + pad, b_pad)\n # pad both to cancel out\n n = coord_net_spec(pad=pad, dpad=pad)\n _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)\n self.assertEquals(a, a_pad)\n self.assertEquals(b, b_pad)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 204, "n_words": 71, "vocab_size": 36, "complexity": 1, "nloc": 16, "token_counts": 165, "n_ast_nodes": 254, "n_identifiers": 18, "random_cut": "def test_padding(self):\n \n ", "d_id": 12083, "documentation": { "docstring": "\n Padding conv adds offset while padding deconv subtracts offset.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 337458, "commit_id": "e5c17f36a8b5bf8b9478d416c4a80841a353fb19", "repo": "accelerate", "path": "src/accelerate/test_utils/testing.py", "file_name": "testing.py", "fun_name": "require_cuda", "commit_message": "Clean up tests + fix import (#330)", "code": "def require_cuda(test_case):\n \n return unittest.skipUnless(torch.cuda.is_available(), \"test requires a GPU\")(test_case)\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 43, "n_identifiers": 7, "random_cut": "def require_cuda(test_case):\n \n return unittest.skipUnless(torch.cuda.is_a", "d_id": 121060, "documentation": { "docstring": "\n Decorator marking a test that requires CUDA. 
These tests are skipped when there are no GPU available.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 24, "language": "en" } }, { "id": 248286, "commit_id": "aec69d2481e9ea1d8ea1c0ffce1706a65a7896a8", "repo": "synapse", "path": "synapse/logging/handlers.py", "file_name": "handlers.py", "fun_name": "_flush_periodically", "commit_message": "Another batch of type annotations (#12726)", "code": "def _flush_periodically(self) -> None:\n \n\n while self._active:\n # flush is thread-safe; it acquires and releases the lock internally\n self.flush()\n time.sleep(self._flush_period)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 66, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 26, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def _flush_periodically(self) -> None:\n \n\n while self._active:\n # flush is thread-safe; it acquires and releases the lock internally\n self.flush()\n time.sleep(self._flush_period)\n", "d_id": 72191, "documentation": { "docstring": "\n Whilst this handler is active, flush the handler periodically.\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 180503, "commit_id": "70ebf698fa75ad094a2ba52cd1de645fa58eff85", "repo": "gradio", "path": "gradio/components.py", "file_name": "components.py", "fun_name": "save_flagged", "commit_message": "Live website changes (#1578)\n\n* fix audio output cache (#804)\r\n\r\n* fix audio output cache\r\n\r\n* changes\r\n\r\n* version update\r\n\r\nCo-authored-by: Ali Abid \r\n\r\n* Website Tracker Slackbot (#797)\r\n\r\n* added commands to reload script\r\n\r\n* catch errors with git pull\r\n\r\n* read new webhook from os variable\r\n\r\n* correcting bash\r\n\r\n* bash fixes\r\n\r\n* formatting\r\n\r\n* more robust error checking\r\n\r\n* only sends success if git changes\r\n\r\n* catching error from script\r\n\r\n* escaping error text to send with curl\r\n\r\n* correct text escaping for error message\r\n\r\n* fix search bug in guides (#809)\r\n\r\n* Update getting_started.md (#808)\r\n\r\n* Fix type of server returned by `Launchable` (#810)\r\n\r\n* `Launchable` returns a FastAPI now\r\n\r\n* Update .gitignore\r\n\r\n* Add a missing line to getting started (#816)\r\n\r\n\r\n\r\nFormer-commit-id: 81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 96f203108bf1222fe333a0175687293abdc669d7]\r\nFormer-commit-id: eaff13262853078e0c6c0baa54c731d9e56bc73f\r\n\r\n* Add a missing line to getting started (#816)\r\n\r\n\r\n\r\nFormer-commit-id: 81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 96f203108bf1222fe333a0175687293abdc669d7]]\r\nFormer-commit-id: eaff13262853078e0c6c0baa54c731d9e56bc73f\r\nFormer-commit-id: b5112c3f425c0ea961461854efae9c28a73aea01\r\n\r\n* Add a missing line to getting started (#816)\r\n\r\n\r\n\r\nFormer-commit-id: 81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 96f203108bf1222fe333a0175687293abdc669d7]]]\r\nFormer-commit-id: eaff13262853078e0c6c0baa54c731d9e56bc73f\r\nFormer-commit-id: b5112c3f425c0ea961461854efae9c28a73aea01\r\nFormer-commit-id: bce6f9c4c5254301eb73e76eb47cddab3e132c24\r\n\r\n* Add a missing line to getting started (#816)\r\n\r\n\r\n\r\nFormer-commit-id: 81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 
81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 81e271ca22e838e1ee618d48cdb0e904fd233cf3 [formerly 96f203108bf1222fe333a0175687293abdc669d7]]]]\r\nFormer-commit-id: eaff13262853078e0c6c0baa54c731d9e56bc73f\r\nFormer-commit-id: b5112c3f425c0ea961461854efae9c28a73aea01\r\nFormer-commit-id: bce6f9c4c5254301eb73e76eb47cddab3e132c24\r\nFormer-commit-id: feba0888e3d488b82a3518343f607517d0836f13\r\n\r\n* Add a missing line to getting started (#816)\r\n\r\n* Clean-History\r\n- Remove 51MB file with this commit\r\n\r\n\r\nFormer-commit-id: 34b6a2325d613eeef622410f2d1ff3d869d3133c\r\n\r\n* Clean-History\r\n- Remove 51MB file with this commit\r\n\r\n\r\nFormer-commit-id: 34b6a2325d613eeef622410f2d1ff3d869d3133c\r\nFormer-commit-id: dd700c33cca3f560621219530444b631b7767392\r\n\r\n* Clean-History\r\n- Remove 51MB file with this commit\r\n\r\n\r\nFormer-commit-id: 34b6a2325d613eeef622410f2d1ff3d869d3133c\r\nFormer-commit-id: dd700c33cca3f560621219530444b631b7767392\r\nFormer-commit-id: 0d80e6a056abad1c4d1fd6f162eb725e0db5fb4f\r\n\r\n* Clean-History\r\n- Remove 51MB file with this commit\r\n\r\n\r\nFormer-commit-id: 34b6a2325d613eeef622410f2d1ff3d869d3133c\r\nFormer-commit-id: dd700c33cca3f560621219530444b631b7767392\r\nFormer-commit-id: 0d80e6a056abad1c4d1fd6f162eb725e0db5fb4f\r\nFormer-commit-id: 20523b05194438209cf64cb688008b4599eb847e\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* Homepage: header image size (#1347)\r\n\r\n* image size\r\n\r\n* image in local assets\r\n\r\n* add dall-e mini banner\r\n\r\n* undo ui changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* updates\r\n\r\n* updates\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* h11 dependency\r\n\r\n* add npm build-mac\r\n\r\n* expand demo button to all classes\r\n\r\n* add demos to docstrings\r\n\r\n* add anchor tags to headers\r\n\r\n* add required tag to param table\r\n\r\n* add consistent styling for headers\r\n\r\n* skip param beginning with underscore from docs\r\n\r\n* skip kwargs param from docs\r\n\r\n* remove types in param docstring\r\n\r\n* override signature to reflect usage\r\n\r\n* add supported events\r\n\r\n* add step-by-step guides\r\n\r\n* fix guide contribution link\r\n\r\n* add related spaces\r\n\r\n* fix img styling on guides\r\n\r\n* pin quickstart, advanced, and block guides to top\r\n\r\n* margin fix\r\n\r\n* autogenerated copy buttons for all codeblocks\r\n\r\n* changes\r\n\r\n* documentaiton\r\n\r\n* format\r\n\r\n* launch\r\n\r\n* formatting\r\n\r\n* style changes\r\n\r\n* remove backticks\r\n\r\n* changes\r\n\r\n* changes\r\n\r\nCo-authored-by: Ali Abid \r\nCo-authored-by: Ali Abdalla \r\nCo-authored-by: Julien Chaumond \r\nCo-authored-by: Ömer Faruk Özdemir \r\nCo-authored-by: Ali \r\nCo-authored-by: Victor Muštar \r\nCo-authored-by: Abubakar Abid ", "code": "def save_flagged(self, dir, label, data, encryption_key) -> str | Dict:\n \n if \"confidences\" in data:\n return json.dumps(\n {\n example[\"label\"]: example[\"confidence\"]\n for example in data[\"confidences\"]\n }\n )\n else:\n return data[\"label\"]\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 150, "n_words": 28, "vocab_size": 26, "complexity": 3, "nloc": 14, "token_counts": 54, "n_ast_nodes": 90, "n_identifiers": 11, "random_cut": "def save_flagged(self, dir, label, data, encryption_key) -> str | Dict:\n \n if \"confidences\" in data:\n return json.dumps(\n {\n example[\"label\"]: example[\"confidence\"]\n ", "d_id": 43183, 
"documentation": { "docstring": "\n Returns:\n Either a string representing the main category label, or a dictionary with category keys mapping to confidence levels.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 45, "language": "en" } }, { "id": 133740, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/impala/tests/test_vtrace.py", "file_name": "test_vtrace.py", "fun_name": "test_higher_rank_inputs_for_importance_weights", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_higher_rank_inputs_for_importance_weights(self):\n \n for fw in framework_iterator(frameworks=(\"torch\", \"tf\"), session=True):\n vtrace = vtrace_tf if fw != \"torch\" else vtrace_torch\n if fw == \"tf\":\n inputs_ = {\n \"log_rhos\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 1]\n ),\n \"discounts\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 1]\n ),\n \"rewards\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 42]\n ),\n \"values\": tf1.placeholder(dtype=tf.float32, shape=[None, None, 42]),\n \"bootstrap_value\": tf1.placeholder(\n dtype=tf.float32, shape=[None, 42]\n ),\n }\n else:\n inputs_ = {\n \"log_rhos\": Box(-1.0, 1.0, (8, 10, 1)).sample(),\n \"discounts\": Box(-1.0, 1.0, (8, 10, 1)).sample(),\n \"rewards\": Box(-1.0, 1.0, (8, 10, 42)).sample(),\n \"values\": Box(-1.0, 1.0, (8, 10, 42)).sample(),\n \"bootstrap_value\": Box(-1.0, 1.0, (10, 42)).sample(),\n }\n output = vtrace.from_importance_weights(**inputs_)\n check(int(output.vs.shape[-1]), 42)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 583, "n_words": 96, "vocab_size": 47, "complexity": 4, "nloc": 29, "token_counts": 315, "n_ast_nodes": 447, "n_identifiers": 23, "random_cut": "def test_higher_rank_inputs_for_importance_weights(self):\n \n for fw in framework_iterator(frameworks=(\"torch\", \"tf\"), session=True):\n vtrace = vtrace_tf if fw != \"torch\" else vtrace_torch\n if fw ==", "d_id": 30091, "documentation": { "docstring": "Checks support for additional dimensions in inputs.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 47727, "commit_id": "70049f19e4ac82ea922d7e59871a3b4ebae068f1", "repo": "airflow", "path": "tests/www/views/test_views_tasks.py", "file_name": "test_views_tasks.py", "fun_name": "test_task_fail_duration", "commit_message": "Fix TaskFail queries in views after run_id migration (#23008)\n\nTwo problems here:\r\n\r\n1. TaskFail no longer has a executin_date property -- switch to run_id\r\n2. We weren't joining to DagRun correctly, meaning we'd end up with a\r\n cross-product effect(? 
Something weird anyway)\r\n\r\nCo-authored-by: Karthikeyan Singaravelan ", "code": "def test_task_fail_duration(app, admin_client, dag_maker, session):\n \n with dag_maker() as dag:\n op1 = BashOperator(task_id='fail', bash_command='exit 1')\n op2 = BashOperator(task_id='success', bash_command='exit 0')\n\n with pytest.raises(AirflowException):\n op1.run()\n op2.run()\n\n op1_fails = (\n session.query(TaskFail)\n .filter(\n TaskFail.task_id == 'fail',\n TaskFail.dag_id == dag.dag_id,\n )\n .all()\n )\n\n op2_fails = (\n session.query(TaskFail)\n .filter(\n TaskFail.task_id == 'success',\n TaskFail.dag_id == dag.dag_id,\n )\n .all()\n )\n\n assert len(op1_fails) == 1\n assert len(op2_fails) == 0\n\n with unittest.mock.patch.object(app, 'dag_bag') as mocked_dag_bag:\n mocked_dag_bag.get_dag.return_value = dag\n resp = admin_client.get(f\"dags/{dag.dag_id}/duration\", follow_redirects=True)\n html = resp.get_data().decode()\n cumulative_chart = json.loads(re.search(\"data_cumlinechart=(.*);\", html).group(1))\n line_chart = json.loads(re.search(\"data_linechart=(.*);\", html).group(1))\n\n assert resp.status_code == 200\n assert sorted(item[\"key\"] for item in cumulative_chart) == [\"fail\", \"success\"]\n assert sorted(item[\"key\"] for item in line_chart) == [\"fail\", \"success\"]\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 314, "n_words": 104, "vocab_size": 63, "complexity": 3, "nloc": 34, "token_counts": 268, "n_ast_nodes": 458, "n_identifiers": 46, "random_cut": "def test_task_fail_duration(app, admin_client, dag_maker, session):\n \n with dag_maker() as dag:\n op1 = BashOperator(task_id='fail', bash_command='exit 1')\n op2 = BashOperator(task_id='success', bash_command='exit 0')\n\n with pytest.raises(AirflowException):\n op1.run()\n op2.run()\n\n op1_fails = (\n session.query(TaskFail)\n .filter(\n TaskFail.task_id == 'fail',\n TaskFail.dag_id == dag.dag_id,\n )\n .all()\n )\n\n op2_fails = (\n session.query(TaskFail)\n .filter(\n TaskFail.task_id == 'success',\n TaskFail.dag_id == dag.dag_id,\n )\n .all()\n )\n\n assert len(op1_fails) == 1\n assert len(op2_fails) == 0\n\n with unittest.mock.patch.object(app, 'dag_bag') as mocked_dag_bag:\n mocked_dag_bag.get_dag.return_valu", "d_id": 9232, "documentation": { "docstring": "Task duration page with a TaskFail entry should render without error.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 246168, "commit_id": "901b264c0c88f39cbfb8b2229e0dc57968882658", "repo": "synapse", "path": "tests/rest/admin/test_user.py", "file_name": "test_user.py", "fun_name": "test_all_users", "commit_message": "Add type hints to `tests/rest/admin` (#11851)", "code": "def test_all_users(self) -> None:\n \n self._create_users(2)\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?deactivated=true\",\n {},\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertEqual(3, len(channel.json_body[\"users\"]))\n self.assertEqual(3, channel.json_body[\"total\"])\n\n # Check that all fields are available\n self._check_fields(channel.json_body[\"users\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 137, "n_words": 30, "vocab_size": 29, "complexity": 1, "nloc": 15, "token_counts": 96, "n_ast_nodes": 157, "n_identifiers": 16, "random_cut": "def 
test_all_users(self) -> None:\n \n self._create_users(2)\n\n channel = self.make_request(\n \"GET\",\n self.url + \"", "d_id": 71062, "documentation": { "docstring": "\n List all users, including deactivated users.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 222485, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/difflib.py", "file_name": "difflib.py", "fun_name": "real_quick_ratio", "commit_message": "add python 3.10.4 for windows", "code": "def real_quick_ratio(self):\n \n\n la, lb = len(self.a), len(self.b)\n # can't have more matches than the number of elements in the\n # shorter sequence\n return _calculate_ratio(min(la, lb), la + lb)\n\n __class_getitem__ = classmethod(GenericAlias)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 69, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 3, "token_counts": 37, "n_ast_nodes": 72, "n_identifiers": 12, "random_cut": "def real_quick_ratio(self):\n \n\n la, lb = len(self.a), len(self.b)\n # can't have more matches than the number of elements in the\n # shorter sequence\n return _calculate_ratio(min(la, lb), la + lb)\n\n __class_getitem__ = classmethod(GenericAlias)\n", "d_id": 56586, "documentation": { "docstring": "Return an upper bound on ratio() very quickly.\n\n This isn't defined beyond that it is an upper bound on .ratio(), and\n is faster to compute than either .ratio() or .quick_ratio().\n ", "n_words": 30, "vocab_size": 25, "n_whitespaces": 51, "language": "en" } }, { "id": 71415, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_bulk_actions/test_bulk_unpublish.py", "file_name": "test_bulk_unpublish.py", "fun_name": "test_unpublish_view_invalid_page_id", "commit_message": "Reformat with black", "code": "def test_unpublish_view_invalid_page_id(self):\n \n # Request confirm unpublish page but with illegal page id\n response = self.client.get(\n reverse(\n \"wagtail_bulk_action\",\n args=(\n \"wagtailcore\",\n \"page\",\n \"unpublish\",\n ),\n )\n )\n\n # Check that the user received a 404 response\n self.assertEqual(response.status_code, 404)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 201, "n_words": 35, "vocab_size": 31, "complexity": 1, "nloc": 12, "token_counts": 41, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def test_unpublish_view_invalid_page_id(self):\n \n # Request confirm unpublish page but with illegal page id\n response = self.client.get(\n reverse(\n \"wagtail_bulk_action\",\n args=(\n \"wagtailcore\",\n \"page\",\n \"unpublish\",\n ),\n )\n )\n\n # Check that the user receiv", "d_id": 15672, "documentation": { "docstring": "\n This tests that the unpublish view returns an error if the page id is invalid\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 30, "language": "en" } }, { "id": 134305, "commit_id": "9b29fd6501ff0e3e69d0333bf214482b86f9e97f", "repo": "ray", "path": "python/ray/train/tests/test_session.py", "file_name": "test_session.py", "fun_name": "test_warn_report", "commit_message": "[AIR] Hard deprecate train.report, warn on air.session misuse (#29613)\n\nSigned-off-by: Antoni Baum antoni.baum@protonmail.com\r\n\r\nHard deprecates `ray.train.report` and other session functions and ensures that the user is informed when using 
`ray.air.session` if they are not in session for consistency with the old functions.", "code": "def test_warn_report():\n \n\n fn = report\n\n with warnings.catch_warnings(record=True) as record:\n # Ignore Deprecation warnings.\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n assert not fn(dict())\n\n assert fn.__name__ in record[0].message.args[0]\n\n reset_log_once_with_str(fn.__name__)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 59, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 7, "token_counts": 60, "n_ast_nodes": 104, "n_identifiers": 14, "random_cut": "def test_warn_report():\n \n\n fn = report\n\n with warnings.catch_warnings(", "d_id": 30250, "documentation": { "docstring": "Checks if calling session.report function outside of session raises warning.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 244142, "commit_id": "14f0e9585c15c28f0c31dcc3ea352449bbe5eb96", "repo": "mmdetection", "path": "mmdet/models/dense_heads/mask2former_head.py", "file_name": "mask2former_head.py", "fun_name": "forward", "commit_message": "[Feature] Add Mask2Former to mmdet (#6938)\n\nupdate doc\r\n\r\nupdate doc format\r\n\r\ndeepcopy pixel_decoder cfg\r\n\r\nmove mask_pseudo_sampler cfg to config file\r\n\r\nmove part of postprocess from head to detector\r\n\r\nfix bug in postprocessing\r\n\r\nmove class setting from head to config file\r\n\r\nremove if else\r\n\r\nmove mask2bbox to mask/util\r\n\r\nupdate docstring\r\n\r\nupdate docstring in result2json\r\n\r\nfix bug\r\n\r\nupdate class_weight\r\n\r\nadd maskformer_fusion_head\r\n\r\nadd maskformer fusion head\r\n\r\nupdate\r\n\r\nadd cfg for filter_low_score\r\n\r\nupdate maskformer\r\n\r\nupdate class_weight\r\n\r\nupdate config\r\n\r\nupdate unit test\r\n\r\nrename param\r\n\r\nupdate comments in config\r\n\r\nrename variable, rm arg, update unit tests\r\n\r\nupdate mask2bbox\r\n\r\nadd unit test for mask2bbox\r\n\r\nreplace unsqueeze(1) and squeeze(1)\r\n\r\nadd unit test for maskformer_fusion_head\r\n\r\nupdate docstrings\r\n\r\nupdate docstring\r\n\r\ndelete \\\r\n\r\nremove modification to ce loss\r\n\r\nupdate docstring\r\n\r\nupdate docstring\r\n\r\nupdate docstring of ce loss\r\n\r\nupdate unit test\r\n\r\nupdate docstring\r\n\r\nupdate docstring\r\n\r\nupdate docstring\r\n\r\nrename\r\n\r\nrename\r\n\r\nadd msdeformattn pixel decoder\r\n\r\nmaskformer refactor\r\n\r\nadd strides in config\r\n\r\nremove redundant code\r\n\r\nremove redundant code\r\n\r\nupdate unit test\r\n\r\nupdate config\r\n\r\nupdate", "code": "def forward(self, feats, img_metas):\n \n batch_size = len(img_metas)\n mask_features, multi_scale_memorys = self.pixel_decoder(feats)\n # multi_scale_memorys (from low resolution to high resolution)\n decoder_inputs = []\n decoder_positional_encodings = []\n for i in range(self.num_transformer_feat_level):\n decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i])\n # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n decoder_input = decoder_input.flatten(2).permute(2, 0, 1)\n level_embed = self.level_embed.weight[i].view(1, 1, -1)\n decoder_input = decoder_input + level_embed\n # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n mask = decoder_input.new_zeros(\n (batch_size, ) + multi_scale_memorys[i].shape[-2:],\n dtype=torch.bool)\n decoder_positional_encoding = self.decoder_positional_encoding(\n mask)\n decoder_positional_encoding = 
decoder_positional_encoding.flatten(\n 2).permute(2, 0, 1)\n decoder_inputs.append(decoder_input)\n decoder_positional_encodings.append(decoder_positional_encoding)\n # shape (num_queries, c) -> (num_queries, batch_size, c)\n query_feat = self.query_feat.weight.unsqueeze(1).repeat(\n (1, batch_size, 1))\n query_embed = self.query_embed.weight.unsqueeze(1).repeat(\n (1, batch_size, 1))\n\n cls_pred_list = []\n mask_pred_list = []\n cls_pred, mask_pred, attn_mask = self.forward_head(\n query_feat, mask_features, multi_scale_memorys[0].shape[-2:])\n cls_pred_list.append(cls_pred)\n mask_pred_list.append(mask_pred)\n\n for i in range(self.num_transformer_decoder_layers):\n level_idx = i % self.num_transformer_feat_level\n # if a mask is all True(all background), then set it all False.\n attn_mask[torch.where(\n attn_mask.sum(-1) == attn_mask.shape[-1])] = False\n\n # cross_attn + self_attn\n layer = self.transformer_decoder.layers[i]\n attn_masks = [attn_mask, None]\n query_feat = layer(\n query=query_feat,\n key=decoder_inputs[level_idx],\n value=decoder_inputs[level_idx],\n query_pos=query_embed,\n key_pos=decoder_positional_encodings[level_idx],\n attn_masks=attn_masks,\n query_key_padding_mask=None,\n # here we do not apply masking on padded region\n key_padding_mask=None)\n cls_pred, mask_pred, attn_mask = self.forward_head(\n query_feat, mask_features, multi_scale_memorys[\n (i + 1) % self.num_transformer_feat_level].shape[-2:])\n\n cls_pred_list.append(cls_pred)\n mask_pred_list.append(mask_pred)\n\n return cls_pred_list, mask_pred_list\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 828, "n_words": 201, "vocab_size": 121, "complexity": 3, "nloc": 50, "token_counts": 412, "n_ast_nodes": 632, "n_identifiers": 54, "random_cut": "def forward(self, feats, img_metas):\n \n batch_size = len(img_metas)\n mask_features, multi_scale_memorys = self.pixel_decoder(feats)\n # multi_scale_memorys (from low resolution to high resolution)\n decoder_inputs = []\n decoder_positional_encodings = []\n for i in range(self.num_transformer_feat_level):\n decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i])\n # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n decoder_input = decoder_input.flatten(2).permute(2, 0, 1)\n level_embed = self.level_embed.weight[i].view(1, 1, -1)\n decoder_input = decoder_input + level_embed\n # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n mask = decoder_input.new_zeros(\n (batch_size, ) + multi_scale_memorys[i].shape[-2:],\n dtype=torch.bool)\n decoder_positional_encoding = self.decoder_positional_encoding(\n mask)\n decoder_positional_encoding = decoder_positional_encoding.flatten(\n 2).permute(2, 0, 1)\n decoder_inputs.append(decoder_input)\n decoder_positional_encodings.append(decoder_positional_encoding)\n # shape (num_queries, c) -> (num_queries, batch_size, c)\n query_feat = self.query_feat.weight.unsqueeze(1).repeat(\n (1, batch_size, 1))\n query_embed = self.query_embed.weight.unsqueeze(1).repeat(\n (1, batch_size, 1))\n\n cls_pred_list = []\n mask_pred_list = []\n cls_pred, mask_pred, attn_mask = self.forward_head(\n query_feat, mask_features, multi_scale_memorys[0].shape[-2:])\n cls_pred_list.append(cls_pred)\n mask_pred_list.append(mask_pred)\n\n for i in range(self.num_transformer_decoder_layers):\n level_idx = i % self.num_transformer_feat_level\n # if a mask is all True(all background), then set it all False.\n attn_mask[torch.where(\n 
attn_mask.sum(-1) == at", "d_id": 70258, "documentation": { "docstring": "Forward function.\n\n Args:\n feats (list[Tensor]): Multi scale Features from the\n upstream network, each is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple: A tuple contains two elements.\n\n - cls_pred_list (list[Tensor)]: Classification logits \\\n for each decoder layer. Each is a 3D-tensor with shape \\\n (batch_size, num_queries, cls_out_channels). \\\n Note `cls_out_channels` should includes background.\n - mask_pred_list (list[Tensor]): Mask logits for each \\\n decoder layer. Each with shape (batch_size, num_queries, \\\n h, w).\n ", "n_words": 73, "vocab_size": 54, "n_whitespaces": 240, "language": "en" } }, { "id": 203453, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/options.py", "file_name": "options.py", "fun_name": "formfield_for_manytomany", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def formfield_for_manytomany(self, db_field, request, **kwargs):\n \n # If it uses an intermediary model that isn't auto created, don't show\n # a field in admin.\n if not db_field.remote_field.through._meta.auto_created:\n return None\n db = kwargs.get(\"using\")\n\n if \"widget\" not in kwargs:\n autocomplete_fields = self.get_autocomplete_fields(request)\n if db_field.name in autocomplete_fields:\n kwargs[\"widget\"] = AutocompleteSelectMultiple(\n db_field,\n self.admin_site,\n using=db,\n )\n elif db_field.name in self.raw_id_fields:\n kwargs[\"widget\"] = widgets.ManyToManyRawIdWidget(\n db_field.remote_field,\n self.admin_site,\n using=db,\n )\n elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]:\n kwargs[\"widget\"] = widgets.FilteredSelectMultiple(\n db_field.verbose_name, db_field.name in self.filter_vertical\n )\n if \"queryset\" not in kwargs:\n queryset = self.get_field_queryset(db, db_field, request)\n if queryset is not None:\n kwargs[\"queryset\"] = queryset\n\n form_field = db_field.formfield(**kwargs)\n if isinstance(form_field.widget, SelectMultiple) and not isinstance(\n form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple)\n ):\n msg = _(\n \"Hold down “Control”, or “Command” on a Mac, to select more than one.\"\n )\n help_text = form_field.help_text\n form_field.help_text = (\n format_lazy(\"{} {}\", help_text, msg) if help_text else msg\n )\n return form_field\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 627, "n_words": 139, "vocab_size": 89, "complexity": 11, "nloc": 38, "token_counts": 237, "n_ast_nodes": 376, "n_identifiers": 36, "random_cut": "def formfield_for_manytomany(self, db_field, request, **kwargs):\n \n # If it uses an intermediary model that isn't auto created, don't show\n # a field in admin.\n if not db_field.remote_field.through._meta.auto_created:\n return None\n db = kwargs.get(\"using\")\n\n if \"widget\" not in kwargs:\n autocomplete_fields = self.get_autocomplete_fields(request)\n if db_field.name in autocomplete_fields:\n kwargs[\"widget\"] = AutocompleteSelectMultiple(\n db_field,\n self.admin_site,\n using=db,\n )\n elif db_field.name in self.raw_id_fields:\n kwargs[\"widget\"] = widgets.ManyToManyRawIdWidget(\n db_field.remote_field,\n self.admin_site,\n using=db,\n )\n elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]:\n kwargs[\"widget\"] = widgets.FilteredSelectMultiple(\n 
db_field.verbose_name, db_field.name in self.filter_vertical\n )\n if \"queryset\" not in kwargs:\n queryset = self.get_field_queryset(db, db_field, request)\n if", "d_id": 50383, "documentation": { "docstring": "\n Get a form Field for a ManyToManyField.\n ", "n_words": 7, "vocab_size": 6, "n_whitespaces": 22, "language": "en" } }, { "id": 250109, "commit_id": "3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b", "repo": "synapse", "path": "tests/storage/test_cleanup_extrems.py", "file_name": "test_cleanup_extrems.py", "fun_name": "test_expiry_logic", "commit_message": "Require types in tests.storage. (#14646)\n\nAdds missing type hints to `tests.storage` package\r\nand does not allow untyped definitions.", "code": "def test_expiry_logic(self) -> None:\n \n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[\n \"1\"\n ] = 100000\n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[\n \"2\"\n ] = 200000\n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[\n \"3\"\n ] = 300000\n\n self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion()\n # All entries within time frame\n self.assertEqual(\n len(\n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion\n ),\n 3,\n )\n # Oldest room to expire\n self.pump(1.01)\n self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion()\n self.assertEqual(\n len(\n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion\n ),\n 2,\n )\n # All rooms to expire\n self.pump(2)\n self.assertEqual(\n len(\n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion\n ),\n 0,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 374, "n_words": 57, "vocab_size": 35, "complexity": 1, "nloc": 35, "token_counts": 114, "n_ast_nodes": 186, "n_identifiers": 8, "random_cut": "def test_expiry_logic(self) -> None:\n \n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[\n \"1\"\n ] = 100000\n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[\n \"2\"\n ] = 200000\n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[\n \"3\"\n ] = 300000\n\n self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion()\n # All entries within time frame\n self.assertEqual(\n len(\n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion\n ),\n 3,\n )\n # Oldest room to expire\n self.pump(1.01)\n self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion()\n self.assertEqual(\n len(\n self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion\n ),\n 2,\n )\n # All rooms to expire\n self.pump(2)\n self.assertEqual(\n ", "d_id": 73276, "documentation": { "docstring": "Simple test to ensure that _expire_rooms_to_exclude_from_dummy_event_insertion()\n expires old entries correctly.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 24, "language": "en" } }, { "id": 123916, "commit_id": "dc7ed086a5038775e378b32cb31fb4a79f418dd9", "repo": "ray", "path": "python/ray/util/ml_utils/checkpoint_manager.py", "file_name": "checkpoint_manager.py", "fun_name": "_tune_legacy_checkpoint_score_attr", "commit_message": "[AIR] More checkpoint configurability, `Result` extension (#25943)\n\nThis PR:\r\n* Allows the user to set `keep_checkpoints_num` and `checkpoint_score_attr` in `RunConfig` using the `CheckpointStrategy` dataclass\r\n* 
Adds two new fields to the `Result` object - `best_checkpoints` - a list of saved best checkpoints as determined by `CheckpointingConfig`.", "code": "def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]:\n \n if self.checkpoint_score_attribute is None:\n return self.checkpoint_score_attribute\n prefix = \"\"\n if self.checkpoint_score_order == MIN:\n prefix = \"min-\"\n return f\"{prefix}{self.checkpoint_score_attribute}\"\n\n\n# Alias for backwards compatibility\n\ndeprecation_message = (\n \"`CheckpointStrategy` is deprecated and will be removed in \"\n \"the future. Please use `ray.air.config.CheckpointStrategy` \"\n \"instead.\"\n)\n\n\n@Deprecated(message=deprecation_message)\n@dataclass", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@Deprecated(message=deprecation_message)\n@dataclass", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 110, "n_words": 49, "vocab_size": 41, "complexity": 3, "nloc": 11, "token_counts": 38, "n_ast_nodes": 111, "n_identifiers": 12, "random_cut": "def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]:\n \n if self.checkpoint_score_attribute is None:\n return self.checkpoint_score_attribute\n prefix = \"\"\n if self.checkpoint_score_order == MIN:\n prefix = \"min-\"\n return f\"{prefix}{self.checkpoint_score_attribute}\"\n\n\n# Alias for backwards compatibility\n\ndeprecation_message = (\n \"`CheckpointStrategy` is deprecated and will be removed in \"\n \"the future. Please use `ray.air.config.CheckpointStrategy` \"\n \"instead.\"\n)\n\n\n@Deprecated(message=deprecation_message)\n@da", "d_id": 27474, "documentation": { "docstring": "Same as ``checkpoint_score_attr`` in ``tune.run``.\n\n Only used for Legacy API compatibility.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 25, "language": "en" } }, { "id": 89979, "commit_id": "b83aa7328d49e5b45357417c78b7d1a63bfb056e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_details.py", "file_name": "test_project_details.py", "fun_name": "test_dynamic_sampling_bias_activation", "commit_message": "ref(sampling): Prettify audit logs - Part 1 (#42534)", "code": "def test_dynamic_sampling_bias_activation(self):\n \n\n project = self.project # force creation\n project.update_option(\n \"sentry:dynamic_sampling_biases\",\n [\n {\"id\": \"boostEnvironments\", \"active\": False},\n ],\n )\n self.login_as(self.user)\n\n token = ApiToken.objects.create(user=self.user, scope_list=[\"project:write\"])\n authorization = f\"Bearer {token.token}\"\n\n url = reverse(\n \"sentry-api-0-project-details\",\n kwargs={\n \"organization_slug\": self.project.organization.slug,\n \"project_slug\": self.project.slug,\n },\n )\n\n with Feature({self.new_ds_flag: True}):\n self.client.put(\n url,\n format=\"json\",\n HTTP_AUTHORIZATION=authorization,\n data={\n \"dynamicSamplingBiases\": [\n {\"id\": \"boostEnvironments\", \"active\": True},\n ]\n },\n )\n\n assert AuditLogEntry.objects.filter(\n organization=self.project.organization,\n event=audit_log.get_event_id(\"SAMPLING_BIAS_ENABLED\"),\n ).exists()\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 451, "n_words": 59, "vocab_size": 49, "complexity": 1, "nloc": 33, "token_counts": 170, "n_ast_nodes": 293, "n_identifiers": 30, "random_cut": "def test_dynamic_sampling_bias_activation(self):\n \n\n project = self.project # force creation\n project.update_option(\n \"sentry:dynamic_sampling_biases\",\n [\n 
{\"id\": \"boostEnvironments\", \"active\": False},\n ],\n )\n self.login_as(self.user)\n\n token = ApiToken.objects.create(user=self.user, scope_list=[\"project:write\"])\n authorization = f\"Bearer {token.token}\"\n\n url = reverse(\n \"sentry-api-0-project-details\",\n kwargs={\n \"organization_slug\": self.project.organization.slug,\n \"project_slug\": self.project.slug,", "d_id": 18600, "documentation": { "docstring": "\n Tests that when sending a request to enable a dynamic sampling bias,\n the bias will be successfully enabled and the audit log 'SAMPLING_BIAS_ENABLED' will be triggered\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 48, "language": "en" } }, { "id": 100351, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/model/layers.py", "file_name": "layers.py", "fun_name": "call", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def call(self, inputs, *args, **kwargs):\n \n input_shape = K.int_shape(inputs)\n if len(input_shape) != 4:\n raise ValueError('Inputs should have rank ' +\n str(4) +\n '; Received input shape:', str(input_shape))\n\n if self.data_format == 'channels_first':\n batch_size, channels, height, width = input_shape\n if batch_size is None:\n batch_size = -1\n r_height, r_width = self.size\n o_height, o_width = height * r_height, width * r_width\n o_channels = channels // (r_height * r_width)\n\n out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width))\n out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))\n out = K.reshape(out, (batch_size, o_channels, o_height, o_width))\n elif self.data_format == 'channels_last':\n batch_size, height, width, channels = input_shape\n if batch_size is None:\n batch_size = -1\n r_height, r_width = self.size\n o_height, o_width = height * r_height, width * r_width\n o_channels = channels // (r_height * r_width)\n\n out = K.reshape(inputs, (batch_size, height, width, r_height, r_width, o_channels))\n out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))\n out = K.reshape(out, (batch_size, o_height, o_width, o_channels))\n return out\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 467, "n_words": 152, "vocab_size": 71, "complexity": 6, "nloc": 27, "token_counts": 267, "n_ast_nodes": 406, "n_identifiers": 25, "random_cut": "def call(self, inputs, *args, **kwargs):\n \n input_shape = K.int_shape(inputs)\n if len(input_shape) != 4:\n raise ValueError('Inputs should have rank ' +\n str(4) +\n '; Received input shape:', str(input_shape))\n\n if self.data_format == 'channels_first':\n batch_size, 
channels, height, width = input_shape\n if batch_size is None:\n batch_size = -1\n r_height, r_width = self.size\n o_height, o_width = height * r_height, width * r_width\n o_channels = channels // (r_height * r_width)\n\n out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width))\n out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))\n out = K.reshape(out, (batch_size, o_channels, o_height, o_width))\n elif self.data_format == 'channels_last':\n batch_size, height, width, channels = input_shape\n if batch_size is None:\n batch_size = -1\n r_height, r_width = self.size\n o_height, o_width = height * r_height, width * r_width\n o_channels = channels // (r_height * r_width)\n\n out = K.reshape(inputs, (ba", "d_id": 19844, "documentation": { "docstring": "This is where the layer's logic lives.\n\n Parameters\n ----------\n inputs: tensor\n Input tensor, or list/tuple of input tensors\n args: tuple\n Additional standard keras Layer arguments\n kwargs: dict\n Additional standard keras Layer keyword arguments\n\n Returns\n -------\n tensor\n A tensor or list/tuple of tensors\n ", "n_words": 42, "vocab_size": 31, "n_whitespaces": 149, "language": "en" } }, { "id": 168239, "commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", "repo": "pandas", "path": "pandas/core/indexes/base.py", "file_name": "base.py", "fun_name": "to_native_types", "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", "code": "def to_native_types(self, slicer=None, **kwargs) -> np.ndarray:\n \n warnings.warn(\n \"The 'to_native_types' method is deprecated and will be removed in \"\n \"a future version. Use 'astype(str)' instead.\",\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n values = self\n if slicer is not None:\n values = values[slicer]\n return values._format_native_types(**kwargs)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 137, "n_words": 40, "vocab_size": 37, "complexity": 2, "nloc": 37, "token_counts": 61, "n_ast_nodes": 100, "n_identifiers": 15, "random_cut": "def to_native_types(self, slicer=None, **kwargs) -> np.ndarray:\n \n warnings.warn(\n \"The 'to_native_types' method is deprecated and will be removed in \"\n \"a future version. Use 'astype(str)' instead.\",\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n values = self\n ", "d_id": 40250, "documentation": { "docstring": "\n Format specified values of `self` and return them.\n\n .. 
deprecated:: 1.2.0\n\n Parameters\n ----------\n slicer : int, array-like\n An indexer into `self` that specifies which values\n are used in the formatting process.\n kwargs : dict\n Options for specifying how the values should be formatted.\n These options include the following:\n\n 1) na_rep : str\n The value that serves as a placeholder for NULL values\n 2) quoting : bool or None\n Whether or not there are quoted values in `self`\n 3) date_format : str\n The format used to represent date-like values.\n\n Returns\n -------\n numpy.ndarray\n Formatted values.\n ", "n_words": 93, "vocab_size": 72, "n_whitespaces": 297, "language": "en" } }, { "id": 177079, "commit_id": "28f78cfa9a386620ee1179582fda1db5ffc59f84", "repo": "networkx", "path": "networkx/algorithms/distance_measures.py", "file_name": "distance_measures.py", "fun_name": "periphery", "commit_message": "Add weight distance metrics (#5305)\n\nAdds the weight keyword argument to allow users to compute weighted distance metrics\r\ne.g. diameter, eccentricity, periphery, etc. The kwarg works in the same fashion as the\r\nweight param for shortest paths - i.e. if a string, look up with edge attr by key, if callable,\r\ncompute the weight via the function. Default is None, meaning return unweighted result\r\nwhich is the current behavior.\r\n\r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Ross Barnowski ", "code": "def periphery(G, e=None, usebounds=False, weight=None):\n \n if usebounds is True and e is None and not G.is_directed():\n return _extrema_bounding(G, compute=\"periphery\", weight=weight)\n if e is None:\n e = eccentricity(G, weight=weight)\n diameter = max(e.values())\n p = [v for v in e if e[v] == diameter]\n return p\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 44, "vocab_size": 31, "complexity": 7, "nloc": 8, "token_counts": 90, "n_ast_nodes": 140, "n_identifiers": 14, "random_cut": "def periphery(G, e=None, usebounds=False, weight=None):\n \n if usebounds is True and e is None and not G.is_directed():\n return _extrema_bounding(G, compute=\"periphery\", weight=weight)\n ", "d_id": 42266, "documentation": { "docstring": "Returns the periphery of the graph G.\n\n The periphery is the set of nodes with eccentricity equal to the diameter.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph\n\n e : eccentricity dictionary, optional\n A precomputed dictionary of eccentricities.\n\n weight : string, function, or None\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n If this is None, every edge has weight/distance/cost 1.\n\n Weights stored as floating point values can lead to small round-off\n errors in distances. 
Use integer weights to avoid this.\n\n Weights should be positive, since they are distances.\n\n Returns\n -------\n p : list\n List of nodes in periphery\n\n Examples\n --------\n >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])\n >>> nx.periphery(G)\n [2, 5]\n\n See Also\n --------\n barycenter\n center\n ", "n_words": 212, "vocab_size": 128, "n_whitespaces": 384, "language": "en" } }, { "id": 292193, "commit_id": "67e94f2b4ba614a37544f54ccb85984f0d600376", "repo": "core", "path": "homeassistant/components/zwave_js/climate.py", "file_name": "climate.py", "fun_name": "_current_mode_setpoint_enums", "commit_message": "Add type ignore error codes [N-Z] (#66779)", "code": "def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType | None]:\n \n if self._current_mode is None:\n # Thermostat(valve) with no support for setting a mode is considered heating-only\n return [ThermostatSetpointType.HEATING]\n return THERMOSTAT_MODE_SETPOINT_MAP.get(int(self._current_mode.value), []) # type: ignore[no-any-return]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 30, "vocab_size": 27, "complexity": 2, "nloc": 5, "token_counts": 43, "n_ast_nodes": 71, "n_identifiers": 10, "random_cut": "def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType | None]:\n \n if self._current_mode is None:\n # Thermostat(valve) with no support for setting a mode is considered heating-only\n return [ThermostatSetpointType.HEATING]\n return THERMOSTA", "d_id": 91294, "documentation": { "docstring": "Return the list of enums that are relevant to the current thermostat mode.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 63386, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "line", "commit_message": "upd; format", "code": "def line(loc, strg):\n \n lastCR = strg.rfind(\"\\n\", 0, loc)\n nextCR = strg.find(\"\\n\", loc)\n if nextCR >= 0:\n return strg[lastCR + 1:nextCR]\n else:\n return strg[lastCR + 1:]\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 25, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 54, "n_ast_nodes": 90, "n_identifiers": 7, "random_cut": "def line(loc, strg):\n \n lastCR = strg.rfind(\"\\n\", 0, loc)\n nextCR = strg.find(\"\\n\", loc)\n if nextCR >= 0:\n return strg[lastCR + 1:nextCR]\n else:\n return strg[lastCR + 1:]\n", "d_id": 13281, "documentation": { "docstring": "Returns the line of text containing loc within a string, counting newlines as line separators.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 21, "language": "en" } }, { "id": 102080, "commit_id": "48c886b3dce3d3117ad16edaf35c8abd28dc51f5", "repo": "faceswap", "path": "lib/sysinfo.py", "file_name": "sysinfo.py", "fun_name": "_parse_configs", "commit_message": "Allow decoding errors", "code": "def _parse_configs(self, config_files):\n \n formatted = \"\"\n for cfile in config_files:\n fname = os.path.basename(cfile)\n ext = os.path.splitext(cfile)[1]\n formatted += f\"\\n--------- {fname} ---------\\n\"\n if ext == \".ini\":\n formatted += self._parse_ini(cfile)\n elif fname == \".faceswap\":\n formatted += self._parse_json(cfile)\n return formatted\n", 
"url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 150, "n_words": 37, "vocab_size": 26, "complexity": 4, "nloc": 11, "token_counts": 71, "n_ast_nodes": 127, "n_identifiers": 13, "random_cut": "def _parse_configs(self, config_files):\n \n formatted = \"\"\n for cfile in config_files:\n fname = os.path.basename(cfile)\n ext = os.path.splitext(cfile)[1]\n formatted += f\"\\n--------- {fname} ---------\\n\"\n if ext == \".ini\":\n ", "d_id": 21445, "documentation": { "docstring": " Parse the given list of config files into a human readable format.\n\n Parameters\n ----------\n config_files: list\n A list of paths to the faceswap config files\n\n Returns\n -------\n str\n The current configuration in the config files formatted in a human readable format\n ", "n_words": 41, "vocab_size": 28, "n_whitespaces": 113, "language": "en" } }, { "id": 181946, "commit_id": "2635f58e7c3d10b161ee69a15ebfe6499ac26daa", "repo": "textual", "path": "src/textual/dom.py", "file_name": "dom.py", "fun_name": "parent", "commit_message": "docstrings and tidy", "code": "def parent(self) -> DOMNode:\n \n if self._parent is None:\n raise NoParent(f\"{self} has no parent\")\n assert isinstance(self._parent, DOMNode)\n return self._parent\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 57, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 13, "token_counts": 34, "n_ast_nodes": 60, "n_identifiers": 6, "random_cut": "def parent(self) -> DOMNode:\n \n if self._parent is None:\n raise NoParent(f\"{self} has no parent\")\n assert isinstance(self._parent, DOMNode)\n return self._parent\n", "d_id": 43685, "documentation": { "docstring": "Get the parent node.\n\n Raises:\n NoParent: If this is the root node.\n\n Returns:\n DOMNode: The node which is the direct parent of this node.\n ", "n_words": 24, "vocab_size": 17, "n_whitespaces": 67, "language": "en" } }, { "id": 20236, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/unix.py", "file_name": "unix.py", "fun_name": "user_documents_dir", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def user_documents_dir(self) -> str:\n \n documents_dir = _get_user_dirs_folder(\"XDG_DOCUMENTS_DIR\")\n if documents_dir is None:\n documents_dir = os.environ.get(\"XDG_DOCUMENTS_DIR\", \"\").strip()\n if not documents_dir:\n documents_dir = os.path.expanduser(\"~/Documents\")\n\n return documents_dir\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 88, "n_words": 23, "vocab_size": 16, "complexity": 3, "nloc": 10, "token_counts": 51, "n_ast_nodes": 93, "n_identifiers": 11, "random_cut": "def user_documents_dir(self) -> str:\n \n documents_dir = 
_get_user_dirs_folder(\"XDG_DOCUMENTS_DIR\")\n if documents_dir is None:\n documents_dir = os.environ.get(\"XDG_DOCUMENTS_DIR\", \"\").strip()\n if not documents_dir:\n documents_dir = os.path.ex", "d_id": 3287, "documentation": { "docstring": "\n :return: documents directory tied to the user, e.g. ``~/Documents``\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 259992, "commit_id": "6ca1f5e4d0d16bc9a7f28582079a15e14f012719", "repo": "scikit-learn", "path": "sklearn/ensemble/tests/test_iforest.py", "file_name": "test_iforest.py", "fun_name": "test_iforest_sparse", "commit_message": "TST use global_random_seed in sklearn/ensemble/tests/test_iforest.py (#22901)\n\n\r\n\r\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Olivier Grisel ", "code": "def test_iforest_sparse(global_random_seed):\n \n rng = check_random_state(global_random_seed)\n X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng)\n grid = ParameterGrid({\"max_samples\": [0.5, 1.0], \"bootstrap\": [True, False]})\n\n for sparse_format in [csc_matrix, csr_matrix]:\n X_train_sparse = sparse_format(X_train)\n X_test_sparse = sparse_format(X_test)\n\n for params in grid:\n # Trained on sparse format\n sparse_classifier = IsolationForest(\n n_estimators=10, random_state=global_random_seed, **params\n ).fit(X_train_sparse)\n sparse_results = sparse_classifier.predict(X_test_sparse)\n\n # Trained on dense format\n dense_classifier = IsolationForest(\n n_estimators=10, random_state=global_random_seed, **params\n ).fit(X_train)\n dense_results = dense_classifier.predict(X_test)\n\n assert_array_equal(sparse_results, dense_results)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 230, "n_words": 65, "vocab_size": 47, "complexity": 3, "nloc": 17, "token_counts": 144, "n_ast_nodes": 221, "n_identifiers": 27, "random_cut": "def test_iforest_sparse(global_random_seed):\n \n rng = check_random_state(global_random_seed)\n X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng)\n grid = ParameterGrid({\"m", "d_id": 76025, "documentation": { "docstring": "Check IForest for various parameter settings on sparse input.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 259304, "commit_id": "7dc97a378ecbfa056dd9cfa9d1ef4c07d2d0cc1f", "repo": "scikit-learn", "path": "sklearn/metrics/_scorer.py", "file_name": "_scorer.py", "fun_name": "get_scorer_names", "commit_message": "API get_scorer returns a copy and introduce get_scorer_names (#22866)", "code": "def get_scorer_names():\n \n return sorted(_SCORERS.keys())\n\n\nfor name, metric in [\n (\"precision\", precision_score),\n (\"recall\", recall_score),\n (\"f1\", f1_score),\n (\"jaccard\", jaccard_score),\n]:\n _SCORERS[name] = make_scorer(metric, average=\"binary\")\n for average in [\"macro\", \"micro\", \"samples\", \"weighted\"]:\n qualified_name = \"{0}_{1}\".format(name, average)\n _SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average)\n\nSCORERS = _DeprecatedScorers(_SCORERS)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 41, "vocab_size": 35, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 171, "n_identifiers": 17, "random_cut": "def get_scorer_names():\n \n ", "d_id": 
75703, "documentation": { "docstring": "Get the names of all available scorers.\n\n These names can be passed to :func:`~sklearn.metrics.get_scorer` to\n retrieve the scorer object.\n\n Returns\n -------\n list of str\n Names of all available scorers.\n ", "n_words": 29, "vocab_size": 21, "n_whitespaces": 54, "language": "en" } }, { "id": 105541, "commit_id": "f10d38b8b60b09a633823a2fb2529c83933b9c80", "repo": "datasets", "path": "datasets/swda/swda.py", "file_name": "swda.py", "fun_name": "_split_generators", "commit_message": "Support streaming swda dataset (#4914)\n\n* Support streaming swda dataset\r\n\r\n* Remove unused import", "code": "def _split_generators(self, dl_manager):\n \n\n # Download extract and return path of data file.\n dl_dir = dl_manager.download_and_extract(_URL)\n # Use swda/ folder.\n data_dir = os.path.join(dl_dir, \"swda\")\n # Handle partitions files: download extract and return paths of split files.\n downloaded_files = dl_manager.download(self._URLS)\n\n return [\n # Return whole data path and train splits file downloaded path.\n datasets.SplitGenerator(\n name=datasets.Split.TRAIN, gen_kwargs={\"data_dir\": data_dir, \"split_file\": downloaded_files[\"train\"]}\n ),\n # Return whole data path and dev splits file downloaded path.\n datasets.SplitGenerator(\n name=datasets.Split.VALIDATION,\n gen_kwargs={\"data_dir\": data_dir, \"split_file\": downloaded_files[\"dev\"]},\n ),\n # Return whole data path and train splits file downloaded path.\n datasets.SplitGenerator(\n name=datasets.Split.TEST, gen_kwargs={\"data_dir\": data_dir, \"split_file\": downloaded_files[\"test\"]}\n ),\n ]\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 317, "n_words": 95, "vocab_size": 51, "complexity": 1, "nloc": 16, "token_counts": 126, "n_ast_nodes": 211, "n_identifiers": 21, "random_cut": "def _split_generators(self, dl_manager):\n \n\n # Download extract and return path of data file.\n dl_dir = dl_manager.download_and_extract(_URL)\n # Use swda/ folder.\n data_dir = os.path.join(dl_dir, \"swda\")\n # Handle partitions files: download extract and return paths of split files.\n downloaded_files = dl_manager.download(self._URLS)\n\n return [\n # Return whole data path and train splits file downloaded path.\n datasets.SplitGenerator(\n name=datasets.Split.TRAIN, gen_kwargs={\"data_dir\": data_dir, \"split_file\": downloaded_files[\"train\"]}\n ),\n # Return whole data path and dev splits file downloaded path.\n datasets.SplitGenerator(\n name=datasets.Split.VALIDATION,\n gen_kwargs={\"data_dir\": data_dir, \"split_file\": downloaded_files[\"dev\"]},\n ),\n # Return whole data path and train splits file downloaded path.\n datasets.SplitGenerator(\n ", "d_id": 22155, "documentation": { "docstring": "\n Returns SplitGenerators.\n This method is tasked with downloading/extracting the data and defining the splits.\n\n Args:\n dl_manager (:obj:`datasets.utils.download_manager.DownloadManager`):\n Download manager to download and extract data files from urls.\n\n Returns:\n :obj:`list[str]`:\n List of paths to data.\n ", "n_words": 34, "vocab_size": 30, "n_whitespaces": 123, "language": "en" } }, { "id": 281540, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/discovery/disc_controller.py", "file_name": "disc_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we 
handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n help_text = \n console.print(text=help_text, menu=\"Stocks - Discovery\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 31, "token_counts": 21, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def print_help(self):\n \n help", "d_id": 83838, "documentation": { "docstring": "Print help[cmds]\n[src][Geek of Wall St][/src]\n rtearn realtime earnings from and expected moves\n[src][Finnhub][/src]\n pipo past IPOs dates\n fipo future IPOs dates\n[src][Yahoo Finance][/src]\n gainers show latest top gainers\n losers show latest top losers\n ugs undervalued stocks with revenue and earnings growth in excess of 25%\n gtech tech stocks with revenue and earnings growth more than 25%\n active most active stocks by intraday trade volume\n ulc potentially undervalued large cap stocks\n asc small cap stocks with earnings growth rates better than 25%\n[src][Fidelity][/src]\n ford orders by Fidelity Customers\n[src][Cathiesark.com][/src]\n arkord 
orders by ARK Investment Management LLC\n[src][Seeking Alpha][/src]\n upcoming upcoming earnings release dates\n trending trending news\n cnews customized news (buybacks, ipos, spacs, healthcare, politics)\n[src][Shortinterest.com][/src]\n lowfloat low float stocks under 10M shares float\n[src][Pennystockflow.com][/src]\n hotpenny today's hot penny stocks\n[src][NASDAQ Data Link (Formerly Quandl)][/src]\n rtat top 10 retail traded stocks per day[/cmds]\n", "n_words": 142, "vocab_size": 101, "n_whitespaces": 340, "language": "en" } }, { "id": 204760, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/serializers/xml_serializer.py", "file_name": "xml_serializer.py", "fun_name": "handle_fk_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def handle_fk_field(self, obj, field):\n \n self._start_relational_field(field)\n related_att = getattr(obj, field.get_attname())\n if related_att is not None:\n if self.use_natural_foreign_keys and hasattr(\n field.remote_field.model, \"natural_key\"\n ):\n related = getattr(obj, field.name)\n # If related object has a natural key, use it\n related = related.natural_key()\n # Iterable natural keys are rolled out as subelements\n for key_value in related:\n self.xml.startElement(\"natural\", {})\n self.xml.characters(str(key_value))\n self.xml.endElement(\"natural\")\n else:\n self.xml.characters(str(related_att))\n else:\n self.xml.addQuickElement(\"None\")\n self.xml.endElement(\"field\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 308, "n_words": 60, "vocab_size": 50, "complexity": 5, "nloc": 18, "token_counts": 133, "n_ast_nodes": 225, "n_identifiers": 22, "random_cut": "def handle_fk_field(self, obj, field):\n \n self._start_relational_field(field)\n related_att = getattr(obj, field.get_attname())\n if related_att is not None:\n if self.use_natural_foreign_keys and hasattr(\n field.remote_field.model, \"natural_key\"\n ):\n related = getattr(obj, field.name)\n # If related object has a natural key, use it\n related = related.natural_key()\n # Iterable natural keys are rolled out as subelements\n for key_value in related:\n ", "d_id": 50874, "documentation": { "docstring": "\n Handle a ForeignKey (they need to be treated slightly\n differently from regular fields).\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 127793, "commit_id": "42da4445e7a3cb358a1a02ae433a004e9fa836b5", "repo": "ray", "path": "python/ray/tests/test_metrics_head.py", "file_name": "test_metrics_head.py", "fun_name": "test_metrics_folder", "commit_message": "Export default configurations for grafana and prometheus (#28286)", "code": "def test_metrics_folder():\n \n with _ray_start(include_dashboard=True) as context:\n session_dir = context[\"session_dir\"]\n assert os.path.exists(\n f\"{session_dir}/metrics/grafana/provisioning/dashboards/default.yml\"\n )\n assert os.path.exists(\n f\"{session_dir}/metrics/grafana/provisioning/dashboards\"\n \"/default_grafana_dashboard.json\"\n )\n assert os.path.exists(\n f\"{session_dir}/metrics/grafana/provisioning/datasources/default.yml\"\n )\n assert os.path.exists(f\"{session_dir}/metrics/prometheus/prometheus.yml\")\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 130, "n_words": 24, "vocab_size": 17, "complexity": 1, "nloc": 
14, "token_counts": 62, "n_ast_nodes": 126, "n_identifiers": 8, "random_cut": "def test_metrics_folder():\n \n with _ray_start(include_dashboard=True) as context:\n session_dir = context[\"session_dir\"]\n assert os.path.exists(\n f\"{session_dir}/metrics/grafana/provisioning/dashboards/default.yml\"\n )\n assert os.path.exists(\n f\"{session_dir}/metrics/grafana/provisioning/dashboards\"\n \"/default_grafana_dashboard.json\"\n )\n assert os.path.exists(\n f\"{session_dir}/metrics/grafana/provisioning/", "d_id": 28528, "documentation": { "docstring": "\n Tests that the default dashboard files get created.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 38931, "commit_id": "3293cf72a0abd5cf77a831996bd054bc908476a6", "repo": "DeepSpeed", "path": "deepspeed/runtime/fp16/fused_optimizer.py", "file_name": "fused_optimizer.py", "fun_name": "state_dict", "commit_message": "[ZeRO] Default disable elastic ckpt in stage 1+2 and reduce CPU memory overhead during ckpt load (#1525)\n\nCo-authored-by: Olatunji Ruwase ", "code": "def state_dict(self):\n \n state_dict = {}\n state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale\n state_dict['cur_scale'] = self.cur_scale\n state_dict['cur_iter'] = self.cur_iter\n if state_dict['dynamic_loss_scale']:\n state_dict['last_overflow_iter'] = self.last_overflow_iter\n state_dict['scale_factor'] = self.scale_factor\n state_dict['scale_window'] = self.scale_window\n state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()\n state_dict['fp32_groups_flat'] = self.fp32_groups_flat\n state_dict['clip_grad'] = self.clip_grad\n return state_dict\n\n # Refresh fp32 master params from fp16 copies", "url": "https://github.com/microsoft/DeepSpeed.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 150, "n_words": 44, "vocab_size": 34, "complexity": 2, "nloc": 13, "token_counts": 94, "n_ast_nodes": 166, "n_identifiers": 12, "random_cut": "def state_dict(self):\n \n state_dict = {}\n state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale\n state_dict['cur_scale'] = self.cur_scale\n state_dict['cur_iter'] = self.cur_iter\n if state_dict['dynamic_loss_scale']:\n state_dict['last_overflow_iter'] = self.last_overflow_iter\n state_dict['scale_factor'] = self.scale_factor\n state_dict['scale_window'] = self.scale_window\n state_dict[OPTIMIZER_STATE_DICT] = self.optimize", "d_id": 7049, "documentation": { "docstring": "\n Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.\n This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict\n of the contained Pytorch optimizer.\n Example::\n checkpoint = {}\n checkpoint['model'] = model.state_dict()\n checkpoint['optimizer'] = optimizer.state_dict()\n torch.save(checkpoint, \"saved.pth\")\n ", "n_words": 39, "vocab_size": 31, "n_whitespaces": 119, "language": "en" } }, { "id": 261829, "commit_id": "2cce02414d4a7161f0d105450c196d94b1182220", "repo": "scikit-learn", "path": "sklearn/naive_bayes.py", "file_name": "naive_bayes.py", "fun_name": "_update_mean_variance", "commit_message": "TST Add common tests for single class fitting induced by sample weights (#24140)\n\nCo-authored-by: johayon \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def _update_mean_variance(n_past, mu, var, X, sample_weight=None):\n \n if X.shape[0] == 0:\n return mu, var\n\n # Compute (potentially weighted) mean and variance of new datapoints\n if sample_weight is not None:\n n_new = 
float(sample_weight.sum())\n if np.isclose(n_new, 0.0):\n return mu, var\n new_mu = np.average(X, axis=0, weights=sample_weight)\n new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight)\n else:\n n_new = X.shape[0]\n new_var = np.var(X, axis=0)\n new_mu = np.mean(X, axis=0)\n\n if n_past == 0:\n return new_mu, new_var\n\n n_total = float(n_past + n_new)\n\n # Combine mean of old and new data, taking into consideration\n # (weighted) number of observations\n total_mu = (n_new * new_mu + n_past * mu) / n_total\n\n # Combine variance of old and new data, taking into consideration\n # (weighted) number of observations. This is achieved by combining\n # the sum-of-squared-differences (ssd)\n old_ssd = n_past * var\n new_ssd = n_new * new_var\n total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2\n total_var = total_ssd / n_total\n\n return total_mu, total_var\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 402, "n_words": 162, "vocab_size": 81, "complexity": 5, "nloc": 22, "token_counts": 204, "n_ast_nodes": 314, "n_identifiers": 24, "random_cut": "def _update_mean_variance(n_past, mu, var, X, sample_weight=None):\n \n if X.shape[0] == 0:\n return mu, var\n\n # Compute (potentially weighted) mean and variance of new datapoints\n if sample_weight is not None:\n n_new = float(sample_weight.sum())\n if np.isclose(n_new, 0.0):\n return mu, var\n new_mu = np.average(X, axis=0, weights=sample_weight)\n new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weig", "d_id": 77021, "documentation": { "docstring": "Compute online update of Gaussian mean and variance.\n\n Given starting sample count, mean, and variance, a new set of\n points X, and optionally sample weights, return the updated mean and\n variance. (NB - each dimension (column) in X is treated as independent\n -- you get variance, not covariance).\n\n Can take scalar mean and variance, or vector mean and variance to\n simultaneously update a number of independent Gaussians.\n\n See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:\n\n http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf\n\n Parameters\n ----------\n n_past : int\n Number of samples represented in old mean and variance. If sample\n weights were given, this should contain the sum of sample\n weights represented in old mean and variance.\n\n mu : array-like of shape (number of Gaussians,)\n Means for Gaussians in original set.\n\n var : array-like of shape (number of Gaussians,)\n Variances for Gaussians in original set.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Weights applied to individual samples (1. 
for unweighted).\n\n Returns\n -------\n total_mu : array-like of shape (number of Gaussians,)\n Updated mean for each Gaussian over the combined set.\n\n total_var : array-like of shape (number of Gaussians,)\n Updated variance for each Gaussian over the combined set.\n ", "n_words": 191, "vocab_size": 105, "n_whitespaces": 412, "language": "en" } }, { "id": 197220, "commit_id": "b27e2b44626d138bd6ea235fbf114644baa5b144", "repo": "sympy", "path": "sympy/functions/combinatorial/numbers.py", "file_name": "numbers.py", "fun_name": "is_prime", "commit_message": "Deprecate redundant static methods", "code": "def is_prime(n):\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.11\",\n active_deprecations_target='deprecated-carmichael-static-methods',\n )\n return isprime(n)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 50, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 10, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 6, "random_cut": "def is_prime(n):\n sympy_deprecation_warning(\n ,\n ", "d_id": 48393, "documentation": { "docstring": "\nis_prime is just a wrapper around sympy.ntheory.primetest.isprime so use that\ndirectly instead.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 18, "language": "en" } }, { "id": 221074, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/base64.py", "file_name": "base64.py", "fun_name": "standard_b64decode", "commit_message": "add python 3.10.4 for windows", "code": "def standard_b64decode(s):\n \n return b64decode(s)\n\n\n_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')\n_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 16, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "def standard_b64decode(s):\n \n return b64decode(s)\n\n\n_urlsafe_encode_tr", "d_id": 56186, "documentation": { "docstring": "Decode bytes encoded with the standard Base64 alphabet.\n\n Argument s is a bytes-like object or ASCII string to decode. The result\n is returned as a bytes object. A binascii.Error is raised if the input\n is incorrectly padded. 
Characters that are not in the standard alphabet\n are discarded prior to the padding check.\n ", "n_words": 52, "vocab_size": 41, "n_whitespaces": 70, "language": "en" } }, { "id": 133298, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/torch/examples/dcgan.py", "file_name": "dcgan.py", "fun_name": "inception_score", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def inception_score(self, imgs, batch_size=32, splits=1):\n \n N = len(imgs)\n dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)\n up = nn.Upsample(\n size=(28, 28),\n mode=\"bilinear\",\n align_corners=False, # This is to reduce user warnings from torch.\n ).type(torch.FloatTensor)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 98, "n_words": 29, "vocab_size": 27, "complexity": 4, "nloc": 25, "token_counts": 236, "n_ast_nodes": 105, "n_identifiers": 20, "random_cut": "def inception_score(self, imgs, batch_size=32, splits=1):\n \n N = len(imgs)\n dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)\n up = nn.Upsample(\n size=(28, 28),\n mo", "d_id": 29975, "documentation": { "docstring": "Calculate the inception score of the generated images.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 66833, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/update_shipment_status.py", "file_name": "update_shipment_status.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"shipment\")\n\n\t# update submitted status\n\tfrappe.db.sql(\n\t\t\n\t)\n\n\t# update cancelled status\n\tfrappe.db.sql(\n\t\t\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 9, "n_words": 17, "vocab_size": 12, "complexity": 1, "nloc": 12, "token_counts": 30, "n_ast_nodes": 60, "n_identifiers": 5, "random_cut": "def execute():\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"shipment\")\n\n\t# update submitted status\n\tfrappe.db.sql(\n\t\t\n\t)\n\n\t", "d_id": 14354, "documentation": { "docstring": "UPDATE `tabShipment`\n\t\t\t\t\tSET status = \"Submitted\"\n\t\t\t\t\tWHERE status = \"Draft\" AND docstatus = 1UPDATE `tabShipment`\n\t\t\t\t\tSET status = \"Cancelled\"\n\t\t\t\t\tWHERE status = \"Draft\" AND docstatus = 2", "n_words": 27, "vocab_size": 13, "n_whitespaces": 22, "language": "en" } }, { "id": 204496, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/files/storage.py", "file_name": "storage.py", "fun_name": "url", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def url(self, name):\n \n raise NotImplementedError(\"subclasses of Storage must provide a url() method\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 4, "random_cut": "def url(self, name):\n \n raise NotImplementedError(\"subclasses of Storage must provide a url() method", "d_id": 50753, "documentation": { "docstring": "\n Return an absolute URL where the 
file's contents can be accessed\n directly by a web browser.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 275866, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/hdf5_format.py", "file_name": "hdf5_format.py", "fun_name": "load_attributes_from_hdf5_group", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def load_attributes_from_hdf5_group(group, name):\n \n if name in group.attrs:\n data = [\n n.decode(\"utf8\") if hasattr(n, \"decode\") else n\n for n in group.attrs[name]\n ]\n else:\n data = []\n chunk_id = 0\n while \"%s%d\" % (name, chunk_id) in group.attrs:\n data.extend(\n [\n n.decode(\"utf8\") if hasattr(n, \"decode\") else n\n for n in group.attrs[\"%s%d\" % (name, chunk_id)]\n ]\n )\n chunk_id += 1\n return data\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 227, "n_words": 57, "vocab_size": 34, "complexity": 7, "nloc": 18, "token_counts": 107, "n_ast_nodes": 174, "n_identifiers": 10, "random_cut": "def load_attributes_from_hdf5_group(group, name):\n \n if name in group.", "d_id": 81491, "documentation": { "docstring": "Loads attributes of the specified name from the HDF5 group.\n\n This method deals with an inherent problem\n of HDF5 file which is not able to store\n data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\n Args:\n group: A pointer to a HDF5 group.\n name: A name of the attributes to load.\n\n Returns:\n data: Attributes data.\n ", "n_words": 51, "vocab_size": 39, "n_whitespaces": 90, "language": "en" } }, { "id": 55970, "commit_id": "a05e44c89acf0b6073ac876479be24a5e51d7754", "repo": "prefect", "path": "src/prefect/orion/models/block_schemas.py", "file_name": "block_schemas.py", "fun_name": "_find_root_block_schema", "commit_message": "Nested Block Schemas (PrefectHQ/orion#1846)\n\n* Adds models and migration for block schema and block document references\r\n\r\n* Adds customization to the generation of a block schema's fields\r\n\r\n* Adds ability to reconstruct block schema fields on read\r\n\r\n* Adds ability to reconstruct block schema when read by checksum\r\n\r\n* Adds schema reconstruction when reading multiple block schemas\r\n\r\n* Adds ordering to query of recursive CTE\r\n\r\n* Refactors to make code path and purpose easier to follow", "code": "def _find_root_block_schema(block_schemas_with_references):\n \n return next(\n (\n block_schema\n for (\n block_schema,\n _,\n parent_block_schema_id,\n ) in block_schemas_with_references\n if parent_block_schema_id is None\n ),\n None,\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 140, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 13, "token_counts": 31, "n_ast_nodes": 46, "n_identifiers": 6, "random_cut": "def _find_root_block_schema(block_schemas_with_references):\n \n return next(\n (\n block_schema\n for (\n block_schema,\n _,\n parent_block_schema_id,\n ) in block_schemas_with_references\n if parent_block_schema_", "d_id": 11429, "documentation": { "docstring": "\n Attempts to find the root block schema from a list of block schemas\n with references. 
Returns None if a root block schema is not found.\n Returns only the first potential root block schema if multiple are found.\n ", "n_words": 37, "vocab_size": 25, "n_whitespaces": 50, "language": "en" } }, { "id": 25203, "commit_id": "1f9400dd7374ce9cc47981372e324ff412e53ba3", "repo": "PaddleOCR", "path": "ppocr/modeling/heads/local_graph.py", "file_name": "local_graph.py", "fun_name": "feature_embedding", "commit_message": "add drrg", "code": "def feature_embedding(input_feats, out_feat_len):\n \n assert input_feats.ndim == 2\n assert isinstance(out_feat_len, int)\n assert out_feat_len >= input_feats.shape[1]\n\n num_nodes = input_feats.shape[0]\n feat_dim = input_feats.shape[1]\n feat_repeat_times = out_feat_len // feat_dim\n residue_dim = out_feat_len % feat_dim\n\n if residue_dim > 0:\n embed_wave = np.array([\n np.power(1000, 2.0 * (j // 2) / feat_repeat_times + 1)\n for j in range(feat_repeat_times + 1)\n ]).reshape((feat_repeat_times + 1, 1, 1))\n repeat_feats = np.repeat(\n np.expand_dims(\n input_feats, axis=0), feat_repeat_times, axis=0)\n residue_feats = np.hstack([\n input_feats[:, 0:residue_dim], np.zeros(\n (num_nodes, feat_dim - residue_dim))\n ])\n residue_feats = np.expand_dims(residue_feats, axis=0)\n repeat_feats = np.concatenate([repeat_feats, residue_feats], axis=0)\n embedded_feats = repeat_feats / embed_wave\n embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2])\n embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2])\n embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape(\n (num_nodes, -1))[:, 0:out_feat_len]\n else:\n embed_wave = np.array([\n np.power(1000, 2.0 * (j // 2) / feat_repeat_times)\n for j in range(feat_repeat_times)\n ]).reshape((feat_repeat_times, 1, 1))\n repeat_feats = np.repeat(\n np.expand_dims(\n input_feats, axis=0), feat_repeat_times, axis=0)\n embedded_feats = repeat_feats / embed_wave\n embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2])\n embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2])\n embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape(\n (num_nodes, -1)).astype(np.float32)\n\n return embedded_feats\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 465, "n_words": 162, "vocab_size": 79, "complexity": 4, "nloc": 41, "token_counts": 416, "n_ast_nodes": 639, "n_identifiers": 32, "random_cut": "def feature_embedding(input_feats, out_feat_len):\n \n assert input_feats.ndim == 2\n assert isinstance(out_feat_len, int)\n assert out_feat_len >= input_feats.shape[1]\n\n num_nodes = input_feats.shape[0]\n feat_dim = input_feats.shape[1]\n feat_repeat_times = out_feat_len // feat_dim\n residue_dim = out_feat_len % feat_dim\n\n if residue_dim > 0:\n embed_wave = np.array([\n np.power(1000, 2.0 * (j // 2) / feat_repeat_times + 1)\n for j in range(feat_repeat_times + 1)\n ]).reshape((feat_repeat_times + 1, 1, 1))\n repeat_feats = np.repeat(\n np.expand_dims(\n input_feats, axis=0), feat_repeat_times, axis=0)\n residue_feats = np.hstack([\n input_feats[:, 0:residue_dim], np.zeros(\n (num_nodes, feat_dim - residue_dim))\n ])\n residue_feats = np.expand_dims(residue_feats, axis=0)\n repeat_feats = np.concatenate([repeat_feats, residue_feats], axis=0)\n embedded_feats = repeat_feats / embed_wave\n embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2])\n embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2])\n embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape(\n (num_nodes, -1))[:, 
0:out_feat_len]\n else:\n embed_wave = np.array([\n np.power(1000, 2.0 * (j // 2) / feat_repeat_times)\n for j in range(feat_repeat_times)\n ]).reshape((feat_repeat_times, 1, 1))\n repeat_feats = np.repeat(\n np.expand_dims(\n input_feats, axis=0), feat_repeat_times, axis=0)\n embedded_feats = repeat_feats / embed_wave", "d_id": 4866, "documentation": { "docstring": "Embed features. This code was partially adapted from\n https://github.com/GXYM/DRRG licensed under the MIT license.\n\n Args:\n input_feats (ndarray): The input features of shape (N, d), where N is\n the number of nodes in graph, d is the input feature vector length.\n out_feat_len (int): The length of output feature vector.\n\n Returns:\n embedded_feats (ndarray): The embedded features.\n ", "n_words": 54, "vocab_size": 43, "n_whitespaces": 98, "language": "en" } }, { "id": 261007, "commit_id": "2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b", "repo": "scikit-learn", "path": "sklearn/linear_model/_base.py", "file_name": "_base.py", "fun_name": "decision_function", "commit_message": "ENH Adds Array API support to LinearDiscriminantAnalysis (#22554)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Julien Jerphanion ", "code": "def decision_function(self, X):\n \n check_is_fitted(self)\n xp, _ = get_namespace(X)\n\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False)\n scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_\n return xp.reshape(scores, -1) if scores.shape[1] == 1 else scores\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 71, "n_words": 29, "vocab_size": 26, "complexity": 2, "nloc": 6, "token_counts": 77, "n_ast_nodes": 119, "n_identifiers": 18, "random_cut": "def decision_function(self, X):\n \n check_is_fitted(self)\n xp, _ = get_namespace(X)\n\n X = s", "d_id": 76622, "documentation": { "docstring": "\n Predict confidence scores for samples.\n\n The confidence score for a sample is proportional to the signed\n distance of that sample to the hyperplane.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data matrix for which we want to get the confidence scores.\n\n Returns\n -------\n scores : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Confidence scores per `(n_samples, n_classes)` combination. 
In the\n binary case, confidence score for `self.classes_[1]` where >0 means\n this class would be predicted.\n ", "n_words": 79, "vocab_size": 58, "n_whitespaces": 194, "language": "en" } }, { "id": 216630, "commit_id": "38ca08446d560797522b7828720032799584d32a", "repo": "Open-Assistant", "path": "backend/postprocessing/rankings.py", "file_name": "rankings.py", "fun_name": "get_ranking", "commit_message": "ran pre-commit hook", "code": "def get_ranking(pairs):\n \n if len(pairs) == 1:\n return list(pairs[0])\n w = get_winner(pairs)\n # now remove the winner from the list of pairs\n p_new = np.array([(a, b) for a, b in pairs if a != w])\n return [w] + get_ranking(p_new)\n\n", "url": "https://github.com/LAION-AI/Open-Assistant.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 38, "vocab_size": 33, "complexity": 4, "nloc": 6, "token_counts": 61, "n_ast_nodes": 98, "n_identifiers": 11, "random_cut": "def get_ranking(pairs):\n \n if len(pairs) == 1:\n return list(", "d_id": 54671, "documentation": { "docstring": "\n Abuses concordance property to get a (not necessarily unqiue) ranking.\n The lack of uniqueness is due to the potential existance of multiple\n equally ranked winners. We have to pick one, which is where\n the non-uniqueness comes from\n ", "n_words": 37, "vocab_size": 32, "n_whitespaces": 53, "language": "en" } }, { "id": 61263, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "backup_dir", "commit_message": "upd; format", "code": "def backup_dir(dir, ext=\".bak\"):\n # type: (str, str) -> str\n \n n = 1\n extension = ext\n while os.path.exists(dir + extension):\n n += 1\n extension = ext + str(n)\n return dir + extension\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 31, "vocab_size": 22, "complexity": 2, "nloc": 7, "token_counts": 43, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def backup_dir(dir, ext=\".bak\"):\n # type: (str, str) -> str\n \n n = 1\n extension = ext\n while os.path.exists(dir + extension):\n n += 1\n extension = ext + str(n)\n return dir + extens", "d_id": 12476, "documentation": { "docstring": "Figure out the name of a directory to back up the given dir to\n (adding .bak, .bak2, etc)", "n_words": 18, "vocab_size": 16, "n_whitespaces": 20, "language": "en" } }, { "id": 140580, "commit_id": "905258dbc19753c81039f993477e7ab027960729", "repo": "ray", "path": "rllib/utils/filter_manager.py", "file_name": "filter_manager.py", "fun_name": "synchronize", "commit_message": "Clean up docstyle in python modules and add LINT rule (#25272)", "code": "def synchronize(local_filters, remotes, update_remote=True):\n \n remote_filters = ray.get(\n [r.get_filters.remote(flush_after=True) for r in remotes]\n )\n for rf in remote_filters:\n for k in local_filters:\n local_filters[k].apply_changes(rf[k], with_buffer=False)\n if update_remote:\n copies = {k: v.as_serializable() for k, v in local_filters.items()}\n remote_copy = ray.put(copies)\n [r.sync_filters.remote(remote_copy) for r in remotes]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 147, "n_words": 42, "vocab_size": 30, "complexity": 7, "nloc": 11, "token_counts": 107, 
"n_ast_nodes": 164, "n_identifiers": 22, "random_cut": "def synchronize(local_filters, remotes, update_remote=True):\n \n remote_filters = ray.get(\n [r.get_filters.remote(flush_", "d_id": 32051, "documentation": { "docstring": "Aggregates all filters from remote evaluators.\n\n Local copy is updated and then broadcasted to all remote evaluators.\n\n Args:\n local_filters: Filters to be synchronized.\n remotes: Remote evaluators with filters.\n update_remote: Whether to push updates to remote filters.\n ", "n_words": 36, "vocab_size": 28, "n_whitespaces": 90, "language": "en" } }, { "id": 261775, "commit_id": "9017c701833114a75903f580dd0772e1d8d7d125", "repo": "scikit-learn", "path": "sklearn/tests/test_base.py", "file_name": "test_base.py", "fun_name": "test_estimator_empty_instance_dict", "commit_message": "FIX fix pickling for empty object with Python 3.11+ (#25188)\n\nCo-authored-by: Adrin Jalali \r\nCo-authored-by: Guillaume Lemaitre \r\n\r\nPython 3.11 introduces `__getstate__` on the `object` level, which breaks our existing `__getstate__` code for objects w/o any attributes. This fixes the issue.", "code": "def test_estimator_empty_instance_dict(estimator):\n \n state = estimator.__getstate__()\n expected = {\"_sklearn_version\": sklearn.__version__}\n assert state == expected\n\n # this should not raise\n pickle.loads(pickle.dumps(BaseEstimator()))\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 19, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 39, "n_ast_nodes": 70, "n_identifiers": 11, "random_cut": "def test_estimator_empty_instance_dict(estimator):\n \n state = estimator.__getstate__()\n expected = {\"_sklearn_version\": sklearn.__version__}\n assert state == expected\n\n # this should not raise\n pickle.loads(pickle.dumps(BaseEstimator()))\n\n", "d_id": 76986, "documentation": { "docstring": "Check that ``__getstate__`` returns an empty ``dict`` with an empty\n instance.\n\n Python 3.11+ changed behaviour by returning ``None`` instead of raising an\n ``AttributeError``. Non-regression test for gh-25188.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 39, "language": "en" } }, { "id": 78234, "commit_id": "524cab82e33b43463b746c3df1a80657b3ae874a", "repo": "wagtail", "path": "wagtail/admin/tests/test_templatetags.py", "file_name": "test_templatetags.py", "fun_name": "test_with_variables", "commit_message": "Introduce new template fragment composition tags", "code": "def test_with_variables(self):\n context = Context({\"name\": \"jonathan wells\"})\n\n template = \n\n expected = \n\n self.assertHTMLEqual(expected, Template(template).render(context))\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 42, "n_words": 13, "vocab_size": 11, "complexity": 1, "nloc": 15, "token_counts": 37, "n_ast_nodes": 68, "n_identifiers": 9, "random_cut": "def test_with_variables(self):\n context = Context({\"name\": \"j", "d_id": 16740, "documentation": { "docstring": "\n {% load wagtailadmin_tags %}\n {% fragment as my_fragment %}\n
<p>Hello, {{ name|title }}</p>\n        {% endfragment %}\n        Text coming after:\n        {{ my_fragment }}\n        \n        Text coming after:\n        <p>Hello, Jonathan Wells</p>
\n ", "n_words": 28, "vocab_size": 18, "n_whitespaces": 136, "language": "en" } }, { "id": 266422, "commit_id": "9142be2f6cabbe6597c9254c5bb9186d17036d55", "repo": "ansible", "path": "lib/ansible/executor/module_common.py", "file_name": "module_common.py", "fun_name": "_extract_interpreter", "commit_message": "Allow specifying specific python via shebang (#76677)\n\n\r\n modules with python were always normalized to /usr/bin/python,\r\n while other interpreters could have specific versions.\r\n\r\n* now shebang is always constructed by get_shebang and args are preserved\r\n* only update shebang if interpreter changed\r\n* updated test expectation\r\n* added python shebang test", "code": "def _extract_interpreter(b_module_data):\n \n\n interpreter = None\n args = []\n b_lines = b_module_data.split(b\"\\n\", 1)\n if b_lines[0].startswith(b\"#!\"):\n b_shebang = b_lines[0].strip()\n\n # shlex.split on python-2.6 needs bytes. On python-3.x it needs text\n cli_split = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))\n\n # convert args to text\n cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split]\n interpreter = cli_split[0]\n args = cli_split[1:]\n\n return interpreter, args\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 122, "n_words": 54, "vocab_size": 39, "complexity": 3, "nloc": 11, "token_counts": 98, "n_ast_nodes": 162, "n_identifiers": 15, "random_cut": "def _extract_interpreter(b_module_data):\n \n\n interpreter = None\n args = []\n b_lines = b_module_data.split(b\"\\n\", 1)\n if b_lines[0].startswith(b\"#!\"):\n b_shebang = b_lines[0].strip()\n\n # shlex.split on python-2.6 needs bytes. On python-3.x it needs text\n cli_split = ", "d_id": 78390, "documentation": { "docstring": "\n Used to extract shebang expression from binary module data and return a text\n string with the shebang, or None if no shebang is detected.\n ", "n_words": 24, "vocab_size": 23, "n_whitespaces": 34, "language": "en" } }, { "id": 126557, "commit_id": "46ed3557ba6b4f4f72c15ef960aba5270ada2a9c", "repo": "ray", "path": "python/ray/tune/tests/test_tune_restore.py", "file_name": "test_tune_restore.py", "fun_name": "test_resource_exhausted_info", "commit_message": "[tune] Fix test_resource_exhausted_info test (#27426)\n\n#27213 broke this test\r\n\r\nSigned-off-by: Kai Fricke ", "code": "def test_resource_exhausted_info(self):\n \n\n # generate some random data to be captured implicitly in training func.\n from sklearn.datasets import fetch_olivetti_faces\n\n a_large_array = []\n for i in range(50):\n a_large_array.append(fetch_olivetti_faces())\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 72, "n_words": 26, "vocab_size": 25, "complexity": 2, "nloc": 11, "token_counts": 51, "n_ast_nodes": 56, "n_identifiers": 9, "random_cut": "def test_resource_exhausted_info(self):", "d_id": 28198, "documentation": { "docstring": "This is to test if helpful information is displayed when\n the objects captured in trainable/training function are too\n large and RESOURCES_EXHAUSTED error of gRPC is triggered.", "n_words": 26, "vocab_size": 24, "n_whitespaces": 39, "language": "en" } }, { "id": 55193, "commit_id": "4adc737611ffa284d9952779ba2f68174a7e73cc", "repo": "prefect", "path": "tests/conftest.py", "file_name": "conftest.py", "fun_name": "testing_session_settings", "commit_message": "Squash 
issues with tests", "code": "def testing_session_settings():\n \n with tempfile.TemporaryDirectory() as tmpdir:\n profile = prefect.settings.Profile(\n name=\"test-session\",\n settings={\n # Set PREFECT_HOME to a temporary directory to avoid clobbering\n # environments and settings\n PREFECT_HOME: tmpdir,\n PREFECT_PROFILES_PATH: \"$PREFECT_HOME/profiles.toml\",\n # Enable debug logging\n PREFECT_LOGGING_LEVEL: \"DEBUG\",\n # Disable shipping logs to the API;\n # can be enabled by the `enable_orion_handler` mark\n PREFECT_LOGGING_ORION_ENABLED: False,\n # Disable services for test runs\n PREFECT_ORION_ANALYTICS_ENABLED: False,\n PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED: False,\n PREFECT_ORION_SERVICES_SCHEDULER_ENABLED: False,\n },\n source=__file__,\n )\n\n with prefect.settings.use_profile(\n profile,\n override_environment_variables=True,\n include_current_context=False,\n ) as ctx:\n yield ctx\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 394, "n_words": 77, "vocab_size": 62, "complexity": 1, "nloc": 21, "token_counts": 87, "n_ast_nodes": 146, "n_identifiers": 22, "random_cut": "def testing_session_settings():\n \n with tempfile.TemporaryDirectory() as tmpdir:\n profile = prefect.settings.Profile(\n name=\"test-session\",\n settings={\n # Set PREFECT_HOME to a temporary directory to avoid clobbering\n # environments and settings\n PREFECT_HOME: tmpdir,\n PREFECT_PROFILES_PATH: \"$PREFECT_HOME/profiles.toml\",\n # Enable debug logging\n PREFECT_LOGGING_LEVEL: \"DEBUG\",\n # Disable shipping logs to the API;\n # can be enabled by the `enable_orion_handler` mark\n PREFECT_LOGGING_ORION_ENABLED: False,\n # Disable services for test runs\n PREFECT_ORION_ANALYTICS_ENABLED: False,\n PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED: False,\n PREFECT_ORION_SERVICES_SCHEDULER_ENABLED: False,\n },\n source=__file__,\n )\n\n", "d_id": 11268, "documentation": { "docstring": "\n Creates a fixture for the scope of the test session that modifies setting defaults.\n\n This ensures that tests are isolated from existing settings, databases, etc.\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 35, "language": "en" } }, { "id": 149925, "commit_id": "5bf021be2e8f1479753e66573575fa7cde00a2b6", "repo": "freqtrade", "path": "tests/strategy/strats/hyperoptable_strategy.py", "file_name": "hyperoptable_strategy.py", "fun_name": "bot_start", "commit_message": "Enhance hyperoptable strategy to test instance parameters", "code": "def bot_start(self, **kwargs) -> None:\n \n self.buy_rsi = IntParameter([0, 50], default=30, space='buy')\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 31, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "def bot_start(self, **kwargs) -> None:\n \n self.buy_rsi =", "d_id": 34592, "documentation": { "docstring": "\n Parameters can also be defined here ...\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 204736, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/serializers/base.py", "file_name": "base.py", "fun_name": "getvalue", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def getvalue(self):\n \n if callable(getattr(self.stream, \"getvalue\", None)):\n return self.stream.getvalue()\n\n", 
"url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 8, "vocab_size": 8, "complexity": 2, "nloc": 3, "token_counts": 29, "n_ast_nodes": 50, "n_identifiers": 5, "random_cut": "def getvalue(self):\n \n if callable(getattr(self.stream, \"getvalue\", None)):\n return self.stream.getvalue()\n\n", "d_id": 50864, "documentation": { "docstring": "\n Return the fully serialized queryset (or None if the output stream is\n not seekable).\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 106845, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/__init__.py", "file_name": "__init__.py", "fun_name": "boxplot", "commit_message": "apply black py to all python files", "code": "def boxplot(self, X, win=None, env=None, opts=None):\n \n\n X = np.squeeze(X)\n assert X.ndim == 1 or X.ndim == 2, \"X should be one or two-dimensional\"\n if X.ndim == 1:\n X = X[:, None]\n\n opts = {} if opts is None else opts\n _title2str(opts)\n _assert_opts(opts)\n\n if opts.get(\"legend\") is not None:\n assert (\n len(opts[\"legend\"]) == X.shape[1]\n ), \"number of legened labels must match number of columns\"\n\n data = []\n for k in range(X.shape[1]):\n _data = {\n \"y\": X.take(k, 1).tolist(),\n \"type\": \"box\",\n }\n if opts.get(\"legend\"):\n _data[\"name\"] = opts[\"legend\"][k]\n else:\n _data[\"name\"] = \"column \" + str(k)\n\n data.append(_data)\n\n return self._send(\n {\n \"data\": data,\n \"win\": win,\n \"eid\": env,\n \"layout\": _opts2layout(opts),\n \"opts\": opts,\n }\n )\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 450, "n_words": 106, "vocab_size": 82, "complexity": 7, "nloc": 32, "token_counts": 215, "n_ast_nodes": 357, "n_identifiers": 24, "random_cut": "def boxplot(self, X, win=None, env=None, opts=None):\n \n\n X = np.squeeze(X)\n assert X.ndim == 1 or X.ndim == 2, \"X should be one or two-dimensional\"\n if X.ndim == 1:\n X = X[:, None]\n\n opts = {} if opts is None else opts\n _title2str(opts)\n _assert_opts(opts)\n\n if opts.get(\"legend\") is not None:\n assert (\n len(opts[\"legend\"]) == X.shape[1]\n ), \"number of legened labels must match number of columns\"\n\n data = []\n for k in range(X.shape[1]):\n ", "d_id": 22470, "documentation": { "docstring": "\n This function draws boxplots of the specified data. 
It takes as input\n an `N` or an `NxM` tensor `X` that specifies the `N` data values of\n which to construct the `M` boxplots.\n\n The following plot-specific `opts` are currently supported:\n - `opts.legend`: labels for each of the columns in `X`\n ", "n_words": 49, "vocab_size": 41, "n_whitespaces": 92, "language": "en" } }, { "id": 203275, "commit_id": "c5cd8783825b5f6384417dac5f3889b4210b7d08", "repo": "django", "path": "tests/requests/tests.py", "file_name": "tests.py", "fun_name": "test_body_after_POST_multipart_related", "commit_message": "Refs #33476 -- Refactored problematic code before reformatting by Black.\n\nIn these cases Black produces unexpected results, e.g.\r\n\r\ndef make_random_password(\r\n self,\r\n length=10,\r\n allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',\r\n):\r\n\r\nor\r\n\r\ncursor.execute(\"\"\"\r\nSELECT ...\r\n\"\"\",\r\n [table name],\r\n)", "code": "def test_body_after_POST_multipart_related(self):\n \n # Ticket #9054\n # There are cases in which the multipart data is related instead of\n # being a binary upload, in which case it should still be accessible\n # via body.\n payload_data = b\"\\r\\n\".join([\n b'--boundary',\n b'Content-ID: id; name=\"name\"',\n b'',\n b'value',\n b'--boundary--'\n ])\n payload = FakePayload(payload_data)\n request = WSGIRequest({\n 'REQUEST_METHOD': 'POST',\n 'CONTENT_TYPE': 'multipart/related; boundary=boundary',\n 'CONTENT_LENGTH': len(payload),\n 'wsgi.input': payload,\n })\n self.assertEqual(request.POST, {})\n self.assertEqual(request.body, payload_data)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 248, "n_words": 65, "vocab_size": 58, "complexity": 1, "nloc": 17, "token_counts": 83, "n_ast_nodes": 146, "n_identifiers": 12, "random_cut": "def test_body_after_POST_multipart_related(self):\n \n # Ticket #9054\n # There are cases in which the multipart data is related instead of\n # being a binary upload, in which case it should still be accessible\n # via body.\n payload_data = b\"\\r\\n\".join([\n b'--boundary',\n b'Content-ID: id; name=\"name\"',\n b'',\n b'value',\n b'--boundary--'\n ])\n payload = FakePayload(payload_data)\n request = WSGIRequest({\n 'REQUEST_METHOD': 'POST',\n", "d_id": 50282, "documentation": { "docstring": "\n Reading body after parsing multipart that isn't form-data is allowed\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 46739, "commit_id": "c758c76ac336c054fd17d4b878378aa893b7a979", "repo": "airflow", "path": "airflow/providers/arangodb/hooks/arangodb.py", "file_name": "arangodb.py", "fun_name": "query", "commit_message": "Adding ArangoDB Provider (#22548)\n\n* Adding ArangoDB Provider", "code": "def query(self, query, **kwargs) -> Result:\n \n try:\n if self.db_conn:\n result = self.db_conn.aql.execute(query, **kwargs)\n return result\n else:\n raise AirflowException(\n f\"Failed to execute AQLQuery, error connecting to database: {self.database}\"\n )\n except AQLQueryExecuteError as error:\n raise AirflowException(f\"Failed to execute AQLQuery, error: {str(error)}\")\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 172, "n_words": 39, "vocab_size": 31, "complexity": 3, "nloc": 18, "token_counts": 56, "n_ast_nodes": 109, "n_identifiers": 13, "random_cut": "def query(self, query, **kwargs) -> Result:\n \n try:\n if 
self.db_conn:\n result = self.db_conn.aql.execute(query, **kwargs)\n return result\n else:\n raise AirflowException(\n f\"Failed to execute AQLQuery, error connecting to database: {self.database}\"\n )\n except AQLQueryExecuteError as", "d_id": 8978, "documentation": { "docstring": "\n Function to create a arangodb session\n and execute the AQL query in the session.\n\n :param query: AQL query\n :return: Result\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 56, "language": "en" } }, { "id": 259883, "commit_id": "a47d569e670fd4102af37c3165c9b1ddf6fd3005", "repo": "scikit-learn", "path": "sklearn/datasets/tests/test_arff_parser.py", "file_name": "test_arff_parser.py", "fun_name": "test_post_process_frame", "commit_message": "ENH improve ARFF parser using pandas (#21938)\n\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Adrin Jalali ", "code": "def test_post_process_frame(feature_names, target_names):\n \n pd = pytest.importorskip(\"pandas\")\n\n X_original = pd.DataFrame(\n {\n \"col_int_as_integer\": [1, 2, 3],\n \"col_int_as_numeric\": [1, 2, 3],\n \"col_float_as_real\": [1.0, 2.0, 3.0],\n \"col_float_as_numeric\": [1.0, 2.0, 3.0],\n \"col_categorical\": [\"a\", \"b\", \"c\"],\n \"col_string\": [\"a\", \"b\", \"c\"],\n }\n )\n\n X, y = _post_process_frame(X_original, feature_names, target_names)\n assert isinstance(X, pd.DataFrame)\n if len(target_names) >= 2:\n assert isinstance(y, pd.DataFrame)\n elif len(target_names) == 1:\n assert isinstance(y, pd.Series)\n else:\n assert y is None\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 192, "n_words": 64, "vocab_size": 46, "complexity": 3, "nloc": 20, "token_counts": 158, "n_ast_nodes": 233, "n_identifiers": 14, "random_cut": "def test_post_process_frame(feature_names, target_names):\n \n pd = pytest.importorskip(\"pandas\")\n\n X_original = pd.DataFrame(\n {\n \"col_int_as_integer\": [1, 2, 3],\n \"col_int_as_numeric\": [1, 2, 3],\n \"col_float_as_real\": [1.0, 2.0, 3.0],\n \"col_float_as_numeric\": [1.0, 2.0, 3.0],\n \"col_categorical\": [\"a\", \"b\", \"c\"],\n \"col_string\": [\"a\", \"b\", \"c\"],\n }\n )\n\n X, y = _post_process_frame(X_original, feature_names, target_names)\n assert isinstance(X, pd.DataFrame)\n if len(target_names) >= 2:\n assert isinstance(y, pd.DataFrame)\n elif len(target_names) == 1:\n assert isinstance(y, pd.Series)\n else:\n ", "d_id": 75968, "documentation": { "docstring": "Check the behaviour of the post-processing function for splitting a dataframe.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 206806, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/views/debug.py", "file_name": "debug.py", "fun_name": "cleanse_setting", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def cleanse_setting(self, key, value):\n \n try:\n is_sensitive = self.hidden_settings.search(key)\n except TypeError:\n is_sensitive = False\n\n if is_sensitive:\n cleansed = self.cleansed_substitute\n elif isinstance(value, dict):\n cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}\n elif isinstance(value, list):\n cleansed = [self.cleanse_setting(\"\", v) for v in value]\n elif isinstance(value, tuple):\n cleansed = tuple([self.cleanse_setting(\"\", v) for v in value])\n else:\n cleansed = value\n\n if callable(cleansed):\n cleansed = 
CallableSettingWrapper(cleansed)\n\n return cleansed\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 222, "n_words": 64, "vocab_size": 37, "complexity": 10, "nloc": 18, "token_counts": 138, "n_ast_nodes": 219, "n_identifiers": 19, "random_cut": "def cleanse_setting(self, key, value):\n \n try:\n is_sensitive = self.hidden_settings.search(key)\n except TypeError:\n is_sensitive = False\n\n if is_sensitive:\n cleansed = self.cleansed_substitute\n elif isinstance(value, dict):\n cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}\n elif isinstance(value, list):\n cleansed = [s", "d_id": 51718, "documentation": { "docstring": "\n Cleanse an individual setting key/value of sensitive content. If the\n value is a dictionary, recursively cleanse the keys in that dictionary.\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 43, "language": "en" } }, { "id": 276840, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/generic_utils.py", "file_name": "generic_utils.py", "fun_name": "func_dump", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def func_dump(func):\n \n if os.name == \"nt\":\n raw_code = marshal.dumps(func.__code__).replace(b\"\\\\\", b\"/\")\n code = codecs.encode(raw_code, \"base64\").decode(\"ascii\")\n else:\n raw_code = marshal.dumps(func.__code__)\n code = codecs.encode(raw_code, \"base64\").decode(\"ascii\")\n defaults = func.__defaults__\n if func.__closure__:\n closure = tuple(c.cell_contents for c in func.__closure__)\n else:\n closure = None\n return code, defaults, closure\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 105, "n_words": 42, "vocab_size": 28, "complexity": 4, "nloc": 13, "token_counts": 109, "n_ast_nodes": 185, "n_identifiers": 20, "random_cut": "def func_dump(func):\n \n if os.name == \"nt\":\n raw_code = marshal.dumps(func.__code__).replace(b\"\\\\\", b\"/\")\n code = codecs.encode(raw_code, \"base64\").decode(\"ascii\")\n else:\n raw_code = marshal.dumps(func.__code__)\n code = codecs.encode(raw_", "d_id": 81751, "documentation": { "docstring": "Serializes a user defined function.\n\n Args:\n func: the function to serialize.\n\n Returns:\n A tuple `(code, defaults, closure)`.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 195948, "commit_id": "d032a7a870672667f778be8bf02a3eba4ae89381", "repo": "sympy", "path": "sympy/polys/polyclasses.py", "file_name": "polyclasses.py", "fun_name": "cauchy_upper_bound", "commit_message": "Add new methods to `DMP` class, corresp. to new funcs.", "code": "def cauchy_upper_bound(f):\n \n if not f.lev:\n return dup_cauchy_upper_bound(f.rep, f.dom)\n else:\n raise ValueError('univariate polynomial expected')\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 56, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 53, "n_identifiers": 7, "random_cut": "def cauchy_upper_bound(f):\n \n if not f.lev:\n ", "d_id": 47486, "documentation": { "docstring": "Computes the Cauchy upper bound on the roots of ``f``. 
", "n_words": 10, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 197316, "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", "repo": "sympy", "path": "sympy/core/sympify.py", "file_name": "sympify.py", "fun_name": "kernS", "commit_message": "Remove abbreviations in documentation", "code": "def kernS(s):\n \n hit = False\n quoted = '\"' in s or \"'\" in s\n if '(' in s and not quoted:\n if s.count('(') != s.count(\")\"):\n raise SympifyError('unmatched left parenthesis')\n\n # strip all space from s\n s = ''.join(s.split())\n olds = s\n # now use space to represent a symbol that\n # will\n # step 1. turn potential 2-arg Muls into 3-arg versions\n # 1a. *( -> * *(\n s = s.replace('*(', '* *(')\n # 1b. close up exponentials\n s = s.replace('** *', '**')\n # 2. handle the implied multiplication of a negated\n # parenthesized expression in two steps\n # 2a: -(...) --> -( *(...)\n target = '-( *('\n s = s.replace('-(', target)\n # 2b: double the matching closing parenthesis\n # -( *(...) --> -( *(...))\n i = nest = 0\n assert target.endswith('(') # assumption below\n while True:\n j = s.find(target, i)\n if j == -1:\n break\n j += len(target) - 1\n for j in range(j, len(s)):\n if s[j] == \"(\":\n nest += 1\n elif s[j] == \")\":\n nest -= 1\n if nest == 0:\n break\n s = s[:j] + \")\" + s[j:]\n i = j + 2 # the first char after 2nd )\n if ' ' in s:\n # get a unique kern\n kern = '_'\n while kern in s:\n kern += choice(string.ascii_letters + string.digits)\n s = s.replace(' ', kern)\n hit = kern in s\n else:\n hit = False\n\n for i in range(2):\n try:\n expr = sympify(s)\n break\n except TypeError: # the kern might cause unknown errors...\n if hit:\n s = olds # maybe it didn't like the kern; use un-kerned s\n hit = False\n continue\n expr = sympify(s) # let original error raise\n\n if not hit:\n return expr\n\n from .symbol import Symbol\n rep = {Symbol(kern): 1}", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 868, "n_words": 288, "vocab_size": 166, "complexity": 17, "nloc": 53, "token_counts": 307, "n_ast_nodes": 535, "n_identifiers": 29, "random_cut": "def kernS(s):\n \n hit = False\n quoted = '\"' in s or \"'\" in s\n if '(' in s and not quoted:\n if s.count('(') != s.count(\")\"):\n raise SympifyError('unmatched left parenthesis')\n\n # strip all space from s\n s = ''.join(s.split())\n olds = s\n # now use space to represent a symbol that\n # will\n # step 1. turn potential 2-arg Muls into 3-arg versions\n # 1a. *( -> * *(\n s = s.replace('*(', '* *(')\n # 1b. close up exponentials\n s = s.replace('** *', '**')\n # 2. handle the implied multiplication of a negated\n # parenthesized expression in two steps\n # 2a: -(...) --> -( *(...)\n target = '-( *('\n s = s.replace('-(', target)\n # 2b: double the matching closing parenthesis\n # -( *(...) 
--> -( *(...))\n i = nest = 0\n assert target.endswith('(') # assumption below\n while True:\n j = s.find(target, i)\n if j == -1:\n break\n j += len(target) - 1\n for j in range(j, len(s)):\n if s[j] == \"(\":\n nest += 1\n elif s[j] == \")\":\n nest -= 1\n if nest == 0:\n break\n s = s[:j] + \")\" + s[j:]\n i = j + 2 # the first char after 2nd )\n if ' ' in s:\n # get a unique kern\n kern = '_'\n while kern in s:\n kern += choice(string.ascii_letters + string.digits)\n s = s.replace(' ', kern)\n hit = kern in s\n else:\n hit = False\n\n for i in range(2):\n try:\n expr = sympify(s)\n break\n except TypeError: # the kern might cause unknown errors...\n if hit:\n s = olds # maybe it didn't like the kern; use un-kerned s\n hit = False\n continue\n expr = sympify(s) # let original error raise\n\n if n", "d_id": 48459, "documentation": { "docstring": "Use a hack to try keep autosimplification from distributing a\n a number into an Add; this modification does not\n prevent the 2-arg Mul from becoming an Add, however.\n\n Examples\n ========\n\n >>> from sympy.core.sympify import kernS\n >>> from sympy.abc import x, y\n\n The 2-arg Mul distributes a number (or minus sign) across the terms\n of an expression, but kernS will prevent that:\n\n >>> 2*(x + y), -(x + 1)\n (2*x + 2*y, -x - 1)\n >>> kernS('2*(x + y)')\n 2*(x + y)\n >>> kernS('-(x + 1)')\n -(x + 1)\n\n If use of the hack fails, the un-hacked string will be passed to sympify...\n and you get what you get.\n\n XXX This hack should not be necessary once issue 4596 has been resolved.\n ", "n_words": 121, "vocab_size": 82, "n_whitespaces": 175, "language": "en" } }, { "id": 319856, "commit_id": "77fbbe95ffb965525136982846f50e3ad8244de9", "repo": "paperless-ngx", "path": "src/documents/tests/test_classifier.py", "file_name": "test_classifier.py", "fun_name": "test_load_corrupt_file", "commit_message": "Updates the classifier to catch warnings from scikit-learn and rebuild the model file when this happens", "code": "def test_load_corrupt_file(self, patched_pickle_load):\n \n # First load is the schema version\n patched_pickle_load.side_effect = [DocumentClassifier.FORMAT_VERSION, OSError()]\n\n with self.assertRaises(ClassifierModelCorruptError):\n self.classifier.load()\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 36, "n_ast_nodes": 63, "n_identifiers": 11, "random_cut": "def test_load_corrupt_file(self, patched_pickle_load):\n \n # First load is the schema version\n p", "d_id": 117007, "documentation": { "docstring": "\n GIVEN:\n - Corrupted classifier pickle file\n WHEN:\n - An attempt is made to load the classifier\n THEN:\n - The ClassifierModelCorruptError is raised\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 84, "language": "en" } }, { "id": 287694, "commit_id": "5c7d40cccf473c3549900949fe410dbe9d2e1a19", "repo": "core", "path": "homeassistant/components/plugwise/select.py", "file_name": "select.py", "fun_name": "current_option", "commit_message": "Rename property in Plugwise EntityDescription (#78935)", "code": "def current_option(self) -> str:\n \n return self.device[self.entity_description.current_option_key]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, 
"token_counts": 19, "n_ast_nodes": 32, "n_identifiers": 6, "random_cut": "def current_option(self) -> str:\n \n return self.device[self.entity_description.current_option_key]\n", "d_id": 86883, "documentation": { "docstring": "Return the selected entity option to represent the entity state.", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 24604, "commit_id": "97f7f748085fbe516952d36808735902d305da40", "repo": "PaddleOCR", "path": "ppstructure/table/convert_label2html.py", "file_name": "convert_label2html.py", "fun_name": "gen_html", "commit_message": "add copyright", "code": "def gen_html(img):\n \n html_code = img['html']['structure']['tokens'].copy()\n to_insert = [i for i, tag in enumerate(html_code) if tag in ('', '>')]\n for i, cell in zip(to_insert[::-1], img['html']['cells'][::-1]):\n if cell['tokens']:\n text = ''.join(cell['tokens'])\n # skip empty text\n sp_char_list = ['', '', '\\u2028', ' ', '', '']\n text_remove_style = skip_char(text, sp_char_list)\n if len(text_remove_style) == 0:\n continue\n html_code.insert(i + 1, text)\n html_code = ''.join(html_code)\n html_code = '{}
'.format(html_code)\n return html_code\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 172, "n_words": 63, "vocab_size": 46, "complexity": 6, "nloc": 14, "token_counts": 149, "n_ast_nodes": 265, "n_identifiers": 18, "random_cut": "def gen_html(img):\n \n html_code = img['html']['structure']['tokens'].copy()\n to_insert = [i for i, tag in enumerate(html_code) if tag in ('', '>')]\n for i, cell in zip(to_insert[::-1], img['html']['cells'][::-1]):\n if cell['tokens']:\n text = ''.join(cell['tokens'])\n # skip empty text\n sp_char_list = ['', '', '\\u2028', ' ', '', '']\n text_remove_style = skip_char(text, sp_char_list)\n if len(text_remove_style) == 0:\n continue\n html_code.insert(i + 1, text)\n html_code = ''.join(html_code)\n html_code = '{} Iterable[str]:\n \n for alias in _get_key_aliases(self.key):\n yield _normalize_key(alias)\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 35, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 4, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 8, "random_cut": "def key_aliases(self) -> Iterable[str]:\n \n for alias in _get_key_aliases(self.key):\n yi", "d_id": 44978, "documentation": { "docstring": "Get the aliases for the key, including the key itself", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 265561, "commit_id": "bfbf97aec9119539f7f42cf16f52d0ca8203ba60", "repo": "netbox", "path": "netbox/ipam/tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_create_single_available_ip", "commit_message": "Closes #10031: Enforce 'application/json' content type for REST API requests", "code": "def test_create_single_available_ip(self):\n \n vrf = VRF.objects.create(name='VRF 1')\n prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/30'), vrf=vrf, is_pool=True)\n url = reverse('ipam-api:prefix-available-ips', kwargs={'pk': prefix.pk})\n self.add_permissions('ipam.view_prefix', 'ipam.add_ipaddress')\n\n # Create all four available IPs with individual requests\n for i in range(1, 5):\n data = {\n 'description': 'Test IP {}'.format(i)\n }\n response = self.client.post(url, data, format='json', **self.header)\n self.assertHttpStatus(response, status.HTTP_201_CREATED)\n self.assertEqual(response.data['vrf']['id'], vrf.pk)\n self.assertEqual(response.data['description'], data['description'])\n\n # Try to create one more IP\n response = self.client.post(url, {}, format='json', **self.header)\n self.assertHttpStatus(response, status.HTTP_409_CONFLICT)\n self.assertIn('detail', response.data)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 227, "n_words": 69, "vocab_size": 57, "complexity": 2, "nloc": 16, "token_counts": 194, "n_ast_nodes": 323, "n_identifiers": 30, "random_cut": "def test_create_single_available_ip(self):\n \n vrf = VRF.objects.create(name='VRF 1')\n prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/30'), vrf=vrf, is_pool=True)\n u", "d_id": 78141, "documentation": { "docstring": "\n Test retrieval of the first available IP address within a parent prefix.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 292702, "commit_id": "87593fa3ec4edd1fb467ed0709ef57c3c41e0fc4", "repo": "core", "path": 
"tests/components/zwave_js/conftest.py", "file_name": "conftest.py", "fun_name": "climate_adc_t3000_missing_setpoint_fixture", "commit_message": "Add Humidifier support to zwave_js (#65847)", "code": "def climate_adc_t3000_missing_setpoint_fixture(client, climate_adc_t3000_state):\n \n data = copy.deepcopy(climate_adc_t3000_state)\n data[\"name\"] = f\"{data['name']} missing setpoint\"\n for value in data[\"values\"][:]:\n if (\n value[\"commandClassName\"] == \"Humidity Control Setpoint\"\n and value[\"propertyKeyName\"] == \"De-humidifier\"\n ):\n data[\"values\"].remove(value)\n node = Node(client, data)\n client.driver.controller.nodes[node.node_id] = node\n return node\n\n\n@pytest.fixture(name=\"climate_adc_t3000_missing_mode\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"climate_adc_t3000_missing_mode\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 105, "n_words": 38, "vocab_size": 32, "complexity": 4, "nloc": 12, "token_counts": 84, "n_ast_nodes": 171, "n_identifiers": 17, "random_cut": "def climate_adc_t3000_missing_setpoint_fixture(client, climate_adc_t3000_state):\n \n data = copy.deepcopy(climate_adc_t3000_state)\n data[\"name\"] = f\"{data['name']} missing setpoint\"\n for value in data[\"values\"][:]:\n if (\n value[\"commandClassName\"] == \"Humidity Control Setpoint\"\n and value[\"propertyKeyName\"] == \"De-humidifier\"\n ):\n data[\"values\"].remove(value)\n node = Node(client, data)\n client.driver.controller.nodes[node.node_id] = node\n return node\n\n\n@pytest.fixture(name=\"climate_adc_t3000_missing_mode\")", "d_id": 91775, "documentation": { "docstring": "Mock a climate ADC-T3000 node with missing de-humidify setpoint.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 198577, "commit_id": "32589850ff6a970bee8af3034980e37932db2eb9", "repo": "sympy", "path": "sympy/solvers/ode/ode.py", "file_name": "ode.py", "fun_name": "classify_ode", "commit_message": "Allow initial conditions of the form f(0): f(0) in dsolve\n\nThere was a check that the replacement value does not contain f, but this\nmakes perfect sense. The check was changed to checking that the value doesn't\ncontain x.\n\nFixes #23702", "code": "def classify_ode(eq, func=None, dict=False, ics=None, *, prep=True, xi=None, eta=None, n=None, **kwargs):\n r", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "\n r\"\"\"\n Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`\n classifications for an ODE.\n\n The tuple is ordered so that first item is the classification that\n :py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In\n general, classifications at the near the beginning of the list will\n produce better solutions faster than those near the end, thought there are\n always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a\n different classification, use ``dsolve(ODE, func,\n hint=)``. See also the\n :py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints\n you can use.\n\n If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will\n return a dictionary of ``hint:match`` expression terms. This is intended\n for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. 
Note that\n because dictionaries are ordered arbitrarily, this will most likely not be\n in the same order as the tuple.\n\n You can get help on different hints by executing\n ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint\n without ``_Integral``.\n\n See :py:data:`~sympy.solvers.ode.allhints` or the\n :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints\n that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`.\n\n Notes\n =====\n\n These are remarks on hint names.\n\n ``_Integral``r\"\"\"\n Returns a tuple ofpy:meth:classificationsan ODE.\n\n The tuple is ordered so that first item is the classification that\n :py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In\n general, classifications at the near the beginning of the list will\n produce better solutions faster than those near the end, thought there are\n always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a\n differentclassifications at the near the beginning of the list will\n produce better solutions faster than those near thethought there are\n always exceptions. To make :use ``hint=)``. See also the\n :py:meth:`~sympy.solvers.ode.dsolve` docstring for different=)``. See alsohints\n you can use.\n\n Ifyou can usedict:py:meth:`~sympy.solvers.ode.classify_ode` will\n return a dictionary of ``hint:match`` expression terms. This is intended\n for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that\n because dictionaries are orderedfor internal use by :py:meththis will most likely not be\n in the same order as the tuple.\n\n You can get help on different hints by executing\n ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint\n without ``_Integral``.\n\n See :py:data:`~sympy.solvers.ode.allhints` or the\n :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints\n that can be returned from :py:meth:the samethe tuple.\n\n You can get help on different hints by executing\n ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint\n without`_Integral===\n\n These are remarks on hint names.\n\n ``If a classification has ``_Integral`` at the end, it will_Integralexpressionanclass:", "n_ast_errors": 28, "ast_levels": 10, "n_whitespaces": 14, "n_words": 12, "vocab_size": 12, "complexity": 66, "nloc": 270, "token_counts": 1582, "n_ast_nodes": 439, "n_identifiers": 106, "random_cut": "def classify_ode(eq, func=None, dict=False, ics=None, *, prep=True, xi=None, eta=None, n=None, **kwargs):\n r", "d_id": 49006, "documentation": { "docstring": "\n Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`\n classifications for an ODE.\n\n The tuple is ordered so that first item is the classification that\n :py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In\n general, classifications at the near the beginning of the list will\n produce better solutions faster than those near the end, thought there are\n always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a\n different classification, use ``dsolve(ODE, func,\n hint=)``. See also the\n :py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints\n you can use.\n\n If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will\n return a dictionary of ``hint:match`` expression terms. This is intended\n for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. 
Note that\n because dictionaries are ordered arbitrarily, this will most likely not be\n in the same order as the tuple.\n\n You can get help on different hints by executing\n ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint\n without ``_Integral``.\n\n See :py:data:`~sympy.solvers.ode.allhints` or the\n :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints\n that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`.\n\n Notes\n =====\n\n These are remarks on hint names.\n\n ``_Integral``\n\n If a classification has ``_Integral`` at the end, it will return the\n expression with an unevaluated :py:class:`~.Integral`", "n_words": 184, "vocab_size": 118, "n_whitespaces": 280, "language": "en" } }, { "id": 241885, "commit_id": "7438fe5edfb565ff341fa6ab054461fcdd504aa2", "repo": "scipy", "path": "scipy/stats/_stats_py.py", "file_name": "_stats_py.py", "fun_name": "mode", "commit_message": "MAINT: stats: mode: fix negative axis issue with np.moveaxis instead of custom code (#15421)", "code": "def mode(a, axis=0, nan_policy='propagate'):\n \n a, axis = _chk_asarray(a, axis)\n if a.size == 0:\n return ModeResult(np.array([]), np.array([]))\n\n contains_nan, nan_policy = _contains_nan(a, nan_policy)\n\n if contains_nan and nan_policy == 'omit':\n a = ma.masked_invalid(a)\n return mstats_basic.mode(a, axis)\n\n if a.dtype == object and np.nan in set(a.ravel()):\n # Fall back to a slower method since np.unique does not work with NaN\n scores = set(np.ravel(a)) # get ALL unique values\n testshape = list(a.shape)\n testshape[axis] = 1\n oldmostfreq = np.zeros(testshape, dtype=a.dtype)\n oldcounts = np.zeros(testshape, dtype=int)\n\n for score in scores:\n template = (a == score)\n counts = np.sum(template, axis, keepdims=True)\n mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)\n oldcounts = np.maximum(counts, oldcounts)\n oldmostfreq = mostfrequent\n\n return ModeResult(mostfrequent, oldcounts)\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 259, "n_words": 108, "vocab_size": 78, "complexity": 8, "nloc": 31, "token_counts": 340, "n_ast_nodes": 336, "n_identifiers": 35, "random_cut": "def mode(a, axis=0, nan_policy='propagate'):\n \n a, axis = _chk_asarray(a, axis)\n if a.size == 0:\n return ModeResult(np.array([]), np.array([]))\n\n contains_nan, nan_policy = _contains_nan(a, nan_policy)\n\n if contains_nan and nan_policy == 'omit':\n a = ma.masked_invalid(a)\n return mstats_basic.mode(a, axis)\n\n if a.dtype == object and np.nan in set(a.ravel()):\n # Fall back to a slower method since np.unique does not work with NaN\n scores = set(np.ravel(a)) # get ALL unique values\n testshape = list(a.shape)\n testshape[axis] = 1\n oldmostfreq = np.zeros(testshape, dtype=a.dtype)\n oldcounts = np.zeros(testshape, dtype=int)\n\n for score in scores:\n template = (a == score)\n counts = np.sum(template, axis, keepdims=True)\n mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)\n oldcounts = np.maximum(counts, oldcounts)\n ", "d_id": 69724, "documentation": { "docstring": "Return an array of the modal (most common) value in the passed array.\n\n If there is more than one such value, only the smallest is returned.\n The bin-count for the modal bins is also returned.\n\n Parameters\n ----------\n a : array_like\n n-dimensional array of which to find mode(s).\n axis : int or None, optional\n Axis along which to operate. Default is 0. 
If None, compute over\n the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n\n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n\n Returns\n -------\n mode : ndarray\n Array of modal values.\n count : ndarray\n Array of counts for each mode.\n\n Examples\n --------\n >>> a = np.array([[6, 8, 3, 0],\n ... [3, 2, 1, 7],\n ... [8, 1, 8, 4],\n ... [5, 3, 0, 5],\n ... [4, 7, 5, 9]])\n >>> from scipy import stats\n >>> stats.mode(a)\n ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))\n\n To get mode of whole array, specify ``axis=None``:\n\n >>> stats.mode(a, axis=None)\n ModeResult(mode=array([3]), count=array([3]))\n\n ", "n_words": 183, "vocab_size": 131, "n_whitespaces": 390, "language": "en" } }, { "id": 268024, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/host_profiles.py", "file_name": "host_profiles.py", "fun_name": "wait_for_instance", "commit_message": "ansible-test - Use more native type hints. (#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def wait_for_instance(self) -> AnsibleCoreCI:\n \n core_ci = self.get_instance()\n core_ci.wait()\n\n return core_ci\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 38, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 5, "token_counts": 22, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def wait_for_instance(self) -> AnsibleCoreCI:\n \n core_ci = self.get_instance()\n cor", "d_id": 79298, "documentation": { "docstring": "Wait for an AnsibleCoreCI VM instance to become ready.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 199635, "commit_id": "c6be089c27dd1891d4e273e7701926f7e5cf4d6f", "repo": "sympy", "path": "sympy/polys/orthopolys.py", "file_name": "orthopolys.py", "fun_name": "spherical_bessel_fn", "commit_message": "Link Appell sequences to corresponding continuous functions", "code": "def spherical_bessel_fn(n, x=None, polys=False):\n \n\n if n < 0:\n dup = dup_spherical_bessel_fn_minus(-int(n), ZZ)\n else:\n dup = dup_spherical_bessel_fn(int(n), ZZ)\n\n poly = DMP(dup, ZZ)\n\n if x is not None:\n poly = Poly.new(poly, 1/x)\n else:\n poly = PurePoly.new(poly, 1/Dummy('x'))\n\n return poly if polys else poly.as_expr()\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 90, "n_words": 41, "vocab_size": 28, "complexity": 4, "nloc": 11, "token_counts": 97, "n_ast_nodes": 154, "n_identifiers": 16, "random_cut": "def spherical_bessel_fn(n, x=None, polys=False):\n \n\n if n < 0:\n dup = dup_spherical_bessel_fn_minus(-int(n), ZZ)\n 
else:\n dup = dup_spherical_bessel_fn(int(n), ZZ)\n\n poly = DMP(dup, ZZ)\n\n if x is not None:\n poly = Poly.new(poly, 1/x)\n else:\n poly = PurePoly.new(poly, 1/Dummy('x'))\n\n return poly if polys el", "d_id": 49306, "documentation": { "docstring": "\n Coefficients for the spherical Bessel functions.\n\n Those are only needed in the jn() function.\n\n The coefficients are calculated from:\n\n fn(0, z) = 1/z\n fn(1, z) = 1/z**2\n fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)\n\n Parameters\n ==========\n\n n : int\n `n` decides the degree of polynomial\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n\n Examples\n ========\n\n >>> from sympy.polys.orthopolys import spherical_bessel_fn as fn\n >>> from sympy import Symbol\n >>> z = Symbol(\"z\")\n >>> fn(1, z)\n z**(-2)\n >>> fn(2, z)\n -1/z + 3/z**3\n >>> fn(3, z)\n -6/z**2 + 15/z**4\n >>> fn(4, z)\n 1/z - 45/z**3 + 105/z**5\n\n ", "n_words": 107, "vocab_size": 77, "n_whitespaces": 197, "language": "en" } }, { "id": 192158, "commit_id": "97eddc5d6a83a9bf620070075ef1e1864c9a68ac", "repo": "vision", "path": "torchvision/models/optical_flow/raft.py", "file_name": "raft.py", "fun_name": "raft_large", "commit_message": "Change default weights of RAFT model builders (#5381)\n\n* Change default weights of RAFT model builders\r\n\r\n* update handle_legacy_interface input\r\n\r\n* Oops, wrong default", "code": "def raft_large(*, pretrained=False, progress=True, **kwargs):\n \n\n return _raft(\n arch=\"raft_large\",\n pretrained=pretrained,\n progress=progress,\n # Feature encoder\n feature_encoder_layers=(64, 64, 96, 128, 256),\n feature_encoder_block=ResidualBlock,\n feature_encoder_norm_layer=InstanceNorm2d,\n # Context encoder\n context_encoder_layers=(64, 64, 96, 128, 256),\n context_encoder_block=ResidualBlock,\n context_encoder_norm_layer=BatchNorm2d,\n # Correlation block\n corr_block_num_levels=4,\n corr_block_radius=4,\n # Motion encoder\n motion_encoder_corr_layers=(256, 192),\n motion_encoder_flow_layers=(128, 64),\n motion_encoder_out_channels=128,\n # Recurrent block\n recurrent_block_hidden_state_size=128,\n recurrent_block_kernel_size=((1, 5), (5, 1)),\n recurrent_block_padding=((0, 2), (2, 0)),\n # Flow head\n flow_head_hidden_size=256,\n # Mask predictor\n use_mask_predictor=True,\n **kwargs,\n )\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 263, "n_words": 65, "vocab_size": 52, "complexity": 1, "nloc": 23, "token_counts": 152, "n_ast_nodes": 205, "n_identifiers": 25, "random_cut": "def raft_large(*, pretrained=False, progress=True, **kwargs):\n \n\n return _raft(\n arch=\"raft_large\",\n pretrained=pretrained,\n progress=progress,\n # Feature encoder\n feature_encoder_layers=(64, 64, 96, 128, 256),\n feature_encoder_block=ResidualBlock,\n feature_encoder_norm_layer=InstanceNorm2d,\n # Context encoder\n context_encoder_layers=(64, 64, 96, 128, 256),\n context_encoder_block=ResidualBlock,\n conte", "d_id": 46857, "documentation": { "docstring": "RAFT model from\n `RAFT: Recurrent All Pairs Field Transforms for Optical Flow `_.\n\n Args:\n pretrained (bool): Whether to use weights that have been pre-trained on\n :class:`~torchvsion.datasets.FlyingChairs` + :class:`~torchvsion.datasets.FlyingThings3D`\n with two fine-tuning steps:\n\n - one on :class:`~torchvsion.datasets.Sintel` + :class:`~torchvsion.datasets.FlyingThings3D`\n - one on 
:class:`~torchvsion.datasets.KittiFlow`.\n\n This corresponds to the ``C+T+S/K`` strategy in the paper.\n\n progress (bool): If True, displays a progress bar of the download to stderr.\n\n Returns:\n nn.Module: The model.\n ", "n_words": 68, "vocab_size": 56, "n_whitespaces": 156, "language": "en" } }, { "id": 11433, "commit_id": "c07f3c151d985b207af87ccc9115bc94c3164e55", "repo": "jina", "path": "jina/hubble/hubio.py", "file_name": "hubio.py", "fun_name": "_get_prettyprint_usage", "commit_message": "feat: add sandbox after push (#4349)", "code": "def _get_prettyprint_usage(self, console, executor_name, usage_kind=None):\n from rich.panel import Panel\n from rich.syntax import Syntax\n\n flow_plain = f\n\n flow_docker = f\n\n flow_sandbox = f\n panels = [\n Panel(\n Syntax(\n p[0],\n 'python',\n theme='monokai',\n word_wrap=True,\n ),\n title=p[1],\n width=80,\n expand=False,\n )\n for p in [\n (flow_plain, 'Use via source'),\n (flow_docker, 'Use in Docker'),\n (flow_sandbox, 'Use in Sandbox'),\n ]\n ]\n\n if usage_kind == 'docker':\n console.print(panels[2])\n elif usage_kind == 'source':\n console.print(panels[1])\n else:\n console.print(*reversed(panels))\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 408, "n_words": 66, "vocab_size": 51, "complexity": 4, "nloc": 39, "token_counts": 141, "n_ast_nodes": 231, "n_identifiers": 22, "random_cut": "def _get_prettyprint_usage(self, console, executor_name, usage_kind=None):\n from rich.panel import Panel\n from rich.syntax import Syntax\n\n flow_plain = f\n\n flow_docker = f\n\n flow_sandbox = f\n panels = [\n Panel(\n Syntax(\n p[0],\n 'python',\n theme='monokai',\n word_wrap=True,\n ),\n title=p[1],\n width=80,\n expand=False,\n )\n for p in [\n (flow_plain, 'Use via source'),\n (flow_docker, 'Use in Docker'),\n (flow_sandbox, 'Use in Sandbox'),\n ]\n ]\n\n if usage_kind == 'doc", "d_id": 2034, "documentation": { "docstring": "from jina import Flow\n\nf = Flow().add(uses='jinahub://{executor_name}')\nfrom jina import Flow\n\nf = Flow().add(uses='jinahub+docker://{executor_name}')\nfrom jina import Flow\n\nf = Flow().add(uses='jinahub+sandbox://{executor_name}')\n", "n_words": 21, "vocab_size": 9, "n_whitespaces": 15, "language": "en" } }, { "id": 206070, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/http/request.py", "file_name": "request.py", "fun_name": "encoding", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def encoding(self, val):\n \n self._encoding = val\n if hasattr(self, \"GET\"):\n del self.GET\n if hasattr(self, \"_post\"):\n del self._post\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 66, "n_words": 16, "vocab_size": 13, "complexity": 3, "nloc": 6, "token_counts": 37, "n_ast_nodes": 63, "n_identifiers": 7, "random_cut": "def encoding(self, val):\n \n self._encoding = val\n if hasattr(self, \"GET\"):\n del self.GET\n if hasattr(self, \"_post\"):\n del self._p", "d_id": 51352, "documentation": { "docstring": "\n Set the encoding used for GET/POST accesses. 
If the GET or POST\n dictionary has already been created, remove and recreate it on the\n next access (so that it is decoded correctly).\n ", "n_words": 31, "vocab_size": 28, "n_whitespaces": 60, "language": "en" } }, { "id": 189432, "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", "repo": "manim", "path": "manim/mobject/geometry.py", "file_name": "geometry.py", "fun_name": "scale", "commit_message": "Hide more private methods from the docs. (#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", "code": "def scale(self, factor, scale_tips=False, **kwargs):\n r\n if self.get_length() == 0:\n return self\n\n if scale_tips:\n super().scale(factor, **kwargs)\n self._set_stroke_width_from_length()\n return self\n\n has_tip = self.has_tip()\n has_start_tip = self.has_start_tip()\n if has_tip or has_start_tip:\n old_tips = self.pop_tips()\n\n super().scale(factor, **kwargs)\n self._set_stroke_width_from_length()\n\n if has_tip:\n self.add_tip(tip=old_tips[0])\n if has_start_tip:\n self.add_tip(tip=old_tips[1], at_start=True)\n return self\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 197, "n_words": 44, "vocab_size": 29, "complexity": 7, "nloc": 45, "token_counts": 124, "n_ast_nodes": 200, "n_identifiers": 15, "random_cut": "def scale(self, factor, scale_tips=False, **kwargs):\n r\n if self.get_length() == 0:\n return self\n\n if scale_tips:\n super().scale(factor, **kwargs)\n self._set_stroke_width_from_length()\n return self\n\n has_tip = self.has_tip()\n has_start_tip = self.has_start_tip()\n if has_tip or has_start_tip:\n old_tips = self.pop_tips()\n\n super().scale(factor, **kwargs)\n self._set_stroke_width_from_length()\n\n if has_tip:\n self.add_tip(tip=old_ti", "d_id": 46052, "documentation": { "docstring": "Scale an arrow, but keep stroke width and arrow tip size fixed.\n\n See Also\n --------\n :meth:`~.Mobject.scale`\n\n Examples\n --------\n ::\n\n >>> arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)\n >>> scaled_arrow = arrow.scale(2)\n >>> np.round(scaled_arrow.get_start_and_end(), 8) + 0\n array([[-2., -2., 0.],\n [ 2., 2., 0.]])\n >>> arrow.tip.length == scaled_arrow.tip.length\n True\n\n Manually scaling the object using the default method\n :meth:`~.Mobject.scale` does not have the same properties::\n\n >>> new_arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)\n >>> another_scaled_arrow = VMobject.scale(new_arrow, 2)\n >>> another_scaled_arrow.tip.length == arrow.tip.length\n False\n\n ", "n_words": 85, "vocab_size": 60, "n_whitespaces": 279, "language": "en" } }, { "id": 221110, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "effective", "commit_message": "add python 3.10.4 for windows", "code": "def effective(file, line, frame):\n \n possibles = Breakpoint.bplist[file, line]\n for 
b in possibles:\n if not b.enabled:\n continue\n if not checkfuncname(b, frame):\n continue\n # Count every hit when bp is enabled\n b.hits += 1\n if not b.cond:\n # If unconditional, and ignoring go on to next, else break\n if b.ignore > 0:\n b.ignore -= 1\n continue\n else:\n # breakpoint and marker that it's ok to delete if temporary\n return (b, True)\n else:\n # Conditional bp.\n # Ignore count applies only to those bpt hits where the\n # condition evaluates to true.\n try:\n val = eval(b.cond, frame.f_globals, frame.f_locals)\n if val:\n if b.ignore > 0:\n b.ignore -= 1\n # continue\n else:\n return (b, True)\n # else:\n # continue\n except:\n # if eval fails, most conservative thing is to stop on\n # breakpoint regardless of ignore count. Don't delete\n # temporary, as another hint to user.\n return (b, False)\n return (None, None)\n\n\n# -------------------- testing --------------------\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 604, "n_words": 151, "vocab_size": 96, "complexity": 9, "nloc": 25, "token_counts": 131, "n_ast_nodes": 217, "n_identifiers": 17, "random_cut": "def effective(file, line, frame):\n \n possibles = Breakpoint.bplist[file, line]\n for b in possibles:\n if not b.enabled:\n continue\n if not checkfuncname(b, frame):\n continue\n # Count every hit when bp is enabled\n b.hits += 1\n if not b.cond:\n # If unconditional, and ignoring go on to n", "d_id": 56213, "documentation": { "docstring": "Determine which breakpoint for this file:line is to be acted upon.\n\n Called only if we know there is a breakpoint at this location. Return\n the breakpoint that was triggered and a boolean that indicates if it is\n ok to delete a temporary breakpoint. 
Return (None, None) if there is no\n matching breakpoint.\n ", "n_words": 52, "vocab_size": 37, "n_whitespaces": 69, "language": "en" } }, { "id": 122380, "commit_id": "5784d61048facfa9dac1f1d309bde2d60a32810c", "repo": "jax", "path": "jax/_src/scipy/stats/truncnorm.py", "file_name": "truncnorm.py", "fun_name": "_log_gauss_mass", "commit_message": "implement truncnorm in jax.scipy.stats\n\nfix some shape and type issues\n\nimport into namespace\n\nimports into non-_src library\n\nworking logpdf test\n\ncleanup\n\nworking tests for cdf and sf after fixing select\n\nrelax need for x to be in (a, b)\n\nensure behavior with invalid input matches scipy\n\nremove enforcing valid parameters in tests\n\nadded truncnorm to docs\n\nwhoops alphabetical\n\nfix linter error\n\nfix circular import issue", "code": "def _log_gauss_mass(a, b):\n \n a, b = jnp.array(a), jnp.array(b)\n a, b = jnp.broadcast_arrays(a, b)\n\n # Note: Docstring carried over from scipy\n # Calculations in right tail are inaccurate, so we'll exploit the\n # symmetry and work only in the left tail\n case_left = b <= 0\n case_right = a > 0\n case_central = ~(case_left | case_right)\n\n def mass_case_left(a, b):\n return _log_diff(log_ndtr(b), log_ndtr(a))\n\n def mass_case_right(a, b):\n return mass_case_left(-b, -a)\n\n def mass_case_central(a, b):\n # Note: Docstring carried over from scipy\n # Previously, this was implemented as:\n # left_mass = mass_case_left(a, 0)\n # right_mass = mass_case_right(0, b)\n # return _log_sum(left_mass, right_mass)\n # Catastrophic cancellation occurs as np.exp(log_mass) approaches 1.\n # Correct for this with an alternative formulation.\n # We're not concerned with underflow here: if only one term\n # underflows, it was insignificant; if both terms underflow,\n # the result can't accurately be represented in logspace anyway\n # because sc.log1p(x) ~ x for small x.\n return jnp.log1p(-ndtr(a) - ndtr(-b))\n\n out = jnp.select(\n [case_left, case_right, case_central],\n [mass_case_left(a, b), mass_case_right(a, b), mass_case_central(a, b)]\n )\n return out\n\n\n@_wraps(osp_stats.truncnorm.logpdf, update_doc=False)", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_wraps(osp_stats.truncnorm.logpdf, update_doc=False)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 234, "n_words": 172, "vocab_size": 115, "complexity": 1, "nloc": 14, "token_counts": 100, "n_ast_nodes": 271, "n_identifiers": 23, "random_cut": "def _log_gauss_mass(a, b):\n \n a, b = jnp.array(a), jnp.array(b)\n a, b = jnp.broadcast_arrays(a, b)\n\n # Note: Docstring carried over from scipy\n # Calculations in right tail are inaccurate, so we'll exploit the\n # symmetry and work only in the left tail\n case_left = b <= 0\n case_right = a > 0\n ca", "d_id": 27169, "documentation": { "docstring": "Log of Gaussian probability mass within an interval", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 241753, "commit_id": "82c8875f33addb0becd7761c95e9674ccc98c7ee", "repo": "lightning", "path": "tests/trainer/logging_/test_logger_connector.py", "file_name": "test_logger_connector.py", "fun_name": "test_fx_validator_integration", "commit_message": "Add `LightningModule.lr_scheduler_step` (#10249)\n\nCo-authored-by: Carlos Mocholi ", "code": "def test_fx_validator_integration(tmpdir):\n \n not_supported = {\n None: \"`self.trainer` reference is not registered\",\n \"on_before_accelerator_backend_setup\": \"You can't\",\n \"setup\": \"You can't\",\n 
\"configure_sharded_model\": \"You can't\",\n \"on_configure_sharded_model\": \"You can't\",\n \"configure_optimizers\": \"You can't\",\n \"on_fit_start\": \"You can't\",\n \"on_pretrain_routine_start\": \"You can't\",\n \"on_pretrain_routine_end\": \"You can't\",\n \"on_train_dataloader\": \"You can't\",\n \"train_dataloader\": \"You can't\",\n \"on_val_dataloader\": \"You can't\",\n \"val_dataloader\": \"You can't\",\n \"on_validation_end\": \"You can't\",\n \"on_train_end\": \"You can't\",\n \"on_fit_end\": \"You can't\",\n \"teardown\": \"You can't\",\n \"on_sanity_check_start\": \"You can't\",\n \"on_sanity_check_end\": \"You can't\",\n \"prepare_data\": \"You can't\",\n \"configure_callbacks\": \"You can't\",\n \"on_validation_model_eval\": \"You can't\",\n \"on_validation_model_train\": \"You can't\",\n \"lr_scheduler_step\": \"You can't\",\n \"summarize\": \"not managed by the `Trainer\",\n }\n model = HookedModel(not_supported)\n\n with pytest.warns(UserWarning, match=not_supported[None]):\n model.log(\"foo\", 1)\n\n callback = HookedCallback(not_supported)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=2,\n limit_train_batches=1,\n limit_val_batches=1,\n limit_test_batches=1,\n limit_predict_batches=1,\n callbacks=callback,\n )\n with pytest.deprecated_call(match=\"on_train_dataloader` is deprecated in v1.5\"):\n trainer.fit(model)\n\n not_supported.update(\n {\n # `lightning_module` ref is now present from the `fit` call\n \"on_before_accelerator_backend_setup\": \"You can't\",\n \"on_test_dataloader\": \"You can't\",\n \"test_dataloader\": \"You can't\",\n \"on_test_model_eval\": \"You can't\",\n \"on_test_model_train\": \"You can't\",\n \"on_test_end\": \"You can't\",\n }\n )\n with pytest.deprecated_call(match=\"on_test_dataloader` is deprecated in v1.5\"):\n trainer.test(model, verbose=False)\n\n not_supported.update({k: \"result collection is not registered yet\" for k in not_supported})\n not_supported.update(\n {\n \"on_predict_dataloader\": \"result collection is not registered yet\",\n \"predict_dataloader\": \"result collection is not registered yet\",\n \"on_predict_model_eval\": \"result collection is not registered yet\",\n \"on_predict_start\": \"result collection is not registered yet\",\n \"on_predict_epoch_start\": \"result collection is not registered yet\",\n \"on_predict_batch_start\": \"result collection is not registered yet\",\n \"predict_step\": \"result collection is not registered yet\",\n \"on_predict_batch_end\": \"result collection is not registered yet\",\n \"on_predict_epoch_end\": \"result collection is not registered yet\",\n \"on_predict_end\": \"result collection is not registered yet\",\n }\n )\n with pytest.deprecated_call(match=\"on_predict_dataloader` is deprecated in v1.5\"):\n trainer.predict(model)\n\n\n@RunIf(min_gpus=2)", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@RunIf(min_gpus=2)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 763, "n_words": 249, "vocab_size": 106, "complexity": 2, "nloc": 72, "token_counts": 322, "n_ast_nodes": 644, "n_identifiers": 30, "random_cut": "def test_fx_validator_integration(tmpdir):\n \n not_supported = {\n None: \"`self.trainer` reference is not registered\",\n \"on_before_accelerator_backend_setup\": \"You can't\",\n \"setup\": \"You can't\",\n \"configure_sharded_model\": \"You can't\",\n \"on_configure_sharded_model\": \"You can't\",\n \"configure_optimizers\": \"", "d_id": 69683, "documentation": { "docstring": "Tries to log 
inside all `LightningModule` and `Callback` hooks to check any expected errors.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 182049, "commit_id": "988838a872d2c7af6a1113546ace4f15b74a3254", "repo": "textual", "path": "src/textual/drivers/win32.py", "file_name": "win32.py", "fun_name": "enable_application_mode", "commit_message": "working windows driver", "code": "def enable_application_mode() -> Callable[[], None]:\n \n\n terminal_in = sys.stdin\n terminal_out = sys.stdout\n\n current_console_mode_in = _get_console_mode(terminal_in)\n current_console_mode_out = _get_console_mode(terminal_out)\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 16, "token_counts": 53, "n_ast_nodes": 59, "n_identifiers": 10, "random_cut": "def enable_application_mode() -> Callable[[], None]:\n \n\n terminal_in = sys.stdin\n terminal_out = sys.stdout\n\n current_console_mode_in = _get_console_mode(terminal_in)\n current_console_m", "d_id": 43741, "documentation": { "docstring": "Enable application mode.\n\n Returns:\n Callable[[], None]: A callable that will restore terminal to previous state.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 28, "language": "en" } }, { "id": 266836, "commit_id": "871b2ca73adcba3a35551247cf839246cf121231", "repo": "ansible", "path": "lib/ansible/utils/_junit_xml.py", "file_name": "_junit_xml.py", "fun_name": "get_attributes", "commit_message": "Simplify existing type hints.", "code": "def get_attributes(self) -> dict[str, str]:\n \n return _attributes(\n message=self.message,\n type=self.type,\n )\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 53, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 29, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def get_attributes(self) -> dict[str, str]:\n ", "d_id": 78616, "documentation": { "docstring": "Return a dictionary of attributes for this instance.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 148549, "commit_id": "7bef9a9b3ec8593dac0701e7c5f8df6d77b5d4e0", "repo": "freqtrade", "path": "freqtrade/freqtradebot.py", "file_name": "freqtradebot.py", "fun_name": "check_handle_timedout", "commit_message": "Extract timeout handling from freqtradebot class", "code": "def check_handle_timedout(self) -> None:\n \n\n for trade in Trade.get_open_order_trades():\n try:\n if not trade.open_order_id:\n continue\n order = self.exchange.fetch_order(trade.open_order_id, trade.pair)\n except (ExchangeError):\n logger.info('Cannot query order for %s due to %s', trade, traceback.format_exc())\n continue\n\n fully_cancelled = self.update_trade_state(trade, trade.open_order_id, order)\n\n if (order['side'] == 'buy' and (order['status'] == 'open' or fully_cancelled) and (\n fully_cancelled\n or self.strategy.ft_check_timed_out(\n 'buy', trade, order, datetime.now(timezone.utc))\n )):\n self.handle_cancel_enter(trade, order, constants.CANCEL_REASON['TIMEOUT'])\n\n elif (order['side'] == 'sell' and (order['status'] == 'open' or fully_cancelled) and (\n fully_cancelled\n or self.strategy.ft_check_timed_out(\n 'sell', trade, order, datetime.now(timezone.utc)))\n ):\n self.handle_cancel_exit(trade, order, constants.CANCEL_REASON['TIMEOUT'])\n canceled_count = trade.get_exit_order_count()\n 
max_timeouts = self.config.get('unfilledtimeout', {}).get('exit_timeout_count', 0)\n if max_timeouts > 0 and canceled_count >= max_timeouts:\n logger.warning(f'Emergencyselling trade {trade}, as the sell order '\n f'timed out {max_timeouts} times.')\n try:\n self.execute_trade_exit(\n trade, order.get('price'),\n sell_reason=SellCheckTuple(sell_type=SellType.EMERGENCY_SELL))\n except DependencyException as exception:\n logger.warning(f'Unable to emergency sell trade {trade.pair}: {exception}')\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 695, "n_words": 125, "vocab_size": 82, "complexity": 17, "nloc": 38, "token_counts": 283, "n_ast_nodes": 483, "n_identifiers": 41, "random_cut": "def check_handle_timedout(self) -> None:\n \n\n for trade in Trade.get_open_order_trades():\n try:\n if not trade.open_order_id:\n continue\n order = self.exchange.fetch_order(trade.open_order_id, trade.pair)\n except (ExchangeError):\n logger.info('Cannot query order for %s due to %s', trade, traceback.format_exc())\n continue\n\n fully_cancelled = self.update_trade_state(trade, trade.open_order_id, order)\n\n if (order['side'] == 'buy' and (order['status'] == 'open' or fully_cancelled) and (\n fu", "d_id": 34289, "documentation": { "docstring": "\n Check if any orders are timed out and cancel if necessary\n :param timeoutvalue: Number of minutes until order is considered timed out\n :return: None\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 53, "language": "en" } }, { "id": 87972, "commit_id": "618ae63cf2ba419e44e79ce578d88e8b062d7dd9", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events.py", "file_name": "test_organization_events.py", "fun_name": "test_user_misery_denominator", "commit_message": "fix(tests): Discover backend test flakes (#41057)\n\n- `MetricsQueryBuilder` wasn't sorting environment tags\r\n- Consistent timestamps on test_organization_events\r\n- Updated `apply_feature_flag_on_cls` to only apply decorator on the run\r\nmethod", "code": "def test_user_misery_denominator(self):\n \n ProjectTransactionThreshold.objects.create(\n project=self.project,\n organization=self.project.organization,\n threshold=600,\n metric=TransactionMetric.LCP.value,\n )\n lcps = [\n 400,\n 400,\n 300,\n 3000,\n 3000,\n 3000,\n ]\n for idx, lcp in enumerate(lcps):\n data = self.load_data(\n timestamp=before_now(minutes=(10 + idx)),\n )\n data[\"event_id\"] = f\"{idx}\" * 32\n data[\"transaction\"] = \"/misery/new/\"\n data[\"user\"] = {\"email\": f\"{idx}@example.com\"}\n data[\"measurements\"] = {\n \"lcp\": {\"value\": lcp},\n }\n self.store_event(data, project_id=self.project.id)\n\n # Shouldn't count towards misery\n data = self.load_data(timestamp=self.ten_mins_ago, duration=timedelta(milliseconds=0))\n data[\"transaction\"] = \"/misery/new/\"\n data[\"user\"] = {\"email\": \"7@example.com\"}\n data[\"measurements\"] = {}\n self.store_event(data, project_id=self.project.id)\n\n query = {\n \"field\": [\n \"transaction\",\n \"user_misery()\",\n ],\n \"query\": \"event.type:transaction\",\n \"project\": [self.project.id],\n \"sort\": \"-user_misery\",\n }\n\n response = self.do_request(\n query,\n )\n\n assert response.status_code == 200, response.content\n assert len(response.data[\"data\"]) == 1\n data = response.data[\"data\"]\n # (3 frustrated + 5.8875) / (6 + 117.75)\n assert abs(data[0][\"user_misery()\"] - 0.071818) < 0.0001\n", "url": 
"https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 590, "n_words": 119, "vocab_size": 84, "complexity": 2, "nloc": 47, "token_counts": 287, "n_ast_nodes": 478, "n_identifiers": 35, "random_cut": "def test_user_misery_denominator(self):\n \n ProjectTransactionThreshold.objects.create(\n project=self.project,\n organization=self.project.organization,\n threshold=600,\n metric=TransactionMetric.LCP.value,\n )\n lcps = [\n 400,\n 400,\n 300,\n 3000,\n 3000,\n 3000,\n ]\n for idx, lcp in enumerate(lcps):\n data = self.load_data(\n ", "d_id": 18345, "documentation": { "docstring": "This is to test against a bug where the denominator of misery(total unique users) was wrong\n This is because the total unique users for a LCP misery should only count users that have had a txn with lcp,\n and not count all transactions (ie. uniq_if(transaction has lcp) not just uniq())\n ", "n_words": 50, "vocab_size": 41, "n_whitespaces": 71, "language": "en" } }, { "id": 60726, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/collector.py", "file_name": "collector.py", "fun_name": "_determine_base_url", "commit_message": "upd; format", "code": "def _determine_base_url(document, page_url):\n # type: (HTMLElement, str) -> str\n \n for base in document.findall(\".//base\"):\n href = base.get(\"href\")\n if href is not None:\n return href\n return page_url\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 62, "n_words": 25, "vocab_size": 22, "complexity": 3, "nloc": 6, "token_counts": 36, "n_ast_nodes": 63, "n_identifiers": 7, "random_cut": "def _determine_base_url(document, page_url):\n # type: (HTMLElement, str) -> str\n \n for base in document.findall(\".//base\"):\n href = base.get(\"href\")\n if href is not None:\n return href\n return page_url\n\n", "d_id": 12264, "documentation": { "docstring": "Determine the HTML document's base URL.\n\n This looks for a ```` tag in the HTML document. If present, its href\n attribute denotes the base URL of anchor tags in the document. If there is\n no such tag (or if it does not have a valid href attribute), the HTML\n file's URL is used as the base URL.\n\n :param document: An HTML document representation. 
The current\n implementation expects the result of ``html5lib.parse()``.\n :param page_url: The URL of the HTML document.\n ", "n_words": 79, "vocab_size": 51, "n_whitespaces": 107, "language": "en" } }, { "id": 9465, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/metrics/precision_recall.py", "file_name": "precision_recall.py", "fun_name": "pairwise_distances", "commit_message": "initialize ostec", "code": "def pairwise_distances(self, U, V):\n \n return self._distance_block.eval(feed_dict={self._features_batch1: U, self._features_batch2: V})\n\n#----------------------------------------------------------------------------\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 23, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 33, "n_ast_nodes": 52, "n_identifiers": 9, "random_cut": "def pairwise_distances(self, U, V):\n \n return self._distance_block.eval(feed_dict={self._features_batch1: U, self._features", "d_id": 1622, "documentation": { "docstring": "Evaluate pairwise distances between two batches of feature vectors.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 117187, "commit_id": "7c02e15aa403a4ca1fa34489dd2df9136d6c961c", "repo": "mindsdb", "path": "mindsdb/migrations/versions/2022-10-14_43c52d23845a_projects.py", "file_name": "2022-10-14_43c52d23845a_projects.py", "fun_name": "upgrade", "commit_message": "Projects structure (#3532)\n\nProjects structure", "code": "def upgrade():\n op.create_table(\n 'project',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('deleted_at', sa.DateTime(), nullable=True),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('company_id', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name', 'company_id', name='unique_integration_name_company_id')\n )\n\n conn = op.get_bind()\n session = sa.orm.Session(bind=conn)\n\n project_record = db.Project(name='mindsdb')\n session.add(project_record)\n session.commit()\n\n with op.batch_alter_table('predictor', schema=None) as batch_op:\n batch_op.add_column(sa.Column('project_id', sa.Integer()))\n batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id'])\n\n conn.execute(sa.sql.text(), project_id=project_record.id)\n\n with op.batch_alter_table('predictor', schema=None) as batch_op:\n batch_op.alter_column(\n 'project_id',\n existing_type=sa.INTEGER(),\n nullable=False\n )\n\n with op.batch_alter_table('view', schema=None) as batch_op:\n batch_op.add_column(sa.Column('project_id', sa.Integer()))\n batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id'])\n\n conn.execute(sa.sql.text(), project_id=project_record.id)\n\n with op.batch_alter_table('view', schema=None) as batch_op:\n batch_op.alter_column(\n 'project_id',\n existing_type=sa.INTEGER(),\n nullable=False\n )\n\n views = conn.execute().fetchall()\n\n for row in views:\n conn.execute(\n text(), {\n 'name': f\"{row['name']}_view\",\n 'view_id': row['id']\n }\n )\n\n view_integration = session.query(db.Integration).filter_by(name='views').first()\n if view_integration is not None:\n session.delete(view_integration)\n\n session.commit()\n\n", "url": "https://github.com/mindsdb/mindsdb.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 421, "n_words": 110, "vocab_size": 67, "complexity": 3, "nloc": 60, "token_counts": 446, "n_ast_nodes": 766, "n_identifiers": 45, "random_cut": "def upgrade():\n op.create_table(\n 'project',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('deleted_at', sa.DateTime(), nullable=True),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('company_id', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name', 'company_id', name='unique_integration_name_company_id')\n )\n\n conn = op.get_bind()\n session = sa.orm.Session(bind=conn)\n\n project_record = db.Project(name='mindsdb')\n session.add(project_record)\n session.commit()\n\n with op.ba", "d_id": 25917, "documentation": { "docstring": "\n update predictor set project_id = :project_id\n \n update view set project_id = :project_id\n \n select id, name from view\n where exists (select 1 from predictor where view.name = predictor.name)\n \n update view\n set name = :name\n where id = :view_id\n ", "n_words": 37, "vocab_size": 20, "n_whitespaces": 134, "language": "en" } }, { "id": 111338, "commit_id": "a322d6d5f2f85c2da6cded4fcd6143d41b5a9e96", "repo": "spaCy", "path": "spacy/pipeline/span_ruler.py", "file_name": "span_ruler.py", "fun_name": "clear", "commit_message": "Add SpanRuler component (#9880)\n\n* Add SpanRuler component\r\n\r\nAdd a `SpanRuler` component similar to `EntityRuler` that saves a list\r\nof matched spans to `Doc.spans[spans_key]`. The matches from the token\r\nand phrase matchers are deduplicated and sorted before assignment but\r\nare not otherwise filtered.\r\n\r\n* Update spacy/pipeline/span_ruler.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Fix cast\r\n\r\n* Add self.key property\r\n\r\n* Use number of patterns as length\r\n\r\n* Remove patterns kwarg from init\r\n\r\n* Update spacy/tests/pipeline/test_span_ruler.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Add options for spans filter and setting to ents\r\n\r\n* Add `spans_filter` option as a registered function'\r\n* Make `spans_key` optional and if `None`, set to `doc.ents` instead of\r\n`doc.spans[spans_key]`.\r\n\r\n* Update and generalize tests\r\n\r\n* Add test for setting doc.ents, fix key property type\r\n\r\n* Fix typing\r\n\r\n* Allow independent doc.spans and doc.ents\r\n\r\n* If `spans_key` is set, set `doc.spans` with `spans_filter`.\r\n* If `annotate_ents` is set, set `doc.ents` with `ents_fitler`.\r\n * Use `util.filter_spans` by default as `ents_filter`.\r\n * Use a custom warning if the filter does not work for `doc.ents`.\r\n\r\n* Enable use of SpanC.id in Span\r\n\r\n* Support id in SpanRuler as Span.id\r\n\r\n* Update types\r\n\r\n* `id` can only be provided as string (already by `PatternType`\r\ndefinition)\r\n\r\n* Update all uses of Span.id/ent_id in Doc\r\n\r\n* Rename Span id kwarg to span_id\r\n\r\n* Update types and docs\r\n\r\n* Add ents filter to mimic EntityRuler overwrite_ents\r\n\r\n* Refactor `ents_filter` to take `entities, spans` args for more\r\n filtering options\r\n* Give registered filters more descriptive names\r\n* Allow registered `filter_spans` filter\r\n (`spacy.first_longest_spans_filter.v1`) to take any number of\r\n `Iterable[Span]` objects as args so it can be used for spans filter\r\n or ents filter\r\n\r\n* Implement future entity 
ruler as span ruler\r\n\r\nImplement a compatible `entity_ruler` as `future_entity_ruler` using\r\n`SpanRuler` as the underlying component:\r\n* Add `sort_key` and `sort_reverse` to allow the sorting behavior to be\r\n customized. (Necessary for the same sorting/filtering as in\r\n `EntityRuler`.)\r\n* Implement `overwrite_overlapping_ents_filter` and\r\n `preserve_existing_ents_filter` to support\r\n `EntityRuler.overwrite_ents` settings.\r\n* Add `remove_by_id` to support `EntityRuler.remove` functionality.\r\n* Refactor `entity_ruler` tests to parametrize all tests to test both\r\n `entity_ruler` and `future_entity_ruler`\r\n* Implement `SpanRuler.token_patterns` and `SpanRuler.phrase_patterns`\r\n properties.\r\n\r\nAdditional changes:\r\n\r\n* Move all config settings to top-level attributes to avoid duplicating\r\n settings in the config vs. `span_ruler/cfg`. (Also avoids a lot of\r\n casting.)\r\n\r\n* Format\r\n\r\n* Fix filter make method name\r\n\r\n* Refactor to use same error for removing by label or ID\r\n\r\n* Also provide existing spans to spans filter\r\n\r\n* Support ids property\r\n\r\n* Remove token_patterns and phrase_patterns\r\n\r\n* Update docstrings\r\n\r\n* Add span ruler docs\r\n\r\n* Fix types\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Move sorting into filters\r\n\r\n* Check for all tokens in seen tokens in entity ruler filters\r\n\r\n* Remove registered sort key\r\n\r\n* Set Token.ent_id in a backwards-compatible way in Doc.set_ents\r\n\r\n* Remove sort options from API docs\r\n\r\n* Update docstrings\r\n\r\n* Rename entity ruler filters\r\n\r\n* Fix and parameterize scoring\r\n\r\n* Add id to Span API docs\r\n\r\n* Fix typo in API docs\r\n\r\n* Include explicit labeled=True for scorer\r\n\r\nCo-authored-by: Sofie Van Landeghem ", "code": "def clear(self) -> None:\n \n self._patterns: List[PatternType] = []\n self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate)\n self.phrase_matcher: PhraseMatcher = PhraseMatcher(\n self.nlp.vocab,\n attr=self.phrase_matcher_attr,\n validate=self.validate,\n )\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 89, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 13, "token_counts": 66, "n_ast_nodes": 102, "n_identifiers": 14, "random_cut": "def clear(self) -> None:\n \n self._patterns: List[PatternType] = []\n self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate)\n self.phrase_matcher: P", "d_id": 24381, "documentation": { "docstring": "Reset all patterns.\n\n RETURNS: None\n DOCS: https://spacy.io/api/spanruler#clear\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 28, "language": "en" } }, { "id": 223483, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/doctest.py", "file_name": "doctest.py", "fun_name": "_from_module", "commit_message": "add python 3.10.4 for windows", "code": "def _from_module(self, module, object):\n \n if module is None:\n return True\n elif inspect.getmodule(object) is not None:\n return module is inspect.getmodule(object)\n elif inspect.isfunction(object):\n return module.__dict__ is object.__globals__\n elif inspect.ismethoddescriptor(object):\n if hasattr(object, '__objclass__'):\n obj_mod = object.__objclass__.__module__\n elif hasattr(object, '__module__'):\n obj_mod = object.__module__\n else:\n return True # [XX] no easy way to tell otherwise\n 
return module.__name__ == obj_mod\n elif inspect.isclass(object):\n return module.__name__ == object.__module__\n elif hasattr(object, '__module__'):\n return module.__name__ == object.__module__\n elif isinstance(object, property):\n return True # [XX] no way not be sure.\n else:\n raise ValueError(\"object must be a class or function\")\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 317, "n_words": 88, "vocab_size": 47, "complexity": 10, "nloc": 23, "token_counts": 148, "n_ast_nodes": 242, "n_identifiers": 19, "random_cut": "def _from_module(self, module, object):\n \n if module is None:\n return True\n elif inspect.getmodule(object) is not None:\n return module is inspect.getmodule(object)\n elif inspect.isfunction(object):\n return module.__dict__ is object.__globals__\n elif inspect.ismethoddescriptor(object):\n if hasattr(object, '__objclass__'):\n obj_mod = object.__objclass__.__module__\n elif hasattr(object, '__module__'):\n obj_mod = object.__module__\n else:\n ", "d_id": 56928, "documentation": { "docstring": "\n Return true if the given object is defined in the given\n module.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 34, "language": "en" } }, { "id": 213001, "commit_id": "a35687ac51dac5a2a0664ca20e7dd7cba6836c7b", "repo": "PySimpleGUI", "path": "DemoPrograms/Demo_Script_Launcher_ANSI_Color_Output.py", "file_name": "Demo_Script_Launcher_ANSI_Color_Output.py", "fun_name": "cut_ansi_string_into_parts", "commit_message": "Removed old code that used Popen and instead uses the PySimpleGUI Exec API calls for an all-in-one demo. Added expansion of the Multilline and a SizeGrip so that it's obvious to user the window is resizable.", "code": "def cut_ansi_string_into_parts(string_with_ansi_codes):\n \n color_codes_english = ['Black', 'Red', 'Green', 'Yellow', 'Blue', 'Magenta', 'Cyan', 'White', 'Reset']\n color_codes = [\"30m\", \"31m\", \"32m\", \"33m\", \"34m\", \"35m\", \"36m\", \"37m\", \"0m\"]\n effect_codes_english = ['Italic', 'Underline', 'Slow Blink', 'Rapid Blink', 'Crossed Out']\n effect_codes = [\"3m\", \"4m\", \"5m\", \"6m\", \"9m\"]\n background_codes = [\"40m\", \"41m\", \"42m\", \"43m\", \"44m\", \"45m\", \"46m\", \"47m\"]\n background_codes_english = [\"Black\", \"Red\", \"Green\", \"Yellow\", \"Blue\", \"Magenta\", \"Cyan\", \"White\"]\n\n ansi_codes = color_codes + effect_codes\n\n tuple_list = []\n\n string_list = string_with_ansi_codes.split(\"\\u001b[\")\n\n if (len(string_list)) == 1:\n string_list = string_with_ansi_codes.split(\"\\033[\")\n\n for teststring in string_list:\n if teststring == string_with_ansi_codes:\n tuple_list += [(teststring, None, None, None)]\n break\n if any(code in teststring for code in ansi_codes):\n static_string = None\n color_used = None\n effect_used = None\n background_used = None\n for color in range(0, len(color_codes)):\n if teststring.startswith(color_codes[color]):\n working_thread = teststring.split(color_codes[color])\n ansi_strip = re.compile(r'\\x1B[@-_][0-?]*[ -/]*[@-~]')\n static_string = ansi_strip.sub('', working_thread[1])\n color_used = color_codes_english[color]\n for effect in range(0, len(effect_codes)):\n if teststring.startswith(effect_codes[effect]):\n working_thread = teststring.split(effect_codes[effect])\n ansi_strip = re.compile(r'\\x1B[@-_][0-?]*[ -/]*[@-~]')\n static_string = ansi_strip.sub('', working_thread[1])\n effect_used = effect_codes_english[effect]\n for background in range(0, 
len(background_codes)):\n if teststring.startswith(background_codes[background]):\n working_thread = teststring.split(background_codes[background])\n ansi_strip = re.compile(r'\\x1B[@-_][0-?]*[ -/]*[@-~]')\n static_string = ansi_strip.sub('', working_thread[1])\n background_used = background_codes_english[background]\n try:\n if not tuple_list[len(tuple_list) - 1][0]:\n if not tuple_list[len(tuple_list) - 1][1] == None:\n color_used = tuple_list[len(tuple_list) - 1][1]\n if not tuple_list[len(tuple_list) - 1][2] == None:\n background_used = tuple_list[len(tuple_list) - 1][2]\n if not tuple_list[len(tuple_list) - 1][3] == None:\n effect_used = tuple_list[len(tuple_list) - 1][3]\n tuple_list += [(static_string, color_used, background_used, effect_used)]\n else:\n tuple_list += [(static_string, color_used, background_used, effect_used)]\n except Exception:\n tuple_list += [(static_string, color_used, background_used, effect_used)]\n\n new_tuple_list = []\n\n for x in range(0, len(tuple_list)):\n if tuple_list[x][0]:\n new_tuple_list += [[tuple_list[x][0], tuple_list[x][1], tuple_list[x][2], tuple_list[x][3]]]\n\n return new_tuple_list\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 945, "n_words": 258, "vocab_size": 131, "complexity": 19, "nloc": 57, "token_counts": 603, "n_ast_nodes": 973, "n_identifiers": 33, "random_cut": "def cut_ansi_string_into_parts(string_with_ansi_codes):\n \n color_codes_english = ['Black', 'Red', 'Green', 'Yellow', 'Blue', 'Magenta', 'Cyan', 'White', 'Reset']\n color_codes = [\"30m\", \"31m\", \"32m\", \"33m\", \"34m\", \"35m\", \"36m\", \"37m\", \"0m\"]\n effect_codes_english = ['Italic', 'Underline', 'Slow Blink', 'Rapid Blink', 'Crossed Out']\n effect_codes", "d_id": 53568, "documentation": { "docstring": "\n Converts a string with ambedded ANSI Color Codes and parses it to create\n a list of tuples describing pieces of the input string.\n :param string_with_ansi_codes:\n :return: [(sty, str, str, str), ...] A list of tuples. Each tuple has format: (text, text color, background color, effects)\n ", "n_words": 45, "vocab_size": 39, "n_whitespaces": 61, "language": "en" } }, { "id": 162765, "commit_id": "9120cdffe618c6c2ff16fe6a311b6a1367efdbc8", "repo": "AutoEq", "path": "research/neo_peq/legacy_frequency_response.py", "file_name": "legacy_frequency_response.py", "fun_name": "interpolate", "commit_message": "Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. 
Bug fixes.", "code": "def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX):\n \n # Remove None values\n i = 0\n while i < len(self.raw):\n if self.raw[i] is None:\n self.raw = np.delete(self.raw, i)\n self.frequency = np.delete(self.frequency, i)\n else:\n i += 1\n\n # Interpolation functions\n keys = 'raw error error_smoothed equalization equalized_raw equalized_smoothed target'.split()\n interpolators = dict()\n log_f = np.log10(self.frequency)\n for key in keys:\n if len(self.__dict__[key]):\n interpolators[key] = InterpolatedUnivariateSpline(log_f, self.__dict__[key], k=pol_order)\n\n if f is None:\n self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step)\n else:\n self.frequency = np.array(f)\n\n # Prevent log10 from exploding by replacing zero frequency with small value\n zero_freq_fix = False\n if self.frequency[0] == 0:\n self.frequency[0] = 0.001\n zero_freq_fix = True\n\n # Run interpolators\n log_f = np.log10(self.frequency)\n for key in keys:\n if len(self.__dict__[key]) and key in interpolators:\n self.__dict__[key] = interpolators[key](log_f)\n\n if zero_freq_fix:\n # Restore zero frequency\n self.frequency[0] = 0\n\n # Everything but the interpolated data is affected by interpolating, reset them\n self.reset(**{key: False for key in keys})\n", "url": "https://github.com/jaakkopasanen/AutoEq.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 472, "n_words": 147, "vocab_size": 94, "complexity": 12, "nloc": 29, "token_counts": 273, "n_ast_nodes": 423, "n_identifiers": 30, "random_cut": "def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX):\n \n # Remove None values\n i = 0\n while i < len(self.raw):\n if self.raw[i] is None:\n self.raw = np.delete(self.raw, i)\n self.frequency = np.delete(self.frequency, i)\n else:\n i += 1\n\n # Interpolation functions\n keys = 'raw error error_smoothed equalization equalized_raw equalized_smoothed target'.split()\n interpolators = dict()\n log_f = np.log10(self.frequency)\n for key in keys:\n if len(self.__dict__[key]):\n interpolators[key] = InterpolatedUnivariateSpline(log_f, self.__dict__[key], k=pol_order)\n\n if f is None:\n self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step)\n else:\n self.frequency = np.array(f)\n\n # Prevent log10 from exploding by replacing zero frequency with small value\n zero_freq_fix = False\n if self.frequency[0] == 0:\n self.frequency[0] = 0.001\n zero_freq_fix = True\n\n # Run interpolators\n log_f = np.log10(self.frequency)\n for key in keys:\n if len(self.__dict__[key]) and key in interpolators:\n self.__dict__[key] = interpolators[key](log_f)\n\n if zero_freq_fix:\n # Restore zero frequency\n self.frequency[0] = 0\n\n # Everything but the interpolated data is affected by interpolating, reset them\n ", "d_id": 39296, "documentation": { "docstring": "Interpolates missing values from previous and next value. 
Resets all but raw data.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 331584, "commit_id": "cdcd0a92ca8a3dc120336a5dde1b7d6ecd5e9186", "repo": "pytorch-image-models", "path": "timm/optim/lars.py", "file_name": "lars.py", "fun_name": "step", "commit_message": "fix lars", "code": "def step(self, closure=None):\n \n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n device = self.param_groups[0]['params'][0].device\n one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n trust_coeff = group['trust_coeff']\n eps = group['eps']\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad\n\n # apply LARS LR adaptation, LARC clipping, weight decay\n # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py\n if weight_decay != 0 or group['always_adapt']:\n w_norm = p.norm(2.0)\n g_norm = grad.norm(2.0)\n trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps)\n # FIXME nested where required since logical and/or not working in PT XLA\n trust_ratio = torch.where(\n w_norm > 0,\n torch.where(g_norm > 0, trust_ratio, one_tensor),\n one_tensor,\n )\n if group['trust_clip']:\n trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor)\n grad.add_(p, alpha=weight_decay)\n grad.mul_(trust_ratio)\n\n # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(grad).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(grad, alpha=1. 
- dampening)\n if nesterov:\n grad = grad.add(buf, alpha=momentum)\n else:\n grad = buf\n\n p.add_(grad, alpha=-group['lr'])\n\n return loss", "url": "https://github.com/huggingface/pytorch-image-models.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 947, "n_words": 182, "vocab_size": 118, "complexity": 11, "nloc": 44, "token_counts": 331, "n_ast_nodes": 534, "n_identifiers": 34, "random_cut": "def step(self, closure=None):\n \n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n device = self.param_groups[0]['params'][0].device\n one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n trust_coeff = group['trust_coeff']\n eps = group['eps']\n\n for p ", "d_id": 119869, "documentation": { "docstring": "Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 44, "language": "en" } }, { "id": 296416, "commit_id": "2c2b678e80db615e50a7b72c3ec107730cc6f8dd", "repo": "core", "path": "homeassistant/components/hunterdouglas_powerview/cover.py", "file_name": "cover.py", "fun_name": "_async_force_resync", "commit_message": "Fix handling of powerview stale state (#70195)", "code": "async def _async_force_resync(self, *_):\n \n self._forced_resync = None\n await self._async_force_refresh_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "async def _async_force_resync(self, *_):\n \n self._forced_resync = None\n ", "d_id": 95399, "documentation": { "docstring": "Force a resync after an update since the hub may have stale state.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 42251, "commit_id": "e644793f0ac2b1be178425f20f529121f37f29de", "repo": "seaborn", "path": "seaborn/palettes.py", "file_name": "palettes.py", "fun_name": "set_color_codes", "commit_message": "Convert color palette docstrings to notebooks (#3034)\n\n* Convert color palette docstrings to notebooks and rerun all with py310 kernel\r\n\r\n* Add v0.12.1 release notes to index\r\n\r\n* Improve failure mode when ipywidgets is not involved\r\n\r\n* Update palettes docstrings\r\n\r\n* Remove all other doctest-style examples\r\n\r\n* Remove doctest-oriented testing infrastructure\r\n\r\n* Mention in release notes\r\n\r\n* Skip colormap patch test on matplotlib's where it's not relevant\r\n\r\n* Use more robust approach to mpl backcompat", "code": "def set_color_codes(palette=\"deep\"):\n \n if palette == \"reset\":\n colors = [\n (0., 0., 1.),\n (0., .5, 0.),\n (1., 0., 0.),\n (.75, 0., .75),\n (.75, .75, 0.),\n (0., .75, .75),\n (0., 0., 0.)\n ]\n elif not isinstance(palette, str):\n err = \"set_color_codes requires a named seaborn palette\"\n raise TypeError(err)\n elif palette in SEABORN_PALETTES:\n if not palette.endswith(\"6\"):\n palette = palette + \"6\"\n colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]\n else:\n err = f\"Cannot set colors with palette '{palette}'\"\n raise ValueError(err)\n\n for code, color in 
zip(\"bgrmyck\", colors):\n rgb = mpl.colors.colorConverter.to_rgb(color)\n mpl.colors.colorConverter.colors[code] = rgb\n mpl.colors.colorConverter.cache[code] = rgb\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 273, "n_words": 90, "vocab_size": 57, "complexity": 6, "nloc": 25, "token_counts": 207, "n_ast_nodes": 280, "n_identifiers": 18, "random_cut": "def set_color_codes(palette=\"deep\"):\n \n if palette == \"reset\":\n colors = [\n ", "d_id": 7511, "documentation": { "docstring": "Change how matplotlib color shorthands are interpreted.\n\n Calling this will change how shorthand codes like \"b\" or \"g\"\n are interpreted by matplotlib in subsequent plots.\n\n Parameters\n ----------\n palette : {deep, muted, pastel, dark, bright, colorblind}\n Named seaborn palette to use as the source of colors.\n\n See Also\n --------\n set : Color codes can be set through the high-level seaborn style\n manager.\n set_palette : Color codes can also be set through the function that\n sets the matplotlib color cycle.\n\n ", "n_words": 78, "vocab_size": 58, "n_whitespaces": 141, "language": "en" } }, { "id": 43220, "commit_id": "b692517ce3aafb276e9d23570e9734c30a5f3d1f", "repo": "airflow", "path": "tests/models/test_dagrun.py", "file_name": "test_dagrun.py", "fun_name": "test_mapped_literal_length_increase_at_runtime_adds_additional_tis", "commit_message": "Fix mapped task immutability after clear (#23667)\n\nWe should be able to detect if the structure of mapped task has changed\r\nand verify the integrity.\r\n\r\nThis PR ensures this\r\nCo-authored-by: Tzu-ping Chung ", "code": "def test_mapped_literal_length_increase_at_runtime_adds_additional_tis(dag_maker, session):\n \n from airflow.models import Variable\n\n Variable.set(key='arg1', value=[1, 2, 3])\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 11, "vocab_size": 11, "complexity": 5, "nloc": 39, "token_counts": 311, "n_ast_nodes": 51, "n_identifiers": 9, "random_cut": "def test_mapped_literal_length_increase_at_runtime_adds_additional_tis(dag_maker, session):\n ", "d_id": 7877, "documentation": { "docstring": "Test that when the length of mapped literal increases at runtime, additional ti is added", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 319729, "commit_id": "08c3d6e84b17da2acfb10250438fe357398e5e0e", "repo": "paperless-ngx", "path": "src/documents/tests/test_management_convert_thumbnail.py", "file_name": "test_management_convert_thumbnail.py", "fun_name": "create_png_thumbnail_file", "commit_message": "Fixes existing testing, adds test coverage of new command", "code": "def create_png_thumbnail_file(self, thumb_dir):\n \n thumb_file = Path(thumb_dir) / Path(f\"{self.doc.pk:07}.png\")\n thumb_file.write_text(\"this is a dummy png file\")\n return thumb_file\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 44, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 28, "n_ast_nodes": 62, "n_identifiers": 8, "random_cut": "def create_png_thumbnail_file(self, thumb_dir):\n \n thumb_file = Path(thumb_dir) / Path(f\"{self.doc.pk:07}.png\")\n thumb_file.write_text(\"this is a dummy p", "d_id": 116988, "documentation": { "docstring": "\n Creates a dummy PNG thumbnail 
file in the given directory, based on\n the database Document\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 187141, "commit_id": "3d44da082b3ba202b9d0557bfd8ce747a1d7960c", "repo": "streamlink", "path": "tests/test_api_validate.py", "file_name": "test_api_validate.py", "fun_name": "test_parse_html", "commit_message": "plugin.api.validate: implement ValidationError\n\n- Implement `ValidationError`\n - Inherit from `ValueError` to preserve backwards compatiblity\n - Allow collecting multiple errors (AnySchema)\n - Keep an error stack of parent `ValidationError`s or other exceptions\n - Format error stack when converting error to string\n- Raise `ValidationError` instead of `ValueError`\n - Add error contexts where it makes sense\n - Add schema names to error instances\n- Add and update tests", "code": "def test_parse_html(self):\n assert validate(parse_html(), '"perfectly"valid
HTML').tag == \"html\"\n with self.assertRaises(ValueError) as cm:\n validate(parse_html(), None)\n assert_validationerror(cm.exception, )\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 47, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 8, "token_counts": 44, "n_ast_nodes": 79, "n_identifiers": 10, "random_cut": "def test_parse_html(self):\n a", "d_id": 45702, "documentation": { "docstring": "\n ValidationError:\n Unable to parse HTML: can only parse strings (None)\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 42, "language": "en" } }, { "id": 258712, "commit_id": "d7feac0ccfe1a7b8a55f2e16f249f77508a91fe1", "repo": "scikit-learn", "path": "sklearn/utils/validation.py", "file_name": "validation.py", "fun_name": "_check_feature_names_in", "commit_message": "ENH Adds feature_names_out to preprocessing module (#21079)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: 赵丰 (Zhao Feng) <616545598@qq.com>\r\nCo-authored-by: Niket Jain <51831161+nikJ13@users.noreply.github.com>\r\nCo-authored-by: Loïc Estève ", "code": "def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):\n \n\n feature_names_in_ = getattr(estimator, \"feature_names_in_\", None)\n n_features_in_ = getattr(estimator, \"n_features_in_\", None)\n\n if input_features is not None:\n input_features = np.asarray(input_features, dtype=object)\n if feature_names_in_ is not None and not np.array_equal(\n feature_names_in_, input_features\n ):\n raise ValueError(\"input_features is not equal to feature_names_in_\")\n\n if n_features_in_ is not None and len(input_features) != n_features_in_:\n raise ValueError(\n \"input_features should have length equal to number of \"\n f\"features ({n_features_in_}), got {len(input_features)}\"\n )\n return input_features\n\n if feature_names_in_ is not None:\n return feature_names_in_\n\n if not generate_names:\n return\n\n # Generates feature names if `n_features_in_` is defined\n if n_features_in_ is None:\n raise ValueError(\"Unable to generate feature names without n_features_in_\")\n\n return np.asarray([f\"x{i}\" for i in range(n_features_in_)], dtype=object)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 264, "n_words": 107, "vocab_size": 62, "complexity": 10, "nloc": 22, "token_counts": 141, "n_ast_nodes": 244, "n_identifiers": 16, "random_cut": "def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):\n \n\n feature_names_in_ = getattr(estimator, \"feature_names_in_\", None)\n n_features_in_ = getattr(estimator, \"n_features_in_\", None)\n\n if input_features is not None:\n input_features = np.asarray(input_features, dtype=object)\n if feature_names_in_ is not None and not np.array_equal(\n feature_names_in_, input_features\n ):\n raise ValueError(\"input_features is not equal to feature_names_in_\")\n\n if n_features_in_ is not None and len(input_features) != n_features_in_:\n raise ValueError(\n \"input_features should have length equal to number of \"\n f\"features ({n_features_in_}), got {len(input_features)}\"\n )\n return input_features\n\n if feature_names_in_ is not None:\n return feature_n", "d_id": 75377, "documentation": { "docstring": "Check `input_features` and generate names if needed.\n\n Commonly used in :term:`get_feature_names_out`.\n\n Parameters\n ----------\n input_features : 
array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then names are generated: `[x0, x1, ..., x(n_features_in_)]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n generate_names : bool, default=True\n Whether to generate names when `input_features` is `None` and\n `estimator.feature_names_in_` is not defined. This is useful for transformers\n that validates `input_features` but do not require them in\n :term:`get_feature_names_out` e.g. `PCA`.\n\n Returns\n -------\n feature_names_in : ndarray of str or `None`\n Feature names in.\n ", "n_words": 110, "vocab_size": 71, "n_whitespaces": 226, "language": "en" } }, { "id": 176293, "commit_id": "b5d41847b8db0c82372faf69cd3a339d11da7ef0", "repo": "networkx", "path": "networkx/algorithms/shortest_paths/weighted.py", "file_name": "weighted.py", "fun_name": "all_pairs_bellman_ford_path", "commit_message": "DOC: Update documentation to include callables for weight argument (#5307)\n\nUpdate docs to include functions as valid input for weight argument.", "code": "def all_pairs_bellman_ford_path(G, weight=\"weight\"):\n \n path = single_source_bellman_ford_path\n # TODO This can be trivially parallelized.\n for n in G:\n yield (n, path(G, n, weight=weight))\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 41, "n_words": 22, "vocab_size": 22, "complexity": 2, "nloc": 4, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 6, "random_cut": "def all_pairs_bellman_ford_path(G, weight=\"weight\"):\n \n path = single_source_bellm", "d_id": 41810, "documentation": { "docstring": "Compute shortest paths between all nodes in a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n weight : string or function (default=\"weight\")\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. 
The function must\n return a number.\n\n Returns\n -------\n distance : dictionary\n Dictionary, keyed by source and target, of shortest paths.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> path = dict(nx.all_pairs_bellman_ford_path(G))\n >>> path[0][4]\n [0, 1, 2, 3, 4]\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n See Also\n --------\n floyd_warshall, all_pairs_dijkstra_path\n\n ", "n_words": 170, "vocab_size": 109, "n_whitespaces": 310, "language": "en" } }, { "id": 68774, "commit_id": "3fa0a46f39f7024c5d0b235a7725eaa9ad0f3869", "repo": "erpnext", "path": "erpnext/manufacturing/doctype/bom_update_log/test_bom_update_log.py", "file_name": "test_bom_update_log.py", "fun_name": "update_cost_in_all_boms_in_test", "commit_message": "chore: Less hacky tests, versioning (replace bom) and clearing log data (update cost)\n\n- Remove `auto_commit_on_many_writes` in `update_cost_in_level()` as commits happen every N BOMs\n- Auto commit every 50 BOMs\n- test: Remove hacky `frappe.flags.in_test` returns\n- test: Enqueue `now` if in tests (for update cost and replace bom)\n- Replace BOM: Copy bom object to `_doc_before_save` so that version.py finds a difference between the two\n- Replace BOM: Add reference to version\n- Update Cost: Unset `processed_boms` if Log is completed (useless after completion)\n- test: `update_cost_in_all_boms_in_test` works close to actual prod implementation (only call Cron job manually)\n- Test: use `enqueue_replace_bom` so that test works closest to production behaviour\n\nCo-authored-by: Ankush Menat ", "code": "def update_cost_in_all_boms_in_test():\n\t\n\tlog = enqueue_update_cost() # create BOM Update Log\n\n\twhile log.status != \"Completed\":\n\t\tresume_bom_cost_update_jobs() # run cron job until complete\n\t\tlog.reload()\n\n\treturn log\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 24, "vocab_size": 22, "complexity": 2, "nloc": 6, "token_counts": 27, "n_ast_nodes": 54, "n_identifiers": 6, "random_cut": "def update_cost_in_all_boms_in_test():\n\t\n\tlog = enqueue_update_cost() # create BOM Update Log\n\n\twhile log.status != \"Completed\":\n\t\tresume_bom_cost_update_jobs() # run cron job until complete\n\t\tlog.reload()\n\n\treturn log\n", "d_id": 14863, "documentation": { "docstring": "\n\tUtility to run 'Update Cost' job in tests without Cron job until fully complete.\n\t", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 185757, "commit_id": "b524fa08eecadc83b0b694278db1c79d90feb9d8", "repo": "textual", "path": "src/textual/widgets/_data_table.py", "file_name": "_data_table.py", "fun_name": "clear", "commit_message": "ffixed table refresh on add row", "code": "def clear(self) -> None:\n \n self.row_count = 0\n self._clear_caches()\n self._y_offsets.clear()\n self.data.clear()\n self.rows.clear()\n self._line_no = 0\n self._require_update_dimensions = True\n self.refresh()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 81, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 14, "token_counts": 54, "n_ast_nodes": 94, "n_identifiers": 10, "random_cut": "def clear(self) -> None:\n \n self.row_count = 0\n self._clear_caches()\n", "d_id": 45161, "documentation": { "docstring": "Clear the table.\n\n Args:\n columns 
(bool, optional): Also clear the columns. Defaults to False.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 39, "language": "en" } }, { "id": 178399, "commit_id": "2c20b90946a8aa5ad4ee39ad365ff1b83f182770", "repo": "Nuitka", "path": "nuitka/utils/FileOperations.py", "file_name": "FileOperations.py", "fun_name": "copyFile", "commit_message": "UI: In case of PermissionError, allow uses to retry\n\n* Esp. on Windows it happens a lot that running programs cannot be\n updated by Nuitka, this avoids the cryptic error somewhere ranomly.", "code": "def copyFile(source_path, dest_path):\n \n\n while 1:\n try:\n shutil.copyfile(source_path, dest_path)\n except PermissionError as e:\n if e.errno != errno.EACCES:\n raise\n\n general.warning(\"Problem copying file %s:\" % e)\n\n try:\n reply = raw_input(\"Retry? (YES/no) \") or \"yes\"\n except EOFError:\n reply = \"no\"\n\n if reply.upper() == \"YES\":\n continue\n\n raise\n\n break\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 207, "n_words": 43, "vocab_size": 37, "complexity": 7, "nloc": 16, "token_counts": 72, "n_ast_nodes": 132, "n_identifiers": 15, "random_cut": "def copyFile(source_path, dest_path):\n \n\n while 1:\n try:\n shutil.copyfile(source_path, dest_path)\n except PermissionError as e:\n if e.errno != errno.EACCES:\n raise\n\n general.warning(\"Problem copying file %s:\" % e)\n\n try:\n reply", "d_id": 42686, "documentation": { "docstring": "Improved version of shutil.copy\n\n This handles errors with a chance to correct them, e.g. on Windows, files might be\n locked by running program or virus checkers.\n ", "n_words": 26, "vocab_size": 26, "n_whitespaces": 35, "language": "en" } }, { "id": 259994, "commit_id": "6ca1f5e4d0d16bc9a7f28582079a15e14f012719", "repo": "scikit-learn", "path": "sklearn/ensemble/tests/test_iforest.py", "file_name": "test_iforest.py", "fun_name": "test_iforest", "commit_message": "TST use global_random_seed in sklearn/ensemble/tests/test_iforest.py (#22901)\n\n\r\n\r\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Olivier Grisel ", "code": "def test_iforest(global_random_seed):\n \n X_train = np.array([[0, 1], [1, 2]])\n X_test = np.array([[2, 1], [1, 1]])\n\n grid = ParameterGrid(\n {\"n_estimators\": [3], \"max_samples\": [0.5, 1.0, 3], \"bootstrap\": [True, False]}\n )\n\n with ignore_warnings():\n for params in grid:\n IsolationForest(random_state=global_random_seed, **params).fit(\n X_train\n ).predict(X_test)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 106, "n_words": 37, "vocab_size": 32, "complexity": 2, "nloc": 11, "token_counts": 109, "n_ast_nodes": 164, "n_identifiers": 14, "random_cut": "def test_iforest(global_random_seed):\n \n X_train = np.array([[0, 1], [1, 2]])\n X_test = np.array([[2, 1], [1, 1]])\n\n grid = ParameterGrid(\n {\"n_estimators\": [3], \"max_samples\": [0.5, 1.0, 3], \"bootstrap\": [True, False]}\n )\n\n with ignore_warnings():\n for params in grid:\n IsolationForest(random_state=global_random_seed, **params).fit(\n X_train\n ).predi", "d_id": 76027, "documentation": { "docstring": "Check Isolation Forest for various parameter settings.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 218189, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": 
"python3.10.4/Lib/importlib/abc.py", "file_name": "abc.py", "fun_name": "invalidate_caches", "commit_message": "add python 3.10.4 for windows", "code": "def invalidate_caches(self):\n \n\n_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter,\n machinery.PathFinder, machinery.WindowsRegistryFinder)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 1, "token_counts": 6, "n_ast_nodes": 40, "n_identifiers": 9, "random_cut": "def invalidate_caches(self):\n \n\n_register", "d_id": 55189, "documentation": { "docstring": "An optional method for clearing the finder's cache, if any.\n This method is used by importlib.invalidate_caches().\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 30, "language": "en" } }, { "id": 81589, "commit_id": "278db2cddebec97ec48011ecae45129be1ac43a4", "repo": "awx", "path": "awx/main/dispatch/reaper.py", "file_name": "reaper.py", "fun_name": "reap", "commit_message": "Split reaper for running and waiting jobs\n\nAvoid running jobs that have already been reapted\n\nCo-authored-by: Elijah DeLee \n\nRemove unnecessary extra actions\n\nFix waiting jobs in other cases of reaping", "code": "def reap(instance=None, status='failed', excluded_uuids=[]):\n \n me = instance\n if me is None:\n try:\n me = Instance.objects.me()\n except RuntimeError as e:\n logger.warning(f'Local instance is not registered, not running reaper: {e}')\n return\n workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id\n jobs = UnifiedJob.objects.filter(\n Q(status='running') & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)\n ).exclude(celery_task_id__in=excluded_uuids)\n for j in jobs:\n reap_job(j, status)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 131, "n_words": 49, "vocab_size": 40, "complexity": 4, "nloc": 14, "token_counts": 122, "n_ast_nodes": 205, "n_identifiers": 28, "random_cut": "def reap(instance=None, status='failed', excluded_uuids=[]):\n \n me = instance\n if me is None:\n try:\n me = Instance.objects.me()\n except RuntimeError as e:\n logger.warning(f'Local instance is not registered, not running reaper: {e}')\n return\n workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id\n jobs = UnifiedJob.objects.", "d_id": 17223, "documentation": { "docstring": "\n Reap all jobs in running for this instance.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 42481, "commit_id": "692adaff901dd9daf29400fdf3385130aefbfb2a", "repo": "nltk", "path": "nltk/corpus/reader/wordnet.py", "file_name": "wordnet.py", "fun_name": "closure", "commit_message": "Fix some tests in Wordnet-related DocStrings", "code": "def closure(self, rel, depth=-1):\n \n\n from nltk.util import acyclic_breadth_first\n\n for synset in acyclic_breadth_first(self, rel, depth):\n if synset != self:\n yield synset\n\n from nltk.util import acyclic_depth_first as acyclic_tree\n from nltk.util import unweighted_minimum_spanning_tree as mst\n\n # Also add this shortcut?\n # from nltk.util import unweighted_minimum_spanning_digraph as umsd\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 106, "n_words": 44, "vocab_size": 29, "complexity": 
3, "nloc": 5, "token_counts": 38, "n_ast_nodes": 89, "n_identifiers": 12, "random_cut": "def closure(self, rel, depth=-1):\n \n\n from nltk.util import acyclic_breadth_first\n\n for synset in acyclic_breadth_first(self, rel, depth):\n if s", "d_id": 7566, "documentation": { "docstring": "\n Return the transitive closure of source under the rel\n relationship, breadth-first, discarding cycles:\n\n >>> from nltk.corpus import wordnet as wn\n >>> computer = wn.synset('computer.n.01')\n >>> topic = lambda s:s.topic_domains()\n >>> print(list(computer.closure(topic)))\n [Synset('computer_science.n.01')]\n\n UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2\n\n\n Include redundant paths (but only once), avoiding duplicate searches\n (from 'animal.n.01' to 'entity.n.01'):\n\n >>> dog = wn.synset('dog.n.01')\n >>> hyp = lambda s:s.hypernyms()\n >>> print(list(dog.closure(hyp)))\n [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\\\n Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\\\n Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\\\n Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\\\n Synset('physical_entity.n.01'), Synset('entity.n.01')]\n\n UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7\n ", "n_words": 88, "vocab_size": 69, "n_whitespaces": 201, "language": "en" } }, { "id": 101068, "commit_id": "049314429f71a21e6595e9d27e9e36f6a3479c42", "repo": "faceswap", "path": "plugins/convert/writer/opencv.py", "file_name": "opencv.py", "fun_name": "_get_save_args", "commit_message": "Convert: Add option to output mask separately for draw-transparent", "code": "def _get_save_args(self) -> Tuple[int, ...]:\n \n filetype = self.config[\"format\"]\n args: Tuple[int, ...] 
= tuple()\n if filetype == \"jpg\" and self.config[\"jpg_quality\"] > 0:\n args = (cv2.IMWRITE_JPEG_QUALITY, # pylint: disable=no-member\n self.config[\"jpg_quality\"])\n if filetype == \"png\" and self.config[\"png_compress_level\"] > -1:\n args = (cv2.IMWRITE_PNG_COMPRESSION, # pylint: disable=no-member\n self.config[\"png_compress_level\"])\n logger.debug(args)\n return args\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 157, "n_words": 46, "vocab_size": 31, "complexity": 5, "nloc": 18, "token_counts": 98, "n_ast_nodes": 165, "n_identifiers": 13, "random_cut": "def _get_save_args(self) -> Tuple[int, ...]:\n \n filetype = self", "d_id": 20505, "documentation": { "docstring": " Obtain the save parameters for the file format.\n\n Returns\n -------\n tuple\n The OpenCV specific arguments for the selected file format\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 61, "language": "en" } }, { "id": 107133, "commit_id": "ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22", "repo": "matplotlib", "path": "lib/matplotlib/figure.py", "file_name": "figure.py", "fun_name": "set_constrained_layout_pads", "commit_message": "ENH: implement and use base layout_engine for more flexible layout.", "code": "def set_constrained_layout_pads(self, **kwargs):\n \n if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):\n self.get_layout_engine().set(**kwargs)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 3, "token_counts": 32, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def set_constrained_layout_pads(self, **kwargs):\n \n if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):\n self.", "d_id": 22598, "documentation": { "docstring": "\n Set padding for ``constrained_layout``.\n\n Tip: The parameters can be passed from a dictionary by using\n ``fig.set_constrained_layout(**pad_dict)``.\n\n See :doc:`/tutorials/intermediate/constrainedlayout_guide`.\n\n Parameters\n ----------\n w_pad : float, default: :rc:`figure.constrained_layout.w_pad`\n Width padding in inches. This is the pad around Axes\n and is meant to make sure there is enough room for fonts to\n look good. Defaults to 3 pts = 0.04167 inches\n\n h_pad : float, default: :rc:`figure.constrained_layout.h_pad`\n Height padding in inches. Defaults to 3 pts.\n\n wspace : float, default: :rc:`figure.constrained_layout.wspace`\n Width padding between subplots, expressed as a fraction of the\n subplot width. The total padding ends up being w_pad + wspace.\n\n hspace : float, default: :rc:`figure.constrained_layout.hspace`\n Height padding between subplots, expressed as a fraction of the\n subplot width. 
The total padding ends up being h_pad + hspace.\n\n ", "n_words": 122, "vocab_size": 74, "n_whitespaces": 291, "language": "en" } }, { "id": 151595, "commit_id": "255eb71270991fe480cd642ee5ea2ce69964f8a9", "repo": "freqtrade", "path": "freqtrade/freqai/freqai_interface.py", "file_name": "freqai_interface.py", "fun_name": "track_current_candle", "commit_message": "start tracking the current candle in FreqAI, add robustness to corr_df caching and inference timer, add test for cache corr_df", "code": "def track_current_candle(self):\n \n if self.dd.current_candle > self.current_candle:\n self.get_corr_dataframes = True\n self.pair_it = 0\n self.current_candle = self.dd.current_candle\n\n # Following methods which are overridden by user made prediction models.\n # See freqai/prediction_models/CatboostPredictionModel.py for an example.\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 85, "n_words": 32, "vocab_size": 28, "complexity": 2, "nloc": 5, "token_counts": 36, "n_ast_nodes": 62, "n_identifiers": 6, "random_cut": "def track_current_candle(self):\n \n if self.dd.current_candle > self.current_candle:\n self.get_corr_dataframes = True\n", "d_id": 35062, "documentation": { "docstring": "\n Checks if the latest candle appended by the datadrawer is\n equivalent to the latest candle seen by FreqAI. If not, it\n asks to refresh the cached corr_dfs, and resets the pair\n counter.\n ", "n_words": 32, "vocab_size": 24, "n_whitespaces": 68, "language": "en" } }, { "id": 63228, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "find", "commit_message": "upd; format", "code": "def find(self, req):\n \n dist = self.by_key.get(req.key)\n if dist is not None and dist not in req:\n # XXX add more info\n raise VersionConflict(dist, req)\n return dist\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 76, "n_words": 26, "vocab_size": 22, "complexity": 3, "nloc": 5, "token_counts": 40, "n_ast_nodes": 64, "n_identifiers": 8, "random_cut": "def find(self, req):\n \n dist = self.by_key.get(req.key)\n if dist is not None and dist not in req:\n # XXX a", "d_id": 13215, "documentation": { "docstring": "Find a distribution matching requirement `req`\n\n If there is an active distribution for the requested project, this\n returns it as long as it meets the version requirement specified by\n `req`. 
But, if there is an active distribution for the project and it\n does *not* meet the `req` requirement, ``VersionConflict`` is raised.\n If there is no active distribution for the requested project, ``None``\n is returned.\n ", "n_words": 64, "vocab_size": 38, "n_whitespaces": 114, "language": "en" } }, { "id": 113746, "commit_id": "a67180283b8d273b19f6a3497c6b898ab0c97b7d", "repo": "nni", "path": "nni/mutable/frozen.py", "file_name": "frozen.py", "fun_name": "current", "commit_message": "Mutable equal, frozen context, new labels (#5247)", "code": "def current() -> dict | None:\n \n try:\n ContextStack.top(_FROZEN_CONTEXT_KEY)\n sample: Sample = {}\n for ctx in ContextStack.stack(_FROZEN_CONTEXT_KEY):\n if not isinstance(ctx, dict):\n raise TypeError(f'Expect architecture to be a dict, found: {ctx}')\n sample.update(ctx)\n return sample\n except NoContextError:\n return None\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 161, "n_words": 36, "vocab_size": 35, "complexity": 4, "nloc": 19, "token_counts": 61, "n_ast_nodes": 106, "n_identifiers": 13, "random_cut": "def current() -> dict | None:\n \n try:\n ContextStack.top(_FROZEN_CONTEXT_KEY)\n sample: Sample = {}\n for ctx in ContextStack", "d_id": 25019, "documentation": { "docstring": "Retrieve the current frozen context.\n If multiple layers have been found, they would be merged from bottom to top.\n\n Returns\n -------\n The sample in frozen context.\n If no sample is found, return none.\n ", "n_words": 33, "vocab_size": 28, "n_whitespaces": 75, "language": "en" } }, { "id": 70, "commit_id": "10ae1d589044a6ae4722ead7aedc63fcdc4923b5", "repo": "PySyft", "path": "packages/syft/tests/syft/core/tensor/tensor_serde_test.py", "file_name": "tensor_serde_test.py", "fun_name": "test_rept_child", "commit_message": "Started DPTensor resource optimization\n\n- Added initial REPT and SEPT benchmarking tests\n- Deleted unused old Tensor classes\n- Added pympler for memory size tests\n\nCo-authored-by: @IshanMi\nCo-authored-by: @rasswanth-s", "code": "def test_rept_child() -> None:\n \n rows = 10_000\n cols = 7\n rept_row_count = 5\n\n # these times and sizes are based on the above constants\n # and Madhavas MacBook Pro 2019\n expected_rept_mem_size = 4.010650634765625\n expected_rept_ser_size = 7.4926300048828125\n macbook_pro_2019_ser_time = 0.18791760900000032\n macbook_pro_2019_de_time = 0.1726598199999998\n\n sept = make_sept(rows=rows, cols=cols)\n rept_rows = [sept.copy() for i in range(rept_row_count)]\n\n rept = REPT(rows=rept_rows)\n\n start = timeit.default_timer()\n ser = sy.serialize(rept, to_bytes=True)\n end = timeit.default_timer()\n time_ser = end - start\n\n start = timeit.default_timer()\n de = sy.deserialize(ser, from_bytes=True)\n end = timeit.default_timer()\n time_de = end - start\n\n assert rept == de\n\n current_rept_mem_size = size(rept)\n mem_diff = (current_rept_mem_size / expected_rept_mem_size * 100) - 100\n\n current_rept_bytes_size = size(ser)\n bytes_diff = (current_rept_bytes_size / expected_rept_ser_size * 100) - 100\n\n ser_time_diff = (time_ser / macbook_pro_2019_ser_time * 100) - 100\n de_time_diff = (time_de / macbook_pro_2019_de_time * 100) - 100\n\n print(\"REPT Stats\")\n print(\"==========\")\n print(\"In-memory size of REPT\", size(rept))\n print(\"Serialized size of REPT\", size(ser))\n print(f\"Serializing {rept_row_count}x{rows}x{cols} took {time_ser} secs\")\n print(f\"Deserializing 
{rept_row_count}x{rows}x{cols} took {time_de} secs\")\n\n print(\"Current Results\")\n print(\"===============\")\n print(f\"In-memory size delta: {mem_diff}%\")\n print(f\"Serialized size delta: {bytes_diff}%\")\n print(f\"Serializing time delta: {ser_time_diff}%\")\n print(f\"Deserializing time delta: {de_time_diff}%\")\n\n # we want to assert that our calculated values are smaller than the old values with\n # some tolerance\n assert (current_rept_mem_size - expected_rept_mem_size) < 1e-3\n assert (current_rept_bytes_size - expected_rept_ser_size) < 2e-2\n # TODO: make time benchmarks stable (probably can't run in parallel)\n # assert (time_ser - macbook_pro_2019_ser_time) < 2e-1\n # assert (time_de - macbook_pro_2019_de_time) < 2e-1\n\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 371, "n_words": 230, "vocab_size": 132, "complexity": 2, "nloc": 41, "token_counts": 278, "n_ast_nodes": 501, "n_identifiers": 37, "random_cut": "def test_rept_child() -> None:\n \n rows = 10_000\n cols = 7\n rept_row_count = 5\n\n # these times and sizes are based on the above constants\n # and Madhavas MacBook Pro 2019\n expected_rept_mem_size = 4.010650634765625\n expected_rept_ser_size = 7.4926300048828125\n macbook_pro_2019_ser_time = 0.18791760900000032\n macbook_pro_2019_de_time = 0.1726598199999998\n\n sept = make_sept(rows=rows, cols=cols)\n rept_rows = [sept.copy() for i in range(rept_row_count)]\n\n rept = REPT(rows=rept_rows)\n\n start = timeit.default_timer()\n ser = sy.serialize(rept, to_bytes=True)\n end = timeit.default_timer()\n time_ser = end - start\n\n start = timeit.default_timer()\n de = sy.deserialize(ser, from_byte", "d_id": 44, "documentation": { "docstring": "We need to benchmark both the size and time to serialize and deserialize REPTs", "n_words": 14, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 197037, "commit_id": "e0dc14eca132f37c5f49369eb4051eae37c9b119", "repo": "sympy", "path": "sympy/ntheory/ecm.py", "file_name": "ecm.py", "fun_name": "_ecm_one_factor", "commit_message": "Refactored import ordering in functions", "code": "def _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200):\n \n n = as_int(n)\n if B1 % 2 != 0 or B2 % 2 != 0:\n raise ValueError(\"The Bounds should be an even integer\")\n sieve.extend(B2)\n\n if isprime(n):\n return n\n\n from sympy.functions.elementary.miscellaneous import sqrt\n from sympy.polys.polytools import gcd\n curve = 0\n D = int(sqrt(B2))\n beta = [0]*(D + 1)\n S = [0]*(D + 1)\n k = 1\n for p in sieve.primerange(1, B1 + 1):\n k *= pow(p, integer_log(B1, p)[0])\n while(curve <= max_curve):\n curve += 1\n\n #Suyama's Paramatrization\n sigma = rgen.randint(6, n - 1)\n u = (sigma*sigma - 5) % n\n v = (4*sigma) % n\n diff = v - u\n u_3 = pow(u, 3, n)\n\n try:\n C = (pow(diff, 3, n)*(3*u + v)*mod_inverse(4*u_3*v, n) - 2) % n\n except ValueError:\n #If the mod_inverse(4*u_3*v, n) doesn't exist\n return gcd(4*u_3*v, n)\n\n a24 = (C + 2)*mod_inverse(4, n) % n\n Q = Point(u_3, pow(v, 3, n), a24, n)\n Q = Q.mont_ladder(k)\n g = gcd(Q.z_cord, n)\n\n #Stage 1 factor\n if g != 1 and g != n:\n return g\n #Stage 1 failure. 
Q.z = 0, Try another curve\n elif g == n:\n continue\n\n #Stage 2 - Improved Standard Continuation\n S[1] = Q.double()\n S[2] = S[1].double()\n beta[1] = (S[1].x_cord*S[1].z_cord) % n\n beta[2] = (S[2].x_cord*S[2].z_cord) % n\n\n for d in range(3, D + 1):\n S[d] = S[d - 1].add(S[1], S[d - 2])\n beta[d] = (S[d].x_cord*S[d].z_cord) % n\n\n g = 1\n B = B1 - 1\n T = Q.mont_ladder(B - 2*D)\n R = Q.mont_ladder(B)\n\n for r in range(B, B2, 2*D):\n alpha = (R.x_cord*R.z_cord) % n\n for q in sieve.primerange(r + 2, r + 2*D + 1):\n delta = (q - r) // 2\n f = (R.x_cord - S[d].x_cord)*(R.z_cord + S[d].z_cord) -\\\n alpha + beta[delta]\n g = (g*f) % n\n #Swap\n T, R = R, R.add(S[D], T)\n g = gcd(n, g)\n\n #Stage 2 Factor found\n if g != 1 and g != n:\n return g\n\n #ECM failed, Increase the bounds\n raise ValueError(\"Increase the bounds\")\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 798, "n_words": 319, "vocab_size": 170, "complexity": 15, "nloc": 58, "token_counts": 615, "n_ast_nodes": 933, "n_identifiers": 56, "random_cut": "def _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200):\n \n n = as_int(n)\n if B1 % 2 != 0 or B2 % 2 != 0:\n raise ValueError(\"The Bounds should be an even integer\")\n sieve.extend(B2)\n\n if isprime(n):\n return n\n\n from sympy.functions.elementary.miscellaneous import sqrt\n from sympy.polys.polytools import gcd\n curve = 0\n D = int(sqrt(B2))\n beta = [0]*(D + 1)\n S = [0]*(D + 1)\n k = 1\n for p in sieve.primerange(1, B1 + 1):\n k *= pow(p, integer_log(B1, p)[0])\n while(curve <= max_curve):\n curve += 1\n\n #Suyama's Paramatrization\n sigma = rgen.randint(6, n - 1)\n u = (sigma*sigma - 5) % n\n v = (4*sigma) % n\n diff = v - u\n u_3 = pow(u, 3, n)\n\n try:\n C = (pow(diff, 3, n)*(3*u + v)*mod_inverse(4*u_3*v, n) - 2) % n\n except ValueError:\n #If the mod_inverse(4*u_3*v, n) doesn't exist\n return gcd(4*u_3*v, n)\n\n a24 = (C + 2)*mod_inverse(4, n) % n\n Q = Point(u_3, pow(v, 3, n), a24, n)\n Q = Q.mont_ladder(k)\n g = gcd(Q.z_cord, n)\n\n #Stage 1 factor\n if g != 1 and g != n:\n return g\n #Stage 1 failure. Q.z = 0, Try another curve\n ", "d_id": 48294, "documentation": { "docstring": "Returns one factor of n using\n Lenstra's 2 Stage Elliptic curve Factorization\n with Suyama's Parameterization. Here Montgomery\n arithmetic is used for fast computation of addition\n and doubling of points in elliptic curve.\n\n This ECM method considers elliptic curves in Montgomery\n form (E : b*y**2*z = x**3 + a*x**2*z + x*z**2) and involves\n elliptic curve operations (mod N), where the elements in\n Z are reduced (mod N). Since N is not a prime, E over FF(N)\n is not really an elliptic curve but we can still do point additions\n and doubling as if FF(N) was a field.\n\n Stage 1 : The basic algorithm involves taking a random point (P) on an\n elliptic curve in FF(N). The compute k*P using Montgomery ladder algorithm.\n Let q be an unknown factor of N. Then the order of the curve E, |E(FF(q))|,\n might be a smooth number that divides k. Then we have k = l * |E(FF(q))|\n for some l. For any point belonging to the curve E, |E(FF(q))|*P = O,\n hence k*P = l*|E(FF(q))|*P. Thus kP.z_cord = 0 (mod q), and the unknownn\n factor of N (q) can be recovered by taking gcd(kP.z_cord, N).\n\n Stage 2 : This is a continuation of Stage 1 if k*P != O. The idea utilize\n the fact that even if kP != 0, the value of k might miss just one large\n prime divisor of |E(FF(q))|. 
In this case we only need to compute the\n scalar multiplication by p to get p*k*P = O. Here a second bound B2\n restrict the size of possible values of p.\n\n Parameters\n ==========\n\n n : Number to be Factored\n B1 : Stage 1 Bound\n B2 : Stage 2 Bound\n max_curve : Maximum number of curves generated\n\n References\n ==========\n\n .. [1] Carl Pomerance and Richard Crandall \"Prime Numbers:\n A Computational Perspective\" (2nd Ed.), page 344\n ", "n_words": 303, "vocab_size": 187, "n_whitespaces": 407, "language": "en" } }, { "id": 308463, "commit_id": "d26275011ae4e8ba0a8dcdc2a7ef81b5911d3900", "repo": "core", "path": "tests/components/command_line/test_cover.py", "file_name": "test_cover.py", "fun_name": "test_unique_id", "commit_message": "Add unique_id configuration variable to command_line integration (#58596)", "code": "async def test_unique_id(hass):\n \n await setup_test_entity(\n hass,\n {\n \"unique\": {\n \"command_open\": \"echo open\",\n \"command_close\": \"echo close\",\n \"command_stop\": \"echo stop\",\n \"unique_id\": \"unique\",\n },\n \"not_unique_1\": {\n \"command_open\": \"echo open\",\n \"command_close\": \"echo close\",\n \"command_stop\": \"echo stop\",\n \"unique_id\": \"not-so-unique-anymore\",\n },\n \"not_unique_2\": {\n \"command_open\": \"echo open\",\n \"command_close\": \"echo close\",\n \"command_stop\": \"echo stop\",\n \"unique_id\": \"not-so-unique-anymore\",\n },\n },\n )\n\n assert len(hass.states.async_all()) == 2\n\n ent_reg = entity_registry.async_get(hass)\n\n assert len(ent_reg.entities) == 2\n assert ent_reg.async_get_entity_id(\"cover\", \"command_line\", \"unique\") is not None\n assert (\n ent_reg.async_get_entity_id(\"cover\", \"command_line\", \"not-so-unique-anymore\")\n is not None\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 386, "n_words": 78, "vocab_size": 38, "complexity": 1, "nloc": 32, "token_counts": 138, "n_ast_nodes": 264, "n_identifiers": 11, "random_cut": "async def test_unique_id(hass):\n \n await setup_test_entity(\n hass,\n {\n \"unique\": {\n \"command_open\": \"echo open\",\n \"command_close\": \"echo close\",\n \"command_stop\": \"echo stop\",\n \"u", "d_id": 107219, "documentation": { "docstring": "Test unique_id option and if it only creates one cover per id.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 289693, "commit_id": "5e7f571f019c0b992b9cb8ffa545c12e8169d395", "repo": "core", "path": "tests/components/mqtt/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "mock_ssl_context", "commit_message": "Move advanced MQTT options to entry (#79351)\n\n* Move advanced broker settings to entry\r\n\r\n* Add repair issue for deprecated settings\r\n\r\n* Split CONFIG_SCHEMA\r\n\r\n* Do not store certificate UI flags in entry\r\n\r\n* Keep entered password in next dialog\r\n\r\n* Do not process yaml config in flow\r\n\r\n* Correct typo", "code": "def mock_ssl_context():\n \n with patch(\n \"homeassistant.components.mqtt.config_flow.SSLContext\"\n ) as mock_context, patch(\n \"homeassistant.components.mqtt.config_flow.load_pem_private_key\"\n ) as mock_key_check, patch(\n \"homeassistant.components.mqtt.config_flow.load_pem_x509_certificate\"\n ) as mock_cert_check:\n yield {\n \"context\": mock_context,\n \"load_pem_x509_certificate\": mock_cert_check,\n \"load_pem_private_key\": mock_key_check,\n }\n\n\n@pytest.fixture", "url": 
"https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 110, "n_words": 28, "vocab_size": 20, "complexity": 1, "nloc": 13, "token_counts": 42, "n_ast_nodes": 92, "n_identifiers": 7, "random_cut": "def mock_ssl_context():\n \n with patch(\n \"homeassistant.components.mqtt.config_flow.SSLC", "d_id": 88829, "documentation": { "docstring": "Mock the SSL context used to load the cert chain and to load verify locations.", "n_words": 15, "vocab_size": 12, "n_whitespaces": 14, "language": "en" } }, { "id": 42640, "commit_id": "f352ee63a5d09546a7997ba8f2f8702a1ddb4af7", "repo": "airflow", "path": "tests/cli/commands/test_task_command.py", "file_name": "test_task_command.py", "fun_name": "test_task_states_for_dag_run_when_dag_run_not_exists", "commit_message": "Replaced all days_ago functions with datetime functions (#23237)\n\nCo-authored-by: Dev232001 ", "code": "def test_task_states_for_dag_run_when_dag_run_not_exists(self):\n \n with pytest.raises(DagRunNotFound):\n default_date2 = timezone.datetime(2016, 1, 9)\n task_command.task_states_for_dag_run(\n self.parser.parse_args(\n [\n 'tasks',\n 'states-for-dag-run',\n 'not_exists_dag',\n default_date2.isoformat(),\n '--output',\n \"json\",\n ]\n )\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 274, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 15, "token_counts": 56, "n_ast_nodes": 97, "n_identifiers": 13, "random_cut": "def test_task_states_for_dag_run_when_dag_run_not_exists(self):\n \n with pytest.raises(DagRunNotFound):\n default_date2 = t", "d_id": 7671, "documentation": { "docstring": "\n task_states_for_dag_run should return an AirflowException when invalid dag id is passed\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 73217, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/tests/test_page_modeladmin.py", "file_name": "test_page_modeladmin.py", "fun_name": "test_title_present", "commit_message": "Reformat with black", "code": "def test_title_present(self):\n \n response = self.get(4)\n self.assertContains(response, \"Christmas\", 3)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 5, "random_cut": "def test_title_present(self):\n \n response = self.get(4)\n self.assertConta", "d_id": 15993, "documentation": { "docstring": "\n The page title should appear three times. 
Once in the header, and two times\n in the field listing (as the actual title and as the draft title)\n ", "n_words": 27, "vocab_size": 21, "n_whitespaces": 49, "language": "en" } }, { "id": 107267, "commit_id": "f7e4349b6c20d127e88a8f750fe1df7462350971", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "_set_position", "commit_message": "Fix typos", "code": "def _set_position(self, pos, which='both'):\n \n if not isinstance(pos, mtransforms.BboxBase):\n pos = mtransforms.Bbox.from_bounds(*pos)\n for ax in self._twinned_axes.get_siblings(self):\n if which in ('both', 'active'):\n ax._position.set(pos)\n if which in ('both', 'original'):\n ax._originalPosition.set(pos)\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 121, "n_words": 30, "vocab_size": 23, "complexity": 5, "nloc": 9, "token_counts": 85, "n_ast_nodes": 143, "n_identifiers": 16, "random_cut": "def _set_position(self, pos, which='both'):\n \n i", "d_id": 22669, "documentation": { "docstring": "\n Private version of set_position.\n\n Call this internally to get the same functionality of `set_position`,\n but not to take the axis out of the constrained_layout hierarchy.\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 54, "language": "en" } }, { "id": 298807, "commit_id": "9342a1b5777a0d0d5d289c7f5b90cf059152d6af", "repo": "core", "path": "homeassistant/components/somfy/climate.py", "file_name": "climate.py", "fun_name": "hvac_modes", "commit_message": "Use climate enums in somfy (#70739)", "code": "def hvac_modes(self) -> list[HVACMode]:\n \n hvac_state = HVAC_MODES_MAPPING[self._climate.get_hvac_state()]\n return [HVACMode.AUTO, hvac_state]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 8, "token_counts": 31, "n_ast_nodes": 50, "n_identifiers": 9, "random_cut": "def hvac_modes(self) -> list[HVACMode]:\n \n hvac_state = HVAC_MODES_MAPPING[self._climat", "d_id": 97748, "documentation": { "docstring": "Return the list of available hvac operation modes.\n\n HEAT and COOL mode are exclusive. End user has to enable a mode manually within the Somfy application.\n So only one mode can be displayed. 
Auto mode is a scheduler.\n ", "n_words": 38, "vocab_size": 33, "n_whitespaces": 59, "language": "en" } }, { "id": 44348, "commit_id": "1970845c11ef0cfe4b41a8497a212aebc59bc1e2", "repo": "airflow", "path": "tests/operators/test_python.py", "file_name": "test_python.py", "fun_name": "_assert_expected_task_states", "commit_message": "Add ShortCircuitOperator configurability for respecting downstream trigger rules (#20044)\n\n* Add short-circuit mode handling", "code": "def _assert_expected_task_states(self, dagrun, expected_states):\n \n\n tis = dagrun.get_task_instances()\n for ti in tis:\n try:\n expected_state = expected_states[ti.task_id]\n except KeyError:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")\n else:\n assert ti.state == expected_state\n\n all_downstream_skipped_states = {\n \"short_circuit\": State.SUCCESS,\n \"op1\": State.SKIPPED,\n \"op2\": State.SKIPPED,\n }\n all_success_states = {\"short_circuit\": State.SUCCESS, \"op1\": State.SUCCESS, \"op2\": State.SUCCESS}\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 175, "n_words": 46, "vocab_size": 37, "complexity": 3, "nloc": 9, "token_counts": 49, "n_ast_nodes": 160, "n_identifiers": 17, "random_cut": "def _assert_expected_task_states(self, dagrun, expected_states):\n \n\n tis = dagrun.get_task_instances()\n for ti in tis:\n try:\n expected_state = expected_states[ti.task_id]\n except KeyError:\n raise ValueError(f\"Invalid task id {ti.task_id} found!\")\n else:\n assert ti.state == expected_state\n\n all_downstream_skipp", "d_id": 8242, "documentation": { "docstring": "Helper function that asserts `TaskInstances` of a given `task_id` are in a given state.", "n_words": 14, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 67378, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/selling/page/point_of_sale/point_of_sale.py", "file_name": "point_of_sale.py", "fun_name": "set_customer_info", "commit_message": "style: format code with black", "code": "def set_customer_info(fieldname, customer, value=\"\"):\n\tif fieldname == \"loyalty_program\":\n\t\tfrappe.db.set_value(\"Customer\", customer, \"loyalty_program\", value)\n\n\tcontact = frappe.get_cached_value(\"Customer\", customer, \"customer_primary_contact\")\n\tif not contact:\n\t\tcontact = frappe.db.sql(\n\t\t\t,\n\t\t\t(customer),\n\t\t\tas_dict=1,\n\t\t)\n\t\tcontact = contact[0].get(\"parent\") if contact else None\n\n\tif not contact:\n\t\tnew_contact = frappe.new_doc(\"Contact\")\n\t\tnew_contact.is_primary_contact = 1\n\t\tnew_contact.first_name = customer\n\t\tnew_contact.set(\"links\", [{\"link_doctype\": \"Customer\", \"link_name\": customer}])\n\t\tnew_contact.save()\n\t\tcontact = new_contact.name\n\t\tfrappe.db.set_value(\"Customer\", customer, \"customer_primary_contact\", contact)\n\n\tcontact_doc = frappe.get_doc(\"Contact\", contact)\n\tif fieldname == \"email_id\":\n\t\tcontact_doc.set(\"email_ids\", [{\"email_id\": value, \"is_primary\": 1}])\n\t\tfrappe.db.set_value(\"Customer\", customer, \"email_id\", value)\n\telif fieldname == \"mobile_no\":\n\t\tcontact_doc.set(\"phone_nos\", [{\"phone\": value, \"is_primary_mobile_no\": 1}])\n\t\tfrappe.db.set_value(\"Customer\", customer, \"mobile_no\", value)\n\tcontact_doc.save()\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 
14, "n_whitespaces": 63, "n_words": 91, "vocab_size": 57, "complexity": 7, "nloc": 34, "token_counts": 233, "n_ast_nodes": 411, "n_identifiers": 22, "random_cut": "def set_customer_info(fieldname, customer, value=\"\"):\n\tif fieldname == \"loyalty_program\":\n\t\tfrappe.db.set_value(\"Customer\", customer, \"loyalty_program\", value)\n\n\tcontact = frappe.get_cached_value(\"Customer\", customer, \"customer_primary_contact\")\n\tif not contact:\n\t\tcontact = frappe.db.sql(\n\t\t\t,\n\t\t\t(customer),\n\t\t\tas_dict=1,\n\t\t)\n\t\tcontact = contact[0].get(\"parent\") if contact else None\n\n\tif not contact:\n\t\tnew_contact = frappe.new_doc(\"Contact\")\n\t\tnew_contact.is_primary_contact = 1\n\t\tnew_contact.first_name = customer\n\t\tnew_contact.set(\"links\", [{\"link_doctype\": \"Customer\", \"link_name\": customer}])\n\t\tnew_contact.save()\n\t\tcontact = new_contact.name\n\t\tfrappe.db.set_value(\"Customer\", customer, \"customer_primary_contact\", contact)\n\n\tcontact_doc = frappe.get_doc(\"Contact\", contact)\n\tif fieldname == \"email_id\":\n\t\tcontact_doc.se", "d_id": 14508, "documentation": { "docstring": "\n\t\t\tSELECT parent FROM `tabDynamic Link`\n\t\t\tWHERE\n\t\t\t\tparenttype = 'Contact' AND\n\t\t\t\tparentfield = 'links' AND\n\t\t\t\tlink_doctype = 'Customer' AND\n\t\t\t\tlink_name = %s\n\t\t\t", "n_words": 21, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 71742, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_unpublish_page.py", "file_name": "test_unpublish_page.py", "fun_name": "test_unpublish_not_include_children_view_post", "commit_message": "Reformat with black", "code": "def test_unpublish_not_include_children_view_post(self):\n \n # Post to the unpublish page\n response = self.client.post(\n reverse(\"wagtailadmin_pages:unpublish\", args=(self.test_page.id,)), {}\n )\n\n # Should be redirected to explorer page\n self.assertRedirects(\n response, reverse(\"wagtailadmin_explore\", args=(self.root_page.id,))\n )\n\n # Check that the page was unpublished\n self.assertFalse(SimplePage.objects.get(id=self.test_page.id).live)\n\n # Check that the descendant pages were not unpublished\n self.assertTrue(SimplePage.objects.get(id=self.test_child_page.id).live)\n self.assertTrue(SimplePage.objects.get(id=self.test_another_child_page.id).live)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 152, "n_words": 46, "vocab_size": 34, "complexity": 1, "nloc": 10, "token_counts": 118, "n_ast_nodes": 192, "n_identifiers": 19, "random_cut": "def test_unpublish_not_include_children_view_post(self):\n \n # Post to the unpublish page\n response = self.client.post(\n reverse(\"wagtailadmin_pages:unpublish\", args=(self.test_page.id,)), {}\n )\n\n # Should be redirected to explorer page\n self.assertRedirects(\n response, reverse(\"wagtailadmin_explore\", args=(self.root_page.id,))\n )\n\n # Check that the page was unpublished\n self.assertFalse(SimplePage.objects.get(id=self.test_page.id).live)\n\n # Check that the descendant pages wer", "d_id": 15725, "documentation": { "docstring": "\n This posts to the unpublish view and checks that the page was unpublished but its descendants were not\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 33, "language": "en" } }, { "id": 167015, "commit_id": "67045903306ac4a1cab108177e92df30d99912b4", "repo": "pandas", "path": "pandas/io/json/_json.py", 
"file_name": "_json.py", "fun_name": "_get_data_from_filepath", "commit_message": "Raise `FileNotFoundError` in `read_json` if input looks like file path but file is missing (#46718)\n\n* raise FileNotFoundError in _get_data_from_filepath()\r\n\r\n* update tests test_read_non_existent + test_read_expands_user_home_dir\r\n\r\n* add changelog entry in doc/source/whatsnew/v1.5.0.rst\r\n\r\n* use pandas.io.common._compression_to_extension instead of hard-coded extensions\r\n\r\n* move changelog entry from IO to other API changes\r\n\r\n* fix ImportError from _compression_to_extension -> _extension_to_compression rename\r\n\r\n* add test read_json very long file path\r\n\r\n* remove extra period in extension checking\r\n\r\nCo-authored-by: Matthew Roeschke ", "code": "def _get_data_from_filepath(self, filepath_or_buffer):\n \n # if it is a string but the file does not exist, it might be a JSON string\n filepath_or_buffer = stringify_path(filepath_or_buffer)\n if (\n not isinstance(filepath_or_buffer, str)\n or is_url(filepath_or_buffer)\n or is_fsspec_url(filepath_or_buffer)\n or file_exists(filepath_or_buffer)\n ):\n self.handles = get_handle(\n filepath_or_buffer,\n \"r\",\n encoding=self.encoding,\n compression=self.compression,\n storage_options=self.storage_options,\n errors=self.encoding_errors,\n )\n filepath_or_buffer = self.handles.handle\n elif (\n isinstance(filepath_or_buffer, str)\n and filepath_or_buffer.lower().endswith(\n (\".json\",) + tuple(f\".json{c}\" for c in _extension_to_compression)\n )\n and not file_exists(filepath_or_buffer)\n ):\n raise FileNotFoundError(f\"File {filepath_or_buffer} does not exist\")\n\n return filepath_or_buffer\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 368, "n_words": 75, "vocab_size": 54, "complexity": 9, "nloc": 26, "token_counts": 130, "n_ast_nodes": 213, "n_identifiers": 23, "random_cut": "def _get_data_from_filepath(self, filepath_or_buffer):\n \n # if it is a string but the file does not exist, it might be a JSON string\n filepath_or_buffer = stringify_path(filepath_or_buffer)\n if (\n not isinstance(filepath_or_buffer, str)\n or is_url(filepath_or_buffer)\n or is_fsspec_url(filepath_or_buffer)\n or file_exists(filepath_or_buffer)\n ):\n self.handles = get_handle(\n filepath_or_buffer,\n \"r\",\n encoding=self.encoding,\n compression=self.compression,\n storage_options=self.storage_options,\n errors=self.encoding_errors,\n )\n filepath_or_buffer = self.handles.handle\n elif (\n isinstance(filepath_or_buffer, str)\n ", "d_id": 39919, "documentation": { "docstring": "\n The function read_json accepts three input types:\n 1. filepath (string-like)\n 2. file-like object (e.g. open file object, StringIO)\n 3. JSON string\n\n This method turns (1) into (2) to simplify the rest of the processing.\n It returns input types (2) and (3) unchanged.\n\n It raises FileNotFoundError if the input is a string ending in\n one of .json, .json.gz, .json.bz2, etc. 
but no such file exists.\n ", "n_words": 64, "vocab_size": 55, "n_whitespaces": 140, "language": "en" } }, { "id": 217283, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ensurepip/__init__.py", "file_name": "__init__.py", "fun_name": "_run_pip", "commit_message": "add python 3.10.4 for windows", "code": "def _run_pip(args, additional_paths=None):\n # Run the bootstraping in a subprocess to avoid leaking any state that happens\n # after pip has executed. Particulary, this avoids the case when pip holds onto\n # the files in *additional_paths*, preventing us to remove them at the end of the\n # invocation.\n code = f\n return subprocess.run([sys.executable, '-W', 'ignore::DeprecationWarning',\n \"-c\", code], check=True).returncode\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 101, "n_words": 58, "vocab_size": 48, "complexity": 1, "nloc": 10, "token_counts": 38, "n_ast_nodes": 77, "n_identifiers": 10, "random_cut": "def _run_pip(args, additional_paths=None):\n # Run the bootstraping in a subprocess to avoid leaking any state that happens\n # after pip has executed. Particulary, this avoids the case when pip holds onto\n # the files in *additional_paths*, preventing us to remove them at the end of the\n ", "d_id": 54698, "documentation": { "docstring": "\nimport runpy\nimport sys\nsys.path = {additional_paths or []} + sys.path\nsys.argv[1:] = {args}\nrunpy.run_module(\"pip\", run_name=\"__main__\", alter_sys=True)\n", "n_words": 17, "vocab_size": 14, "n_whitespaces": 12, "language": "en" } }, { "id": 208740, "commit_id": "d858213d4088237e1481038865bc52ccdd074053", "repo": "ipython", "path": "IPython/lib/tests/test_pretty.py", "file_name": "test_pretty.py", "fun_name": "test_pprint_heap_allocated_type", "commit_message": "xxlimited_35 module now has the same name in repr in Py 3.11\n\nSee https://github.com/python/cpython/commit/a87c9b538fbfc42883417c4d5e69f1a5922690e3", "code": "def test_pprint_heap_allocated_type():\n \n module_name = \"xxlimited\" if sys.version_info < (3, 10) else \"xxlimited_35\"\n expected_output = (\n \"xxlimited.Null\" if sys.version_info < (3, 11) else \"xxlimited_35.Null\"\n )\n xxlimited = pytest.importorskip(module_name)\n output = pretty.pretty(xxlimited.Null)\n assert output == expected_output\n\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 62, "n_words": 34, "vocab_size": 24, "complexity": 3, "nloc": 8, "token_counts": 59, "n_ast_nodes": 100, "n_identifiers": 11, "random_cut": "def test_pprint_heap_allocated_type():\n \n module_name = \"xxlimited\" if sys.version_info < (3, 10) else \"xxlimited_35\"\n expected_output = (\n \"xxlimited.Null\" if sys.version_info < (3, 11) else \"xxlimited_35.Null\"\n )\n xxlimited = pyt", "d_id": 52497, "documentation": { "docstring": "\n Test that pprint works for heap allocated types.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 95663, "commit_id": "9af098891a8243d08ee5ab6e51925a082135e3f2", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metrics.py", "file_name": "test_organization_metrics.py", "fun_name": "test_orderby_percentile_with_many_fields_transactions_unsupported_fields", "commit_message": "feat(metrics): Support multi-field orderby for performance [INGEST-805] (#31162)\n\n* feat(metrics): Support 
metrics multi-field orderby queries\r\n\r\nAdds support for the performance table to the\r\nmetrics organization data endpoint", "code": "def test_orderby_percentile_with_many_fields_transactions_unsupported_fields(self):\n \n response = self.get_response(\n self.organization.slug,\n field=[\n \"p50(sentry.transactions.measurements.lcp)\",\n \"sum(user_misery)\",\n ],\n statsPeriod=\"1h\",\n interval=\"1h\",\n datasource=\"snuba\",\n groupBy=[\"project_id\", \"transaction\"],\n orderBy=\"p50(sentry.transactions.measurements.lcp)\",\n )\n assert response.status_code == 400\n assert (\n response.json()[\"detail\"]\n == \"Multi-field select order by queries is not supported for metric user_misery\"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 219, "n_words": 37, "vocab_size": 34, "complexity": 1, "nloc": 18, "token_counts": 71, "n_ast_nodes": 123, "n_identifiers": 14, "random_cut": "def test_orderby_percentile_with_many_fields_transactions_unsupported_fields(self):\n \n response = self.get_response(\n self.organization.slug,\n field=[\n \"p50(sentry.transactions.measurements.lcp)\",\n \"sum(user_misery)\",\n ],\n statsPeriod=\"1h\",\n interval=\"1h\",\n datasource=\"snuba\",\n groupBy=[\"project_id\", \"transaction\"],\n orderBy=\"p50(sentry.transactions.measurements.lcp)\",\n )\n assert response.status_code == 400\n assert (\n response.json()[\"detail\"]\n == \"Multi-field select order by queries is not supported for metric user_misery\"\n )\n", "d_id": 19226, "documentation": { "docstring": "\n Test that contains a field in the `select` that is performance related but currently\n not supported should return a 400\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 42, "language": "en" } }, { "id": 266736, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/integration/__init__.py", "file_name": "__init__.py", "fun_name": "generate_dependency_map", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[IntegrationTarget]]\n \n targets_dict = dict((target.name, target) for target in integration_targets)\n target_dependencies = analyze_integration_target_dependencies(integration_targets)\n dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]]\n\n invalid_targets = set()\n\n for dependency, dependents in target_dependencies.items():\n dependency_target = targets_dict.get(dependency)\n\n if not dependency_target:\n invalid_targets.add(dependency)\n continue\n\n for dependent in dependents:\n if dependent not in dependency_map:\n dependency_map[dependent] = set()\n\n dependency_map[dependent].add(dependency_target)\n\n if invalid_targets:\n raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))\n\n return dependency_map\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 180, "n_words": 67, "vocab_size": 46, "complexity": 7, "nloc": 17, "token_counts": 115, "n_ast_nodes": 192, "n_identifiers": 21, "random_cut": "def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[IntegrationTarget]]\n \n targets_dict = dict((target.name, target) for target in integration_targets)\n target_dependencies = analyze_integration_target_dependencies(integration_targets)\n dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]]\n\n invalid_targets = set()\n\n for d", "d_id": 78547, "documentation": { "docstring": "Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend.", "n_words": 22, "vocab_size": 19, "n_whitespaces": 21, "language": "en" } }, { "id": 321852, "commit_id": "ee4d6e0396a6b570f4d5592a9c4c1a9fee1027b6", "repo": "qutebrowser", "path": "qutebrowser/misc/sql.py", "file_name": "sql.py", "fun_name": "text", "commit_message": "sql: Add *all* primary sqlite result codes\n\nFor three reasons:\n\n- There are only 31 of them, and we don't really expect any more to\n turn up (last happened in 2013, and we have a test for it happening)\n- It makes for nicer debug output\n- It always felt strange to only have a small subset in the enum", "code": "def text(self) -> str:\n \n if self.error is None:\n return str(self)\n return self.error.databaseText()\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 44, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 8, "token_counts": 28, "n_ast_nodes": 48, "n_identifiers": 5, "random_cut": "def text(self) -> str:\n \n if self.error is None:\n return", "d_id": 117945, "documentation": { "docstring": "Get a short text description of the error.\n\n This is a string 
suitable to show to the user as error message.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 124881, "commit_id": "09a6e5336ad6ab3c41e4a16e906c778aee2450bc", "repo": "ray", "path": "python/ray/serve/tests/fault_tolerance_tests/test_controller_recovery.py", "file_name": "test_controller_recovery.py", "fun_name": "test_recover_start_from_replica_actor_names", "commit_message": "[Serve][Part2] Migrate the tests to use deployment graph api (#26507)", "code": "def test_recover_start_from_replica_actor_names(serve_instance):\n \n # Test failed to deploy with total of 2 replicas,\n # but first constructor call fails.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 27, "n_words": 18, "vocab_size": 17, "complexity": 14, "nloc": 62, "token_counts": 343, "n_ast_nodes": 15, "n_identifiers": 2, "random_cut": "def test_recover_start_from_replica_actor_names(serve_instance):\n \n # Test failed to deploy with tot", "d_id": 27710, "documentation": { "docstring": "Test controller is able to recover starting -> running replicas from\n actor names.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 314579, "commit_id": "fb108533580d5f4c326ca970d8e6fd4998cc5593", "repo": "core", "path": "homeassistant/components/zha/core/group.py", "file_name": "group.py", "fun_name": "associated_entities", "commit_message": "Fix mypy issues in zha core modules (#74028)\n\n* Fix mypy issues in zha gateway, group and helpers\r\n\r\n* Cleanup device\r\n\r\n* Apply suggestion\r\n\r\n* Raise ValueError\r\n\r\n* Use hass.config.path", "code": "def associated_entities(self) -> list[dict[str, Any]]:\n \n ha_entity_registry = self.device.gateway.ha_entity_registry\n zha_device_registry = self.device.gateway.device_registry\n return [\n GroupEntityReference(\n ha_entity_registry.async_get(entity_ref.reference_id).name,\n ha_entity_registry.async_get(entity_ref.reference_id).original_name,\n entity_ref.reference_id,\n )._asdict()\n for entity_ref in zha_device_registry.get(self.device.ieee)\n if list(entity_ref.cluster_channels.values())[\n 0\n ].cluster.endpoint.endpoint_id\n == self.endpoint_id\n ]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 190, "n_words": 29, "vocab_size": 28, "complexity": 3, "nloc": 16, "token_counts": 107, "n_ast_nodes": 164, "n_identifiers": 25, "random_cut": "def associated_entities(self) -> list[dict[str, Any]]:\n \n ha_entity_registry = self.device.gateway.ha_entity_registry\n zha_device_registry = self.device.gateway.devic", "d_id": 113185, "documentation": { "docstring": "Return the list of entities that were derived from this endpoint.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 217714, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/client.py", "file_name": "client.py", "fun_name": "getheader", "commit_message": "add python 3.10.4 for windows", "code": "def getheader(self, name, default=None):\n \n if self.headers is None:\n raise ResponseNotReady()\n headers = self.headers.get_all(name) or default\n if isinstance(headers, str) or not hasattr(headers, '__iter__'):\n return headers\n else:\n return ', '.join(headers)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 11, "n_whitespaces": 96, "n_words": 28, "vocab_size": 24, "complexity": 5, "nloc": 8, "token_counts": 62, "n_ast_nodes": 103, "n_identifiers": 11, "random_cut": "def getheader(self, name, default=None):\n \n if self.headers is None:\n raise ResponseNotReady()\n headers = self.headers.get_all(name) or default\n if isinstance(headers,", "d_id": 54896, "documentation": { "docstring": "Returns the value of the header matching *name*.\n\n If there are multiple matching headers, the values are\n combined into a single string separated by commas and spaces.\n\n If no matching header is found, returns *default* or None if\n the *default* is not specified.\n\n If the headers are unknown, raises http.client.ResponseNotReady.\n\n ", "n_words": 50, "vocab_size": 37, "n_whitespaces": 92, "language": "en" } }, { "id": 181609, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/export_tests.py", "file_name": "export_tests.py", "fun_name": "test_generate_pipeline_code_2", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_generate_pipeline_code_2():\n \n\n pipeline = [\n 'KNeighborsClassifier',\n [\n 'CombineDFs',\n [\n 'GradientBoostingClassifier',\n 'input_matrix',\n 38.0,\n 5,\n 5,\n 5,\n 0.05,\n 0.5],\n [\n 'CombineDFs',\n [\n 'MinMaxScaler',\n 'input_matrix'\n ],\n ['ZeroCount',\n [\n 'MaxAbsScaler',\n 'input_matrix'\n ]\n ]\n ]\n ],\n 18,\n 'uniform',\n 2\n ]\n\n expected_code = \n\n assert expected_code == generate_pipeline_code(pipeline, tpot_obj.operators)\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 461, "n_words": 42, "vocab_size": 27, "complexity": 1, "nloc": 46, "token_counts": 78, "n_ast_nodes": 119, "n_identifiers": 6, "random_cut": "def test_generate_pipeline_code_2():\n \n\n pipeline = [\n 'KNeighborsClassifier',\n [\n 'CombineDFs',\n [\n 'GradientBoostingClassifier',\n 'input_matrix',\n 38.0,\n 5,\n 5,\n 5,\n 0.05,\n 0.5],\n [\n 'CombineDFs',\n [\n 'MinMaxScaler',\n 'input_matrix'\n ],\n ['ZeroCount',\n [\n ", "d_id": 43397, "documentation": { "docstring": "Assert that generate_pipeline_code() returns the correct code given a specific pipeline with two CombineDFs.make_pipeline(\n make_union(\n StackingEstimator(estimator=GradientBoostingClassifier(learning_rate=38.0, max_depth=5, max_features=5, min_samples_leaf=5, min_samples_split=0.05, n_estimators=0.5)),\n make_union(\n MinMaxScaler(),\n make_pipeline(\n MaxAbsScaler(),\n ZeroCount()\n )\n )\n ),\n KNeighborsClassifier(n_neighbors=18, p=\"uniform\", weights=2)\n)", "n_words": 33, "vocab_size": 30, "n_whitespaces": 124, "language": "en" } }, { "id": 61054, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/req/req_uninstall.py", "file_name": "req_uninstall.py", "fun_name": "_script_names", "commit_message": "upd; format", "code": "def _script_names(dist, script_name, is_gui):\n # type: (Distribution, str, bool) -> List[str]\n \n if dist_in_usersite(dist):\n bin_dir = get_bin_user()\n else:\n bin_dir = get_bin_prefix()\n exe_name = os.path.join(bin_dir, script_name)\n paths_to_remove = [exe_name]\n if WINDOWS:\n paths_to_remove.append(exe_name + '.exe')\n paths_to_remove.append(exe_name + '.exe.manifest')\n if is_gui:\n paths_to_remove.append(exe_name + '-script.pyw')\n else:\n 
paths_to_remove.append(exe_name + '-script.py')\n return paths_to_remove\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 134, "n_words": 46, "vocab_size": 32, "complexity": 4, "nloc": 15, "token_counts": 87, "n_ast_nodes": 153, "n_identifiers": 15, "random_cut": "def _script_names(dist, script_name, is_gui):\n # type: (Distribution, str, bool) -> List[str", "d_id": 12400, "documentation": { "docstring": "Create the fully qualified name of the files created by\n {console,gui}_scripts for the given ``dist``.\n Returns the list of file names\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 30, "language": "en" } }, { "id": 210061, "commit_id": "ef83ab8a3f7814e9886a7a22c8dcc55f506b6081", "repo": "PaddleDetection", "path": "ppdet/modeling/bbox_utils.py", "file_name": "bbox_utils.py", "fun_name": "bbox_center", "commit_message": "Add PP-YOLOv3 code (#5281)\n\n* [ppyolov3] add ppyolov3 base code\r\n\r\n* add ppyolov3 s/m/x\r\n\r\n* modify ema\r\n\r\n* modify code to convert onnx successfully\r\n\r\n* support arbitrary shape\r\n\r\n* update config to use amp default\r\n\r\n* refine ppyolo_head code\r\n\r\n* modify reparameter code\r\n\r\n* refine act layer\r\n\r\n* adapter pico_head and tood_head code\r\n\r\n* remove ppyolov3 yaml\r\n\r\n* fix codestyle\r\n\r\nCo-authored-by: wangxinxin08 ", "code": "def bbox_center(boxes):\n \n boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2\n boxes_cy = (boxes[..., 1] + boxes[..., 3]) / 2\n return paddle.stack([boxes_cx, boxes_cy], axis=-1)\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 36, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 4, "token_counts": 60, "n_ast_nodes": 88, "n_identifiers": 7, "random_cut": "def bbox_center(boxes):\n \n boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2\n boxes_cy = (boxes[..., 1] + box", "d_id": 52853, "documentation": { "docstring": "Get bbox centers from boxes.\n Args:\n boxes (Tensor): boxes with shape (..., 4), \"xmin, ymin, xmax, ymax\" format.\n Returns:\n Tensor: boxes centers with shape (..., 2), \"cx, cy\" format.\n ", "n_words": 29, "vocab_size": 22, "n_whitespaces": 52, "language": "en" } }, { "id": 210756, "commit_id": "67f16ed9cac254612ddb141fcd8a14db3dbfd6d6", "repo": "PaddleDetection", "path": "deploy/python/video_action_infer.py", "file_name": "video_action_infer.py", "fun_name": "predict", "commit_message": "Develop branch: add fight action for pphuman (#6160)\n\n* add fight for PP-Human\r\n\r\n* add short_size and target_size for fight recognition\r\n\r\n* add short_size and target_size for fight_infer\r\n\r\n* modify code according to the reviews\r\n\r\n* add the wrong deleted lines`\r\n\r\n* Update pipeline.py\r\n\r\n* Update infer_cfg.yml\r\n\r\n* visualize fight when fight action occur\r\n\r\n* 乱码修改\r\n\r\n* delete useless parmas\r\n\r\n* delete useless code str2bool", "code": "def predict(self, input):\n \n\n input_names = self.predictor.get_input_names()\n input_tensor = self.predictor.get_input_handle(input_names[0])\n\n output_names = self.predictor.get_output_names()\n output_tensor = self.predictor.get_output_handle(output_names[0])\n\n # preprocess\n self.recognize_times.preprocess_time_s.start()\n if type(input) == str:\n inputs = self.preprocess_video(input)\n else:\n inputs = self.preprocess_frames(input)\n self.recognize_times.preprocess_time_s.end()\n\n 
inputs = np.expand_dims(\n inputs, axis=0).repeat(\n self.batch_size, axis=0).copy()\n\n input_tensor.copy_from_cpu(inputs)\n\n # model prediction\n self.recognize_times.inference_time_s.start()\n self.predictor.run()\n self.recognize_times.inference_time_s.end()\n\n output = output_tensor.copy_to_cpu()\n\n # postprocess\n self.recognize_times.postprocess_time_s.start()\n classes, scores = self.postprocess(output)\n self.recognize_times.postprocess_time_s.end()\n\n return classes, scores\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 260, "n_words": 58, "vocab_size": 44, "complexity": 2, "nloc": 23, "token_counts": 193, "n_ast_nodes": 318, "n_identifiers": 36, "random_cut": "def predict(self, input):\n \n\n input_names = self.predictor.get_input_names()\n input_tensor = self.predictor.get_input_handle(input_names[0])\n\n output_names = self.predictor.get_output_names()\n output_tensor = self.predictor.get_output_handle(output_names[0])\n\n ", "d_id": 52968, "documentation": { "docstring": "\n Args:\n input (str) or (list): video file path or image data list\n Returns:\n results (dict): \n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 60, "language": "en" } }, { "id": 205257, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/autodetector.py", "file_name": "autodetector.py", "fun_name": "deep_deconstruct", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def deep_deconstruct(self, obj):\n \n if isinstance(obj, list):\n return [self.deep_deconstruct(value) for value in obj]\n elif isinstance(obj, tuple):\n return tuple(self.deep_deconstruct(value) for value in obj)\n elif isinstance(obj, dict):\n return {key: self.deep_deconstruct(value) for key, value in obj.items()}\n elif isinstance(obj, functools.partial):\n return (\n obj.func,\n self.deep_deconstruct(obj.args),\n self.deep_deconstruct(obj.keywords),\n )\n elif isinstance(obj, COMPILED_REGEX_TYPE):\n return RegexObject(obj)\n elif isinstance(obj, type):\n # If this is a type that implements 'deconstruct' as an instance method,\n # avoid treating this as being deconstructible itself - see #22951\n return obj\n elif hasattr(obj, \"deconstruct\"):\n deconstructed = obj.deconstruct()\n if isinstance(obj, models.Field):\n # we have a field which also returns a name\n deconstructed = deconstructed[1:]\n path, args, kwargs = deconstructed\n return (\n path,\n [self.deep_deconstruct(value) for value in args],\n {key: self.deep_deconstruct(value) for key, value in kwargs.items()},\n )\n else:\n return obj\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 469, "n_words": 121, "vocab_size": 72, "complexity": 14, "nloc": 29, "token_counts": 220, "n_ast_nodes": 337, "n_identifiers": 25, "random_cut": "def deep_deconstruct(self, obj):\n \n if isinstance(obj, list):", "d_id": 51047, "documentation": { "docstring": "\n Recursive deconstruction for a field and its arguments.\n Used for full comparison for rename/alter; sometimes a single-level\n deconstruction will not compare correctly.\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 51, "language": "en" } }, { "id": 247147, "commit_id": "91bc15c772d22fbe814170ab2e0fdbfa50f9c372", "repo": "synapse", "path": "tests/util/test_async_helpers.py", "file_name": "test_async_helpers.py", "fun_name": 
"test_cancellation", "commit_message": "Add `stop_cancellation` utility function (#12106)", "code": "def test_cancellation(self):\n \n deferred: \"Deferred[str]\" = Deferred()\n wrapper_deferred = stop_cancellation(deferred)\n\n # Cancel the new `Deferred`.\n wrapper_deferred.cancel()\n self.assertTrue(wrapper_deferred.called)\n self.failureResultOf(wrapper_deferred, CancelledError)\n self.assertFalse(\n deferred.called, \"Original `Deferred` was unexpectedly cancelled.\"\n )\n\n # Now make the inner `Deferred` fail.\n # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed\n # in logs.\n deferred.errback(ValueError(\"abc\"))\n self.assertIsNone(deferred.result, \"`Failure` was not consumed\")\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 163, "n_words": 54, "vocab_size": 46, "complexity": 1, "nloc": 11, "token_counts": 69, "n_ast_nodes": 126, "n_identifiers": 16, "random_cut": "def test_cancellation(self):\n \n deferred: \"Deferred[str]\" = Deferred()\n wrapper_deferred = stop_cancellation(deferred)\n\n # Cancel the new `Deferred`.\n wrapper_deferred.cancel()\n self.assertTrue(wrapper_deferred.called)\n self.failureResultOf(wrapper_deferred, CancelledError)\n self.assertFalse(\n deferred.called, \"Original `Deferre", "d_id": 71509, "documentation": { "docstring": "Test that cancellation of the new `Deferred` leaves the original running.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 218447, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getgeneratorlocals", "commit_message": "add python 3.10.4 for windows", "code": "def getgeneratorlocals(generator):\n \n\n if not isgenerator(generator):\n raise TypeError(\"{!r} is not a Python generator\".format(generator))\n\n frame = getattr(generator, \"gi_frame\", None)\n if frame is not None:\n return generator.gi_frame.f_locals\n else:\n return {}\n\n\n# ------------------------------------------------ coroutine introspection\n\nCORO_CREATED = 'CORO_CREATED'\nCORO_RUNNING = 'CORO_RUNNING'\nCORO_SUSPENDED = 'CORO_SUSPENDED'\nCORO_CLOSED = 'CORO_CLOSED'\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 43, "vocab_size": 33, "complexity": 3, "nloc": 8, "token_counts": 50, "n_ast_nodes": 115, "n_identifiers": 13, "random_cut": "def getgeneratorlocals(generator):\n \n\n if not isgenerator(generator):\n raise TypeError(\"{!r} is not a Python generator\".format(generator))\n\n frame = getattr(generator, \"gi_frame\", None)\n if frame is not None:\n return generator.gi_frame.f_locals\n else:\n return {}\n\n\n# ------------------------------------------------ coroutine introspection\n\nCORO_CREATED = 'CORO_CREATED'\nCORO_RUNNING = 'CORO_RUNNING'\nCORO_SUSPENDED = 'CORO_SUSPENDED'\nCORO_CLOSED = 'CORO_CLOSED'\n", "d_id": 55315, "documentation": { "docstring": "\n Get the mapping of generator local variables to their current values.\n\n A dict is returned, with the keys the local variable names and values the\n bound values.", "n_words": 27, "vocab_size": 22, "n_whitespaces": 36, "language": "en" } }, { "id": 54966, "commit_id": "37549d157007f6eef07ed8b1e2e14efb73134840", "repo": "prefect", "path": "tests/orion/api/test_run_history.py", "file_name": "test_run_history.py", 
"fun_name": "test_last_bin_contains_end_date", "commit_message": "Use status constants instead of hardcoded values\n\nCloses: PrefectHQ/orion#1673", "code": "async def test_last_bin_contains_end_date(client, route):\n \n response = await client.post(\n f\"/{route}/history\",\n json=dict(\n history_start=str(dt),\n history_end=str(dt.add(days=1, minutes=30)),\n history_interval_seconds=timedelta(days=1).total_seconds(),\n ),\n )\n\n assert response.status_code == status.HTTP_200_OK\n parsed = pydantic.parse_obj_as(List[responses.HistoryResponse], response.json())\n assert len(parsed) == 2\n assert parsed[0].interval_start == dt\n assert parsed[0].interval_end == dt.add(days=1)\n assert parsed[1].interval_start == dt.add(days=1)\n assert parsed[1].interval_end == dt.add(days=2)\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 128, "n_words": 44, "vocab_size": 32, "complexity": 1, "nloc": 16, "token_counts": 154, "n_ast_nodes": 240, "n_identifiers": 29, "random_cut": "async def test_last_bin_contains_end_date(client, route):\n \n response = await client.post(\n f\"/{route}/history\",\n json=dict(\n history_start=str(dt),\n history_end=str(dt.add(days=1, minutes=30)),\n history_interval_seconds=timedelta(days=1).total_seconds(),\n ),\n )\n\n assert r", "d_id": 11180, "documentation": { "docstring": "The last bin contains the end date, so its own end could be after the history end", "n_words": 17, "vocab_size": 14, "n_whitespaces": 16, "language": "en" } }, { "id": 124648, "commit_id": "b3878e26d765e28dd7c69abadbd856181037db97", "repo": "ray", "path": "python/ray/train/base_trainer.py", "file_name": "base_trainer.py", "fun_name": "_validate_attributes", "commit_message": "[AIR] Fix `ResourceChangingScheduler` not working with AIR (#26307)\n\nThis PR ensures that the new trial resources set by `ResourceChangingScheduler` are respected by the train loop logic by modifying the scaling config to match. Previously, even though trials had their resources updated, the scaling config was not modified which lead to eg. new workers not being spawned in the `DataParallelTrainer` even though resources were available.\r\n\r\nIn order to accomplish this, `ScalingConfigDataClass` is updated to allow equality comparisons with other `ScalingConfigDataClass`es (using the underlying PGF) and to create a `ScalingConfigDataClass` from a PGF.\r\n\r\nPlease note that this is an internal only change intended to actually make `ResourceChangingScheduler` work. In the future, `ResourceChangingScheduler` should be updated to operate on `ScalingConfigDataClass`es instead of PGFs as it is now. 
That will require a deprecation cycle.", "code": "def _validate_attributes(self):\n \n # Run config\n if not isinstance(self.run_config, RunConfig):\n raise ValueError(\n f\"`run_config` should be an instance of `ray.air.RunConfig`, \"\n f\"found {type(self.run_config)} with value `{self.run_config}`.\"\n )\n # Scaling config\n # Todo: move to ray.air.ScalingConfig\n if not isinstance(self.scaling_config, dict):\n raise ValueError(\n f\"`scaling_config` should be an instance of `dict`, \"\n f\"found {type(self.scaling_config)} with value `{self.scaling_config}`.\"\n )\n # Datasets\n if not isinstance(self.datasets, dict):\n raise ValueError(\n f\"`datasets` should be a dict mapping from a string to \"\n f\"`ray.data.Dataset` objects, \"\n f\"found {type(self.datasets)} with value `{self.datasets}`.\"\n )\n elif any(\n not isinstance(ds, ray.data.Dataset) and not callable(ds)\n for ds in self.datasets.values()\n ):\n raise ValueError(\n f\"At least one value in the `datasets` dict is not a \"\n f\"`ray.data.Dataset`: {self.datasets}\"\n )\n # Preprocessor\n if self.preprocessor is not None and not isinstance(\n self.preprocessor, ray.data.Preprocessor\n ):\n raise ValueError(\n f\"`preprocessor` should be an instance of `ray.data.Preprocessor`, \"\n f\"found {type(self.preprocessor)} with value `{self.preprocessor}`.\"\n )\n\n if self.resume_from_checkpoint is not None and not isinstance(\n self.resume_from_checkpoint, ray.air.Checkpoint\n ):\n raise ValueError(\n f\"`resume_from_checkpoint` should be an instance of \"\n f\"`ray.air.Checkpoint`, found {type(self.resume_from_checkpoint)} \"\n f\"with value `{self.resume_from_checkpoint}`.\"\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 659, "n_words": 168, "vocab_size": 86, "complexity": 11, "nloc": 40, "token_counts": 167, "n_ast_nodes": 377, "n_identifiers": 22, "random_cut": "def _validate_attributes(self):\n \n # Run config\n if not isinstance(self.run_config, RunConfig):\n raise ValueError(\n f\"`run_config` should be an instance of `ray.air.RunConfig`, \"\n f\"found {type(self.run_config)} with value `{self.run_config}`.\"\n )\n # Scaling config\n # Todo: move to ray.air.ScalingConfig\n if not isinstance(self.scaling_config, dict):\n raise ValueError(\n f\"`scaling_config` should be an instance of `dict`, \"\n f\"found {type(self.scaling_config)} with value `{self.scaling_config}`.\"\n )\n # Datasets\n if not isinstance(self.datasets, dict):\n raise ValueError(\n f\"`datasets` should be a dict mapping from a string to \"\n f\"`ray.data.Dataset` objects, \"\n f\"found {type(self.datasets)} with value `{self.datasets}`.\"\n )\n elif any(\n not isinstance(ds, ray.data.Dataset) and not callable(ds)\n for ds in self.datasets.values()\n ):\n raise ValueError(\n f\"At least one value in the `datasets` dict is not a \"\n f\"`ray.data.Dataset`: {self.datasets}\"\n )\n # P", "d_id": 27644, "documentation": { "docstring": "Called on __init()__ to validate trainer attributes.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 289963, "commit_id": "bcae6d604e2967c7475f0caa4b1b5e4e76ab88bf", "repo": "core", "path": "homeassistant/components/mqtt/device_tracker/schema_discovery.py", "file_name": "schema_discovery.py", "fun_name": "longitude", "commit_message": "Improve MQTT type hints part 8 (#81034)\n\n* Improve typing device_tracker discovery\r\n\r\n* Improve typing device_tracker yaml\r\n\r\n* Add test 
source_type attribute\r\n\r\n* Follow up comment\r\n\r\n* Initialize at `__init__` not at class level.\r\n\r\n* Use full name for return variable\r\n\r\n* Correct import, remove assert\r\n\r\n* Use AsyncSeeCallback", "code": "def longitude(self) -> float | None:\n \n if (\n self.extra_state_attributes is not None\n and ATTR_LONGITUDE in self.extra_state_attributes\n ):\n longitude: float = self.extra_state_attributes[ATTR_LONGITUDE]\n return longitude\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 97, "n_words": 25, "vocab_size": 21, "complexity": 3, "nloc": 9, "token_counts": 40, "n_ast_nodes": 64, "n_identifiers": 5, "random_cut": "def longitude(self) -> float | None:\n \n if (\n self.extra_state_attributes is not None\n and ATTR_LONG", "d_id": 89089, "documentation": { "docstring": "Return longitude if provided in extra_state_attributes or None.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 20240, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/windows.py", "file_name": "windows.py", "fun_name": "user_cache_dir", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def user_cache_dir(self) -> str:\n \n path = os.path.normpath(get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n return self._append_parts(path, opinion_value=\"Cache\")\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 7, "token_counts": 32, "n_ast_nodes": 63, "n_identifiers": 9, "random_cut": "def user_cache_dir(self) -> str:\n \n path = os.path.normpath(get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n return self._append_parts(path, opinion_value=\"Cache\")\n", "d_id": 3291, "documentation": { "docstring": "\n :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g.\n ``%USERPROFILE%\\\\AppData\\\\Local\\\\$appauthor\\\\$appname\\\\Cache\\\\$version``\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 39, "language": "en" } }, { "id": 87220, "commit_id": "30e13df85cc296e8eee62eb376a0310c2e0d0261", "repo": "sentry", "path": "src/sentry/relay/config/__init__.py", "file_name": "__init__.py", "fun_name": "get_project_config", "commit_message": "feat(dynamic-sampling): Add new bias for dev envs [TET-491] (#40382)\n\nThis PR add new bias for dev envs.\r\nAlso add common approach to adding new rules like: releases or health\r\nchecks to `generate_rules()` function.\r\n\r\nAlso enable mypy for `src/sentry/dynamic_sampling/`\r\n\r\nTODO (fix mypy issues after merge conflicts in) :\r\n- [x] src/sentry/dynamic_sampling/feature_multiplexer.py\r\n- [x] src/sentry/dynamic_sampling/utils.py", "code": "def get_project_config(project, full_config=True, 
project_keys=None):\n \n with sentry_sdk.push_scope() as scope:\n scope.set_tag(\"project\", project.id)\n with metrics.timer(\"relay.config.get_project_config.duration\"):\n return _get_project_config(project, full_config=full_config, project_keys=project_keys)\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 47, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 54, "n_ast_nodes": 93, "n_identifiers": 12, "random_cut": "def get_project_config(project, full_config=True, project_keys=None):\n \n with sentry_sdk.push_scope() as scope:\n scope.set_tag(\"project\", project.id)\n with metri", "d_id": 18254, "documentation": { "docstring": "Constructs the ProjectConfig information.\n :param project: The project to load configuration for. Ensure that\n organization is bound on this object; otherwise it will be loaded from\n the database.\n :param full_config: True if only the full config is required, False\n if only the restricted (for external relays) is required\n (default True, i.e. full configuration)\n :param project_keys: Pre-fetched project keys for performance. However, if\n no project keys are provided it is assumed that the config does not\n need to contain auth information (this is the case when used in\n python's StoreView)\n :return: a ProjectConfig object for the given project\n ", "n_words": 97, "vocab_size": 71, "n_whitespaces": 161, "language": "en" } }, { "id": 256282, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "haystack/nodes/retriever/text2sparql.py", "file_name": "text2sparql.py", "fun_name": "_query_kg", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. 
Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def _query_kg(self, sparql_query):\n \n try:\n response = self.knowledge_graph.query(sparql_query=sparql_query)\n\n # unpack different answer styles\n if isinstance(response, list):\n if len(response) == 0:\n result = \"\"\n else:\n result = []\n for x in response:\n for k, v in x.items():\n result.append(v[\"value\"])\n elif isinstance(response, bool):\n result = str(response)\n elif \"count\" in response[0]:\n result = str(int(response[0][\"count\"][\"value\"]))\n else:\n result = \"\"\n\n except Exception:\n result = \"\"\n\n return result, sparql_query\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 348, "n_words": 61, "vocab_size": 41, "complexity": 8, "nloc": 20, "token_counts": 127, "n_ast_nodes": 218, "n_identifiers": 19, "random_cut": "def _query_kg(self, sparql_query):\n \n try:\n response = self.knowledge_graph.query(sparql_query=sparql_query)\n\n # unpack different answer styles\n if isinstance(re", "d_id": 74852, "documentation": { "docstring": "\n Execute a single SPARQL query on the knowledge graph to retrieve an answer and unpack\n different answer styles for boolean queries, count queries, and list queries.\n\n :param sparql_query: SPARQL query that shall be executed on the knowledge graph\n ", "n_words": 38, "vocab_size": 29, "n_whitespaces": 67, "language": "en" } }, { "id": 218107, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/_bootstrap_external.py", "file_name": "_bootstrap_external.py", "fun_name": "_path_importer_cache", "commit_message": "add python 3.10.4 for windows", "code": "def _path_importer_cache(cls, path):\n \n if path == '':\n try:\n path = _os.getcwd()\n except FileNotFoundError:\n # Don't cache the failure as the cwd can easily change to\n # a valid directory later on.\n return None\n try:\n finder = sys.path_importer_cache[path]\n except KeyError:\n finder = cls._path_hooks(path)\n sys.path_importer_cache[path] = finder\n return finder\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 197, "n_words": 47, "vocab_size": 34, "complexity": 4, "nloc": 12, "token_counts": 58, "n_ast_nodes": 100, "n_identifiers": 11, "random_cut": "def _path_importer_cache(cls, path):\n \n if path == '':\n try:\n path = _os.getcwd()\n except FileNotFoundError:\n # Don't cache the failure as the cwd can easily change to\n # a valid directory later on.\n return None\n try:\n finder = sys.path_importer_cache[path]\n except KeyError:\n finder = cls._path_hooks(path)\n sys.path_importer_cache[path] = finder\n return finder\n", "d_id": 55135, "documentation": { "docstring": "Get the finder for the path entry from sys.path_importer_cache.\n\n If the path entry is not in the cache, find the appropriate finder\n and 
cache it. If no finder is available, store None.\n\n ", "n_words": 32, "vocab_size": 22, "n_whitespaces": 53, "language": "en" } }, { "id": 207077, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_docs/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_publish_parts", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_publish_parts(self):\n \n import docutils\n\n self.assertNotEqual(\n docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE, \"cmsreference\"\n )\n source = \"reST, `interpreted text`, default role.\"\n markup = \"
<p>reST, <cite>interpreted text</cite>, default role.</p>
\\n\"\n parts = docutils.core.publish_parts(source=source, writer_name=\"html4css1\")\n self.assertEqual(parts[\"fragment\"], markup)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 95, "n_words": 28, "vocab_size": 25, "complexity": 1, "nloc": 9, "token_counts": 57, "n_ast_nodes": 102, "n_identifiers": 15, "random_cut": "def test_publish_parts(self):\n \n import docutils\n\n self.asser", "d_id": 51857, "documentation": { "docstring": "\n Django shouldn't break the default role for interpreted text\n when ``publish_parts`` is used directly, by setting it to\n ``cmsreference`` (#6681).\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 49, "language": "en" } }, { "id": 249564, "commit_id": "df8b91ed2bba4995c59a5b067e3b252ab90c9a5e", "repo": "synapse", "path": "tests/storage/test_event_federation.py", "file_name": "test_event_federation.py", "fun_name": "test_get_backfill_points_in_room", "commit_message": "Limit and filter the number of backfill points to get from the database (#13879)\n\nThere is no need to grab thousands of backfill points when we only need 5 to make the `/backfill` request with. We need to grab a few extra in case the first few aren't visible in the history.\r\n\r\nPreviously, we grabbed thousands of backfill points from the database, then sorted and filtered them in the app. Fetching the 4.6k backfill points for `#matrix:matrix.org` from the database takes ~50ms - ~570ms so it's not like this saves a lot of time 🤷. But it might save us more time now that `get_backfill_points_in_room`/`get_insertion_event_backward_extremities_in_room` are more complicated after https://github.com/matrix-org/synapse/pull/13635 \r\n\r\nThis PR moves the filtering and limiting to the SQL query so we just have less data to work with in the first place.\r\n\r\nPart of https://github.com/matrix-org/synapse/issues/13356", "code": "def test_get_backfill_points_in_room(self):\n \n setup_info = self._setup_room_for_backfill_tests()\n room_id = setup_info.room_id\n depth_map = setup_info.depth_map\n\n # Try at \"B\"\n backfill_points = self.get_success(\n self.store.get_backfill_points_in_room(room_id, depth_map[\"B\"], limit=100)\n )\n backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]\n self.assertListEqual(\n backfill_event_ids, [\"b6\", \"b5\", \"b4\", \"2\", \"b3\", \"b2\", \"b1\"]\n )\n\n # Try at \"A\"\n backfill_points = self.get_success(\n self.store.get_backfill_points_in_room(room_id, depth_map[\"A\"], limit=100)\n )\n backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]\n # Event \"2\" has a depth of 2 but is not included here because we only\n # know the approximate depth of 5 from our event \"3\".\n self.assertListEqual(backfill_event_ids, [\"b3\", \"b2\", \"b1\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 240, "n_words": 88, "vocab_size": 62, "complexity": 3, "nloc": 16, "token_counts": 131, "n_ast_nodes": 219, "n_identifiers": 14, "random_cut": "def test_get_backfill_points_in_room(self):\n \n setup_info = self._setup_room_for_backfill_tests()\n room_id = setup_info.room_id\n depth_map = setup_info.depth_map\n\n # Try at \"B\"\n backfill_points = self.get_success(\n self.store.get_backfill_points_in_room(room_id, depth_map[\"B\"], limit=100)\n )\n backfill_event_ids = [backfill_point[0] for backfill_point in 
backfill_points]\n self.assertListEqual(\n backfill_event_ids, [\"b6\", \"b5\", \"b4\", \"2\", \"b3\", \"b2\", \"b1\"]\n )\n\n # Try at \"A\"\n backfill_points = self.get_success(\n self.store.get_backfill_points_in_room(room_id, depth_map[\"A\"], limit=100)\n )\n backfill_event_ids = [backfill_point[0] for backfill_point in backfill_p", "d_id": 72997, "documentation": { "docstring": "\n Test to make sure only backfill points that are older and come before\n the `current_depth` are returned.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 39, "language": "en" } }, { "id": 196753, "commit_id": "d54b0dc8170186cdd447bf40d55f805edd8a8d5a", "repo": "sympy", "path": "sympy/printing/theanocode.py", "file_name": "theanocode.py", "fun_name": "theano_code", "commit_message": "Update the deprecation warning for theanocode", "code": "def theano_code(expr, cache=None, **kwargs):\n \n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.8\",\n active_deprecations_target='theanocode-deprecated')\n\n if not theano:\n raise ImportError(\"theano is required for theano_code\")\n\n if cache is None:\n cache = global_cache\n\n return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 28, "vocab_size": 25, "complexity": 3, "nloc": 12, "token_counts": 62, "n_ast_nodes": 105, "n_identifiers": 13, "random_cut": "def theano_code(expr, cache=None, **kwargs):\n \n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.8\",\n active_deprecations_target='theanocode-deprecated')\n\n if not theano:\n raise Im", "d_id": 48149, "documentation": { "docstring": "\n Convert a SymPy expression into a Theano graph variable.\n\n .. deprecated:: 1.8\n\n ``sympy.printing.theanocode`` is deprecated. Theano has been renamed to\n Aesara. Use ``sympy.printing.aesaracode`` instead. See\n :ref:`theanocode-deprecated` for more information.\n\n Parameters\n ==========\n\n expr : sympy.core.expr.Expr\n SymPy expression object to convert.\n\n cache : dict\n Cached Theano variables (see :class:`TheanoPrinter.cache\n `). Defaults to the module-level global cache.\n\n dtypes : dict\n Passed to :meth:`.TheanoPrinter.doprint`.\n\n broadcastables : dict\n Passed to :meth:`.TheanoPrinter.doprint`.\n\n Returns\n =======\n\n theano.gof.graph.Variable\n A variable corresponding to the expression's value in a Theano symbolic\n expression graph.\n\n \n sympy.printing.theanocode is deprecated. Theano has been renamed to\n Aesara. Use sympy.printing.aesaracode instead.", "n_words": 94, "vocab_size": 63, "n_whitespaces": 209, "language": "en" } }, { "id": 154139, "commit_id": "adb16a17f721048005520388080627975c6852d8", "repo": "modin", "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_validate_axes_lengths", "commit_message": "FEAT-#4725: Make index and columns lazy in Modin DataFrame (#4726)\n\nCo-authored-by: Mahesh Vashishtha \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Vasily Litvinov ", "code": "def _validate_axes_lengths(self):\n \n if self._row_lengths_cache is not None and len(self.index) > 0:\n # An empty frame can have 0 rows but a nonempty index. 
If the frame\n # does have rows, the number of rows must equal the size of the\n # index.\n num_rows = sum(self._row_lengths_cache)\n if num_rows > 0:\n ErrorMessage.catch_bugs_and_request_email(\n num_rows != len(self._index_cache),\n f\"Row lengths: {num_rows} != {len(self._index_cache)}\",\n )\n ErrorMessage.catch_bugs_and_request_email(\n any(val < 0 for val in self._row_lengths_cache),\n f\"Row lengths cannot be negative: {self._row_lengths_cache}\",\n )\n if self._column_widths_cache is not None and len(self.columns) > 0:\n # An empty frame can have 0 column but a nonempty column index. If\n # the frame does have columns, the number of columns must equal the\n # size of the columns.\n num_columns = sum(self._column_widths_cache)\n if num_columns > 0:\n ErrorMessage.catch_bugs_and_request_email(\n num_columns != len(self._columns_cache),\n f\"Column widths: {num_columns} != {len(self._columns_cache)}\",\n )\n ErrorMessage.catch_bugs_and_request_email(\n any(val < 0 for val in self._column_widths_cache),\n f\"Column widths cannot be negative: {self._column_widths_cache}\",\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 518, "n_words": 147, "vocab_size": 70, "complexity": 9, "nloc": 23, "token_counts": 142, "n_ast_nodes": 273, "n_identifiers": 16, "random_cut": "def _validate_axes_lengths(self):\n \n if self._row_lengths_cache is not None and len(self.index) > 0:\n # An empty frame can have 0 rows but a nonempty index. If the frame\n # does have rows, the number of rows must equal the size of the\n # index.\n num_rows = sum(self._row_lengths_cache)\n if num_rows > 0:\n ErrorMessage.catch_bugs_and_request_email(\n num_rows != len(self._index_cache),\n f\"Row lengths: {num_rows} != {len(self._index_cache)}\",\n )\n ErrorMessage.catch_bugs_and_request_email(\n any(val < 0 for val in self._row_lengths_cache),\n f\"Row lengths cannot be negative: {self._row_lengths_cache}\",\n )\n if self._column_widths_cache is not None and len(self.columns) > 0:\n # An empty frame can have 0 column but a nonempty column index. 
If\n # the frame does have columns, the number of columns must equal the\n # size of the columns.\n num_columns = sum(self._column_widths_cache)\n if num_columns > 0:\n ErrorMessage.catch_bugs_and_request_email(\n num_columns != len(self._columns_cache),\n f\"Column widths: {num_columns} != {len(self._columns_cache)}\",\n )\n ErrorMessage.catch_bugs_and_request_email(\n any(val < 0 for val in self._column_widths_cache),\n f\"Column widths cannot be negative: {self._column_widths_cache}\",\n )\n", "d_id": 35804, "documentation": { "docstring": "Validate that labels are split correctly if split is known.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 215735, "commit_id": "3bb43882e727b1d36abe2e501759c9c5e9048ecf", "repo": "salt", "path": "tests/pytests/unit/utils/win_dacl/test_get_name.py", "file_name": "test_get_name.py", "fun_name": "test_get_name_capability_sid", "commit_message": "Add tests, migrate some tests to pytest", "code": "def test_get_name_capability_sid():\n \n cap_sid = \"S-1-15-3-1024-1065365936-1281604716-3511738428-1654721687-432734479-3232135806-4053264122-3456934681\"\n sid_obj = win32security.ConvertStringSidToSid(cap_sid)\n assert salt.utils.win_dacl.get_name(sid_obj) is None\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 29, "n_ast_nodes": 52, "n_identifiers": 9, "random_cut": "def test_get_name_capability_sid():\n \n cap_sid = \"S-1-15-3-1024-1065365936-1281604716-3511738428-1654721687-432734479-3232135806-4053264122-3456934681\"\n sid_obj = win32security.ConvertStringSidToSid(cap_sid)\n assert salt.utils.win_dacl.get_name(sid_obj) is No", "d_id": 54129, "documentation": { "docstring": "\n Test get_name with a compatibility SID. 
Should return `None` as we want to\n ignore these SIDs\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 26, "language": "en" } }, { "id": 156894, "commit_id": "a9ee6c2fdf0a3093747e675997143e0dbe584bad", "repo": "dask", "path": "dask/compatibility.py", "file_name": "compatibility.py", "fun_name": "entry_points", "commit_message": "Add `entry_points` compatibility utility (#9388)", "code": "def entry_points(group=None):\n \n eps = importlib.metadata.entry_points()\n if group:\n try:\n return eps.select(group=group)\n except AttributeError:\n return eps.get(group, [])\n return eps\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 65, "n_words": 17, "vocab_size": 14, "complexity": 3, "nloc": 8, "token_counts": 46, "n_ast_nodes": 77, "n_identifiers": 8, "random_cut": "def entry_points(group=None):\n \n eps = importlib.metadata.entry_points()\n if group:\n try:\n return eps.select(group=group)\n except AttributeError:\n return eps.get(group, [])\n return eps\n", "d_id": 36798, "documentation": { "docstring": "Returns an iterable of entrypoints.\n\n For compatibility with Python 3.8/3.9.\n In 3.10 the return type changed from a dict to an ``importlib.metadata.EntryPoints``.\n This compatibility utility can be removed once Python 3.10 is the minimum.\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 46, "language": "en" } }, { "id": 186900, "commit_id": "212c2ba990758cb9acd2b200e55302534988089a", "repo": "certbot", "path": "certbot/certbot/_internal/storage.py", "file_name": "storage.py", "fun_name": "elliptic_curve", "commit_message": "error out when --reuse-key conflicts with other flags (#9262)\n\n* error out when --reuse-key conflicts with other flags\r\n\r\n* add unit test\r\n\r\n* add integration tests\r\n\r\n* lint", "code": "def elliptic_curve(self) -> Optional[str]:\n \n key = self._private_key()\n if isinstance(key, EllipticCurvePrivateKey):\n return key.curve.name\n return None\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 53, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 9, "token_counts": 34, "n_ast_nodes": 56, "n_identifiers": 10, "random_cut": "def elliptic_curve(self) -> Optional[str]:\n \n key = self._private_key()\n if isinstance(key, EllipticCurvePrivateKey):\n return key.cu", "d_id": 45651, "documentation": { "docstring": "\n :returns: If the private key is an elliptic key, the name of its curve.\n :rtype: str\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 77682, "commit_id": "a3b1cb6c287a2a0c2957c8141c54453928e1b97e", "repo": "wagtail", "path": "wagtail/models/__init__.py", "file_name": "__init__.py", "fun_name": "page_type_display_name", "commit_message": "Add a page_type_display_name shortcut property", "code": "def page_type_display_name(self):\n \n if not self.specific_class or self.is_root():\n return \"\"\n else:\n return self.specific_class.get_verbose_name()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 55, "n_words": 12, "vocab_size": 11, "complexity": 3, "nloc": 5, "token_counts": 30, "n_ast_nodes": 55, "n_identifiers": 5, "random_cut": "def page_type_display_name(self):\n \n if no", "d_id": 16691, "documentation": { "docstring": "\n A human-readable version of this page's type\n ", "n_words": 7, "vocab_size": 7, 
"n_whitespaces": 22, "language": "en" } }, { "id": 41675, "commit_id": "6357619ec08a59e4ecf00c6b1300ac6e014a753f", "repo": "seaborn", "path": "seaborn/_core/plot.py", "file_name": "plot.py", "fun_name": "save", "commit_message": "Add some docstrings and basic API docs", "code": "def save(self, fname, **kwargs) -> Plot:\n \n # TODO expose important keyword arugments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 47, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 13, "token_counts": 28, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def save(self, fname, **kwargs) -> Plot:\n \n # TODO expose important keyword arugments in our signature?\n se", "d_id": 7423, "documentation": { "docstring": "\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n ", "n_words": 41, "vocab_size": 30, "n_whitespaces": 95, "language": "en" } }, { "id": 60741, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py", "file_name": "package_finder.py", "fun_name": "get_install_candidate", "commit_message": "upd; format", "code": "def get_install_candidate(self, link_evaluator, link):\n # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate]\n \n is_candidate, result = link_evaluator.evaluate_link(link)\n if not is_candidate:\n if result:\n self._log_skipped_link(link, reason=result)\n return None\n\n return InstallationCandidate(\n name=link_evaluator.project_name,\n link=link,\n version=result,\n )\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 141, "n_words": 29, "vocab_size": 27, "complexity": 3, "nloc": 11, "token_counts": 57, "n_ast_nodes": 89, "n_identifiers": 13, "random_cut": "def get_install_candidate(self, link_evaluator, link):\n # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate]\n \n is_candidate, r", "d_id": 12270, "documentation": { "docstring": "\n If the link is a candidate for install, convert it to an\n InstallationCandidate and return it. 
Otherwise, return None.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 168029, "commit_id": "4d7cfc436f8a7bc65c11770aa16f05e875b74077", "repo": "pandas", "path": "pandas/plotting/_core.py", "file_name": "_core.py", "fun_name": "bar", "commit_message": "TYP: pandas/plotting annotations from pandas-stubs (#47827)\n\n* TYP: pandas/plotting annotations from pandas-stubs\r\n\r\n* xticks + pyright", "code": "def bar(self, x=None, y=None, **kwargs) -> PlotAccessor:\n \n return self(kind=\"bar\", x=x, y=y, **kwargs)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 26, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 11, "token_counts": 37, "n_ast_nodes": 57, "n_identifiers": 7, "random_cut": "def bar(self, x=None, y=None, **kwargs) -> PlotAccessor:\n \n return self(kind=\"bar\", x=x, y=y, **kwargs)\n", "d_id": 40165, "documentation": { "docstring": "\n Vertical bar plot.\n\n A bar plot is a plot that presents categorical data with\n rectangular bars with lengths proportional to the values that they\n represent. A bar plot shows comparisons among discrete categories. One\n axis of the plot shows the specific categories being compared, and the\n other axis represents a measured value.\n ", "n_words": 52, "vocab_size": 38, "n_whitespaces": 102, "language": "en" } }, { "id": 269608, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "ctc_decode", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):\n \n input_shape = shape(y_pred)\n num_samples, num_steps = input_shape[0], input_shape[1]\n y_pred = tf.math.log(\n tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon()\n )\n input_length = tf.cast(input_length, tf.int32)\n\n if greedy:\n (decoded, log_prob) = tf.nn.ctc_greedy_decoder(\n inputs=y_pred, sequence_length=input_length\n )\n else:\n (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder(\n inputs=y_pred,\n sequence_length=input_length,\n beam_width=beam_width,\n top_paths=top_paths,\n )\n decoded_dense = []\n for st in decoded:\n st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps))\n decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1))\n return (decoded_dense, log_prob)\n\n\n# HIGH ORDER FUNCTIONS\n\n\n@keras_export(\"keras.backend.map_fn\")\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.map_fn\")\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 205, "n_words": 71, "vocab_size": 57, "complexity": 3, "nloc": 23, "token_counts": 197, "n_ast_nodes": 309, "n_identifiers": 40, "random_cut": "def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):\n \n input_shape = shape(y_pred)\n num_samples, num_steps = input_shape[0], input_shape[1]\n y_pred = tf.math.log(\n tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon()\n )\n input_length = tf.cast(input_length, tf.int32)\n\n if greedy:\n (decoded, log_prob) = tf.nn.ctc_greedy_decoder(\n inputs=y_pred, sequence_length=input_length\n )\n else:\n (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder(\n inputs=y_pred,\n sequence_length=input_length,\n 
beam_width=beam_width,\n top_paths=top_paths,\n )\n decoded_dense = []\n for st in decoded:\n st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps))\n decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1))\n return (decoded_dense, log_prob)\n\n\n# HIGH ", "d_id": 80228, "documentation": { "docstring": "Decodes the output of a softmax.\n\n Can use either greedy search (also known as best path)\n or a constrained dictionary search.\n\n Args:\n y_pred: tensor `(samples, time_steps, num_categories)`\n containing the prediction, or output of the softmax.\n input_length: tensor `(samples, )` containing the sequence length for\n each batch item in `y_pred`.\n greedy: perform much faster best-path search if `true`.\n This does not use a dictionary.\n beam_width: if `greedy` is `false`: a beam search decoder will be used\n with a beam of this width.\n top_paths: if `greedy` is `false`,\n how many of the most probable paths will be returned.\n\n Returns:\n Tuple:\n List: if `greedy` is `true`, returns a list of one element that\n contains the decoded sequence.\n If `false`, returns the `top_paths` most probable\n decoded sequences.\n Each decoded sequence has shape (samples, time_steps).\n Important: blank labels are returned as `-1`.\n Tensor `(top_paths, )` that contains\n the log probability of each decoded sequence.\n ", "n_words": 149, "vocab_size": 99, "n_whitespaces": 373, "language": "en" } }, { "id": 153340, "commit_id": "e7cb2e82f8b9c7a68f82abdd3b6011d661230b7e", "repo": "modin", "path": "modin/core/execution/ray/generic/modin_aqp.py", "file_name": "modin_aqp.py", "fun_name": "display_time_updates", "commit_message": "REFACTOR-#4251: define public interfaces in `modin.core.execution.ray` module (#3868)\n\nSigned-off-by: Anatoly Myachev ", "code": "def display_time_updates(bar):\n \n threading.Thread(target=_show_time_updates, args=(bar,)).start()\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 42, "n_identifiers": 8, "random_cut": "def display_time_updates(bar):\n \n threading.Thread(target", "d_id": 35376, "documentation": { "docstring": "\n Start displaying the progress `bar` in a notebook.\n\n Parameters\n ----------\n bar : tqdm.tqdm\n The progress bar wrapper to display in a notebook cell.\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 46, "language": "en" } }, { "id": 215074, "commit_id": "fbcc707e76f11770712e6828155258ac61e00ff8", "repo": "salt", "path": "salt/modules/aixpkg.py", "file_name": "aixpkg.py", "fun_name": "remove", "commit_message": "work in progress while resolve issue of python3_32 usage by dnf and yum", "code": "def remove(name=None, pkgs=None, **kwargs):\n \n targets = salt.utils.args.split_input(pkgs) if pkgs else [name]\n if not targets:\n return {}\n\n if pkgs:\n log.debug(\"Removing these fileset(s)/rpm package(s) %s: %s\", name, targets)\n\n errors = []\n\n # Get a list of the currently installed pkgs.\n old = list_pkgs()\n\n # Remove the fileset or rpm package(s)\n for target in targets:\n try:\n named, versionpkg, rpmpkg = _check_pkg(target)\n except CommandExecutionError as exc:\n if exc.info:\n errors.append(exc.info[\"errors\"])\n continue\n\n if rpmpkg:\n\n # assume use dnf or yum\n cmdflags = \" -y remove \"\n if pathlib.Path(\"/opt/freeware/bin/dnf\").is_file():\n cmdexe = \"/opt/freeware/bin/dnf\"\n elif 
pathlib.Path(\"/opt/freeware/bin/yum\").is_file():\n cmdexe = \"/opt/freeware/bin/yum\"\n elif pathlib.Path(\"/usr/bin/yum\").is_file():\n cmdexe = \"/usr/bin/yum\"\n else:\n cmdexe = \"/usr/bin/rpm\"\n cmdflags = \" -e \"\n\n cmd = [cmdexe, cmdflags, named]\n out = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n else:\n cmd = [\"/usr/sbin/installp\", \"-u\", named]\n out = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n\n # Get a list of the packages after the uninstall\n __context__.pop(\"pkg.list_pkgs\", None)\n new = list_pkgs()\n ret = salt.utils.data.compare_dicts(old, new)\n\n if errors:\n raise CommandExecutionError(\n \"Problems encountered removing filesets(s)/package(s)\",\n info={\"changes\": ret, \"errors\": errors},\n )\n\n return ret\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 513, "n_words": 157, "vocab_size": 106, "complexity": 12, "nloc": 40, "token_counts": 256, "n_ast_nodes": 443, "n_identifiers": 38, "random_cut": "def remove(name=None, pkgs=None, **kwargs):\n \n targets = salt.utils.args.split_input(pkgs) if pkgs else [name]\n if not targets:\n return {}\n\n if pkgs:\n log.debug(\"Removing these fileset(s)/rpm package(s) %s: %s\", name, targets)\n\n errors = []\n\n # Get a list of the currently installed pkgs.\n old = list_pkgs()\n\n # Remove the fileset or rpm package(s)\n for target in targets:\n try:\n named, versionpkg, rpmpkg = _check_pkg(target)\n except CommandExecutionError as exc:\n if exc.info:\n errors.append(exc.info[\"errors\"])\n continue\n\n if rpmpkg:\n\n # assume use dnf or yum\n cmdflags = \" -y remove \"\n if pathlib.Path(\"/opt/freeware/bin/dnf\").is_file():\n cmdexe = \"/opt/freeware/bin/dnf\"\n elif pathlib.Path(\"/opt/freeware/bin/yum\").is_file():\n cmdexe = \"/opt/freeware/bin/yum\"\n elif pathlib.Path(\"/usr/bin/yum\").is_file():\n cmdexe = \"/usr/bin/yum\"\n else:\n cmdexe = \"/usr/bin/rpm\"\n cmdflags = \" -e \"\n\n cmd = [cmdexe, cmdflags, named]\n out = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n else:\n cmd = [\"/usr/sbin/installp\", \"-u\", named]\n out = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n\n # Get a list of the packages after the uninstall\n __context__.pop(\"pkg.list_pkgs\", None)\n new = list_pkgs()\n ret = salt.utils.data.compare_dicts(old, new)\n\n if errors:\n raise CommandExecutionError(\n \"Problems encountered removing filesets(s)/package(s)\",\n info={\"changes\": ret, \"errors\": errors},\n )\n\n return ret\n\n", "d_id": 53792, "documentation": { "docstring": "\n Remove specified fileset(s)/rpm package(s).\n\n name\n The name of the fileset or rpm package to be deleted.\n\n .. versionadded:: 3005\n\n preference to install rpm packages are to use in the following order:\n /opt/freeware/bin/dnf\n /opt/freeware/bin/yum\n /usr/bin/yum\n /usr/bin/rpm\n\n Multiple Package Options:\n\n pkgs\n A list of filesets and/or rpm packages to delete.\n Must be passed as a python list. The ``name`` parameter will be\n ignored if this option is passed.\n\n\n Returns a list containing the removed packages.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' pkg.remove \n salt '*' pkg.remove tcsh\n salt '*' pkg.remove xlC.rte\n salt '*' pkg.remove Firefox.base.adt\n salt '*' pkg.remove pkgs='[\"foo\", \"bar\"]'\n ", "n_words": 101, "vocab_size": 72, "n_whitespaces": 243, "language": "en" } }, { "id": 107327, "commit_id": "115877861608a869be63110a1e3917c3d1dda04a", "repo": "matplotlib", "path": "lib/matplotlib/dates.py", "file_name": "dates.py", "fun_name": "_from_ordinalf", "commit_message": "All classes and methods in dates support both string and tzinfo as tz-argument", "code": "def _from_ordinalf(x, tz=None):\n \n\n tz = _get_tzinfo(tz)\n\n dt = (np.datetime64(get_epoch()) +\n np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us'))\n if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'):\n raise ValueError(f'Date ordinal {x} converts to {dt} (using '\n f'epoch {get_epoch()}), but Matplotlib dates must be '\n 'between year 0001 and 9999.')\n # convert from datetime64 to datetime:\n dt = dt.tolist()\n\n # datetime64 is always UTC:\n dt = dt.replace(tzinfo=dateutil.tz.gettz('UTC'))\n # but maybe we are working in a different timezone so move.\n dt = dt.astimezone(tz)\n # fix round off errors\n if np.abs(x) > 70 * 365:\n # if x is big, round off to nearest twenty microseconds.\n # This avoids floating point roundoff error\n ms = round(dt.microsecond / 20) * 20\n if ms == 1000000:\n dt = dt.replace(microsecond=0) + datetime.timedelta(seconds=1)\n else:\n dt = dt.replace(microsecond=ms)\n\n return dt\n\n\n# a version of _from_ordinalf that can operate on numpy arrays\n_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf, otypes=\"O\")\n\n\n# a version of dateutil.parser.parse that can operate on numpy arrays\n_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 313, "n_words": 156, "vocab_size": 107, "complexity": 5, "nloc": 18, "token_counts": 169, "n_ast_nodes": 346, "n_identifiers": 31, "random_cut": "def _from_ordinalf(x, tz=None):\n \n\n tz = _get_tzinfo(tz)\n\n dt = (np.datetime64(get_epoch()) +\n np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us'))\n if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'):\n raise ValueError(f'Date ordinal {x} converts to {dt} (using '\n f'epoch {get_epoch()}), but Matplotlib dates must be '\n 'between year 0001 and 9999.')\n # c", "d_id": 22693, "documentation": { "docstring": "\n Convert Gregorian float of the date, preserving hours, minutes,\n seconds and microseconds. 
Return value is a `.datetime`.\n\n The input date *x* is a float in ordinal days at UTC, and the output will\n be the specified `.datetime` object corresponding to that time in\n timezone *tz*, or if *tz* is ``None``, in the timezone specified in\n :rc:`timezone`.\n ", "n_words": 56, "vocab_size": 43, "n_whitespaces": 79, "language": "en" } }, { "id": 218387, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getdoc", "commit_message": "add python 3.10.4 for windows", "code": "def getdoc(object):\n \n try:\n doc = object.__doc__\n except AttributeError:\n return None\n if doc is None:\n try:\n doc = _finddoc(object)\n except (AttributeError, TypeError):\n return None\n if not isinstance(doc, str):\n return None\n return cleandoc(doc)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 106, "n_words": 31, "vocab_size": 20, "complexity": 5, "nloc": 13, "token_counts": 56, "n_ast_nodes": 93, "n_identifiers": 10, "random_cut": "def getdoc(object):\n \n try:\n doc = object.__doc__\n except A", "d_id": 55275, "documentation": { "docstring": "Get the documentation string for an object.\n\n All tabs are expanded to spaces. To clean up docstrings that are\n indented to line up with blocks of code, any whitespace than can be\n uniformly removed from the second line onwards is removed.", "n_words": 41, "vocab_size": 36, "n_whitespaces": 50, "language": "en" } }, { "id": 6541, "commit_id": "23a33eef3bc7ea3ba33ec56dc9b56ba38462648a", "repo": "ludwig", "path": "ludwig/marshmallow/marshmallow_schema_utils.py", "file_name": "marshmallow_schema_utils.py", "fun_name": "load_config_with_kwargs", "commit_message": "feat: Modify Trainer to use marshmallow_dataclass syntax for handling hyperparameters. Add basic scripting for docstring extraction to marshmallow schema. Fix some existing marshmallow issues. 
(#1606)", "code": "def load_config_with_kwargs(cls, kwargs):\n \n assert_is_a_marshmallow_class(cls)\n schema = cls.Schema()\n fields = schema.fields.keys()\n return load_config(cls, **{k: v for k, v in kwargs.items() if k in fields}), {\n k: v for k, v in kwargs.items() if k not in fields\n }\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 62, "n_words": 37, "vocab_size": 24, "complexity": 5, "nloc": 7, "token_counts": 75, "n_ast_nodes": 119, "n_identifiers": 12, "random_cut": "def load_config_with_kwargs(cls, kwargs):\n \n assert_is_a_marshmallow_", "d_id": 1017, "documentation": { "docstring": "Takes a marshmallow class and dict of parameter values and appropriately instantiantes the schema.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 26687, "commit_id": "0881beec1ac02dfa97525c5173687defb356d85c", "repo": "saleor", "path": "saleor/payment/tests/test_gateway.py", "file_name": "test_gateway.py", "fun_name": "test_payment_refund_or_void_refund_called_txn_exist", "commit_message": "Fix payment flow (#9504)\n\n* Do not capture payment again when it should be refunded or voided\r\n\r\n* Do not create order when then is ongoing refund", "code": "def test_payment_refund_or_void_refund_called_txn_exist(refund_mock, payment):\n \n # given\n payment.charge_status = ChargeStatus.FULLY_CHARGED\n payment.save(update_fields=[\"charge_status\"])\n assert payment.can_refund() is True\n payment.captured_amount = payment.total\n payment.save(update_fields=[\"captured_amount\"])\n txn = payment.transactions.create(\n is_success=True,\n action_required=False,\n kind=TransactionKind.REFUND_ONGOING,\n amount=payment.captured_amount / 2,\n currency=payment.currency,\n token=\"test\",\n gateway_response={},\n )\n\n # when\n gateway.payment_refund_or_void(\n payment, get_plugins_manager(), None, transaction_id=txn.token\n )\n\n # then\n assert refund_mock.called_once()\n\n\n@patch(\"saleor.payment.gateway.refund\")", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@patch(\"saleor.payment.gateway.refund\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 140, "n_words": 43, "vocab_size": 37, "complexity": 1, "nloc": 19, "token_counts": 120, "n_ast_nodes": 202, "n_identifiers": 29, "random_cut": "def test_payment_refund_or_void_refund_called_txn_exist(refund_mock, payment):\n \n # given\n payment.charge_status = ChargeStatus.FULLY_CHARGED\n payment.save(update_fi", "d_id": 5046, "documentation": { "docstring": "Ensure that the refund method is called when the refund process\n is already ongoing but not covered full payment captured amount.", "n_words": 21, "vocab_size": 18, "n_whitespaces": 23, "language": "en" } }, { "id": 218222, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/metadata/__init__.py", "file_name": "__init__.py", "fun_name": "_all", "commit_message": "add python 3.10.4 for windows", "code": "def _all(self):\n \n groups = super(Deprecated, self).values()\n return EntryPoints(itertools.chain.from_iterable(groups))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 51, "n_identifiers": 10, "random_cut": "def _all(self):\n \n groups = super(Deprecated, self).values()\n 
return EntryPoin", "d_id": 55214, "documentation": { "docstring": "\n Reconstruct a list of all entrypoints from the groups.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 221366, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/codecs.py", "file_name": "codecs.py", "fun_name": "readlines", "commit_message": "add python 3.10.4 for windows", "code": "def readlines(self, sizehint=None, keepends=True):\n\n \n data = self.read()\n return data.splitlines(keepends)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 46, "n_identifiers": 7, "random_cut": "def readlines(self, sizehint=None, keepends=True):\n\n \n data = self.read()\n re", "d_id": 56380, "documentation": { "docstring": " Read all lines available on the input stream\n and return them as a list.\n\n Line breaks are implemented using the codec's decoder\n method and are included in the list entries.\n\n sizehint, if given, is ignored since there is no efficient\n way to finding the true end-of-line.\n\n ", "n_words": 46, "vocab_size": 40, "n_whitespaces": 109, "language": "en" } }, { "id": 180689, "commit_id": "b1dfc9a172440e9c9736566f326ba339ff559604", "repo": "gradio", "path": "gradio/event_queue.py", "file_name": "event_queue.py", "fun_name": "notify_clients", "commit_message": "Release new queue beta (#1969)\n\n* queue-refactor-backend (#1489)\r\n\r\n* queue-refactor-backend\r\n\r\n- create a template for the new design\r\n\r\n* queue-refactor-backend\r\n\r\n- clean after the old queue\r\n\r\n* queue-refactor-backend\r\n\r\n- add basic test to websocket endpoint\r\n\r\n* queue-refactor-backend\r\n\r\n- small fix\r\n\r\n* queue-refactor-backend\r\n\r\n- debugs&fixes&finalizations\r\n- test the flow with postman\r\n\r\n* queue-refactor-backend\r\n\r\n- tweaks on websocket closing\r\n\r\n* queue-refactor-backend\r\n\r\n- cleanup\r\n\r\n* queue-refactor-backend\r\n\r\n- cleanup & tweaks\r\n\r\n* queue-refactor-backend\r\n\r\n- cleanup & tweaks\r\n\r\n* queue-refactor-backend\r\n\r\n- cleanup & tweaks\r\n- correct the exception handling\r\n\r\n* queue-refactor-backend\r\n\r\n- add websockets dependency\r\n\r\n* queue-refactor-backend\r\n- reformat\r\n\r\n* queue-refactor-backend\r\n- add single event test\r\n\r\n* queue-refactor-backend\r\n- tweaks\r\n- remove outdated tests\r\n\r\n* queue-refactor-backend\r\n- reformat\r\n\r\n* queue-refactor-backend\r\n- reformat\r\n\r\n* queue-refactor-backend\r\n- reformat\r\n\r\n* queue-refactor-backend\r\n- add Queue configurations to Blocks.launch()\r\n- add live_queue_update to send estimations whenever a job gets fetched from the Queue\r\n\r\n* queue-refactor-backend\r\n- add Queue configurations to Blocks.launch()\r\n- add live_queue_update to send estimations whenever a job gets fetched from the Queue\r\n\r\n* queue-refactor-backend\r\n- tweaks\r\n\r\n* queue-refactor-backend\r\n- make SLEEP_WHEN_FREE shorter\r\n\r\nCo-authored-by: Ali Abid \r\n\r\n* Add estimation parameters to queue (#1889)\r\n\r\n* - tweaks on Estimation\r\n\r\n* version\r\n\r\n* Revert \"version\"\r\n\r\nThis reverts commit bd1f4d7bfe3658a4967b93126859a62a511a70e2.\r\n\r\n* some fix and tweaks\r\n\r\n* implement queue frontend (#1950)\r\n\r\n* implement queue frontend\r\n\r\n* fix types\r\n\r\n* fix ws endpoint in build mode\r\n\r\n* 
cleanup\r\n\r\n* Queue tweaks (#1909)\r\n\r\n* tweaks on estimation payload\r\n\r\n* Queue keep ws connections open (#1910)\r\n\r\n* 1. keep ws connections open after the event process is completed\r\n2. do not send estimations periodically if live queue updates is open\r\n\r\n* fix calculation\r\n\r\n* 1. tweaks on event_queue\r\n\r\n* fix issue - create new ws for each request\r\n\r\n* format\r\n\r\n* fix\r\n\r\n* fix tests\r\n\r\n* fix tests\r\n\r\n* tets\r\n\r\n* test\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* change'\r\n\r\n* wtf\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* file perms\r\n\r\n* Release queue beta v1 (#1971)\r\n\r\n* - release the new queue\r\n\r\n* - bypass the issue in the tests\r\n- rewrite the lost part in the codebase\r\n\r\n* - add concurrent queue example (#1978)\r\n\r\n* rank_eta calc\r\n\r\n* Queue fixes (#1981)\r\n\r\n* change\r\n\r\n* format\r\n\r\n* - comment out queue tests as they dont work well\r\n\r\n* - reformat\r\n\r\n* Update gradio/event_queue.py\r\n\r\nCo-authored-by: Ömer Faruk Özdemir \r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* change\r\n\r\n* weird fix\r\n\r\nCo-authored-by: Ömer Faruk Özdemir \r\n\r\n* release-queue-v3 (#1988)\r\n\r\n* Fix frontend queuing to target secure WSS (#1996)\r\n\r\n* change\r\n\r\n* format\r\n\r\n* changes\r\n\r\n* queue-concurrency-tweaks (#2002)\r\n\r\n1. make gather_data and broadcast_estimation sequential instead of concurrent because they were deleting elements at the same time and raising expections which was lowering the performance\r\n\r\n* Update Queue API, documentation (#2026)\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* fixes\r\n\r\n* changes\r\n\r\n* change\r\n\r\n* fix\r\n\r\nCo-authored-by: Ömer Faruk Özdemir \r\nCo-authored-by: pngwn ", "code": "async def notify_clients(cls) -> None:\n \n while not cls.STOP:\n await asyncio.sleep(cls.UPDATE_INTERVALS)\n if cls.EVENT_QUEUE:\n await cls.broadcast_estimations()\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 65, "n_words": 14, "vocab_size": 13, "complexity": 3, "nloc": 8, "token_counts": 34, "n_ast_nodes": 61, "n_identifiers": 8, "random_cut": "async def notify_clients(cls) -> None:\n \n while not cls.STOP:\n await asyncio.sleep(cls.UPDATE_INTERVALS)\n", "d_id": 43215, "documentation": { "docstring": "\n Notify clients about events statuses in the queue periodically.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 178336, "commit_id": "1f5a2759dc7a3dda7baa4e599a803a34a0be5444", "repo": "Nuitka", "path": "nuitka/nodes/ModuleNodes.py", "file_name": "ModuleNodes.py", "fun_name": "_readPyPIFile", "commit_message": "Fix, the parsing of \".pyi\" files didn't handle relative imports", "code": "def _readPyPIFile(self):\n \n\n # Complex stuff, pylint: disable=too-many-branches,too-many-statements\n if self.used_modules is None:\n pyi_filename = self.getPyIFilename()\n\n if os.path.exists(pyi_filename):\n pyi_deps = OrderedSet()\n\n # Flag signalling multiline import handling\n in_import = False\n in_import_part = \"\"\n\n for line in getFileContentByLine(pyi_filename):\n line = line.strip()\n\n if not in_import:\n if line.startswith(\"import \"):\n imported = line[7:]\n\n pyi_deps.add(imported)\n elif line.startswith(\"from \"):\n parts = line.split(None, 3)\n assert parts[0] == \"from\"\n assert parts[2] == \"import\"\n\n origin_name = parts[1]\n\n if origin_name == \"typing\":\n continue\n\n if origin_name == 
\".\":\n origin_name = self.getFullName()\n else:\n dot_count = 0\n while origin_name.startswith(\".\"):\n origin_name = origin_name[1:]\n dot_count += 1\n\n if dot_count > 0:\n if origin_name:\n origin_name = (\n self.getFullName()\n .getRelativePackageName(level=dot_count + 1)\n .getChildNamed(origin_name)\n )\n else:\n origin_name = (\n self.getFullName().getRelativePackageName(\n level=dot_count + 1\n )\n )\n\n if origin_name != self.getFullName():\n pyi_deps.add(origin_name)\n\n imported = parts[3]\n if imported.startswith(\"(\"):\n # Handle multiline imports\n if not imported.endswith(\")\"):\n in_import = True\n imported = imported[1:]\n in_import_part = origin_name\n assert in_import_part, (\n \"Multiline part in file %s cannot be empty\"\n % pyi_filename\n )\n else:\n in_import = False\n imported = imported[1:-1]\n assert imported\n\n if imported == \"*\":\n continue\n\n for name in imported.split(\",\"):\n if name:\n name = name.strip()\n pyi_deps.add(origin_name + \".\" + name)\n\n else: # In import\n imported = line\n if imported.endswith(\")\"):\n imported = imported[0:-1]\n in_import = False\n\n for name in imported.split(\",\"):\n name = name.strip()\n if name:\n pyi_deps.add(in_import_part + \".\" + name)\n\n if \"typing\" in pyi_deps:\n pyi_deps.discard(\"typing\")\n if \"__future__\" in pyi_deps:\n pyi_deps.discard(\"__future__\")\n\n if self.getFullName() in pyi_deps:\n pyi_deps.discard(self.getFullName())\n if self.getFullName().getPackageName() in pyi_deps:\n pyi_deps.discard(self.getFullName().getPackageName())\n\n self.used_modules = tuple((pyi_dep, None) for pyi_dep in pyi_deps)\n else:\n self.used_modules = ()\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 31, "n_whitespaces": 2556, "n_words": 244, "vocab_size": 118, "complexity": 26, "nloc": 82, "token_counts": 469, "n_ast_nodes": 808, "n_identifiers": 32, "random_cut": "def _readPyPIFile(self):\n \n\n # Complex stuff, pylint: disable=too-many-branches,too-many-statements\n if self.used_modules is None:\n pyi_filename = self.getPyIFilename()\n\n if os.path.exists(pyi_filename):\n pyi_deps = OrderedSet()\n\n # Flag signalling multiline import handling\n in_import = False\n in_import_part = \"\"\n\n for line in getFileContentByLine(pyi_filename):\n line = line.strip()\n\n if not in_import:\n if line.startswith(\"import \"):\n imported = line[7:]\n\n pyi_deps.add(imported)\n elif line.startswith(\"from \"):\n parts = line.split(None, 3)\n assert parts[0] == \"from\"\n assert parts[2] == \"import\"\n\n origin_name = parts[1]\n\n if origin_name == \"typing\":\n continue\n\n if origin_name == \".\":\n origin_name = self.getFullName()\n else:\n ", "d_id": 42667, "documentation": { "docstring": "Read the .pyi file if present and scan for dependencies.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 61951, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "topological_sort", "commit_message": "upd; format", "code": "def topological_sort(self):\n \n result = []\n # Make a shallow copy of the adjacency list\n alist = {}\n for k, v in self.adjacency_list.items():\n alist[k] = v[:]\n while True:\n # See what we can remove in this run\n to_remove = []\n for k, v in list(alist.items())[:]:\n if not v:\n to_remove.append(k)\n del alist[k]\n if not to_remove:\n # What's 
left in alist (if anything) is a cycle.\n break\n # Remove from the adjacency list of others\n for k, v in alist.items():\n alist[k] = [(d, r) for d, r in v if d not in to_remove]\n logger.debug('Moving to result: %s',\n ['%s (%s)' % (d.name, d.version) for d in to_remove])\n result.extend(to_remove)\n return result, list(alist.keys())\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 378, "n_words": 108, "vocab_size": 71, "complexity": 10, "nloc": 19, "token_counts": 155, "n_ast_nodes": 252, "n_identifiers": 19, "random_cut": "def topological_sort(self):\n \n result = []\n # Make a shallow copy of the adjacency list\n alist = {}\n for k, v in self.adjacency_list.items():\n alist[k] = v[:]\n while True:\n # See what we can remove in this run\n to_remove = []\n for k, v in list(alist.items())[:]:\n if not v:\n to_remove.append(k)\n del alist[k]\n if not to_rem", "d_id": 12774, "documentation": { "docstring": "\n Perform a topological sort of the graph.\n :return: A tuple, the first element of which is a topologically sorted\n list of distributions, and the second element of which is a\n list of distributions that cannot be sorted because they have\n circular dependencies and so form a cycle.\n ", "n_words": 47, "vocab_size": 32, "n_whitespaces": 117, "language": "en" } }, { "id": 128528, "commit_id": "f448e33473c19854f47a93d7d55ccf72ad1b7fbf", "repo": "ray", "path": "rllib/evaluation/episode.py", "file_name": "episode.py", "fun_name": "soft_reset", "commit_message": "Convert floats to integers before using randrange (#28962)\n\nSigned-off-by: Ram Rachum ", "code": "def soft_reset(self) -> None:\n \n self.length = 0\n self.episode_id = random.randrange(int(2e9))\n self.total_reward = 0.0\n self.agent_rewards = defaultdict(float)\n self._agent_reward_history = defaultdict(list)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 19, "vocab_size": 15, "complexity": 1, "nloc": 11, "token_counts": 49, "n_ast_nodes": 79, "n_identifiers": 13, "random_cut": "def soft_reset(self) -> None:\n \n self.length = 0\n self.episode_id = random.randrange(", "d_id": 28732, "documentation": { "docstring": "Clears rewards and metrics, but retains RNN and other state.\n\n This is used to carry state across multiple logical episodes in the\n same env (i.e., if `soft_horizon` is set).\n ", "n_words": 29, "vocab_size": 27, "n_whitespaces": 50, "language": "en" } }, { "id": 73290, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/views.py", "file_name": "views.py", "fun_name": "get_ordering_field", "commit_message": "Reformat with black", "code": "def get_ordering_field(self, field_name):\n \n try:\n field = self.opts.get_field(field_name)\n return field.name\n except FieldDoesNotExist:\n # See whether field_name is a name of a non-field\n # that allows sorting.\n if callable(field_name):\n attr = field_name\n elif hasattr(self.model_admin, field_name):\n attr = getattr(self.model_admin, field_name)\n else:\n attr = getattr(self.model, field_name)\n return getattr(attr, \"admin_order_field\", None)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 200, "n_words": 46, "vocab_size": 35, "complexity": 4, "nloc": 12, "token_counts": 77, 
"n_ast_nodes": 126, "n_identifiers": 14, "random_cut": "def get_ordering_field(self, field_name):\n \n try:\n field = self.opts.get_field(field_name)\n return field.name\n ", "d_id": 16005, "documentation": { "docstring": "\n Returns the proper model field name corresponding to the given\n field_name to use for ordering. field_name may either be the name of a\n proper model field or the name of a method (on the admin or model) or a\n callable with the 'admin_order_field' attribute. Returns None if no\n proper model field name can be matched.\n ", "n_words": 55, "vocab_size": 32, "n_whitespaces": 98, "language": "en" } }, { "id": 203347, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/checks.py", "file_name": "checks.py", "fun_name": "_check_ordering", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _check_ordering(self, obj):\n \n\n # ordering = None\n if obj.ordering is None: # The default value is None\n return []\n elif not isinstance(obj.ordering, (list, tuple)):\n return must_be(\n \"a list or tuple\", option=\"ordering\", obj=obj, id=\"admin.E031\"\n )\n else:\n return list(\n chain.from_iterable(\n self._check_ordering_item(obj, field_name, \"ordering[%d]\" % index)\n for index, field_name in enumerate(obj.ordering)\n )\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 224, "n_words": 50, "vocab_size": 43, "complexity": 4, "nloc": 14, "token_counts": 84, "n_ast_nodes": 137, "n_identifiers": 16, "random_cut": "def _check_ordering(self, obj):\n \n\n # ordering = None\n if obj.ordering i", "d_id": 50321, "documentation": { "docstring": "Check that ordering refers to existing fields or is random.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 186612, "commit_id": "16aad35d31a887dab157f9d4f5e0fe9218d06064", "repo": "certbot", "path": "certbot-nginx/certbot_nginx/_internal/parser_obj.py", "file_name": "parser_obj.py", "fun_name": "parsing_hooks", "commit_message": "Fully type certbot-nginx module (#9124)\n\n* Work in progress\r\n\r\n* Fix type\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Oups.\r\n\r\n* Fix typing in UnspacedList\r\n\r\n* Fix logic\r\n\r\n* Finish typing\r\n\r\n* List certbot-nginx as fully typed in tox\r\n\r\n* Fix lint\r\n\r\n* Fix checks\r\n\r\n* Organize imports\r\n\r\n* Fix typing for Python 3.6\r\n\r\n* Fix checks\r\n\r\n* Fix lint\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix signature of deploy_cert regarding the installer interface\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/obj.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix types\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/parser.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Precise type\r\n\r\n* Precise _coerce possible inputs/outputs\r\n\r\n* Fix type\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/http_01.py\r\n\r\nCo-authored-by: ohemorange \r\n\r\n* Fix type\r\n\r\n* Remove an undesirable implementation.\r\n\r\n* Fix type\r\n\r\nCo-authored-by: alexzorin \r\nCo-authored-by: ohemorange ", "code": "def parsing_hooks(cls) -> Tuple[Type[\"Block\"], Type[\"Sentence\"], Type[\"Statements\"]]:\n \n 
return Block, Sentence, Statements\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 8, "token_counts": 30, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "def parsing_hooks(cls) -> Tuple[Type[\"Block\"], Type[\"Sentence\"], Type[\"Statements\"]]:\n \n return Block, Sentence, Statements\n", "d_id": 45524, "documentation": { "docstring": "Returns object types that this class should be able to `parse` recusrively.\n The order of the objects indicates the order in which the parser should\n try to parse each subitem.\n :returns: A list of Parsable classes.\n :rtype list:\n ", "n_words": 38, "vocab_size": 32, "n_whitespaces": 73, "language": "en" } }, { "id": 169009, "commit_id": "54347fe684e0f7844bf407b1fb958a5269646825", "repo": "pandas", "path": "pandas/core/computation/ops.py", "file_name": "ops.py", "fun_name": "_cast_inplace", "commit_message": "TYP: Autotyping (#48191)\n\n* annotate-magics\r\n\r\n* annotate-imprecise-magics\r\n\r\n* none-return\r\n\r\n* scalar-return\r\n\r\n* pyi files\r\n\r\n* ignore vendored file\r\n\r\n* manual changes\r\n\r\n* ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments)\r\n\r\n* run autotyping in pre-commit\r\n\r\n* remove final and expand safe (and add annotate-imprecise-magics)", "code": "def _cast_inplace(terms, acceptable_dtypes, dtype) -> None:\n \n dt = np.dtype(dtype)\n for term in terms:\n if term.type in acceptable_dtypes:\n continue\n\n try:\n new_value = term.value.astype(dt)\n except AttributeError:\n new_value = dt.type(term.value)\n term.update(new_value)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 98, "n_words": 28, "vocab_size": 24, "complexity": 4, "nloc": 22, "token_counts": 64, "n_ast_nodes": 104, "n_identifiers": 13, "random_cut": "def _cast_inplace(terms, acceptable_dtypes, dtype) -> None:\n \n dt = np.dtype(dtype", "d_id": 40371, "documentation": { "docstring": "\n Cast an expression inplace.\n\n Parameters\n ----------\n terms : Op\n The expression that should cast.\n acceptable_dtypes : list of acceptable numpy.dtype\n Will not cast if term's dtype in this list.\n dtype : str or numpy.dtype\n The dtype to cast to.\n ", "n_words": 39, "vocab_size": 31, "n_whitespaces": 82, "language": "en" } }, { "id": 89746, "commit_id": "199dee4680dcecac0c485576f3933d9de49d6e44", "repo": "sentry", "path": "src/sentry/integrations/vercel/webhook.py", "file_name": "webhook.py", "fun_name": "_deployment_created", "commit_message": "fix: Add functionality for new Vercel webhook payloads (#42340)\n\nFixes WOR-2493", "code": "def _deployment_created(self, external_id, request):\n payload = request.data[\"payload\"]\n vercel_project_id = (\n payload[\"projectId\"] if payload.get(\"projectId\") else payload[\"project\"][\"id\"]\n )\n # Only create releases for production deploys for now\n if payload[\"target\"] != \"production\":\n logger.info(\n f\"Ignoring deployment for environment: {payload['target']}\",\n extra={\"external_id\": external_id, \"vercel_project_id\": vercel_project_id},\n )\n return self.respond(status=204)\n \n\n logging_params = {\"external_id\": external_id, \"vercel_project_id\": vercel_project_id}\n\n 
org_integrations = OrganizationIntegration.objects.select_related(\"organization\").filter(\n integration__external_id=external_id, integration__provider=self.provider\n )\n if not org_integrations:\n logger.info(\"Integration not found\", extra=logging_params)\n return self.respond({\"detail\": \"Integration not found\"}, status=404)\n\n # for each org integration, search the configs to find one that matches the vercel project of the webhook\n for org_integration in org_integrations:\n project_mappings = org_integration.config.get(\"project_mappings\") or []\n matched_mappings = list(filter(lambda x: x[1] == vercel_project_id, project_mappings))\n if matched_mappings:\n organization = org_integration.organization\n sentry_project_id = matched_mappings[0][0]\n\n logging_params[\"organization_id\"] = organization.id\n logging_params[\"project_id\"] = sentry_project_id\n\n try:\n release_payload, token = get_payload_and_token(\n payload, organization.id, sentry_project_id\n )\n except Project.DoesNotExist:\n logger.info(\"Project not found\", extra=logging_params)\n return self.respond({\"detail\": \"Project not found\"}, status=404)\n except SentryAppInstallationForProvider.DoesNotExist:\n logger.info(\"Installation not found\", extra=logging_params)\n return self.respond({\"detail\": \"Installation not found\"}, status=404)\n except SentryAppInstallationToken.DoesNotExist:\n logger.info(\"Token not found\", extra=logging_params)\n return self.respond({\"detail\": \"Token not found\"}, status=404)\n except NoCommitFoundError:\n logger.info(\"No commit found\", extra=logging_params)\n return self.respond({\"detail\": \"No commit found\"}, status=404)\n except MissingRepositoryError:\n logger.info(\"Could not determine repository\", extra=logging_params)\n return self.respond({\"detail\": \"Could not determine repository\"}, status=400)\n\n url = absolute_uri(f\"/api/0/organizations/{organization.slug}/releases/\")\n headers = {\n \"Accept\": \"application/json\",\n \"Authorization\": f\"Bearer {token}\",\n \"User-Agent\": f\"sentry_vercel/{VERSION}\",\n }\n json_error = None\n\n # create the basic release payload without refs\n no_ref_payload = release_payload.copy()\n del no_ref_payload[\"refs\"]\n\n with http.build_session() as session:\n try:\n resp = session.post(url, json=no_ref_payload, headers=headers)\n json_error = safe_json_parse(resp)\n resp.raise_for_status()\n except RequestException as e:\n # errors here should be uncommon but we should be aware of them\n logger.error(\n f\"Error creating release: {e} - {json_error}\",\n extra=logging_params,\n exc_info=True,\n )\n # 400 probably isn't the right status code but oh well\n return self.respond({\"detail\": f\"Error creating release: {e}\"}, status=400)\n\n # set the refs\n try:\n resp = session.post(\n url,\n json=release_payload,\n headers=headers,\n )\n json_error = safe_json_parse(resp)\n resp.raise_for_status()\n except RequestException as e:\n # errors will probably be common if the user doesn't have repos set up\n logger.info(\n f\"Error setting refs: {e} - {json_error}\",\n extra=logging_params,\n exc_info=True,\n )\n # 400 probably isn't the right status code but oh well\n return self.respond({\"detail\": f\"Error setting refs: {e}\"}, status=400)\n\n # we are going to quit after the first project match as there shouldn't be multiple matches\n return self.respond(status=201)\n\n return self.respond(status=204)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 20, "n_whitespaces": 1928, "n_words": 360, "vocab_size": 200, "complexity": 14, "nloc": 94, "token_counts": 571, "n_ast_nodes": 1010, "n_identifiers": 60, "random_cut": "def _deployment_created(self, external_id, request):\n payload = request.data[\"payload\"]\n vercel_project_id = (\n payload[\"projectId\"] if payload.get(\"projectId\") else payload[\"project\"][\"id\"]\n )\n # Only create releases for production deploys for now\n if payload[\"target\"] != \"production\":\n logger.info(\n f\"Ignoring deployment for environment: {payload['target']}\",\n extra={\"external_id\": external_id, \"vercel_project_id\": vercel_project_id},\n )\n return self.respond(status=204)\n \n\n logging_params = {\"external_id\": external_id, \"vercel_project_id\": vercel_project_id}\n\n org_integrations = OrganizationIntegration.objects.select_related(\"organization\").filter(\n integration__external_id=external_id, integration__provider=self.provider\n )\n if not org_integrations:\n logger.info(\"Integration not found\", extra=logging_params)\n return self.respond({\"detail\": \"Integration not found\"}, status=404)\n\n # for each org integration, search the configs to find one that matches the vercel project of the webhook\n for org_integration in org_integrations:\n project_mappings = org_integration.config.get(\"project_mappings\") or []\n matched_mappings = list(filter(lambda x: x[1] == vercel_project_id, project_mappings))\n if matched_mappings:\n organization = org_integration.organization\n sentry_project_id = matched_mappings[0][0]\n\n logging_params[\"organization_id\"] = organization.id\n logging_params[\"project_id\"] = sentry_project_id\n\n try:\n release_payload, token = get_payload_and_token(\n payload, organization.id, sentry_project_id\n )\n except Project.DoesNotExist:\n logger.info(\"Project not found\", extra=logging_params)\n return self.respond({\"detail\": \"Project not found\"}, status=404)\n except SentryAppInstallationForProvider.DoesNotExist:\n logger.info(\"Installation not found\", extra=logging_params)\n return self.respond({\"detail\": \"Installation not found\"}, status=404)\n except SentryAppInstallationToken.DoesNotExist:\n logger.info(\"Token not found\", extra=logging_params)\n return self.respond({\"detail\": \"Token not found\"}, status=404)\n except NoCommitFoundError:\n logger.info(\"No commit found\", extra=logging_params)\n return self.respond({\"detail\": \"No commit found\"}, status=404)\n except MissingRepositoryError:\n logger.info(\"Could not determine repository\", extra=logging_params)\n return self.respond({\"detail\": \"Could not determine repository\"}, status=400)\n\n url = absolute_uri(f\"/api/0/organizations/{organization.slug}/releases/\")\n headers = {\n \"Accept\": \"application/json\",\n \"Authorization\": f\"Bearer {token}\",\n \"User-Agent\": f\"sentry_vercel/{VERSION}\",\n }\n json_error = None\n\n # create the basic release payload without refs\n no_ref_payload = release_payload.copy()\n del no_ref_payload[\"refs\"]\n\n with http.build_session() as session:\n try:\n ", "d_id": 18567, "documentation": { "docstring": "\n Steps:\n 1. Find all org integrations that match the external id\n 2. Search the configs to find one that matches the vercel project of the webhook\n 3. Look up the Sentry project that matches\n 4. Look up the connected internal integration\n 5. Find the token associated with that installation\n 6. Determine the commit sha and repo based on what provider is used\n 7. 
Create the release using the token WITHOUT refs\n 8. Update the release with refs\n ", "n_words": 77, "vocab_size": 55, "n_whitespaces": 180, "language": "en" } }, { "id": 88354, "commit_id": "e451a0a5b06d082b9515406d933c78e5a3f6253a", "repo": "sentry", "path": "src/sentry/auth/helper.py", "file_name": "helper.py", "fun_name": "_app_user", "commit_message": "ref(auth): Type hints on auth/helper.py and related modules (#41158)", "code": "def _app_user(self) -> User | None:\n \n return self.user if isinstance(self.user, User) else None\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 3, "token_counts": 25, "n_ast_nodes": 40, "n_identifiers": 5, "random_cut": "def _app_user(self) -> User | None:\n \n return self.user if isinstance(self.user, Us", "d_id": 18378, "documentation": { "docstring": "The user, if they are represented persistently in our app.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 109881, "commit_id": "df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/axes_grid1/axes_divider.py", "file_name": "axes_divider.py", "fun_name": "new_locator", "commit_message": "Improve mpl_toolkit documentation", "code": "def new_locator(self, nx, nx1=None):\n \n return AxesLocator(self, nx, 0, nx1 if nx1 is not None else nx + 1, 1)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 33, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 2, "token_counts": 34, "n_ast_nodes": 48, "n_identifiers": 5, "random_cut": "def new_locator(self, nx, nx1=None):\n \n return AxesL", "d_id": 23790, "documentation": { "docstring": "\n Create a new `.AxesLocator` for the specified cell.\n\n Parameters\n ----------\n nx, nx1 : int\n Integers specifying the column-position of the\n cell. When *nx1* is None, a single *nx*-th column is\n specified. 
Otherwise, location of columns spanning between *nx*\n to *nx1* (but excluding *nx1*-th column) is specified.\n ", "n_words": 46, "vocab_size": 37, "n_whitespaces": 126, "language": "en" } }, { "id": 83152, "commit_id": "d560d124a304a2f6dd467200aab7f070a78bf155", "repo": "zulip", "path": "zerver/tests/test_message_edit.py", "file_name": "test_message_edit.py", "fun_name": "test_edit_cases", "commit_message": "python: Replace string concatenations with f-strings.", "code": "def test_edit_cases(self) -> None:\n \n self.login(\"hamlet\")\n hamlet = self.example_user(\"hamlet\")\n msg_id = self.send_stream_message(\n self.example_user(\"hamlet\"), \"Denmark\", topic_name=\"topic 1\", content=\"content 1\"\n )\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"content\": \"content 2\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][\"prev_content\"], \"content 1\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n self.assertEqual(\n set(history[0].keys()),\n {\n \"timestamp\",\n \"prev_content\",\n \"user_id\",\n \"prev_rendered_content\",\n \"prev_rendered_content_version\",\n },\n )\n\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"topic\": \"topic 2\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 1\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n self.assertEqual(set(history[0].keys()), {\"timestamp\", LEGACY_PREV_TOPIC, \"user_id\"})\n\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"content\": \"content 3\",\n \"topic\": \"topic 3\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][\"prev_content\"], \"content 2\")\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 2\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n self.assertEqual(\n set(history[0].keys()),\n {\n \"timestamp\",\n LEGACY_PREV_TOPIC,\n \"prev_content\",\n \"user_id\",\n \"prev_rendered_content\",\n \"prev_rendered_content_version\",\n },\n )\n\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"content\": \"content 4\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][\"prev_content\"], \"content 3\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n\n self.login(\"iago\")\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"topic\": \"topic 4\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 3\")\n self.assertEqual(history[0][\"user_id\"], self.example_user(\"iago\").id)\n\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 3\")\n self.assertEqual(history[2][LEGACY_PREV_TOPIC], \"topic 2\")\n self.assertEqual(history[3][LEGACY_PREV_TOPIC], \"topic 1\")\n self.assertEqual(history[1][\"prev_content\"], \"content 3\")\n self.assertEqual(history[2][\"prev_content\"], \"content 2\")\n self.assertEqual(history[4][\"prev_content\"], \"content 1\")\n\n # Now, we 
verify that the edit history data sent back has the\n # correct filled-out fields\n message_edit_history = self.client_get(f\"/json/messages/{msg_id}/history\")\n\n json_response = orjson.loads(message_edit_history.content)\n\n # We reverse the message history view output so that the IDs line up with the above.\n message_history = list(reversed(json_response[\"message_history\"]))\n i = 0\n for entry in message_history:\n expected_entries = {\"content\", \"rendered_content\", \"topic\", \"timestamp\", \"user_id\"}\n if i in {0, 2, 3}:\n expected_entries.add(\"prev_topic\")\n if i in {1, 2, 4}:\n expected_entries.add(\"prev_content\")\n expected_entries.add(\"prev_rendered_content\")\n expected_entries.add(\"content_html_diff\")\n i += 1\n self.assertEqual(expected_entries, set(entry.keys()))\n self.assert_length(message_history, 6)\n self.assertEqual(message_history[0][\"prev_topic\"], \"topic 3\")\n self.assertEqual(message_history[0][\"topic\"], \"topic 4\")\n self.assertEqual(message_history[1][\"topic\"], \"topic 3\")\n self.assertEqual(message_history[2][\"topic\"], \"topic 3\")\n self.assertEqual(message_history[2][\"prev_topic\"], \"topic 2\")\n self.assertEqual(message_history[3][\"topic\"], \"topic 2\")\n self.assertEqual(message_history[3][\"prev_topic\"], \"topic 1\")\n self.assertEqual(message_history[4][\"topic\"], \"topic 1\")\n\n self.assertEqual(message_history[0][\"content\"], \"content 4\")\n self.assertEqual(message_history[1][\"content\"], \"content 4\")\n self.assertEqual(message_history[1][\"prev_content\"], \"content 3\")\n self.assertEqual(message_history[2][\"content\"], \"content 3\")\n self.assertEqual(message_history[2][\"prev_content\"], \"content 2\")\n self.assertEqual(message_history[3][\"content\"], \"content 2\")\n self.assertEqual(message_history[4][\"content\"], \"content 2\")\n self.assertEqual(message_history[4][\"prev_content\"], \"content 1\")\n\n self.assertEqual(message_history[5][\"content\"], \"content 1\")\n self.assertEqual(message_history[5][\"topic\"], \"topic 1\")\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 1529, "n_words": 310, "vocab_size": 136, "complexity": 4, "nloc": 128, "token_counts": 1019, "n_ast_nodes": 1737, "n_identifiers": 35, "random_cut": "def test_edit_cases(self) -> None:\n \n self.login(\"hamlet\")\n hamlet = self.example_user(\"hamlet\")\n msg_id = self.send_stream_message(\n self.example_user(\"hamlet\"), \"Denmark\", topic_name=\"topic 1\", content=\"content 1\"\n )\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"content\": \"content 2\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][\"prev_content\"], \"content 1\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n self.assertEqual(\n set(history[0].keys()),\n {\n \"timestamp\",\n \"prev_content\",\n \"user_id\",\n \"prev_rendered_content\",\n \"prev_rendered_content_version\",\n },\n )\n\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"topic\": \"topic 2\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 1\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n self.assertEqual(set(history[0].keys()), {\"timestamp\", LEGACY_PREV_TOPIC, 
\"user_id\"})\n\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"content\": \"content 3\",\n \"topic\": \"topic 3\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][\"prev_content\"], \"content 2\")\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 2\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n self.assertEqual(\n set(history[0].keys()),\n {\n \"timestamp\",\n LEGACY_PREV_TOPIC,\n \"prev_content\",\n ", "d_id": 17602, "documentation": { "docstring": "This test verifies the accuracy of construction of Zulip's edit\n history data structures.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 69102, "commit_id": "9baa2229761c5415f29646a1a5bed4a3f4981e05", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "get_project_name", "commit_message": "fix: specify allowed doctype in queries (#31761)", "code": "def get_project_name(doctype, txt, searchfield, start, page_len, filters):\n\tdoctype = \"Project\"\n\tcond = \"\"\n\tif filters and filters.get(\"customer\"):\n\t\tcond = % (\n\t\t\tfrappe.db.escape(filters.get(\"customer\"))\n\t\t)\n\n\tfields = get_fields(doctype, [\"name\", \"project_name\"])\n\tsearchfields = frappe.get_meta(doctype).get_search_fields()\n\tsearchfields = \" or \".join([\"`tabProject`.\" + field + \" like %(txt)s\" for field in searchfields])\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tfields=\", \".join([\"`tabProject`.{0}\".format(f) for f in fields]),\n\t\t\tcond=cond,\n\t\t\tscond=searchfields,\n\t\t\tmatch_cond=get_match_cond(doctype),\n\t\t\tstart=start,\n\t\t\tpage_len=page_len,\n\t\t),\n\t\t{\"txt\": \"%{0}%\".format(txt), \"_txt\": txt.replace(\"%\", \"\")},\n\t)\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 47, "n_words": 69, "vocab_size": 56, "complexity": 5, "nloc": 30, "token_counts": 171, "n_ast_nodes": 304, "n_identifiers": 28, "random_cut": "def get_project_name(doctype, txt, searchfield, start, page_len, filters):\n\tdoctype = \"Project\"\n\tcond = \"\"\n\tif filters and filters.get(\"customer\"):\n\t\tcond = % (\n\t\t\tfrappe.db.escape(filters.get(\"customer\"))\n\t\t)\n\n\tfields = get_fields(doctype, [\"name\", \"project_name\"])\n\tsearchfields = frappe.get_meta(doctype).get_search_fields()\n\tsearchfields = \" or \".join([\"`tabProject`.\" + field + \" li", "d_id": 14971, "documentation": { "docstring": "(`tabProject`.customer = %s or\n\t\t\tifnull(`tabProject`.customer,\"\")=\"\") andselect {fields} from `tabProject`\n\t\twhere\n\t\t\t`tabProject`.status not in ('Completed', 'Cancelled')\n\t\t\tand {cond} {scond} {match_cond}\n\t\torder by\n\t\t\t(case when locate(%(_txt)s, `tabProject`.name) > 0 then locate(%(_txt)s, `tabProject`.name) else 99999 end),\n\t\t\t`tabProject`.idx desc,\n\t\t\t`tabProject`.name asc\n\t\tlimit {page_len} offset {start}", "n_words": 41, "vocab_size": 39, "n_whitespaces": 31, "language": "en" } }, { "id": 208754, "commit_id": "517a92f878588484116edd6b88dfc738dcfe3cfb", "repo": "ipython", "path": "IPython/tests/test_shortcuts.py", "file_name": "test_shortcuts.py", "fun_name": "test_autosuggest_at_EOL", "commit_message": "Apply autosuggestion only at 
EOL.\n\nAs they are displayed only at EOL.\nFixes #13724", "code": "def test_autosuggest_at_EOL(text, cursor, suggestion, called):\n \n\n event = make_event(text, cursor, suggestion)\n event.current_buffer.insert_text = Mock()\n _apply_autosuggest(event)\n if called:\n event.current_buffer.insert_text.assert_called()\n else:\n event.current_buffer.insert_text.assert_not_called()\n # event.current_buffer.document.get_end_of_line_position.assert_called()\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 21, "vocab_size": 19, "complexity": 2, "nloc": 8, "token_counts": 58, "n_ast_nodes": 95, "n_identifiers": 13, "random_cut": "def test_autosuggest_at_EOL(text, cursor, suggestion, called):\n \n\n event = make_event(text, cursor, suggestion)\n event.current_buffer.insert_text = Mock()\n _apply_autosuggest(event)\n if called:\n event.current_buffer.insert_t", "d_id": 52508, "documentation": { "docstring": "\n test that autosuggest is only applied at end of line.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 269419, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/applications/resnet.py", "file_name": "resnet.py", "fun_name": "stack3", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def stack3(x, filters, blocks, stride1=2, groups=32, name=None):\n \n x = block3(x, filters, stride=stride1, groups=groups, name=name + \"_block1\")\n for i in range(2, blocks + 1):\n x = block3(\n x,\n filters,\n groups=groups,\n conv_shortcut=False,\n name=name + \"_block\" + str(i),\n )\n return x\n\n\n@keras_export(\n \"keras.applications.resnet50.ResNet50\",\n \"keras.applications.resnet.ResNet50\",\n \"keras.applications.ResNet50\",\n)", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\n \"keras.applications.resnet50.ResNet50\",\n \"keras.applications.resnet.ResNet50\",\n \"keras.applications.ResNet50\",\n)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 131, "n_words": 43, "vocab_size": 32, "complexity": 2, "nloc": 11, "token_counts": 86, "n_ast_nodes": 145, "n_identifiers": 14, "random_cut": "def stack3(x, filters, blocks, stride1=2, groups=32, name=None):\n \n x = block3(x, filters, stride=stride1, groups=groups, name=name + \"_block1\")\n for i in range(2, blocks + 1):\n x = block3(\n x,\n filters,\n groups=groups,\n conv_shortcut=False,\n name=name + \"_block\" + s", "d_id": 80067, "documentation": { "docstring": "A set of stacked residual blocks.\n\n Args:\n x: input tensor.\n filters: integer, filters of the bottleneck layer in a block.\n blocks: integer, blocks in the stacked blocks.\n stride1: default 2, stride of the first layer in the first block.\n groups: default 32, group size for grouped convolution.\n name: string, stack label.\n\n Returns:\n Output tensor for the stacked blocks.\n ", "n_words": 58, "vocab_size": 40, "n_whitespaces": 102, "language": "en" } }, { "id": 168235, "commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", "repo": "pandas", "path": "pandas/core/indexes/base.py", "file_name": "base.py", "fun_name": "is_mixed", "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", "code": "def is_mixed(self) -> bool:\n \n warnings.warn(\n \"Index.is_mixed is deprecated and will be removed in a future version. 
\"\n \"Check index.inferred_type directly instead.\",\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n return self.inferred_type in [\"mixed\"]\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 100, "n_words": 28, "vocab_size": 27, "complexity": 1, "nloc": 36, "token_counts": 37, "n_ast_nodes": 66, "n_identifiers": 11, "random_cut": "def is_mixed(self) -> bool:\n \n warnings.warn(\n \"Index.is_m", "d_id": 40247, "documentation": { "docstring": "\n Check if the Index holds data with mixed data types.\n\n Returns\n -------\n bool\n Whether or not the Index holds data with mixed data types.\n\n See Also\n --------\n is_boolean : Check if the Index only consists of booleans.\n is_integer : Check if the Index only consists of integers.\n is_floating : Check if the Index is a floating type.\n is_numeric : Check if the Index only consists of numeric data.\n is_object : Check if the Index is of the object dtype.\n is_categorical : Check if the Index holds categorical data.\n is_interval : Check if the Index holds Interval objects.\n\n Examples\n --------\n >>> idx = pd.Index(['a', np.nan, 'b'])\n >>> idx.is_mixed()\n True\n\n >>> idx = pd.Index([1.0, 2.0, 3.0, 5.0])\n >>> idx.is_mixed()\n False\n ", "n_words": 118, "vocab_size": 56, "n_whitespaces": 284, "language": "en" } }, { "id": 163775, "commit_id": "c5ff649b11bd625ca36ad218539badb1c2057668", "repo": "pandas", "path": "pandas/tests/io/test_user_agent.py", "file_name": "test_user_agent.py", "fun_name": "responder", "commit_message": "CI/TST: Call join on server process test (#45628)", "code": "def responder(request):\n \n # Find an available port\n with socket.socket() as sock:\n sock.bind((\"localhost\", 0))\n port = sock.getsockname()[1]\n\n server_process = multiprocessing.Process(\n target=process_server, args=(request.param, port)\n )\n server_process.start()\n yield port\n server_process.join(10)\n server_process.terminate()\n kill_time = 5\n wait_time = 0\n while server_process.is_alive():\n if wait_time > kill_time:\n server_process.kill()\n break\n else:\n wait_time += 0.1\n time.sleep(0.1)\n server_process.close()\n\n\n@pytest.mark.parametrize(\n \"responder, read_method, parquet_engine\",\n [\n (CSVUserAgentResponder, pd.read_csv, None),\n (JSONUserAgentResponder, pd.read_json, None),\n (ParquetPyArrowUserAgentResponder, pd.read_parquet, \"pyarrow\"),\n pytest.param(\n ParquetFastParquetUserAgentResponder,\n pd.read_parquet,\n \"fastparquet\",\n # TODO(ArrayManager) fastparquet\n marks=[\n td.skip_array_manager_not_yet_implemented,\n pytest.mark.xfail(PY310, reason=\"fastparquet failing on 3.10\"),\n ],\n ),\n (PickleUserAgentResponder, pd.read_pickle, None),\n (StataUserAgentResponder, pd.read_stata, None),\n (GzippedCSVUserAgentResponder, pd.read_csv, None),\n (GzippedJSONUserAgentResponder, pd.read_json, None),\n ],\n indirect=[\"responder\"],\n)", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"responder, read_method, parquet_engine\",\n [\n (CSVUserAgentResponder, pd.read_csv, None),\n (JSONUserAgentResponder, pd.read_json, None),\n (ParquetPyArrowUserAgentResponder, pd.read_parquet, \"pyarrow\"),\n pytest.param(\n ParquetFastParquetUserAgentResponder,\n pd.read_parquet,\n \"fastparquet\",\n # TODO(ArrayManager) fastparquet\n marks=[\n td.skip_array_manager_not_yet_implemented,\n pytest.mark.xfail(PY310, reason=\"fastparquet failing 
on 3.10\"),\n ],\n ),\n (PickleUserAgentResponder, pd.read_pickle, None),\n (StataUserAgentResponder, pd.read_stata, None),\n (GzippedCSVUserAgentResponder, pd.read_csv, None),\n (GzippedJSONUserAgentResponder, pd.read_json, None),\n ],\n indirect=[\"responder\"],\n)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 380, "n_words": 93, "vocab_size": 75, "complexity": 3, "nloc": 21, "token_counts": 117, "n_ast_nodes": 366, "n_identifiers": 48, "random_cut": "def responder(request):\n \n # Find an available port\n with socket.socket() as sock:\n sock.bind((\"localhost\", 0))\n port = sock.getsockname()[1]\n\n server_process = multiprocessing.Process(\n target=process_server, args=(request.param, port)\n )\n server_process.start()\n yield port\n server_process.join(10)\n server_process.terminate()\n kill_time = 5\n wait_time = 0\n while server_process.is_alive():\n if wait_time > kill_time:\n server_process.kill()\n break", "d_id": 39497, "documentation": { "docstring": "\n Fixture that starts a local http server in a separate process on localhost\n and returns the port.\n\n Running in a separate process instead of a thread to allow termination/killing\n of http server upon cleanup.\n ", "n_words": 34, "vocab_size": 25, "n_whitespaces": 50, "language": "en" } }, { "id": 206452, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/test/testcases.py", "file_name": "testcases.py", "fun_name": "_pre_setup", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _pre_setup(self):\n \n super()._pre_setup()\n if self.available_apps is not None:\n apps.set_available_apps(self.available_apps)\n setting_changed.send(\n sender=settings._wrapped.__class__,\n setting=\"INSTALLED_APPS\",\n value=self.available_apps,\n enter=True,\n )\n for db_name in self._databases_names(include_mirrors=False):\n emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)\n try:\n self._fixture_setup()\n except Exception:\n if self.available_apps is not None:\n apps.unset_available_apps()\n setting_changed.send(\n sender=settings._wrapped.__class__,\n setting=\"INSTALLED_APPS\",\n value=settings.INSTALLED_APPS,\n enter=False,\n )\n raise\n # Clear the queries_log so that it's less likely to overflow (a single\n # test probably won't execute 9K queries). 
If queries_log overflows,\n # then assertNumQueries() doesn't work.\n for db_name in self._databases_names(include_mirrors=False):\n connections[db_name].queries_log.clear()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 419, "n_words": 72, "vocab_size": 56, "complexity": 6, "nloc": 26, "token_counts": 155, "n_ast_nodes": 247, "n_identifiers": 29, "random_cut": "def _pre_setup(self):\n \n super()._pre_setup()\n if self.available_apps is not None:\n apps.set_available_apps(self.available_apps)\n setting_changed.send(\n sender=settings._wrapped.__class__,\n setting=\"INSTALLED_APPS\",\n value=self.available_apps,\n enter=True,\n )\n for db_name in self._databases_names(include_mirrors=False):\n ", "d_id": 51525, "documentation": { "docstring": "\n Perform pre-test setup:\n * If the class has an 'available_apps' attribute, restrict the app\n registry to these applications, then fire the post_migrate signal --\n it must run with the correct set of applications for the test case.\n * If the class has a 'fixtures' attribute, install those fixtures.\n ", "n_words": 48, "vocab_size": 38, "n_whitespaces": 95, "language": "en" } }, { "id": 207475, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/test_actions.py", "file_name": "test_actions.py", "fun_name": "test_multiple_actions_form", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_multiple_actions_form(self):\n \n action_data = {\n ACTION_CHECKBOX_NAME: [self.s1.pk],\n # Two different actions selected on the two forms...\n \"action\": [\"external_mail\", \"delete_selected\"],\n # ...but \"go\" was clicked on the top form.\n \"index\": 0,\n }\n self.client.post(\n reverse(\"admin:admin_views_externalsubscriber_changelist\"), action_data\n )\n # The action sends mail rather than deletes.\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"Greetings from a function action\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 173, "n_words": 51, "vocab_size": 46, "complexity": 1, "nloc": 11, "token_counts": 73, "n_ast_nodes": 126, "n_identifiers": 14, "random_cut": "def test_multiple_actions_form(self):\n \n action_data = {\n ACTION_CHECKBOX_NAME: [self.s1.pk],\n # Two differen", "d_id": 51970, "documentation": { "docstring": "\n Actions come from the form whose submit button was pressed (#10618).\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 83350, "commit_id": "47056ef06fff67388ebe1bd09846280fc84f9660", "repo": "zulip", "path": "zerver/tests/test_message_send.py", "file_name": "test_message_send.py", "fun_name": "test_empty_message", "commit_message": "tests: Remove `client` parameter if test can use default `User-Agent`.\n\nRemoves `client` parameter from backend tests using the\n`POST /messages` endpoint when the test can use the default\n`User-Agent` as the client, which is set to `ZulipMobile` for API\nrequests and a browser user agent string for web app requests.", "code": "def test_empty_message(self) -> None:\n \n self.login(\"hamlet\")\n othello = self.example_user(\"othello\")\n result = self.client_post(\n \"/json/messages\",\n {\"type\": \"private\", \"content\": \" \", \"to\": othello.email},\n )\n self.assert_json_error(result, \"Message must not be empty\")\n", "url": 
"https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 90, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 11, "token_counts": 55, "n_ast_nodes": 104, "n_identifiers": 9, "random_cut": "def test_empty_message(self) -> None:\n \n self.login(\"hamlet\")\n othello = self.example_user(\"othello\")\n result = se", "d_id": 17659, "documentation": { "docstring": "\n Sending a message that is empty or only whitespace should fail\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 151914, "commit_id": "8227b4aafe51b30e5942d293e8d0052c968442dd", "repo": "freqtrade", "path": "freqtrade/templates/FreqaiExampleStrategy.py", "file_name": "FreqaiExampleStrategy.py", "fun_name": "freqai_feature_engineering_generic", "commit_message": "freqAI Strategy - improve user experience", "code": "def freqai_feature_engineering_generic(self, dataframe, **kwargs):\n \n dataframe[\"%-pct-change\"] = dataframe[\"close\"].pct_change()\n dataframe[\"%-raw_volume\"] = dataframe[\"volume\"]\n dataframe[\"%-raw_price\"] = dataframe[\"close\"]\n return dataframe\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 44, "n_ast_nodes": 80, "n_identifiers": 5, "random_cut": "def freqai_feature_engineering_generic(self, dataframe, **kwargs):\n \n dataframe[\"%-pct-change\"] = dataframe[\"close\"].pct_change()\n dataframe[\"%-raw_volume\"] = dataframe[\"volume\"]\n dataframe[\"%-raw_price\"] = dataframe[\"close\"]\n return dat", "d_id": 35160, "documentation": { "docstring": "\n This optional function will be called for all include_timeframes (including corr_pairs).\n After that, the features will be shifted by the number of candles in the\n include_shifted_candles.\n :param df: strategy dataframe which will receive the features\n dataframe[\"%-pct-change\"] = dataframe[\"close\"].pct_change()\n ", "n_words": 38, "vocab_size": 31, "n_whitespaces": 81, "language": "en" } }, { "id": 276754, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/data_utils.py", "file_name": "data_utils.py", "fun_name": "next_sample", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def next_sample(uid):\n \n return next(_SHARED_SEQUENCES[uid])\n\n\n@keras_export(\"keras.utils.GeneratorEnqueuer\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.utils.GeneratorEnqueuer\")", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 10, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 36, "n_identifiers": 5, "random_cut": "def next_sample(uid):\n \n return next(_SHARED_SEQUENCES[uid])\n\n\n@keras_export(\"keras.utils.G", "d_id": 81723, "documentation": { "docstring": "Gets the next value from the generator `uid`.\n\n To allow multiple generators to be used at the same time, we use `uid` to\n get a specific one. 
A single generator would cause the validation to\n overwrite the training generator.\n\n Args:\n uid: int, generator identifier\n\n Returns:\n The next value of generator `uid`.\n ", "n_words": 51, "vocab_size": 39, "n_whitespaces": 83, "language": "en" } }, { "id": 203215, "commit_id": "c5cd8783825b5f6384417dac5f3889b4210b7d08", "repo": "django", "path": "django/contrib/staticfiles/utils.py", "file_name": "utils.py", "fun_name": "check_settings", "commit_message": "Refs #33476 -- Refactored problematic code before reformatting by Black.\n\nIn these cases Black produces unexpected results, e.g.\r\n\r\ndef make_random_password(\r\n self,\r\n length=10,\r\n allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',\r\n):\r\n\r\nor\r\n\r\ncursor.execute(\"\"\"\r\nSELECT ...\r\n\"\"\",\r\n [table name],\r\n)", "code": "def check_settings(base_url=None):\n \n if base_url is None:\n base_url = settings.STATIC_URL\n if not base_url:\n raise ImproperlyConfigured(\n \"You're using the staticfiles app \"\n \"without having set the required STATIC_URL setting.\")\n if settings.MEDIA_URL == base_url:\n raise ImproperlyConfigured(\n \"The MEDIA_URL and STATIC_URL settings must have different values\"\n )\n if (settings.DEBUG and settings.MEDIA_URL and settings.STATIC_URL and\n settings.MEDIA_URL.startswith(settings.STATIC_URL)):\n raise ImproperlyConfigured(\n \"runserver can't serve media if MEDIA_URL is within STATIC_URL.\"\n )\n if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and\n (settings.MEDIA_ROOT == settings.STATIC_ROOT)):\n raise ImproperlyConfigured(\n \"The MEDIA_ROOT and STATIC_ROOT settings must have different values\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 234, "n_words": 83, "vocab_size": 49, "complexity": 11, "nloc": 21, "token_counts": 99, "n_ast_nodes": 169, "n_identifiers": 10, "random_cut": "def check_settings(base_url=None):\n \n if base_url is None:\n base_url = settings.STATIC_URL\n if not base_url:\n raise ImproperlyConfigured(\n \"You're using the staticfiles app \"\n \"without having set the required STATIC_URL setting.\")\n if settings.MEDIA_URL == base_url:\n raise ImproperlyConfigured(\n \"The MEDIA_URL and STATIC_URL", "d_id": 50250, "documentation": { "docstring": "\n Check if the staticfiles settings have sane values.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 128704, "commit_id": "05e623ecc22788cfe3b8ebe7933135350d3e0a2d", "repo": "ray", "path": "python/ray/serve/schema.py", "file_name": "schema.py", "fun_name": "kubernetes_dict", "commit_message": "[Serve] [KubeRay] Add flag that allows `serve build` to print Kubernetes-formatted output (#28918)", "code": "def kubernetes_dict(self, **kwargs) -> Dict:\n \n\n config = self.dict(**kwargs)\n for idx, deployment in enumerate(config[\"deployments\"]):\n\n if isinstance(deployment.get(\"ray_actor_options\"), dict):\n\n # JSON-serialize ray_actor_options' resources dictionary\n if isinstance(deployment[\"ray_actor_options\"].get(\"resources\"), dict):\n deployment[\"ray_actor_options\"][\"resources\"] = json.dumps(\n deployment[\"ray_actor_options\"][\"resources\"]\n )\n\n # JSON-serialize ray_actor_options' runtime_env dictionary\n if isinstance(deployment[\"ray_actor_options\"].get(\"runtime_env\"), dict):\n deployment[\"ray_actor_options\"][\"runtime_env\"] = json.dumps(\n deployment[\"ray_actor_options\"][\"runtime_env\"]\n )\n\n # Convert 
ray_actor_options' keys\n deployment[\"ray_actor_options\"] = dict_keys_snake_to_camel_case(\n deployment[\"ray_actor_options\"]\n )\n\n # JSON-serialize user_config dictionary\n if isinstance(deployment.get(\"user_config\"), dict):\n deployment[\"user_config\"] = json.dumps(deployment[\"user_config\"])\n\n # Convert deployment's keys\n config[\"deployments\"][idx] = dict_keys_snake_to_camel_case(deployment)\n\n # Convert top-level runtime_env\n if isinstance(config.get(\"runtime_env\"), dict):\n config[\"runtime_env\"] = json.dumps(config[\"runtime_env\"])\n\n # Convert top-level option's keys\n config = dict_keys_snake_to_camel_case(config)\n\n return config\n\n\n@PublicAPI(stability=\"beta\")", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@PublicAPI(stability=\"beta\")", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 471, "n_words": 89, "vocab_size": 47, "complexity": 7, "nloc": 29, "token_counts": 204, "n_ast_nodes": 378, "n_identifiers": 16, "random_cut": "def kubernetes_dict(self, **kwargs) -> Dict:\n \n\n config = self.dict(**kwargs)\n for idx, deployment in enumerate(config[\"deployments\"]):\n\n if isinstance(deployment.get(\"ray_actor_options\"), dict):\n\n # JSON-serialize ray_actor_options' resources dictionary\n if isinstance(deployment[\"ray_actor_options\"].get(\"resources\"), dict):\n deployment[\"ray_actor_options\"][\"resources\"] = json.dumps(\n deployment[\"ray_actor_options\"][\"resources\"]\n )\n\n # JSON-serialize ray_actor_options' runtime_env dictionary\n if isinstance(deployment[\"ray_actor_options\"].get(\"runtime_env\"), dict):\n deployment[\"ray_actor_options\"][\"runtime_env\"] = json.dumps(\n deployment[\"ray_actor_options\"][\"runtime_env\"]\n )\n\n # Convert ray_actor_options' keys\n deployment[\"ray_actor_options\"] = dict_keys_snake_to_camel_case(\n deployment[\"ray_actor_options\"]\n )\n\n # JSON-serialize user_config dictionary\n if isinstance(deployment.get(\"user_config\"), dict):\n deployment[\"user_config\"] = json.dumps(deployment[\"user_config\"])\n\n # Convert deployment's keys\n config[\"deployments\"][idx] = dict_keys_snake_to_camel_case(deployment)\n\n # Convert top-level runtime_env\n if isinstance(config.get(\"runtime_env\"), dict):\n config[\"runtime_env\"] = json.dumps(config[\"runtime_env\"])\n\n # Convert top-level option's keys\n config = dict_keys_snake_to_camel_case(config)\n\n return config\n\n\n@PublicAPI(stability=\"b", "d_id": 28782, "documentation": { "docstring": "Returns dictionary in Kubernetes format.\n\n Dictionary can be yaml-dumped to a Serve config file directly and then\n copy-pasted into a RayService Kubernetes config.\n\n Args: all kwargs are passed directly into schema's dict() function.\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 61, "language": "en" } }, { "id": 101270, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "tools/sort/sort.py", "file_name": "sort.py", "fun_name": "reload_images", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def reload_images(self, group_method, img_list):\n \n logger.info(\"Preparing to group...\")\n if group_method == 'group_blur':\n filename_list, image_list = self._get_images()\n blurs = [self.estimate_blur(img) for img 
in image_list]\n temp_list = list(zip(filename_list, blurs))\n elif group_method == 'group_blur_fft':\n filename_list, image_list = self._get_images()\n fft_blurs = [self.estimate_blur_fft(img) for img in image_list]\n temp_list = list(zip(filename_list, fft_blurs))\n elif group_method == 'group_face_cnn':\n filename_list, image_list, landmarks = self._get_landmarks()\n temp_list = list(zip(filename_list, landmarks))\n elif group_method == 'group_face_yaw':\n filename_list, image_list, landmarks = self._get_landmarks()\n yaws = [self.calc_landmarks_face_yaw(mark) for mark in landmarks]\n temp_list = list(zip(filename_list, yaws))\n elif group_method == 'group_hist':\n filename_list, image_list = self._get_images()\n histograms = [cv2.calcHist([img], [0], None, [256], [0, 256]) for img in image_list]\n temp_list = list(zip(filename_list, histograms))\n elif group_method == 'group_black_pixels':\n filename_list, image_list = self._get_images()\n black_pixels = [np.ndarray.all(img == [0, 0, 0], axis=2).sum()/img.size*100*3\n for img in image_list]\n temp_list = list(zip(filename_list, black_pixels))\n else:\n raise ValueError(f\"{group_method} group_method not found.\")\n\n return self.splice_lists(img_list, temp_list)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 430, "n_words": 135, "vocab_size": 64, "complexity": 12, "nloc": 29, "token_counts": 301, "n_ast_nodes": 480, "n_identifiers": 34, "random_cut": "def reload_images(self, group_method, img_list):\n \n logger.info(\"Preparing to group...\")\n if group_method == 'group_blur':\n filename_list, image_list = self._get_images()\n blurs = [self.estimate_blur(img) for img in image_list]\n temp_list = list(zip(filename_list, blurs))\n elif group_method == 'group_blur_fft':\n filename_list, image_list = self._get_images()\n fft_bl", "d_id": 20689, "documentation": { "docstring": "\n Reloads the image list by replacing the comparative values with those\n that the chosen grouping method expects.\n :param group_method: str name of the grouping method that will be used.\n :param img_list: image list that has been sorted by one of the sort\n methods.\n :return: img_list but with the comparative values that the chosen\n grouping method expects.\n ", "n_words": 56, "vocab_size": 33, "n_whitespaces": 113, "language": "en" } }, { "id": 67455, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/selling/report/territory_wise_sales/territory_wise_sales.py", "file_name": "territory_wise_sales.py", "fun_name": "get_sales_orders", "commit_message": "style: format code with black", "code": "def get_sales_orders(quotations):\n\tif not quotations:\n\t\treturn []\n\n\tquotation_names = [q.name for q in quotations]\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\t\", \".join([\"%s\"] * len(quotation_names))\n\t\t),\n\t\ttuple(quotation_names),\n\t\tas_dict=1,\n\t) # nosec\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 17, "n_words": 27, "vocab_size": 26, "complexity": 3, "nloc": 15, "token_counts": 59, "n_ast_nodes": 98, "n_identifiers": 13, "random_cut": "def get_sales_orders(quotations):\n\tif not quotations:\n\t\treturn []\n\n\tquotation_names = [q.name for q in quotations]\n\n\treturn frappe.db.sql(\n\t\t.form", "d_id": 14529, "documentation": { "docstring": "\n\tSELECT so.`name`, so.`base_grand_total`, 
soi.prevdoc_docname as quotation\n\tFROM `tabSales Order` so, `tabSales Order Item` soi\n\tWHERE so.docstatus=1 AND so.name = soi.parent AND soi.prevdoc_docname in ({0})\n\t", "n_words": 24, "vocab_size": 21, "n_whitespaces": 21, "language": "en" } }, { "id": 253927, "commit_id": "2b1acfbfe84b6c9c4756a615620f9b376d48085a", "repo": "d2l-en", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "download_extract", "commit_message": "JAX: Fix CI bug; enable build all sections", "code": "def download_extract(name, folder=None):\n \n fname = download(name)\n base_dir = os.path.dirname(fname)\n data_dir, ext = os.path.splitext(fname)\n if ext == '.zip':\n fp = zipfile.ZipFile(fname, 'r')\n elif ext in ('.tar', '.gz'):\n fp = tarfile.open(fname, 'r')\n else:\n assert False, 'Only zip/tar files can be extracted.'\n fp.extractall(base_dir)\n return os.path.join(base_dir, folder) if folder else data_dir\n\n", "url": "https://github.com/d2l-ai/d2l-en.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 95, "n_words": 47, "vocab_size": 38, "complexity": 4, "nloc": 12, "token_counts": 99, "n_ast_nodes": 166, "n_identifiers": 19, "random_cut": "def download_extract(name, folder=None):\n \n fname = download(name)\n base_dir = os.path.dirname(fname)\n data_dir, ext = os.path.splitext(fname)\n if ext == '.zip':\n fp = zipfile.ZipFile(fname, 'r')\n elif ext in ('.t", "d_id": 74310, "documentation": { "docstring": "Download and extract a zip/tar file.\n\n Defined in :numref:`sec_utils`", "n_words": 9, "vocab_size": 9, "n_whitespaces": 11, "language": "en" } }, { "id": 271957, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_v1.py", "file_name": "training_v1.py", "fun_name": "sample_weights_mismatch", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def sample_weights_mismatch(self):\n \n # If there is a mismatch between sample weight mode and the placeholders\n # created, then recompile the sub-graphs that depend on sample weights.\n return (\n self.sample_weight_mode is not None and self.sample_weight is None\n ) or (\n self.sample_weight_mode is None and self.sample_weight is not None\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 112, "n_words": 48, "vocab_size": 31, "complexity": 4, "nloc": 6, "token_counts": 36, "n_ast_nodes": 59, "n_identifiers": 4, "random_cut": "def sample_weights_mismatch(self):\n \n # If there is a mismatch between sample weight mode and the placeholders\n # created, then recompile the sub-graphs that depend on sample weights.\n return (\n self.sample_weight_mode is not None and self.sample_weight is N", "d_id": 80913, "documentation": { "docstring": "Check if the sample weight and the mode match or not.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 208426, "commit_id": "ce62a7a4b2c97bf8a30e8074e8fc18103a0718a0", "repo": "ipython", "path": "IPython/core/magics/script.py", "file_name": "script.py", "fun_name": "shebang", "commit_message": "avoid deprecated get_event_loop\n\nuse our own `async_helpers.get_asyncio_loop` to track the global event loop\n\nscript magics use dedicated background asyncio loop\ninstead of trying to work on the main loop, which may or may not exist\n\n_AsyncIOProxy wraps background script objects to transfer awaitables across loops\n\nonly works for 
coroutine methods, which might be good enough? Works for read, etc.", "code": "def shebang(self, line, cell):\n \n\n # Create the event loop in which to run script magics\n # this operates on a background thread\n if self.event_loop is None:\n if sys.platform == \"win32\":\n # don't override the current policy,\n # just create an event loop\n event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop()\n else:\n event_loop = asyncio.new_event_loop()\n self.event_loop = event_loop\n\n # start the loop in a background thread\n asyncio_thread = Thread(target=event_loop.run_forever, daemon=True)\n asyncio_thread.start()\n else:\n event_loop = self.event_loop\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 241, "n_words": 69, "vocab_size": 45, "complexity": 18, "nloc": 79, "token_counts": 515, "n_ast_nodes": 131, "n_identifiers": 16, "random_cut": "def shebang(self, line, cell):\n \n\n # Create the event loop in which to run script magics\n # this operates on a background thread\n if self.event_loop is None:\n if sys.platform == \"win32\":\n # don't override the current policy,\n # just create an event loop\n event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop()\n else:\n event_loop = asyncio.new_event_loop()\n self.event_loop =", "d_id": 52324, "documentation": { "docstring": "Run a cell via a shell command\n\n The `%%script` line is like the #! line of script,\n specifying a program (bash, perl, ruby, etc.) with which to run.\n\n The rest of the cell is run by that program.\n\n Examples\n --------\n ::\n\n In [1]: %%script bash\n ...: for i in 1 2 3; do\n ...: echo $i\n ...: done\n 1\n 2\n 3\n ", "n_words": 61, "vocab_size": 49, "n_whitespaces": 198, "language": "en" } }, { "id": 63781, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py", "file_name": "__init__.py", "fun_name": "statistics", "commit_message": "upd; format", "code": "def statistics(self):\n \n try:\n return self._local.statistics\n except AttributeError:\n self._local.statistics = {}\n return self._local.statistics\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 12, "vocab_size": 9, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 53, "n_identifiers": 4, "random_cut": "def statistics(self):\n \n try:\n return self._local.statistics\n ", "d_id": 13503, "documentation": { "docstring": "Return a dictionary of runtime statistics.\n\n This dictionary will be empty when the controller has never been\n ran. When it is running or has ran previously it should have (but\n may not) have useful and/or informational keys and values when\n running is underway and/or completed.\n\n .. warning:: The keys in this dictionary **should** be some what\n stable (not changing), but there existence **may**\n change between major releases as new statistics are\n gathered or removed so before accessing keys ensure that\n they actually exist and handle when they do not.\n\n .. 
note:: The values in this dictionary are local to the thread\n running call (so if multiple threads share the same retrying\n object - either directly or indirectly) they will each have\n there own view of statistics they have collected (in the\n future we may provide a way to aggregate the various\n statistics from each thread).\n ", "n_words": 145, "vocab_size": 103, "n_whitespaces": 359, "language": "en" } }, { "id": 205919, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/dispatch/dispatcher.py", "file_name": "dispatcher.py", "fun_name": "send", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def send(self, sender, **named):\n \n if (\n not self.receivers\n or self.sender_receivers_cache.get(sender) is NO_RECEIVERS\n ):\n return []\n\n return [\n (receiver, receiver(signal=self, sender=sender, **named))\n for receiver in self._live_receivers(sender)\n ]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 116, "n_words": 26, "vocab_size": 25, "complexity": 4, "nloc": 10, "token_counts": 62, "n_ast_nodes": 95, "n_identifiers": 11, "random_cut": "def send(self, sender, **named):\n \n if (\n not self.receivers\n or self.sender_receivers_cache.get(sender) is NO_RECEIVERS\n ):\n return []\n\n return [\n (receiver, receiver(signal=self, sender=sender, **named))\n for receiver in self._live_receivers(sender)\n ]\n", "d_id": 51281, "documentation": { "docstring": "\n Send signal from sender to all connected receivers.\n\n If any receiver raises an error, the error propagates back through send,\n terminating the dispatch loop. So it's possible that all receivers\n won't be called if an error is raised.\n\n Arguments:\n\n sender\n The sender of the signal. Either a specific object or None.\n\n named\n Named arguments which will be passed to receivers.\n\n Return a list of tuple pairs [(receiver, response), ... 
].\n ", "n_words": 70, "vocab_size": 58, "n_whitespaces": 172, "language": "en" } }, { "id": 153192, "commit_id": "8d1004fdbdaa05700613c8e6287641a732acf606", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py", "file_name": "virtual_partition.py", "fun_name": "deploy_ray_func", "commit_message": "FIX-#3675: Expand virtual partitioning utility (#3886)\n\nCo-authored-by: mvashishtha \r\nCo-authored-by: jeffreykennethli \r\nCo-authored-by: Anatoly Myachev \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Mahesh Vashishtha \r\nCo-authored-by: Naren Krishna <92325366+naren-ponder@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Dmitry Chigarev <62142979+dchigarev@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Doris Lee \r\nCo-authored-by: Aditya Parameswaran \r\nCo-authored-by: Rehan Sohail Durrani \r\nCo-authored-by: Susmit Vengurlekar \r\nSigned-off-by: Devin Petersohn ", "code": "def deploy_ray_func(func, *args): # pragma: no cover\n \n result = func(*args)\n ip = get_node_ip_address()\n if isinstance(result, pandas.DataFrame):\n return result, len(result), len(result.columns), ip\n elif all(isinstance(r, pandas.DataFrame) for r in result):\n return [i for r in result for i in [r, len(r), len(r.columns), ip]]\n else:\n return [i for r in result for i in [r, None, None, ip]]\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 95, "n_words": 55, "vocab_size": 34, "complexity": 8, "nloc": 9, "token_counts": 114, "n_ast_nodes": 169, "n_identifiers": 14, "random_cut": "def deploy_ray_func(func, *args): # pragma: no cover\n \n result = func(*args)\n ip = get_node_ip_address()\n if isinstance(result, pandas.DataFrame):\n return result, len(result), len(result.columns), ip\n elif all(isinstance(r, pandas.DataFrame) for r in result):\n return [i for r in result for i in [r, len(r), l", "d_id": 35293, "documentation": { "docstring": "\n Execute a function on an axis partition in a worker process.\n\n Parameters\n ----------\n func : callable\n Function to be executed on an axis partition.\n *args : iterable\n Additional arguments that need to passed in ``func``.\n\n Returns\n -------\n list\n The result of the function ``func`` and metadata for it.\n\n Notes\n -----\n Ray functions are not detected by codecov (thus pragma: no cover).\n ", "n_words": 61, "vocab_size": 53, "n_whitespaces": 119, "language": "en" } }, { "id": 34307, "commit_id": "ac227093e41cecb07c7e0f2fc9a504850907bd06", "repo": "transformers", "path": "src/transformers/models/vilt/modeling_vilt.py", "file_name": "modeling_vilt.py", "fun_name": "_set_gradient_checkpointing", "commit_message": "Add ViLT (#14895)\n\n* First commit\r\n\r\n* Add conversion script\r\n\r\n* Make conversion script work for base model\r\n\r\n* More improvements\r\n\r\n* Update conversion script, works for vqa\r\n\r\n* Add indexing argument to meshgrid\r\n\r\n* Make conversion script work for ViltForPreTraining\r\n\r\n* Add ViltForPreTraining to docs\r\n\r\n* Fix device issue\r\n\r\n* Add processor\r\n\r\n* Add MinMaxResize to feature extractor\r\n\r\n* Implement call method of ViltProcessor\r\n\r\n* Fix tests\r\n\r\n* Add integration test\r\n\r\n* Add loss calculation for VQA\r\n\r\n* Improve tests\r\n\r\n* Improve some more tests\r\n\r\n* Debug tests\r\n\r\n* Small improvements\r\n\r\n* Add 
support for attention_mask\r\n\r\n* Remove mask_it\r\n\r\n* Add pixel_mask\r\n\r\n* Add tests for ViltFeatureExtractor\r\n\r\n* Improve tests\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning to conversion script\r\n\r\n* Minor fixes\r\n\r\n* Add support for image_embeds, update docstrings to markdown\r\n\r\n* Update docs to markdown\r\n\r\n* Improve conversion script\r\n\r\n* Rename ViltForPreTraining to ViltForMaskedLM\r\n\r\n* Improve conversion script\r\n\r\n* Convert docstrings to markdown\r\n\r\n* Fix code example of retrieval model\r\n\r\n* Properly convert masked language model\r\n\r\n* Add integration test for nlvr\r\n\r\n* Fix code quality\r\n\r\n* Apply suggestions from code review\r\n\r\n* Add copied from statements\r\n\r\n* Fix pretrained_config_archive_map\r\n\r\n* Fix docs\r\n\r\n* Add model to README\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply more suggestions from code review\r\n\r\n* Make code more readable\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning to the tests\r\n\r\n* Rename ViltForVisualQuestionAnswering to ViltForQuestionAnswering\r\n\r\n* Replace pixel_values_2 by single tensor\r\n\r\n* Add hidden_states and attentions\r\n\r\n* Fix one more test\r\n\r\n* Fix all tests\r\n\r\n* Update year\r\n\r\n* Fix rebase issues\r\n\r\n* Fix another rebase issue\r\n\r\n* Remove ViltForPreTraining from auto mapping\r\n\r\n* Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval\r\n\r\n* Make it possible to use BertTokenizerFast in the processor\r\n\r\n* Use BertTokenizerFast by default\r\n\r\n* Rename ViltForNaturalLanguageVisualReasoning, define custom model output\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, ViltEncoder):\n module.gradient_checkpointing = value\n\n\nVILT_START_DOCSTRING = r\n\nVILT_INPUTS_DOCSTRING = r\n\nVILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.\",\n VILT_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.\",\n VILT_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 54, "n_words": 36, "vocab_size": 31, "complexity": 2, "nloc": 3, "token_counts": 24, "n_ast_nodes": 71, "n_identifiers": 11, "random_cut": "def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, ViltEncoder):\n module.gradient_checkpointing = value\n\n\nVILT_START_DOCSTRING = r\n\nVILT_INPUTS_DOCST", "d_id": 6254, "documentation": { "docstring": "\n This model is a PyTorch `torch.nn.Module `_ subclass. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ViltConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See\n [`ViltFeatureExtractor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See\n [`ViltFeatureExtractor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "n_words": 802, "vocab_size": 200, "n_whitespaces": 1685, "language": "en" } }, { "id": 321601, "commit_id": "c5a51eb0bcbab0b68cdfbf3eba2e681cff2adf7a", "repo": "qutebrowser", "path": "qutebrowser/browser/greasemonkey.py", "file_name": "greasemonkey.py", "fun_name": "needs_document_end_workaround", "commit_message": "Drop Qt < 5.15\n\nFixes #7091\n\nTODO: Add changelog", "code": "def needs_document_end_workaround(self):\n \n if objects.backend == usertypes.Backend.QtWebKit:\n return False\n\n assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend\n\n broken_scripts = [\n ('http://userstyles.org', None),\n ('https://github.com/ParticleCore', 'Iridium'),\n ]\n return any(self._matches_id(namespace=namespace, name=name)\n for namespace, name in broken_scripts)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 122, "n_words": 29, "vocab_size": 25, "complexity": 3, "nloc": 10, "token_counts": 71, "n_ast_nodes": 112, "n_identifiers": 13, "random_cut": "def needs_document_end_workaround(self):\n \n if objects.backend == usertypes.Backend.QtWebKit:\n return False\n\n assert", "d_id": 117818, "documentation": { "docstring": "Check whether to force @run-at document-end.\n\n This needs to be done on QtWebEngine for known-broken scripts.\n\n On Qt 5.12, accessing the DOM isn't possible with \"@run-at\n document-start\". It was documented to be impossible before, but seems\n to work fine.\n\n However, some scripts do DOM access with \"@run-at document-start\". 
Fix\n those by forcing them to use document-end instead.\n ", "n_words": 57, "vocab_size": 48, "n_whitespaces": 106, "language": "en" } }, { "id": 314204, "commit_id": "90e1fb6ce2faadb9a35fdbe1774fce7b4456364f", "repo": "core", "path": "homeassistant/components/weather/__init__.py", "file_name": "__init__.py", "fun_name": "_temperature_unit", "commit_message": "Weather unit conversion (#73441)\n\nCo-authored-by: Erik ", "code": "def _temperature_unit(self) -> str:\n \n if (\n weather_option_temperature_unit := self._weather_option_temperature_unit\n ) is not None:\n return weather_option_temperature_unit\n\n return self._default_temperature_unit\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 67, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 10, "token_counts": 26, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def _temperature_unit(self) -> str:\n \n if (\n weather_option_temperature_unit := self._weather_o", "d_id": 112812, "documentation": { "docstring": "Return the converted unit of measurement for temperature.\n\n Should not be set by integrations.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 28, "language": "en" } }, { "id": 109202, "commit_id": "c5fd8804204ee715ee008c35f96d6e95f8dfcc29", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_pdf.py", "file_name": "backend_pdf.py", "fun_name": "fontName", "commit_message": "ENH: implement font fallback for PDF", "code": "def fontName(self, fontprop):\n \n\n if isinstance(fontprop, str):\n filenames = [fontprop]\n elif mpl.rcParams['pdf.use14corefonts']:\n filenames = _fontManager._find_fonts_by_props(\n fontprop, fontext='afm', directory=RendererPdf._afm_font_dir\n )\n else:\n filenames = _fontManager._find_fonts_by_props(fontprop)\n first_Fx = None\n for fname in filenames:\n Fx = self.fontNames.get(fname)\n if not first_Fx:\n first_Fx = Fx\n if Fx is None:\n Fx = next(self._internal_font_seq)\n self.fontNames[fname] = Fx\n _log.debug('Assigning font %s = %r', Fx, fname)\n if not first_Fx:\n first_Fx = Fx\n\n # find_fontsprop's first value always adheres to\n # findfont's value, so technically no behaviour change\n return first_Fx\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 327, "n_words": 78, "vocab_size": 53, "complexity": 7, "nloc": 21, "token_counts": 122, "n_ast_nodes": 200, "n_identifiers": 23, "random_cut": "def fontName(self, fontprop):\n \n\n if isinstance(fontprop, str):\n filenames = [fontprop]\n elif mpl.rcParams['pdf.use14corefonts']:\n filenames = _fontManager._find_fonts_by_props(\n fontprop, fontext='afm', directory=RendererPdf._afm_font_dir\n )\n else:\n filenames = _fontManager._find_fonts_by_props(fontprop)\n first_Fx = None\n for fname", "d_id": 23476, "documentation": { "docstring": "\n Select a font based on fontprop and return a name suitable for\n Op.selectfont. 
If fontprop is a string, it will be interpreted\n as the filename of the font.\n ", "n_words": 28, "vocab_size": 24, "n_whitespaces": 57, "language": "en" } }, { "id": 32353, "commit_id": "12d66b47012c9258f9557e6d3a0c13bcd1c72871", "repo": "transformers", "path": "src/transformers/models/owlvit/feature_extraction_owlvit.py", "file_name": "feature_extraction_owlvit.py", "fun_name": "center_to_corners_format", "commit_message": "Add OWL-ViT model for zero-shot object detection (#17938)\n\n* add owlvit model skeleton\r\n\r\n* add class and box predictor heads\r\n\r\n* convert modified flax clip to pytorch\r\n\r\n* fix box and class predictors\r\n\r\n* add OwlViTImageTextEmbedder\r\n\r\n* convert class and box head checkpoints\r\n\r\n* convert image text embedder checkpoints\r\n\r\n* add object detection head\r\n\r\n* fix bugs\r\n\r\n* update conversion script\r\n\r\n* update conversion script\r\n\r\n* fix q,v,k,out weight conversion conversion\r\n\r\n* add owlvit object detection output\r\n\r\n* fix bug in image embedder\r\n\r\n* fix bugs in text embedder\r\n\r\n* fix positional embeddings\r\n\r\n* fix bug in inference mode vision pooling\r\n\r\n* update docs, init tokenizer and processor files\r\n\r\n* support batch processing\r\n\r\n* add OwlViTProcessor\r\n\r\n* remove merge conflicts\r\n\r\n* readd owlvit imports\r\n\r\n* fix bug in OwlViTProcessor imports\r\n\r\n* fix bugs in processor\r\n\r\n* update docs\r\n\r\n* fix bugs in processor\r\n\r\n* update owlvit docs\r\n\r\n* add OwlViTFeatureExtractor\r\n\r\n* style changes, add postprocess method to feature extractor\r\n\r\n* add feature extractor and processor tests\r\n\r\n* add object detection tests\r\n\r\n* update conversion script\r\n\r\n* update config paths\r\n\r\n* update config paths\r\n\r\n* fix configuration paths and bugs\r\n\r\n* fix bugs in OwlViT tests\r\n\r\n* add import checks to processor\r\n\r\n* fix docs and minor issues\r\n\r\n* fix docs and minor issues\r\n\r\n* fix bugs and issues\r\n\r\n* fix bugs and issues\r\n\r\n* fix bugs and issues\r\n\r\n* fix bugs and issues\r\n\r\n* update docs and examples\r\n\r\n* fix bugs and issues\r\n\r\n* update conversion script, fix positional embeddings\r\n\r\n* process 2D input ids, update tests\r\n\r\n* fix style and quality issues\r\n\r\n* update docs\r\n\r\n* update docs and imports\r\n\r\n* update OWL-ViT index.md\r\n\r\n* fix bug in OwlViT feature ext tests\r\n\r\n* fix code examples, return_dict by default\r\n\r\n* return_dict by default\r\n\r\n* minor fixes, add tests to processor\r\n\r\n* small fixes\r\n\r\n* add output_attentions arg to main model\r\n\r\n* fix bugs\r\n\r\n* remove output_hidden_states arg from main model\r\n\r\n* update self.config variables\r\n\r\n* add option to return last_hidden_states\r\n\r\n* fix bug in config variables\r\n\r\n* fix copied from statements\r\n\r\n* fix small issues and bugs\r\n\r\n* fix bugs\r\n\r\n* fix bugs, support greyscale images\r\n\r\n* run fixup\r\n\r\n* update repo name\r\n\r\n* merge OwlViTImageTextEmbedder with obj detection head\r\n\r\n* fix merge conflict\r\n\r\n* fix merge conflict\r\n\r\n* make fixup\r\n\r\n* fix bugs\r\n\r\n* fix bugs\r\n\r\n* add additional processor test", "code": "def center_to_corners_format(x):\n \n x_center, y_center, width, height = x.unbind(-1)\n boxes = [(x_center - 0.5 * width), (y_center - 0.5 * height), (x_center + 0.5 * width), (y_center + 0.5 * height)]\n return torch.stack(boxes, dim=-1)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 33, "vocab_size": 22, "complexity": 1, "nloc": 4, "token_counts": 76, "n_ast_nodes": 103, "n_identifiers": 11, "random_cut": "def center_to_corners_format(x):\n \n x_center, y_center, width, he", "d_id": 5912, "documentation": { "docstring": "\n Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format\n (left, top, right, bottom).\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 31, "language": "en" } }, { "id": 204395, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/cache/backends/base.py", "file_name": "base.py", "fun_name": "add", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):\n \n raise NotImplementedError(\n \"subclasses of BaseCache must provide an add() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 8, "random_cut": "def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):\n \n raise NotImplementedError(\n \"subclasses of BaseCache must pro", "d_id": 50726, "documentation": { "docstring": "\n Set a value in the cache if the key does not already exist. If\n timeout is given, use that timeout for the key; otherwise use the\n default cache timeout.\n\n Return True if the value was stored, False otherwise.\n ", "n_words": 38, "vocab_size": 29, "n_whitespaces": 74, "language": "en" } }, { "id": 177332, "commit_id": "8a325d26aa7fdd3a72580c4720fa97f971bbefcb", "repo": "networkx", "path": "networkx/linalg/laplacianmatrix.py", "file_name": "laplacianmatrix.py", "fun_name": "normalized_laplacian_matrix", "commit_message": "Use scipy.sparse array datastructure (#6037)\n\n* Use scipy.sparse array datastructure\r\n\r\n* Add reminder to rm wrapper when scipy adds creation fns.\r\n\r\n* Rm mention of np matrix from code comment.\r\n\r\n* Update networkx/algorithms/bipartite/matrix.py\r\n\r\nCo-authored-by: Stefan van der Walt \r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Stefan van der Walt ", "code": "def normalized_laplacian_matrix(G, nodelist=None, weight=\"weight\"):\n r\n import numpy as np\n import scipy as sp\n import scipy.sparse # call as sp.sparse\n\n if nodelist is None:\n nodelist = list(G)\n A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=\"csr\")\n n, m = A.shape\n diags = A.sum(axis=1)\n # TODO: rm csr_array wrapper when spdiags can produce arrays\n D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format=\"csr\"))\n L = D - A\n with sp.errstate(divide=\"ignore\"):\n diags_sqrt = 1.0 / np.sqrt(diags)\n diags_sqrt[np.isinf(diags_sqrt)] = 0\n # TODO: rm csr_array wrapper when spdiags can produce arrays\n DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format=\"csr\"))\n return DH @ (L @ DH)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 156, "n_words": 94, "vocab_size": 61, "complexity": 2, "nloc": 66, "token_counts": 175, "n_ast_nodes": 276, "n_identifiers": 30, "random_cut": "def normalized_laplacian_matrix(G, nodelist=None, weight=\"weight\"):\n r\n import numpy as np\n import scipy as sp\n import scipy.sparse 
# call as sp.sparse\n\n if nodelist is None:\n nodelist = list(G)\n A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=\"csr\")\n n, m = A.shape\n diags = A.sum(axis=1)\n # TODO: rm csr_array wrapper when spdiags can produce arrays\n D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format=\"csr\"))\n L = D - A\n with sp.errstate(divide=\"ignore\"):\n diags_sqrt = 1.0 / np.sqrt(diags)\n diags_sqrt[np.isinf(diags_sqrt)] = 0\n # TODO: rm csr_array wrapper when spdi", "d_id": 42351, "documentation": { "docstring": "Returns the normalized Laplacian matrix of G.\n\n The normalized graph Laplacian is the matrix\n\n .. math::\n\n N = D^{-1/2} L D^{-1/2}\n\n where `L` is the graph Laplacian and `D` is the diagonal matrix of\n node degrees [1]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n N : SciPy sparse array\n The normalized Laplacian matrix of G.\n\n Notes\n -----\n For MultiGraph, the edges weights are summed.\n See :func:`to_numpy_array` for other options.\n\n If the Graph contains selfloops, D is defined as ``diag(sum(A, 1))``, where A is\n the adjacency matrix [2]_.\n\n See Also\n --------\n laplacian_matrix\n normalized_laplacian_spectrum\n\n References\n ----------\n .. [1] Fan Chung-Graham, Spectral Graph Theory,\n CBMS Regional Conference Series in Mathematics, Number 92, 1997.\n .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized\n Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 
90-98,\n March 2007.\n ", "n_words": 190, "vocab_size": 126, "n_whitespaces": 331, "language": "en" } }, { "id": 260486, "commit_id": "01e6449e653a058206e7a2a1aa3270f851769c4b", "repo": "scikit-learn", "path": "sklearn/feature_extraction/image.py", "file_name": "image.py", "fun_name": "extract_patches_2d", "commit_message": "DOC Ensures that extract_patches_2d passes numpydoc validation (#23926)\n\nCo-authored-by: Olivor Holman ", "code": "def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):\n \n i_h, i_w = image.shape[:2]\n p_h, p_w = patch_size\n\n if p_h > i_h:\n raise ValueError(\n \"Height of the patch should be less than the height of the image.\"\n )\n\n if p_w > i_w:\n raise ValueError(\n \"Width of the patch should be less than the width of the image.\"\n )\n\n image = check_array(image, allow_nd=True)\n image = image.reshape((i_h, i_w, -1))\n n_colors = image.shape[-1]\n\n extracted_patches = _extract_patches(\n image, patch_shape=(p_h, p_w, n_colors), extraction_step=1\n )\n\n n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)\n if max_patches:\n rng = check_random_state(random_state)\n i_s = rng.randint(i_h - p_h + 1, size=n_patches)\n j_s = rng.randint(i_w - p_w + 1, size=n_patches)\n patches = extracted_patches[i_s, j_s, 0]\n else:\n patches = extracted_patches\n\n patches = patches.reshape(-1, p_h, p_w, n_colors)\n # remove the color dimension if useless\n if patches.shape[-1] == 1:\n return patches.reshape((n_patches, p_h, p_w))\n else:\n return patches\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 293, "n_words": 136, "vocab_size": 80, "complexity": 5, "nloc": 30, "token_counts": 221, "n_ast_nodes": 336, "n_identifiers": 28, "random_cut": "def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):\n \n i_h, i_w = image.shape[:2]\n p_h, p_w = patch_size\n\n if p_h > i_h:\n raise ValueError(\n \"Height of the patch should be less than the height of the image.\"\n )\n\n if p_w > i_w:\n raise ValueError(\n \"Width of the patch should be less than the width of the image.\"\n )\n\n image = check_array(image, allow_nd=True)\n image = image.reshape((i_h, i_w, -1))\n n_colors = image.shape[-1]\n\n extracted_patches = _extract_patches(\n image, patch_shape=(p_h, p_w, n_colors), extraction_step=1\n )\n\n n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)\n if max_patches:\n rng = check_random_state(random_state)\n i_s = rng.randint(i_h - p_h + 1, size=n_patches)\n j_s = rng.randint(i_w - p_w + 1, size=n_patches)\n patches = extracted_patches[i_s, j_s, 0]\n else:\n patches = extracted_patches\n\n patches = patches.reshape(-1, p_h, p_w, n_colors)\n # remove the color dimension if useless\n if patches.s", "d_id": 76282, "documentation": { "docstring": "Reshape a 2D image into a collection of patches.\n\n The resulting patches are allocated in a dedicated array.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n image : ndarray of shape (image_height, image_width) or \\\n (image_height, image_width, n_channels)\n The original image data. For color images, the last dimension specifies\n the channel: a RGB image would have `n_channels=3`.\n\n patch_size : tuple of int (patch_height, patch_width)\n The dimensions of one patch.\n\n max_patches : int or float, default=None\n The maximum number of patches to extract. 
If `max_patches` is a float\n between 0 and 1, it is taken to be a proportion of the total number\n of patches.\n\n random_state : int, RandomState instance, default=None\n Determines the random number generator used for random sampling when\n `max_patches` is not None. Use an int to make the randomness\n deterministic.\n See :term:`Glossary `.\n\n Returns\n -------\n patches : array of shape (n_patches, patch_height, patch_width) or \\\n (n_patches, patch_height, patch_width, n_channels)\n The collection of patches extracted from the image, where `n_patches`\n is either `max_patches` or the total number of patches that can be\n extracted.\n\n Examples\n --------\n >>> from sklearn.datasets import load_sample_image\n >>> from sklearn.feature_extraction import image\n >>> # Use the array data from the first image in this dataset:\n >>> one_image = load_sample_image(\"china.jpg\")\n >>> print('Image shape: {}'.format(one_image.shape))\n Image shape: (427, 640, 3)\n >>> patches = image.extract_patches_2d(one_image, (2, 2))\n >>> print('Patches shape: {}'.format(patches.shape))\n Patches shape: (272214, 2, 2, 3)\n >>> # Here are just two of these patches:\n >>> print(patches[1])\n [[[174 201 231]\n [174 201 231]]\n [[173 200 230]\n [173 200 230]]]\n >>> print(patches[800])\n [[[187 214 243]\n [188 215 244]]\n [[187 214 243]\n [188 215 244]]]\n ", "n_words": 266, "vocab_size": 165, "n_whitespaces": 483, "language": "en" } }, { "id": 205422, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/base.py", "file_name": "base.py", "fun_name": "refresh_from_db", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def refresh_from_db(self, using=None, fields=None):\n \n if fields is None:\n self._prefetched_objects_cache = {}\n else:\n prefetched_objects_cache = getattr(self, \"_prefetched_objects_cache\", ())\n for field in fields:\n if field in prefetched_objects_cache:\n del prefetched_objects_cache[field]\n fields.remove(field)\n if not fields:\n return\n if any(LOOKUP_SEP in f for f in fields):\n raise ValueError(\n 'Found \"%s\" in fields argument. 
Relations and transforms '\n \"are not allowed in fields.\" % LOOKUP_SEP\n )\n\n hints = {\"instance\": self}\n db_instance_qs = self.__class__._base_manager.db_manager(\n using, hints=hints\n ).filter(pk=self.pk)\n\n # Use provided fields, if not set then reload all non-deferred fields.\n deferred_fields = self.get_deferred_fields()\n if fields is not None:\n fields = list(fields)\n db_instance_qs = db_instance_qs.only(*fields)\n elif deferred_fields:\n fields = [\n f.attname\n for f in self._meta.concrete_fields\n if f.attname not in deferred_fields\n ]\n db_instance_qs = db_instance_qs.only(*fields)\n\n db_instance = db_instance_qs.get()\n non_loaded_fields = db_instance.get_deferred_fields()\n for field in self._meta.concrete_fields:\n if field.attname in non_loaded_fields:\n # This field wasn't refreshed - skip ahead.\n continue\n setattr(self, field.attname, getattr(db_instance, field.attname))\n # Clear cached foreign keys.\n if field.is_relation and field.is_cached(self):\n field.delete_cached_value(self)\n\n # Clear cached relations.\n for field in self._meta.related_objects:\n if field.is_cached(self):\n field.delete_cached_value(self)\n\n self._state.db = db_instance._state.db\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 694, "n_words": 165, "vocab_size": 101, "complexity": 17, "nloc": 43, "token_counts": 278, "n_ast_nodes": 448, "n_identifiers": 37, "random_cut": "def refresh_from_db(self, using=None, fields=None):\n \n if fields is None:\n self._prefetched_objects_cache = {}\n else:\n prefetched_objects_cache = getattr(self, \"_prefetched_objects_cache\", ())\n for field in fields:\n if field in prefetched_objects_cache:\n del prefetched_objects_cache[field]\n fields.remove(field)\n if not fields:\n return\n if a", "d_id": 51124, "documentation": { "docstring": "\n Reload field values from the database.\n\n By default, the reloading happens from the database this instance was\n loaded from, or by the read router if this instance wasn't loaded from\n any database. The using parameter will override the default.\n\n Fields can be used to specify which fields to reload. The fields\n should be an iterable of field attnames. 
If fields is None, then\n all non-deferred fields are reloaded.\n\n When accessing deferred fields of an instance, the deferred loading\n of the field will call this method.\n ", "n_words": 85, "vocab_size": 58, "n_whitespaces": 156, "language": "en" } }, { "id": 217711, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/client.py", "file_name": "client.py", "fun_name": "set_tunnel", "commit_message": "add python 3.10.4 for windows", "code": "def set_tunnel(self, host, port=None, headers=None):\n \n\n if self.sock:\n raise RuntimeError(\"Can't set up tunnel for established connection\")\n\n self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)\n if headers:\n self._tunnel_headers = headers\n else:\n self._tunnel_headers.clear()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 95, "n_words": 27, "vocab_size": 25, "complexity": 3, "nloc": 8, "token_counts": 59, "n_ast_nodes": 96, "n_identifiers": 12, "random_cut": "def set_tunnel(self, host, port=None, headers=None):\n \n\n if self.sock:\n raise RuntimeError(\"Can't set up tunnel for established connection\")\n\n self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)\n if headers", "d_id": 54894, "documentation": { "docstring": "Set up host and port for HTTP CONNECT tunnelling.\n\n In a connection that uses HTTP CONNECT tunneling, the host passed to the\n constructor is used as a proxy server that relays all communication to\n the endpoint passed to `set_tunnel`. This done by sending an HTTP\n CONNECT request to the proxy server when the connection is established.\n\n This method must be called before the HTTP connection has been\n established.\n\n The headers argument should be a mapping of extra HTTP headers to send\n with the CONNECT request.\n ", "n_words": 85, "vocab_size": 54, "n_whitespaces": 148, "language": "en" } }, { "id": 120111, "commit_id": "36df8619d74672b0072e7880bcdd257c4a83e9f1", "repo": "jax", "path": "jax/_src/config.py", "file_name": "config.py", "fun_name": "explicit_device_get_scope", "commit_message": "Bump minimum jaxlib version to 0.3.2 and remove transfer guard compatibility code", "code": "def explicit_device_get_scope() -> Iterator[None]:\n \n state = transfer_guard_lib.thread_local_state()\n prev = state.explicit_device_get\n state.explicit_device_get = True\n try:\n yield\n finally:\n state.explicit_device_get = prev\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 19, "vocab_size": 13, "complexity": 2, "nloc": 9, "token_counts": 37, "n_ast_nodes": 66, "n_identifiers": 7, "random_cut": "def explicit_device_get_scope() -> Iterator[None]:\n \n state = transfer_", "d_id": 26780, "documentation": { "docstring": "Indicates that the current context is an explicit device_get() call.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 303482, "commit_id": "c580bce879b6c2f68c4ea45707b5a05ee88c6ecc", "repo": "core", "path": "homeassistant/components/homekit_controller/entity.py", "file_name": "entity.py", "fun_name": "accessory_info", "commit_message": "Move HKC entity classes into entity.py (#76333)", "code": "def accessory_info(self) -> Service:\n \n return self.accessory.services.first(\n service_type=ServicesTypes.ACCESSORY_INFORMATION\n )\n", "url": "https://github.com/home-assistant/core.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 23, "n_ast_nodes": 39, "n_identifiers": 9, "random_cut": "def accessory_info(self) -> Service:\n \n return self.accessory.services.first(\n service_type=ServicesTypes.ACCESSORY_INFORMATION\n )\n", "d_id": 102302, "documentation": { "docstring": "Information about the make and model of an accessory.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 63181, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "_always_object", "commit_message": "upd; format", "code": "def _always_object(classes):\n \n if object not in classes:\n return classes + (object,)\n return classes\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 13, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 21, "n_ast_nodes": 35, "n_identifiers": 3, "random_cut": "def _always_object(classes):\n \n if object not in classes:\n return classes + (o", "d_id": 13188, "documentation": { "docstring": "\n Ensure object appears in the mro even\n for old-style classes.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 20, "language": "en" } }, { "id": 178909, "commit_id": "abfb99b0a05dd76d2ecc6ebc20732a271857c6c8", "repo": "Nuitka", "path": "nuitka/freezer/IncludedDataFiles.py", "file_name": "IncludedDataFiles.py", "fun_name": "addIncludedDataFilesFromFileOptions", "commit_message": "Plugins: Massive cleanup of data file handling\n\n* Move data file handling out of standalone only, allowing support\n for other modes as well.\n\n* Attach logger and tags to data file objects.", "code": "def addIncludedDataFilesFromFileOptions():\n \n\n for included_datafile in _addIncludedDataFilesFromFileOptions():\n addIncludedDataFile(included_datafile)\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 3, "token_counts": 16, "n_ast_nodes": 30, "n_identifiers": 4, "random_cut": "def addIncludedDataFilesFromFileOptions():\n \n\n for included_datafile in _addIncludedDataFilesFromFileOptions():\n ad", "d_id": 42857, "documentation": { "docstring": "Early data files, from user options that work with file system.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 266837, "commit_id": "871b2ca73adcba3a35551247cf839246cf121231", "repo": "ansible", "path": "lib/ansible/utils/_junit_xml.py", "file_name": "_junit_xml.py", "fun_name": "_attributes", "commit_message": "Simplify existing type hints.", "code": "def _attributes(**kwargs) -> dict[str, str]:\n \n return {key: str(value) for key, value in kwargs.items() if value is not None}\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 24, "n_words": 18, "vocab_size": 17, "complexity": 3, "nloc": 3, "token_counts": 38, "n_ast_nodes": 60, "n_identifiers": 7, "random_cut": "def _attributes(**kwargs) -> dict[str, str]:\n \n return {key: str(value) for key, valu", "d_id": 78617, "documentation": { "docstring": "Return the given kwargs as a 
dictionary with values converted to strings. Items with a value of None will be omitted.", "n_words": 21, "vocab_size": 19, "n_whitespaces": 20, "language": "en" } }, { "id": 222132, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ctypes/test/test_pointers.py", "file_name": "test_pointers.py", "fun_name": "test_charpp", "commit_message": "add python 3.10.4 for windows", "code": "def test_charpp(self):\n \n dll = CDLL(_ctypes_test.__file__)\n func = dll._testfunc_c_p_p\n func.restype = c_char_p\n argv = (c_char_p * 2)()\n argc = c_int( 2 )\n argv[0] = b'hello'\n argv[1] = b'world'\n result = func( byref(argc), argv )\n self.assertEqual(result, b'world')\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 105, "n_words": 35, "vocab_size": 26, "complexity": 1, "nloc": 10, "token_counts": 73, "n_ast_nodes": 120, "n_identifiers": 16, "random_cut": "def test_charpp(self):\n \n dll = CDLL(_ctypes_test.__file__)\n func = dll._testfunc_c_p_p\n func.restype = c_char_p\n argv = (c_char_p * 2)()\n argc = c_int( 2 )\n arg", "d_id": 56530, "documentation": { "docstring": "Test that a character pointer-to-pointer is correctly passed", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 160904, "commit_id": "57d04d883e874c611091933c4c36e1cd43ea0e04", "repo": "numpy", "path": "numpy/testing/tests/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_error_message_unsigned", "commit_message": "TST: Add a failing test case to demonstrate the bug gh2176", "code": "def test_error_message_unsigned(self):\n \n # Ensure to test for potential overflow in the case of:\n # x - y\n # and\n # y - x\n x = np.asarray([0, 1, 8], dtype='uint8')\n y = np.asarray([4, 4, 4], dtype='uint8')\n with pytest.raises(AssertionError) as exc_info:\n assert_allclose(x, y, atol=3)\n msgs = str(exc_info.value).split('\\n')\n assert_equal(msgs[4], 'Max absolute difference: 4')\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 145, "n_words": 50, "vocab_size": 39, "complexity": 1, "nloc": 7, "token_counts": 84, "n_ast_nodes": 143, "n_identifiers": 18, "random_cut": "def test_error_message_unsigned(self):\n \n # Ensure to test for potential overflow in the case of:\n # x - y\n ", "d_id": 38792, "documentation": { "docstring": "Check the the message is formatted correctly when overflow can occur\n (gh21768)", "n_words": 12, "vocab_size": 11, "n_whitespaces": 21, "language": "en" } }, { "id": 293655, "commit_id": "2aaeb1fa99f3f691a5c4adfff984e25bf96d787d", "repo": "core", "path": "homeassistant/components/matrix/__init__.py", "file_name": "__init__.py", "fun_name": "_join_or_get_room", "commit_message": "Fix finding matrix room that is already joined (#67967)\n\nAfter some debugging, it seems room.canonical_alias contains the\r\nroom alias that matches the room_id_or_alias value but is not\r\ncontained in room.aliases (which is empty). 
As a result, the\r\nmatrix component thought the room wasn't alread joined, joins\r\nagain, and this replaces the previous room which had the listener.\r\nThis resulted in the component callback not being called for new\r\nmessages in the room.\r\n\r\nThis fixes #66372", "code": "def _join_or_get_room(self, room_id_or_alias):\n \n rooms = self._client.get_rooms()\n if room_id_or_alias in rooms:\n _LOGGER.debug(\"Already in room %s\", room_id_or_alias)\n return rooms[room_id_or_alias]\n\n for room in rooms.values():\n if room.room_id not in self._aliases_fetched_for:\n room.update_aliases()\n self._aliases_fetched_for.add(room.room_id)\n\n if (\n room_id_or_alias in room.aliases\n or room_id_or_alias == room.canonical_alias\n ):\n _LOGGER.debug(\n \"Already in room %s (known as %s)\", room.room_id, room_id_or_alias\n )\n return room\n\n room = self._client.join_room(room_id_or_alias)\n _LOGGER.info(\"Joined room %s (known as %s)\", room.room_id, room_id_or_alias)\n return room\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 292, "n_words": 64, "vocab_size": 39, "complexity": 6, "nloc": 20, "token_counts": 122, "n_ast_nodes": 196, "n_identifiers": 18, "random_cut": "def _join_or_get_room(self, room_id_or_alias):\n \n rooms = self._client.get_rooms()\n if room_id_or_alias in rooms:\n _LOGGER.debug(\"Already in room %s\", room_id_or_alias)\n return rooms[room_id_or_alias]\n\n for r", "d_id": 92712, "documentation": { "docstring": "Join a room or get it, if we are already in the room.\n\n We can't just always call join_room(), since that seems to crash\n the client if we're already in the room.\n ", "n_words": 32, "vocab_size": 26, "n_whitespaces": 53, "language": "en" } }, { "id": 197067, "commit_id": "e0dc14eca132f37c5f49369eb4051eae37c9b119", "repo": "sympy", "path": "sympy/solvers/solveset.py", "file_name": "solveset.py", "fun_name": "_is_function_class_equation", "commit_message": "Refactored import ordering in functions", "code": "def _is_function_class_equation(func_class, f, symbol):\n \n if f.is_Mul or f.is_Add:\n return all(_is_function_class_equation(func_class, arg, symbol)\n for arg in f.args)\n\n if f.is_Pow:\n if not f.exp.has(symbol):\n return _is_function_class_equation(func_class, f.base, symbol)\n else:\n return False\n\n if not f.has(symbol):\n return True\n\n if isinstance(f, func_class):\n try:\n g = Poly(f.args[0], symbol)\n return g.degree() <= 1\n except PolynomialError:\n return False\n else:\n return False\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 192, "n_words": 52, "vocab_size": 35, "complexity": 9, "nloc": 19, "token_counts": 119, "n_ast_nodes": 185, "n_identifiers": 18, "random_cut": "def _is_function_class_equation(func_class, f, symbol):\n \n if f.is_Mul or f.is_A", "d_id": 48321, "documentation": { "docstring": " Tests whether the equation is an equation of the given function class.\n\n The given equation belongs to the given function class if it is\n comprised of functions of the function class which are multiplied by\n or added to expressions independent of the symbol. 
In addition, the\n arguments of all such functions must be linear in the symbol as well.\n\n Examples\n ========\n\n >>> from sympy.solvers.solveset import _is_function_class_equation\n >>> from sympy import tan, sin, tanh, sinh, exp\n >>> from sympy.abc import x\n >>> from sympy.functions.elementary.trigonometric import TrigonometricFunction\n >>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction\n >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)\n False\n >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)\n True\n >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)\n False\n >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)\n True\n >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)\n True\n ", "n_words": 123, "vocab_size": 73, "n_whitespaces": 190, "language": "en" } }, { "id": 258647, "commit_id": "8991c3d7870df692fe01510e0fe6de62ea550cad", "repo": "scikit-learn", "path": "sklearn/isotonic.py", "file_name": "isotonic.py", "fun_name": "get_feature_names_out", "commit_message": "ENH Adds get_feature_names to isotonic module (#22249)", "code": "def get_feature_names_out(self, input_features=None):\n \n class_name = self.__class__.__name__.lower()\n return np.asarray([f\"{class_name}0\"], dtype=object)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 35, "n_ast_nodes": 61, "n_identifiers": 11, "random_cut": "def get_feature_names_out(self, input_features=None):\n \n class_name = self.__class__.__name__.lower()\n return np.as", "d_id": 75349, "documentation": { "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Ignored.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n An ndarray with one string i.e. 
[\"isotonicregression0\"].\n ", "n_words": 32, "vocab_size": 28, "n_whitespaces": 103, "language": "en" } }, { "id": 203847, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/db/backends/postgis/operations.py", "file_name": "operations.py", "fun_name": "get_distance", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_distance(self, f, dist_val, lookup_type):\n \n # Getting the distance parameter\n value = dist_val[0]\n\n # Shorthand boolean flags.\n geodetic = f.geodetic(self.connection)\n geography = f.geography\n\n if isinstance(value, Distance):\n if geography:\n dist_param = value.m\n elif geodetic:\n if lookup_type == \"dwithin\":\n raise ValueError(\n \"Only numeric values of degree units are \"\n \"allowed on geographic DWithin queries.\"\n )\n dist_param = value.m\n else:\n dist_param = getattr(\n value, Distance.unit_attname(f.units_name(self.connection))\n )\n else:\n # Assuming the distance is in the units of the field.\n dist_param = value\n\n return [dist_param]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 376, "n_words": 80, "vocab_size": 57, "complexity": 5, "nloc": 21, "token_counts": 99, "n_ast_nodes": 167, "n_identifiers": 17, "random_cut": "def get_distance(self, f, dist_val, lookup_type):\n \n # Getting ", "d_id": 50555, "documentation": { "docstring": "\n Retrieve the distance parameters for the given geometry field,\n distance lookup value, and the distance lookup type.\n\n This is the most complex implementation of the spatial backends due to\n what is supported on geodetic geometry columns vs. what's available on\n projected geometry columns. In addition, it has to take into account\n the geography column type.\n ", "n_words": 55, "vocab_size": 41, "n_whitespaces": 106, "language": "en" } }, { "id": 72088, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_page_chooser.py", "file_name": "test_page_chooser.py", "fun_name": "test_type_eventpage_two_indexes", "commit_message": "Reformat with black", "code": "def test_type_eventpage_two_indexes(self):\n \n self.make_event_section(\"Other events\")\n self.assertEqual(\n self.get_best_root({\"page_type\": \"tests.EventPage\"}), self.home_page\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 48, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 5, "token_counts": 31, "n_ast_nodes": 58, "n_identifiers": 6, "random_cut": "def test_type_eventpage_two_indexes(self):\n \n self.make_event_section(\"Other events\")\n self.assertEqual(\n self.get_best_root({\"page_type\": \"tests.EventPage\"}), se", "d_id": 15823, "documentation": { "docstring": "\n The chooser should start at the home page, as there are two\n EventIndexes with EventPages.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 60979, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/req/req_file.py", "file_name": "req_file.py", "fun_name": "parse", "commit_message": "upd; format", "code": "def parse(self, filename, constraint):\n # type: (str, bool) -> Iterator[ParsedLine]\n \n yield from self._parse_and_recurse(filename, constraint)\n", "url": 
"https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 5, "random_cut": "def parse(self, filename, constraint):\n # type: (", "d_id": 12368, "documentation": { "docstring": "Parse a given file, yielding parsed lines.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 273358, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/preprocessing/preprocessing_utils.py", "file_name": "preprocessing_utils.py", "fun_name": "sparse_bincount", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):\n \n result = tf.sparse.bincount(\n inputs,\n weights=count_weights,\n minlength=depth,\n maxlength=depth,\n axis=-1,\n binary_output=binary_output,\n )\n result = tf.cast(result, dtype)\n if inputs.shape.rank == 1:\n output_shape = (depth,)\n else:\n batch_size = tf.shape(result)[0]\n output_shape = (batch_size, depth)\n result = tf.SparseTensor(\n indices=result.indices, values=result.values, dense_shape=output_shape\n )\n return result\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 141, "n_words": 44, "vocab_size": 34, "complexity": 2, "nloc": 19, "token_counts": 117, "n_ast_nodes": 172, "n_identifiers": 23, "random_cut": "def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):\n \n result = tf.sparse.bincount(\n inputs,\n weights=count_weights,\n minlength=depth,\n maxlength=depth,\n axis=-1,\n binary_output=binary_output,\n )\n result = tf.cast(result, dtype)\n if inputs.shape.rank == 1:\n output_shape = (depth,)\n else:\n batch_size = tf.shape(result)[0]\n output_shape = (batch_size, depth)\n result = tf", "d_id": 81114, "documentation": { "docstring": "Apply binary or count encoding to an input and return a sparse tensor.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 100939, "commit_id": "bad5025aea1adb9126580e14e064e6c99089243d", "repo": "faceswap", "path": "lib/serializer.py", "file_name": "serializer.py", "fun_name": "unmarshal", "commit_message": "Core updates\n - Change loss loading mechanism\n - Autosize tooltips based on content size\n - Random linting + code modernisation", "code": "def unmarshal(self, serialized_data):\n \n logger.debug(\"data type: %s\", type(serialized_data))\n try:\n retval = self._unmarshal(serialized_data)\n except Exception as err:\n msg = f\"Error unserializing data for type {type(serialized_data)}: {str(err)}\"\n raise FaceswapError(msg) from err\n logger.debug(\"returned data type: %s\", type(retval))\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 110, "n_words": 35, "vocab_size": 30, "complexity": 2, "nloc": 9, "token_counts": 58, "n_ast_nodes": 117, "n_identifiers": 13, "random_cut": "def unmarshal(self, serialized_data):\n \n logger.debug(\"data type: %s\", type(serialized_data))\n try:\n retval = self._unmarshal(serialized_data)\n except Exception as err:\n msg", "d_id": 20386, "documentation": { "docstring": " Unserialize data to its original object type\n\n Parameters\n 
----------\n serialized_data: varies\n Data in serializer format that is to be unmarshalled to its original object\n\n Returns\n -------\n data: varies\n The data in a python object format\n\n Example\n ------\n >>> serializer = get_serializer('json')\n >>> json_data = \n >>> data = serializer.unmarshal(json_data)\n ", "n_words": 50, "vocab_size": 34, "n_whitespaces": 157, "language": "en" } }, { "id": 178914, "commit_id": "abfb99b0a05dd76d2ecc6ebc20732a271857c6c8", "repo": "Nuitka", "path": "nuitka/freezer/IncludedDataFiles.py", "file_name": "IncludedDataFiles.py", "fun_name": "copyDataFiles", "commit_message": "Plugins: Massive cleanup of data file handling\n\n* Move data file handling out of standalone only, allowing support\n for other modes as well.\n\n* Attach logger and tags to data file objects.", "code": "def copyDataFiles():\n \n\n for included_datafile in getIncludedDataFiles():\n # TODO: directories should be resolved to files.\n if (\n not isinstance(included_datafile, (IncludedDataFile))\n or included_datafile.needsCopy()\n ):\n _handleDataFile(\n included_datafile,\n )\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 111, "n_words": 25, "vocab_size": 25, "complexity": 4, "nloc": 9, "token_counts": 36, "n_ast_nodes": 62, "n_identifiers": 7, "random_cut": "def copyDataFiles():\n \n\n for included_datafile in getIncludedDataFiles():\n # TODO: directories should be resolved", "d_id": 42859, "documentation": { "docstring": "Copy the data files needed for standalone distribution.\n\n Notes:\n This is for data files only, not DLLs or even extension modules,\n those must be registered as entry points, and would not go through\n necessary handling if provided like this.\n ", "n_words": 39, "vocab_size": 35, "n_whitespaces": 66, "language": "en" } }, { "id": 251426, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/optmanager.py", "file_name": "optmanager.py", "fun_name": "toggler", "commit_message": "make it black!", "code": "def toggler(self, attr):\n \n if attr not in self._options:\n raise KeyError(\"No such option: %s\" % attr)\n o = self._options[attr]\n if o.typespec != bool:\n raise ValueError(\"Toggler can only be used with boolean options\")\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 81, "n_words": 31, "vocab_size": 29, "complexity": 3, "nloc": 8, "token_counts": 47, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def toggler(self, attr):\n \n if attr not in self._options:\n raise KeyErr", "d_id": 73733, "documentation": { "docstring": "\n Generate a toggler for a boolean attribute. 
This returns a callable\n that takes no arguments.\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 37, "language": "en" } }, { "id": 55127, "commit_id": "05b2cf58e0610cedcea27e4d8cb96ad95307a068", "repo": "prefect", "path": "src/prefect/testing/cli.py", "file_name": "cli.py", "fun_name": "disable_terminal_wrapping", "commit_message": "Continue moving objects to sensible locations", "code": "def disable_terminal_wrapping(monkeypatch):\n \n monkeypatch.setattr(\n \"prefect.cli.profile.console\", rich.console.Console(soft_wrap=True)\n )\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 4, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 7, "random_cut": "def disable_terminal_wrapping(monkeypatch):\n \n monkeypatch.setattr(\n \"prefect.cli.profile.console\", rich.console.Console(soft_wrap=True)\n )\n", "d_id": 11221, "documentation": { "docstring": "\n Sometimes, line wrapping makes it hard to make deterministic assertions about the\n output of a CLI command. Wrapping can be disabled by using this fixture.\n ", "n_words": 25, "vocab_size": 25, "n_whitespaces": 35, "language": "en" } }, { "id": 196160, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/permutations.py", "file_name": "permutations.py", "fun_name": "__add__", "commit_message": "Updated import locations", "code": "def __add__(self, other):\n \n rank = (self.rank() + other) % self.cardinality\n rv = self.unrank_lex(self.size, rank)\n rv._rank = rank\n return rv\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 19, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 68, "n_identifiers": 9, "random_cut": "def __add__(self, other):\n \n rank = (self.rank() + other) % self.cardinality\n rv = self.unrank_lex(self.size, rank)\n rv._rank = rank\n", "d_id": 47660, "documentation": { "docstring": "Return permutation that is other higher in rank than self.\n\n The rank is the lexicographical rank, with the identity permutation\n having rank of 0.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> I = Permutation([0, 1, 2, 3])\n >>> a = Permutation([2, 1, 3, 0])\n >>> I + a.rank() == a\n True\n\n See Also\n ========\n\n __sub__, inversion_vector\n\n ", "n_words": 57, "vocab_size": 44, "n_whitespaces": 148, "language": "en" } }, { "id": 248031, "commit_id": "73d8ded0b030a81e828c07bb134c08db67569e5d", "repo": "synapse", "path": "tests/handlers/test_presence.py", "file_name": "test_presence.py", "fun_name": "test_set_presence_from_syncing_not_set", "commit_message": "Prevent a sync request from removing a user's busy presence status (#12213)\n\nIn trying to use the MSC3026 busy presence status, the user's status\r\nwould be set back to 'online' next time they synced. 
This change makes\r\nit so that syncing does not affect a user's presence status if it\r\nis currently set to 'busy': it must be removed through the presence\r\nAPI.\r\n\r\nThe MSC defers to implementations on the behaviour of busy presence,\r\nso this ought to remain compatible with the MSC.", "code": "def test_set_presence_from_syncing_not_set(self):\n \n user_id = \"@test:server\"\n status_msg = \"I'm here!\"\n\n self._set_presencestate_with_status_msg(\n user_id, PresenceState.UNAVAILABLE, status_msg\n )\n\n self.get_success(\n self.presence_handler.user_syncing(user_id, False, PresenceState.ONLINE)\n )\n\n state = self.get_success(\n self.presence_handler.get_state(UserID.from_string(user_id))\n )\n # we should still be unavailable\n self.assertEqual(state.state, PresenceState.UNAVAILABLE)\n # and status message should still be the same\n self.assertEqual(state.status_msg, status_msg)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 167, "n_words": 43, "vocab_size": 33, "complexity": 1, "nloc": 14, "token_counts": 85, "n_ast_nodes": 139, "n_identifiers": 16, "random_cut": "def test_set_presence_from_syncing_not_set(self):\n \n user_id = \"@test:server\"\n status_msg = \"I'm here!\"\n\n self._set_presencestate_with", "d_id": 72059, "documentation": { "docstring": "Test that presence is not set by syncing if affect_presence is false", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 248209, "commit_id": "051a1c3f220938a0ea1a5b328c268bdb3d1ad592", "repo": "synapse", "path": "tests/events/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_stringy_integers", "commit_message": "Convert stringy power levels to integers on room upgrade (#12657)", "code": "def test_stringy_integers(self):\n \n input = {\n \"a\": \"100\",\n \"b\": {\n \"foo\": 99,\n \"bar\": \"-98\",\n },\n \"d\": \"0999\",\n }\n output = copy_and_fixup_power_levels_contents(input)\n expected_output = {\n \"a\": 100,\n \"b\": {\n \"foo\": 99,\n \"bar\": -98,\n },\n \"d\": 999,\n }\n\n self.assertEqual(output, expected_output)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 234, "n_words": 37, "vocab_size": 24, "complexity": 1, "nloc": 19, "token_counts": 71, "n_ast_nodes": 131, "n_identifiers": 7, "random_cut": "def test_stringy_integers(self):\n \n input = {\n \"a\": \"100\",\n \"b\": {\n \"foo\": 99,\n \"", "d_id": 72155, "documentation": { "docstring": "String representations of decimal integers are converted to integers.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 64785, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/bank_reconciliation_tool/bank_reconciliation_tool.py", "file_name": "bank_reconciliation_tool.py", "fun_name": "get_ec_matching_query", "commit_message": "style: format code with black", "code": "def get_ec_matching_query(bank_account, company, amount_condition):\n\t# get matching Expense Claim query\n\tmode_of_payments = [\n\t\tx[\"parent\"]\n\t\tfor x in frappe.db.get_all(\n\t\t\t\"Mode of Payment Account\", filters={\"default_account\": bank_account}, fields=[\"parent\"]\n\t\t)\n\t]\n\tmode_of_payments = \"('\" + \"', '\".join(mode_of_payments) + \"' )\"\n\tcompany_currency = get_company_currency(company)\n\treturn f\n", "url": 
"https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 30, "n_words": 41, "vocab_size": 37, "complexity": 2, "nloc": 31, "token_counts": 63, "n_ast_nodes": 121, "n_identifiers": 14, "random_cut": "def get_ec_matching_query(bank_account, company, amount_condition):\n\t# get matching Expense Claim query\n\tmod", "d_id": 13722, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\t( CASE WHEN employee = %(party)s THEN 1 ELSE 0 END\n\t\t\t+ 1 ) AS rank,\n\t\t\t'Expense Claim' as doctype,\n\t\t\tname,\n\t\t\ttotal_sanctioned_amount as paid_amount,\n\t\t\t'' as reference_no,\n\t\t\t'' as reference_date,\n\t\t\temployee as party,\n\t\t\t'Employee' as party_type,\n\t\t\tposting_date,\n\t\t\t'{company_currency}' as currency\n\t\tFROM\n\t\t\t`tabExpense Claim`\n\t\tWHERE\n\t\t\ttotal_sanctioned_amount {amount_condition} %(amount)s\n\t\t\tAND docstatus = 1\n\t\t\tAND is_paid = 1\n\t\t\tAND ifnull(clearance_date, '') = \"\"\n\t\t\tAND mode_of_payment in {mode_of_payments}\n\t", "n_words": 65, "vocab_size": 47, "n_whitespaces": 45, "language": "en" } }, { "id": 154602, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/partitioning/partition_manager.py", "file_name": "partition_manager.py", "fun_name": "run_exec_plan", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def run_exec_plan(cls, plan, index_cols, dtypes, columns):\n \n omniSession = DbWorker()\n\n # First step is to make sure all partitions are in HDK.\n frames = plan.collect_frames()\n for frame in frames:\n if frame._partitions.size != 1:\n raise NotImplementedError(\n \"HdkOnNative engine doesn't suport partitioned frames\"\n )\n for p in frame._partitions.flatten():\n if p.frame_id is None:\n obj = p.get()\n if isinstance(obj, (pandas.DataFrame, pandas.Series)):\n p.frame_id = omniSession.import_pandas_dataframe(obj)\n else:\n assert isinstance(obj, pyarrow.Table)\n p.frame_id = omniSession.import_arrow_table(obj)\n\n calcite_plan = CalciteBuilder().build(plan)\n calcite_json = CalciteSerializer().serialize(calcite_plan)\n\n cmd_prefix = \"execute relalg \"\n\n if DoUseCalcite.get():\n cmd_prefix = \"execute calcite \"\n\n at = omniSession.executeRA(cmd_prefix + calcite_json)\n\n res = np.empty((1, 1), dtype=np.dtype(object))\n # workaround for https://github.com/modin-project/modin/issues/1851\n if DoUseCalcite.get():\n at = at.rename_columns([\"F_\" + str(c) for c in columns])\n res[0][0] = cls._partition_class.put_arrow(at)\n\n return res\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 453, "n_words": 114, "vocab_size": 80, "complexity": 9, "nloc": 27, "token_counts": 225, "n_ast_nodes": 364, "n_identifiers": 47, "random_cut": "def run_exec_plan(cls, plan, index_cols, dtypes, columns):\n \n omniSession = DbWorker()\n\n # First step is to make sure all partitions are in", "d_id": 36108, "documentation": { "docstring": "\n Run execution plan in HDK storage format to materialize frame.\n\n Parameters\n ----------\n plan : DFAlgNode\n A root of an execution plan tree.\n index_cols : list of str\n A list of index columns.\n dtypes : pandas.Index\n Column data types.\n columns : list of str\n A frame column names.\n\n Returns\n -------\n np.array\n Created frame's 
partitions.\n ", "n_words": 53, "vocab_size": 39, "n_whitespaces": 186, "language": "en" } }, { "id": 121448, "commit_id": "3f0619599499fc0751cd6181c04d50245ef5dcce", "repo": "jax", "path": "jax/_src/dtypes.py", "file_name": "dtypes.py", "fun_name": "to_numeric_dtype", "commit_message": "jax.numpy: improve support for boolean inputs", "code": "def to_numeric_dtype(dtype):\n \n dtype = np.dtype(dtype)\n return np.dtype('int32') if dtype == np.dtype('bool') else dtype\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 16, "n_words": 13, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 32, "n_ast_nodes": 57, "n_identifiers": 3, "random_cut": "def to_numeric_dtype(dtype):\n \n dtype = np.dt", "d_id": 27062, "documentation": { "docstring": "Promotes a dtype into an numeric dtype, if it is not already one.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 47501, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_do_schedule_max_active_runs_task_removed", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_do_schedule_max_active_runs_task_removed(self, session, dag_maker):\n \n with dag_maker(\n dag_id='test_do_schedule_max_active_runs_task_removed',\n start_date=DEFAULT_DATE,\n schedule_interval='@once',\n max_active_runs=1,\n session=session,\n ):\n # Can't use EmptyOperator as that goes straight to success\n BashOperator(task_id='dummy1', bash_command='true')\n\n run1 = dag_maker.create_dagrun(\n run_type=DagRunType.SCHEDULED,\n execution_date=DEFAULT_DATE + timedelta(hours=1),\n state=State.RUNNING,\n )\n\n self.scheduler_job = SchedulerJob(subdir=os.devnull)\n self.scheduler_job.executor = MockExecutor(do_update=False)\n self.scheduler_job.processor_agent = mock.MagicMock(spec=DagFileProcessorAgent)\n\n num_queued = self.scheduler_job._do_scheduling(session)\n assert num_queued == 1\n\n session.flush()\n ti = run1.task_instances[0]\n ti.refresh_from_db(session=session)\n assert ti.state == State.QUEUED\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 266, "n_words": 58, "vocab_size": 50, "complexity": 1, "nloc": 23, "token_counts": 156, "n_ast_nodes": 249, "n_identifiers": 43, "random_cut": "def test_do_schedule_max_active_runs_task_removed(self, session, dag_maker):\n \n with dag_maker(\n dag_id='test_do_schedule_max_active_runs_task_removed',\n start_date=DEFAULT_DATE,\n schedule_interval='@once',\n max_active_runs=1,\n session=session,\n ):\n # Can't use EmptyOperator as that goes straight to success\n BashOperator(task_id='dummy1', bash_command='true')\n\n run1 = dag_maker.create_dagrun(\n run_type=DagRunType.SCHEDULED,\n execution_date=DEFAULT_DATE + timedelta(hours=1),\n state=State.RUNNING,\n )\n\n self.scheduler_job = Schedule", "d_id": 9133, "documentation": { "docstring": "Test that tasks in removed state don't count as actively running.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 161097, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg_extractor/encoder/encoder.py", "file_name": "encoder.py", "fun_name": "forward", "commit_message": 
"Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update __init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def forward(self, xs, masks):\n \n if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n\n xs, masks = self.encoders(xs, masks)\n if isinstance(xs, tuple):\n xs = xs[0]\n\n if self.normalize_before:\n xs = self.after_norm(xs)\n return xs, masks\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 129, "n_words": 36, "vocab_size": 22, "complexity": 4, "nloc": 11, "token_counts": 89, "n_ast_nodes": 138, "n_identifiers": 12, "random_cut": "def forward(self, xs, masks):\n \n if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n\n xs, masks = self.encoders(xs, masks)\n if isinstance(xs, tuple):\n xs = xs[0]\n\n if self.normalize_before:\n ", "d_id": 38908, "documentation": { "docstring": "Encode input sequence.\n\n :param torch.Tensor xs: input tensor\n :param torch.Tensor masks: input mask\n :return: position embedded tensor and mask\n :rtype Tuple[torch.Tensor, torch.Tensor]:\n ", "n_words": 22, "vocab_size": 16, "n_whitespaces": 57, "language": "en" } }, { "id": 281630, "commit_id": "9e671aeba98dacc69ecbbfec1f087aca3b139ee7", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/parent_classes.py", "file_name": "parent_classes.py", "fun_name": "save_class", "commit_message": "Remember Contexts (#1187)\n\n* Refacotred classes\r\n\r\n* Handling for new instance desired\r\n\r\n* Added feature flag\r\n\r\n* Converted all menu calls", "code": "def save_class(self):\n \n if gtff.REMEMBER_CONTEXTS:\n controllers[self.PATH] = self\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 3, "token_counts": 19, "n_ast_nodes": 33, "n_identifiers": 6, "random_cut": "def save_class(self):\n \n if gtff.REMEMBER_CONTEXTS:\n ", "d_id": 83921, "documentation": { "docstring": "Saves the current instance of the class to be loaded later", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 215291, "commit_id": "d4e6111086ff713eb6609dc6c98cec98aded2564", "repo": "salt", "path": "salt/transport/zeromq.py", "file_name": "zeromq.py", "fun_name": "_decode_messages", "commit_message": "Refactor into transports and channels", "code": "def _decode_messages(self, messages):\n \n messages_len = len(messages)\n # if it was one message, then its old style\n if messages_len == 1:\n payload = salt.payload.loads(messages[0])\n # 2 includes a header which says who should do it\n elif messages_len == 2:\n message_target = salt.utils.stringutils.to_str(messages[0])\n if (\n self.opts.get(\"__role\") != \"syndic\"\n and message_target not in (\"broadcast\", self.hexid)\n ) or (\n self.opts.get(\"__role\") == \"syndic\"\n and message_target not in (\"broadcast\", \"syndic\")\n ):\n log.debug(\"Publish received for not this minion: %s\", message_target)\n raise salt.ext.tornado.gen.Return(None)\n payload = salt.payload.loads(messages[1])\n else:\n raise Exception(\n \"Invalid number of 
messages ({}) in zeromq pubmessage from master\".format(\n len(messages_len)\n )\n )\n # Yield control back to the caller. When the payload has been decoded, assign\n # the decoded payload to 'ret' and resume operation\n raise salt.ext.tornado.gen.Return(payload)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 416, "n_words": 119, "vocab_size": 84, "complexity": 7, "nloc": 23, "token_counts": 161, "n_ast_nodes": 272, "n_identifiers": 23, "random_cut": "def _decode_messages(self, messages):\n \n messages_len = len(messages)\n # if it was one message, then its old style\n if messages_len == 1:\n payload = salt.payload.loads(messages[0])\n # 2 includes a header which says who should do it\n elif messages_len == 2:\n message_target = salt.utils.stringutils.to_str(messages[0])\n if (\n self.opts.get(\"__role\") != \"syndic\"\n and message_target not in (\"broadcast\", self.hexid)\n ) or (\n self.opts.get(\"__role\") == \"syndic\"\n and message_target not in (\"broadcast\", \"syndic\")\n ):\n log.debug(\"Publish received for not this minion: %s\", message_target)\n raise salt.ext.tornado.gen.Return(None)\n payload = salt.payload.loads(messages[1])\n else:\n raise Exception(\n", "d_id": 53912, "documentation": { "docstring": "\n Take the zmq messages, decrypt/decode them into a payload\n\n :param list messages: A list of messages to be decoded\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 137593, "commit_id": "98fef7732852cdb3e9377cd87c1ee1085b894928", "repo": "ray", "path": "python/ray/tests/test_runtime_env.py", "file_name": "test_runtime_env.py", "fun_name": "test_get_release_wheel_url", "commit_message": "[runtime env] Support python 3.10 for runtime_env conda (#30970)\n\nSigned-off-by: Archit Kulkarni \r\n\r\nconda environments are isolated, so when runtime_env sets up a conda environment it must download the Ray wheel into the conda environment. 
It must download the wheel that matches the current Python and Ray version running, otherwise there will be incompatibility issues between the workers that use this runtime_env and the other workers and Ray processes.\r\n\r\nThis PR updates the wheel name format logic to support Python 3.10.", "code": "def test_get_release_wheel_url():\n \n # This should be a commit for which wheels have already been built for\n # all platforms and python versions at\n # `s3://ray-wheels/releases/2.2.0//`.\n test_commits = {\"2.2.0\": \"b6af0887ee5f2e460202133791ad941a41f15beb\"}\n for sys_platform in [\"darwin\", \"linux\", \"win32\"]:\n for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:\n for version, commit in test_commits.items():\n if sys_platform == \"win32\" and py_version == (3, 6):\n # Windows wheels are not built for py3.6 anymore\n continue\n url = get_release_wheel_url(commit, sys_platform, version, py_version)\n assert requests.head(url).status_code == 200, url\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 193, "n_words": 74, "vocab_size": 53, "complexity": 6, "nloc": 9, "token_counts": 80, "n_ast_nodes": 136, "n_identifiers": 14, "random_cut": "def test_get_release_wheel_url():\n \n # This should be a commit for which wheels have al", "d_id": 31197, "documentation": { "docstring": "Test the code that generates the filenames of the `release` branch wheels.", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 249111, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_media.py", "file_name": "test_media.py", "fun_name": "test_keep_media_by_date", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_keep_media_by_date(self) -> None:\n \n\n # timestamp before upload\n now_ms = self.clock.time_msec()\n server_and_media_id = self._create_media()\n\n self._access_media(server_and_media_id)\n\n channel = self.make_request(\n \"POST\",\n self.url + \"?before_ts=\" + str(now_ms),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(0, channel.json_body[\"total\"])\n\n self._access_media(server_and_media_id)\n\n # timestamp after upload\n now_ms = self.clock.time_msec()\n channel = self.make_request(\n \"POST\",\n self.url + \"?before_ts=\" + str(now_ms),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(1, channel.json_body[\"total\"])\n self.assertEqual(\n server_and_media_id.split(\"/\")[1],\n channel.json_body[\"deleted_media\"][0],\n )\n\n self._access_media(server_and_media_id, False)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 282, "n_words": 61, "vocab_size": 35, "complexity": 1, "nloc": 28, "token_counts": 188, "n_ast_nodes": 304, "n_identifiers": 19, "random_cut": "def test_keep_media_by_date(self) -> None:\n \n\n # timestamp before upload\n now_ms = self.clock.time_msec()\n server_and_media_id = self._create_media()\n\n self._access_media(server_and_media_id)\n\n channel = self.make_request(\n \"POST\",\n self.url + \"?before_ts=\" + str(now_ms),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(0, channel.json_body[\"total\"])\n\n 
self._access_media(server_and_media_id)\n\n # timestamp after upload\n now_ms = sel", "d_id": 72618, "documentation": { "docstring": "\n Tests that media is not deleted if it is newer than `before_ts`\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 259885, "commit_id": "a47d569e670fd4102af37c3165c9b1ddf6fd3005", "repo": "scikit-learn", "path": "sklearn/datasets/tests/test_openml.py", "file_name": "test_openml.py", "fun_name": "test_fetch_openml_requires_pandas_in_future", "commit_message": "ENH improve ARFF parser using pandas (#21938)\n\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Adrin Jalali ", "code": "def test_fetch_openml_requires_pandas_in_future(monkeypatch):\n \n params = {\"as_frame\": False, \"parser\": \"auto\"}\n data_id = 1119\n try:\n check_pandas_support(\"test_fetch_openml_requires_pandas\")\n except ImportError:\n _monkey_patch_webbased_functions(monkeypatch, data_id, True)\n warn_msg = (\n \"From version 1.4, `parser='auto'` with `as_frame=False` will use pandas\"\n )\n with pytest.warns(FutureWarning, match=warn_msg):\n fetch_openml(data_id=data_id, **params)\n else:\n raise SkipTest(\"This test requires pandas to not be installed.\")\n\n\n@pytest.mark.filterwarnings(\"ignore:Version 1 of dataset Australian is inactive\")\n# TODO(1.4): remove this filterwarning decorator for `parser`\n@pytest.mark.filterwarnings(\"ignore:The default value of `parser` will change\")\n@pytest.mark.parametrize(\n \"params, err_msg\",\n [\n (\n {\"parser\": \"pandas\"},\n \"Sparse ARFF datasets cannot be loaded with parser='pandas'\",\n ),\n (\n {\"as_frame\": True},\n \"Sparse ARFF datasets cannot be loaded with as_frame=True.\",\n ),\n (\n {\"parser\": \"pandas\", \"as_frame\": True},\n \"Sparse ARFF datasets cannot be loaded with as_frame=True.\",\n ),\n ],\n)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.filterwarnings(\"ignore:Version 1 of dataset Australian is inactive\")\n# TODO(1.4): remove this filterwarning decorator for `parser`\n@pytest.mark.filterwarnings(\"ignore:The default value of `parser` will change\")\n@pytest.mark.parametrize(\n \"params, err_msg\",\n [\n (\n {\"parser\": \"pandas\"},\n \"Sparse ARFF datasets cannot be loaded with parser='pandas'\",\n ),\n (\n {\"as_frame\": True},\n \"Sparse ARFF datasets cannot be loaded with as_frame=True.\",\n ),\n (\n {\"parser\": \"pandas\", \"as_frame\": True},\n \"Sparse ARFF datasets cannot be loaded with as_frame=True.\",\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 306, "n_words": 112, "vocab_size": 80, "complexity": 2, "nloc": 14, "token_counts": 70, "n_ast_nodes": 247, "n_identifiers": 17, "random_cut": "def test_fetch_openml_requires_pandas_in_future(monkeypatch):\n \n params = {\"as_frame\": False, \"parser\": \"auto\"}\n data_id = 1119\n try:\n check_pandas_support(\"test_fetch_openml_requires_pandas\")\n except ImportError:\n _monkey_patch_webbased_functions(monk", "d_id": 75970, "documentation": { "docstring": "Check that we raise a warning that pandas will be required in the future.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 22041, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/adapters.py", "file_name": "adapters.py", "fun_name": "get_connection", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def get_connection(self, url, proxies=None):\n \n proxy = select_proxy(url, proxies)\n\n if proxy:\n proxy = prepend_scheme_if_needed(proxy, \"http\")\n proxy_url = parse_url(proxy)\n if not proxy_url.host:\n raise InvalidProxyURL(\n \"Please check proxy URL. It is malformed \"\n \"and could be missing the host.\"\n )\n proxy_manager = self.proxy_manager_for(proxy)\n conn = proxy_manager.connection_from_url(url)\n else:\n # Only scheme should be lower case\n parsed = urlparse(url)\n url = parsed.geturl()\n conn = self.poolmanager.connection_from_url(url)\n\n return conn\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 264, "n_words": 62, "vocab_size": 49, "complexity": 3, "nloc": 17, "token_counts": 92, "n_ast_nodes": 157, "n_identifiers": 19, "random_cut": "def get_connection(self, url, proxies=None):\n \n proxy = select_proxy(url, proxies)\n\n if proxy:\n proxy = prepend_scheme_if_needed(proxy, \"http\")\n proxy_url = parse_url(proxy)\n if not proxy_url.host:\n raise InvalidProxyURL(\n \"Please check proxy URL. It is malformed \"\n \"and could be missing the host.\"\n ", "d_id": 4130, "documentation": { "docstring": "Returns a urllib3 connection for the given URL. This should not be\n called from user code, and is only exposed for use when subclassing the\n :class:`HTTPAdapter `.\n\n :param url: The URL to connect to.\n :param proxies: (optional) A Requests-style dictionary of proxies used on this request.\n :rtype: urllib3.ConnectionPool\n ", "n_words": 48, "vocab_size": 45, "n_whitespaces": 90, "language": "en" } }, { "id": 151923, "commit_id": "c2936d551b8ad6ccf7b57e2ac6cb55d8550622cf", "repo": "freqtrade", "path": "freqtrade/templates/FreqaiExampleStrategy.py", "file_name": "FreqaiExampleStrategy.py", "fun_name": "feature_engineering_expand_all", "commit_message": "improve doc, update test strats, change function names", "code": "def feature_engineering_expand_all(self, dataframe, period, **kwargs):\n \n\n dataframe[\"%-rsi-period\"] = ta.RSI(dataframe, timeperiod=period)\n dataframe[\"%-mfi-period\"] = ta.MFI(dataframe, timeperiod=period)\n dataframe[\"%-adx-period\"] = ta.ADX(dataframe, timeperiod=period)\n dataframe[\"%-sma-period\"] = ta.SMA(dataframe, timeperiod=period)\n dataframe[\"%-ema-period\"] = ta.EMA(dataframe, timeperiod=period)\n\n bollinger = qtpylib.bollinger_bands(\n qtpylib.typical_price(dataframe), window=period, stds=2.2\n )\n dataframe[\"bb_lowerband-period\"] = bollinger[\"lower\"]\n dataframe[\"bb_middleband-period\"] = bollinger[\"mid\"]\n dataframe[\"bb_upperband-period\"] = bollinger[\"upper\"]\n\n dataframe[\"%-bb_width-period\"] = (\n dataframe[\"bb_upperband-period\"]\n - dataframe[\"bb_lowerband-period\"]\n ) / dataframe[\"bb_middleband-period\"]\n dataframe[\"%-close-bb_lower-period\"] = (\n dataframe[\"close\"] / dataframe[\"bb_lowerband-period\"]\n )\n\n dataframe[\"%-roc-period\"] = ta.ROC(dataframe, timeperiod=period)\n\n dataframe[\"%-relative_volume-period\"] = (\n dataframe[\"volume\"] / dataframe[\"volume\"].rolling(period).mean()\n )\n\n return dataframe\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 258, "n_words": 70, "vocab_size": 42, "complexity": 1, "nloc": 24, "token_counts": 217, "n_ast_nodes": 361, "n_identifiers": 21, "random_cut": "def 
feature_engineering_expand_all(self, dataframe, period, **kwargs):\n \n\n dataframe[\"%-rsi-period\"] = ta.RSI(dataframe, timeperiod=period)\n dataframe[\"%-mfi-period\"] = ta.MFI(dataframe, timeperiod=period)\n dataframe[\"%-adx-period\"] = ta.ADX(dataframe, timeperiod=period)\n dataframe[\"%-sma-period\"] = ta.SMA(dataframe, timeperiod=period)\n dataframe[\"%-ema-period\"] = ta.EMA(dataframe, timeperiod=period)\n\n bollinger = qtpylib.bollinger_bands(\n qtpylib.typical_price(dataframe), window=period, stds=2.2\n )\n dataframe[\"bb_lowerband-period\"] = bollinger[\"lower\"]\n dataframe[\"bb_middleband-period\"] = bollinger[\"mid\"]\n dataframe[\"bb_upperband-period\"] = bollinger[\"upper\"]\n\n dataframe[\"%-bb_width-period\"] = (\n dataframe[\"bb_upperband-period\"]\n - dataframe[\"bb_lowerband-period\"]\n ) / dataframe[\"bb_middleband-period\"]\n dataframe[\"%-close-bb_lower-period\"] = (\n dataframe[\"close\"] / dataframe[\"bb_lowerband-period\"]\n )\n\n dataframe[\"%-roc-period\"] = ta.ROC(dataframe, timeperiod=period)\n\n dataframe[\"%-relative_volume-period\"] = (\n dataframe[\"volume\"] / dataframe[\"volume\"].rolling(period).mean()\n )\n\n return dataframe\n", "d_id": 35165, "documentation": { "docstring": "\n *Only functional with FreqAI enabled strategies*\n This function will automatically expand the defined features on the config defined\n `indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and\n `include_corr_pairs`. In other words, a single feature defined in this function\n will automatically expand to a total of\n `indicator_periods_candles` * `include_timeframes` * `include_shifted_candles` *\n `include_corr_pairs` numbers of features added to the model.\n\n All features must be prepended with `%` to be recognized by FreqAI internals.\n\n More details on how these config defined parameters accelerate feature engineering\n in the documentation at:\n\n https://www.freqtrade.io/en/latest/freqai-parameter-table/#feature-parameters\n\n https://www.freqtrade.io/en/latest/freqai-feature-engineering/#defining-the-features\n\n :param df: strategy dataframe which will receive the features\n :param period: period of the indicator - usage example:\n dataframe[\"%-ema-period\"] = ta.EMA(dataframe, timeperiod=period)\n ", "n_words": 106, "vocab_size": 75, "n_whitespaces": 219, "language": "en" } }, { "id": 204521, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/handlers/asgi.py", "file_name": "asgi.py", "fun_name": "get_script_prefix", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_script_prefix(self, scope):\n \n if settings.FORCE_SCRIPT_NAME:\n return settings.FORCE_SCRIPT_NAME\n return scope.get(\"root_path\", \"\") or \"\"\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 44, "n_words": 12, "vocab_size": 11, "complexity": 3, "nloc": 4, "token_counts": 28, "n_ast_nodes": 51, "n_identifiers": 6, "random_cut": "def get_script_prefix(self, scope):\n \n if settings", "d_id": 50767, "documentation": { "docstring": "\n Return the script prefix to use from either the scope or a setting.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 55476, "commit_id": "11638691240b7595c0d02542af506a96d344ae8b", "repo": "prefect", "path": "tests/cli/test_storage_cli.py", "file_name": "test_storage_cli.py", "fun_name": "test_get_first_menu_and_fail", 
"commit_message": "Update tests", "code": "def test_get_first_menu_and_fail():\n \n part_one = f\n\n part_two = f\n command = [\"storage\", \"create\"]\n invoke_and_assert_in(\n command=command,\n desired_contents=(part_one, part_two),\n expected_code=1,\n user_input=f\"{INVALID_OPTION}\\n\",\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 19, "vocab_size": 16, "complexity": 1, "nloc": 23, "token_counts": 44, "n_ast_nodes": 82, "n_identifiers": 9, "random_cut": "def test_get_first_menu_and_fail():\n \n part_one = f\n\n part_two = f\n command = [\"storage\", \"create\"]\n invoke_and_assert_in(\n command=command,\n desired_contents=(part_one, part_two),\n e", "d_id": 11327, "documentation": { "docstring": "\n Make sure that our utility function is returning as expected\n \n Found the following storage types:\n 0) Azure Blob Storage\n Store data in an Azure blob storage container.\n 1) File Storage\n Store data as a file on local or remote file systems.\n 2) Google Cloud Storage\n Store data in a GCS bucket.\n 3) Local Storage\n Store data in a run's local file system.\n \n Select a storage type to create: 99999999\n Invalid selection {INVALID_OPTION}\n ", "n_words": 72, "vocab_size": 51, "n_whitespaces": 136, "language": "en" } }, { "id": 176544, "commit_id": "1af7d49d70869081e5cb64d17165652f1b26c57b", "repo": "networkx", "path": "networkx/algorithms/planarity.py", "file_name": "planarity.py", "fun_name": "check_planarity", "commit_message": "Improve documentation of PlanarEmbedding class (#5523)\n\n* Improve documentation of PlanarEmbedding\r\n\r\n* Fix type\r\n\r\n* Make suggested changes\r\n\r\n* rst formatting nits.\r\n\r\n* Update networkx/algorithms/planarity.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Run black for formatting\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult ", "code": "def check_planarity(G, counterexample=False):\n \n\n planarity_state = LRPlanarity(G)\n embedding = planarity_state.lr_planarity()\n if embedding is None:\n # graph is not planar\n if counterexample:\n return False, get_counterexample(G)\n else:\n return False, None\n else:\n # graph is planar\n return True, embedding\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 35, "vocab_size": 22, "complexity": 3, "nloc": 10, "token_counts": 50, "n_ast_nodes": 86, "n_identifiers": 8, "random_cut": "def check_planarity(G, counterexample=False):\n \n\n planarity_state = LRPlanarity(G)\n embedding = planarity_state.lr_planarity()\n if embedding is None:\n # graph is not planar\n if counterexample:\n return False, get_counterexample(G)\n else:\n return False, None\n else:\n ", "d_id": 41953, "documentation": { "docstring": "Check if a graph is planar and return a counterexample or an embedding.\n\n A graph is planar iff it can be drawn in a plane without\n any edge intersections.\n\n Parameters\n ----------\n G : NetworkX graph\n counterexample : bool\n A Kuratowski subgraph (to proof non planarity) is only returned if set\n to true.\n\n Returns\n -------\n (is_planar, certificate) : (bool, NetworkX graph) tuple\n is_planar is true if the graph is planar.\n If the graph is planar `certificate` is a PlanarEmbedding\n otherwise it is a Kuratowski subgraph.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2)])\n >>> is_planar, P = nx.check_planarity(G)\n 
>>> print(is_planar)\n True\n\n When `G` is planar, a `PlanarEmbedding` instance is returned:\n\n >>> P.get_data()\n {0: [1, 2], 1: [0], 2: [0]}\n\n Notes\n -----\n A (combinatorial) embedding consists of cyclic orderings of the incident\n edges at each vertex. Given such an embedding there are multiple approaches\n discussed in literature to drawing the graph (subject to various\n constraints, e.g. integer coordinates), see e.g. [2].\n\n The planarity check algorithm and extraction of the combinatorial embedding\n is based on the Left-Right Planarity Test [1].\n\n A counterexample is only generated if the corresponding parameter is set,\n because the complexity of the counterexample generation is higher.\n\n References\n ----------\n .. [1] Ulrik Brandes:\n The Left-Right Planarity Test\n 2009\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208\n .. [2] Takao Nishizeki, Md Saidur Rahman:\n Planar graph drawing\n Lecture Notes Series on Computing: Volume 12\n 2004\n ", "n_words": 228, "vocab_size": 154, "n_whitespaces": 404, "language": "en" } }, { "id": 42786, "commit_id": "60eb9e106f5915398eafd6aa339ec710c102dc09", "repo": "airflow", "path": "airflow/providers/cncf/kubernetes/hooks/kubernetes.py", "file_name": "kubernetes.py", "fun_name": "_get_bool", "commit_message": "Use KubernetesHook to create api client in KubernetesPodOperator (#20578)\n\nAdd support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them.\r\n\r\nKPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. So we read them in KPO and if necessary patch the hook and warn.", "code": "def _get_bool(val) -> Optional[bool]:\n \n if isinstance(val, bool):\n return val\n elif isinstance(val, str):\n if val.strip().lower() == 'true':\n return True\n elif val.strip().lower() == 'false':\n return False\n return None\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 81, "n_words": 26, "vocab_size": 18, "complexity": 5, "nloc": 13, "token_counts": 61, "n_ast_nodes": 104, "n_identifiers": 8, "random_cut": "def _get_bool(val) -> Optional[bool]:\n \n if isinstance(val, bool):\n return val\n elif isinstance(val, str):\n if val.strip().lower() == 'true':\n return True\n ", "d_id": 7734, "documentation": { "docstring": "\n Converts val to bool if can be done with certainty.\n If we cannot infer intention we return None.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 28, "language": "en" } }, { "id": 158166, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "load_data_ptb", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def load_data_ptb(batch_size, max_window_size, num_noise_words):\n \n sentences = read_ptb()\n vocab = d2l.Vocab(sentences, min_freq=10)\n subsampled, counter = subsample(sentences, vocab)\n corpus = [vocab[line] for line in subsampled]\n all_centers, all_contexts = get_centers_and_contexts(\n corpus, max_window_size)\n all_negatives = get_negatives(\n all_contexts, vocab, counter, num_noise_words)\n dataset = gluon.data.ArrayDataset(\n all_centers, all_contexts, all_negatives)\n data_iter = gluon.data.DataLoader(\n dataset, batch_size, shuffle=True,batchify_fn=batchify,\n num_workers=d2l.get_dataloader_workers())\n return data_iter, vocab\n\nd2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip',\n '0b8703943ccdb6eb788e6f091b8946e82231bc4d')\n\nd2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip',\n 'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')\n\nd2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip',\n 
'b5116e234e9eb9076672cfeabf5469f3eec904fa')\n\nd2l.DATA_HUB['wiki.en'] = (d2l.DATA_URL + 'wiki.en.zip',\n 'c1816da3821ae9f43899be655002f6c723e91b88')\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 259, "n_words": 76, "vocab_size": 56, "complexity": 2, "nloc": 15, "token_counts": 117, "n_ast_nodes": 289, "n_identifiers": 33, "random_cut": "def load_data_ptb(batch_size, max_window_size, num_noise_words):\n \n sentences = read_ptb()\n vocab = d2l.Vocab(sentences, min_freq=10)\n subsampled, counter = subsample(sentences, vocab)\n corpus = [vocab[line] for line in subsampled]\n all_centers, all_contexts = get_centers_and_contexts(\n corpus, max_window_size)\n all_negatives = get_negatives(\n all_contexts, vocab, counter, num_noise_words)\n dataset = gluon.data.ArrayDataset(\n all_centers, all_contexts, all_negatives)\n data_iter = gluon.data.DataLoader(\n dataset, batch_size, shuffle=True,batchify_fn=batc", "d_id": 37345, "documentation": { "docstring": "Download the PTB dataset and then load it into memory.\n\n Defined in :numref:`subsec_word2vec-minibatch-loading`", "n_words": 13, "vocab_size": 13, "n_whitespaces": 15, "language": "en" } }, { "id": 289104, "commit_id": "3b33e0d832b238b40360383099391e2093ea05cb", "repo": "core", "path": "homeassistant/components/homekit/__init__.py", "file_name": "__init__.py", "fun_name": "async_config_changed", "commit_message": "Add support for restoring HomeKit IIDs (#79913)", "code": "async def async_config_changed(self) -> None:\n \n assert self.driver is not None\n await self.hass.async_add_executor_job(self.driver.config_changed)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 28, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "async def async_config_changed(self) -> None:\n \n assert self.driver is not None\n await self.hass.async_add_executor_job(self.driver.config_changed)\n", "d_id": 88252, "documentation": { "docstring": "Call config changed which writes out the new config to disk.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 63461, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "setDebugActions", "commit_message": "upd; format", "code": "def setDebugActions(self, startAction, successAction, exceptionAction):\n \n self.debugActions = (startAction or _defaultStartDebugAction,\n successAction or _defaultSuccessDebugAction,\n exceptionAction or _defaultExceptionDebugAction)\n self.debug = True\n return self\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 105, "n_words": 21, "vocab_size": 18, "complexity": 4, "nloc": 6, "token_counts": 36, "n_ast_nodes": 54, "n_identifiers": 10, "random_cut": "def setDebugActions(self, startAction, successAction, exceptionAction):\n \n self.debugActions = (startAction or _defaultStartDebug", "d_id": 13314, "documentation": { "docstring": "\n Enable display of debugging messages while doing pattern matching.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 64990, "commit_id": 
"494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/pricing_rule/utils.py", "file_name": "utils.py", "fun_name": "get_qty_amount_data_for_cumulative", "commit_message": "style: format code with black", "code": "def get_qty_amount_data_for_cumulative(pr_doc, doc, items=None):\n\tif items is None:\n\t\titems = []\n\tsum_qty, sum_amt = [0, 0]\n\tdoctype = doc.get(\"parenttype\") or doc.doctype\n\n\tdate_field = (\n\t\t\"transaction_date\" if frappe.get_meta(doctype).has_field(\"transaction_date\") else \"posting_date\"\n\t)\n\n\tchild_doctype = \"{0} Item\".format(doctype)\n\tapply_on = frappe.scrub(pr_doc.get(\"apply_on\"))\n\n\tvalues = [pr_doc.valid_from, pr_doc.valid_upto]\n\tcondition = \"\"\n\n\tif pr_doc.warehouse:\n\t\twarehouses = get_child_warehouses(pr_doc.warehouse)\n\n\t\tcondition += .format(\n\t\t\tchild_doc=child_doctype, warehouses=\",\".join([\"%s\"] * len(warehouses))\n\t\t)\n\n\t\tvalues.extend(warehouses)\n\n\tif items:\n\t\tcondition = \" and `tab{child_doc}`.{apply_on} in ({items})\".format(\n\t\t\tchild_doc=child_doctype, apply_on=apply_on, items=\",\".join([\"%s\"] * len(items))\n\t\t)\n\n\t\tvalues.extend(items)\n\n\tdata_set = frappe.db.sql(\n\t\t.format(\n\t\t\tparent_doc=doctype, child_doc=child_doctype, condition=condition, date_field=date_field\n\t\t),\n\t\ttuple(values),\n\t\tas_dict=1,\n\t)\n\n\tfor data in data_set:\n\t\tsum_qty += data.get(\"stock_qty\")\n\t\tsum_amt += data.get(\"amount\")\n\n\treturn [sum_qty, sum_amt]\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 65, "n_words": 99, "vocab_size": 72, "complexity": 7, "nloc": 42, "token_counts": 245, "n_ast_nodes": 406, "n_identifiers": 34, "random_cut": "def get_qty_amount_data_for_cumulative(pr_doc, doc, items=None):\n\tif items is None:\n\t\titems = []\n\tsum_qty, sum_amt = [0, 0]\n\tdoctype = doc.get(\"parenttype\") or doc.doctype\n\n\tdate_field = (\n\t\t\"transaction_date\" if frappe.get_meta(doctype).has_field(\"transaction_date\") else \"posting_date\"\n\t)\n\n\tchild_doctype = \"{0} Item\".format(doct", "d_id": 13771, "documentation": { "docstring": " and `tab{child_doc}`.warehouse in ({warehouses})\n\t\t\t SELECT `tab{child_doc}`.stock_qty,\n\t\t\t`tab{child_doc}`.amount\n\t\tFROM `tab{child_doc}`, `tab{parent_doc}`\n\t\tWHERE\n\t\t\t`tab{child_doc}`.parent = `tab{parent_doc}`.name and `tab{parent_doc}`.{date_field}\n\t\t\tbetween %s and %s and `tab{parent_doc}`.docstatus = 1\n\t\t\t{condition} group by `tab{child_doc}`.name\n\t", "n_words": 28, "vocab_size": 23, "n_whitespaces": 22, "language": "en" } }, { "id": 241795, "commit_id": "5628849933f1ba002f34b88b4d3af24f68008b39", "repo": "scipy", "path": "scipy/sparse/linalg/_isolve/utils.py", "file_name": "utils.py", "fun_name": "make_system", "commit_message": "MAINT: sparse.linalg: Remove unnecessary operations", "code": "def make_system(A, M, x0, b):\n \n A_ = A\n A = aslinearoperator(A)\n\n if A.shape[0] != A.shape[1]:\n raise ValueError(f'expected square matrix, but got shape={(A.shape,)}')\n\n N = A.shape[0]\n\n b = asanyarray(b)\n\n if not (b.shape == (N,1) or b.shape == (N,)):\n raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '\n 'incompatible')\n\n if b.dtype.char not in 'fdFD':\n b = b.astype('d') # upcast non-FP types to double\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 132, 
"n_words": 62, "vocab_size": 48, "complexity": 17, "nloc": 51, "token_counts": 379, "n_ast_nodes": 194, "n_identifiers": 14, "random_cut": "def make_system(A, M, x0, b):\n \n A_ = A\n A = aslinearoperator(A)\n\n if A.shape[0] != A.shape[1]:\n raise ValueError(f'expected square matrix, but got shape={(A.shape,)}')\n\n N = A.shape[0]\n\n b = asanyarray(b)\n\n if not (b.shape == (N,1) or b.shape == (N,)):\n raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '\n 'incompatib", "d_id": 69699, "documentation": { "docstring": "Make a linear system Ax=b\n\n Parameters\n ----------\n A : LinearOperator\n sparse or dense matrix (or any valid input to aslinearoperator)\n M : {LinearOperator, Nones}\n preconditioner\n sparse or dense matrix (or any valid input to aslinearoperator)\n x0 : {array_like, str, None}\n initial guess to iterative method.\n ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.\n Default is `None`, which means using the zero initial guess.\n b : array_like\n right hand side\n\n Returns\n -------\n (A, M, x, b, postprocess)\n A : LinearOperator\n matrix of the linear system\n M : LinearOperator\n preconditioner\n x : rank 1 ndarray\n initial guess\n b : rank 1 ndarray\n right hand side\n postprocess : function\n converts the solution vector to the appropriate\n type and dimensions (e.g. (N,1) matrix)\n\n ", "n_words": 123, "vocab_size": 77, "n_whitespaces": 303, "language": "en" } }, { "id": 176163, "commit_id": "dec723f072eb997a497a159dbe8674cd39999ee9", "repo": "networkx", "path": "networkx/generators/small.py", "file_name": "small.py", "fun_name": "dodecahedral_graph", "commit_message": "Docstrings for the small.py module (#5240)\n\n* added description for the first 5 small graphs\r\n\r\n* modified descriptions based on comment and added description for two more functions\r\n\r\n* added doctrings to all the functions\r\n\r\n* Minor touchups.\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def dodecahedral_graph(create_using=None):\n \n G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using)\n G.name = \"Dodecahedral Graph\"\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 35, "n_words": 23, "vocab_size": 18, "complexity": 1, "nloc": 4, "token_counts": 51, "n_ast_nodes": 74, "n_identifiers": 5, "random_cut": "def dodecahedral_graph(create_using=None):\n \n G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using)\n G.name = \"Dodecahedral Graph\"\n re", "d_id": 41733, "documentation": { "docstring": "\n Returns the Platonic Dodecahedral graph.\n\n The dodecahedral graph has 20 nodes and 30 edges. The skeleton of the\n dodecahedron forms a graph. It is one of 5 Platonic graphs [1]_.\n It can be described in LCF notation as:\n ``[10, 7, 4, -4, -7, 10, -4, 7, -7, 4]^2`` [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Dodecahedral Graph with 20 nodes and 30 edges\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Regular_dodecahedron#Dodecahedral_graph\n .. 
[2] https://mathworld.wolfram.com/DodecahedralGraph.html\n\n ", "n_words": 91, "vocab_size": 69, "n_whitespaces": 153, "language": "en" } }, { "id": 300618, "commit_id": "4885331509eeffe50f42d76b234996467b06170f", "repo": "core", "path": "homeassistant/helpers/template.py", "file_name": "template.py", "fun_name": "arc_tangent", "commit_message": "Fail template functions when no default specified (#71687)", "code": "def arc_tangent(value, default=_SENTINEL):\n \n try:\n return math.atan(float(value))\n except (ValueError, TypeError):\n if default is _SENTINEL:\n raise_no_default(\"atan\", value)\n return default\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 58, "n_words": 17, "vocab_size": 15, "complexity": 3, "nloc": 7, "token_counts": 42, "n_ast_nodes": 70, "n_identifiers": 10, "random_cut": "def arc_tangent(value, default=_SENTINEL):\n \n try:\n return math.atan(float(value))\n except (ValueError, TypeError):\n if default is _SENTINEL:\n ra", "d_id": 99478, "documentation": { "docstring": "Filter and function to get arc tangent of the value.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 276983, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/metrics_utils.py", "file_name": "metrics_utils.py", "fun_name": "sparse_top_k_categorical_matches", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def sparse_top_k_categorical_matches(y_true, y_pred, k=5):\n \n reshape_matches = False\n y_true = tf.convert_to_tensor(y_true)\n y_pred = tf.convert_to_tensor(y_pred)\n y_true_rank = y_true.shape.ndims\n y_pred_rank = y_pred.shape.ndims\n y_true_org_shape = tf.shape(y_true)\n\n # Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)\n if (y_true_rank is not None) and (y_pred_rank is not None):\n if y_pred_rank > 2:\n y_pred = tf.reshape(y_pred, [-1, y_pred.shape[-1]])\n if y_true_rank > 1:\n reshape_matches = True\n y_true = tf.reshape(y_true, [-1])\n\n matches = tf.cast(\n tf.math.in_top_k(\n predictions=y_pred, targets=tf.cast(y_true, \"int32\"), k=k\n ),\n dtype=backend.floatx(),\n )\n\n # returned matches is expected to have same shape as y_true input\n if reshape_matches:\n return tf.reshape(matches, shape=y_true_org_shape)\n\n return matches\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 220, "n_words": 92, "vocab_size": 61, "complexity": 6, "nloc": 22, "token_counts": 172, "n_ast_nodes": 268, "n_identifiers": 22, "random_cut": "def sparse_top_k_categorical_matches(y_true, y_pred, k=5):\n \n reshape_matches = False\n y_true = tf.convert_to_tensor(y_true)\n y_pred = tf.convert_to_tensor(y_pred)\n y_true_rank = y_true.shape.ndims\n y_pred_rank = y_pred.shape.ndims\n y_true_org_shape = tf.shape(y_true)\n\n # Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)\n if (y_true_rank is not None) and (y_pred_rank is not None):\n if y_pred_rank > 2:\n y_pred = tf.reshape(y_pred, [-1, y_pred.shape[-1]])\n if y_true_rank > 1:\n reshape_matches = True\n y_true = tf.reshape(y_true, [-1])\n\n matches = tf.cast(\n tf.math.in_top_k(\n predictions=y_pred, ta", "d_id": 81815, "documentation": { "docstring": "Creates float Tensor, 1.0 for label-TopK_prediction match, 0.0 for mismatch.\n\n Args:\n y_true: tensor of true targets.\n y_pred: 
tensor of predicted targets.\n k: (Optional) Number of top elements to look at for computing accuracy.\n Defaults to 5.\n\n Returns:\n Match tensor: 1.0 for label-prediction match, 0.0 for mismatch.\n ", "n_words": 46, "vocab_size": 33, "n_whitespaces": 82, "language": "en" } }, { "id": 68540, "commit_id": "a1e3ae8869194a487acccc706a381db74c4aa1ff", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "tax_account_query", "commit_message": "fix: user can select disabled accounts in taxes table", "code": "def tax_account_query(doctype, txt, searchfield, start, page_len, filters):\n\tcompany_currency = erpnext.get_company_currency(filters.get(\"company\"))\n\n\tdef get_accounts(with_account_type_filter):\n\t\taccount_type_condition = \"\"\n\t\tif with_account_type_filter:\n\t\t\taccount_type_condition = \"AND account_type in %(account_types)s\"\n\n\t\taccounts = frappe.db.sql(\n\t\t\t.format(\n\t\t\t\taccount_type_condition=account_type_condition,\n\t\t\t\tsearchfield=searchfield,\n\t\t\t\tmcond=get_match_cond(doctype),\n\t\t\t),\n\t\t\tdict(\n\t\t\t\taccount_types=filters.get(\"account_type\"),\n\t\t\t\tcompany=filters.get(\"company\"),\n\t\t\t\tdisabled=filters.get(\"disabled\", 0),\n\t\t\t\tcurrency=company_currency,\n\t\t\t\ttxt=\"%{}%\".format(txt),\n\t\t\t\toffset=start,\n\t\t\t\tlimit=page_len,\n\t\t\t),\n\t\t)\n\n\t\treturn accounts\n\n\ttax_accounts = get_accounts(True)\n\n\tif not tax_accounts:\n\t\ttax_accounts = get_accounts(False)\n\n\treturn tax_accounts\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 28, "n_words": 57, "vocab_size": 44, "complexity": 2, "nloc": 7, "token_counts": 48, "n_ast_nodes": 249, "n_identifiers": 31, "random_cut": "def tax_account_query(doctype, txt, searchfield, start, page_len, filters):\n\tcompany_currency = erpnext.get_company_currency(filters.get(\"company\"))\n\n\tdef get_accounts(with_account_type_filter):\n\t\taccount_type_condition = \"\"\n\t\tif with_account_type_filter:\n\t\t\taccount_type_condition = \"AND account_type in %(account_types)s\"\n\n\t\taccounts = frappe.db.sql(\n\t\t\t.format(\n\t\t\t\taccount_type_condition=account_type_condition,\n\t\t\t\tsearchfield=searchfield,\n\t\t\t\tmcond=get_match_cond(doctype),\n\t\t\t),\n\t\t\tdict(\n\t\t\t\taccount_types=filters.get(\"account_type\"),\n\t\t\t\tcompany=filters.get(\"company\"),\n\t\t\t\tdisabled=filters.get(\"disabled\", 0),\n\t\t\t\tcurrency=company_currency,\n\t\t\t\ttxt=\"%{}%\".format(txt),\n\t\t\t\toffset=start,\n\t\t\t\tlimit=page_len,\n\t\t\t),\n\t\t)\n\n\t\treturn accounts\n\n\ttax_accounts = get_accounts(True)\n\n\tif not tax_accounts:\n\t\ttax_accounts = get_accounts(False)\n\n\treturn tax_accounts\n\n\n", "d_id": 14815, "documentation": { "docstring": "\n\t\t\tSELECT name, parent_account\n\t\t\tFROM `tabAccount`\n\t\t\tWHERE `tabAccount`.docstatus!=2\n\t\t\t\t{account_type_condition}\n\t\t\t\tAND is_group = 0\n\t\t\t\tAND company = %(company)s\n\t\t\t\tAND disabled = %(disabled)s\n\t\t\t\tAND (account_currency = %(currency)s or ifnull(account_currency, '') = '')\n\t\t\t\tAND `{searchfield}` LIKE %(txt)s\n\t\t\t\t{mcond}\n\t\t\tORDER BY idx DESC, name\n\t\t\tLIMIT %(offset)s, %(limit)s\n\t\t", "n_words": 42, "vocab_size": 33, "n_whitespaces": 30, "language": "en" } }, { "id": 77101, "commit_id": 
"c136f461bc052cef362991458e1bd1fca37a3da9", "repo": "wagtail", "path": "wagtail/images/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_add_post_duplicate_choose_permission", "commit_message": "Add duplicate detection to multiple image upload view\n\nAdd utility function to find an image's potential duplicates\n\nAdd logic to detect duplicates on multiple images upload view\n\nAdd template shown when a user is prompted to confirm a duplicate upload\n\nAdd client-side logic to confirm a duplicate upload\n\nAdd/update styles\n\nAdd tests for duplicate image uploads\n\nIndex Image file_hash field\n\nEnsure that a user can choose an image from duplicates returned by find_image_duplicates\n\nUse CSS classes instead of HTML elements to hide edit form on duplicate upload\n\nAdd ImagesPermissionPolicy helper to retrieve the permission policy dynamically\n\nThis allows test cases that override the base image model to pick up the corresponding permission policy, should they need it.\n\nRemove usage of sibling selector\n\nUse wagtail image templatetag to generate image\n\nRenamed ImagesPermissionPolicy to ImagesPermissionPolicyGetter\n\nFail loudly when setting permission policy and a wromg image model is provided\n\nAdd decorator to disconnect a signal's receiver during a test execution and use it in get_image_model tests\n\nImprove warning message on duplicate upload in multiple upload view\n\nShow matching form when confirming a duplicate upload", "code": "def test_add_post_duplicate_choose_permission(self):\n \n\n # Create group with access to admin and add permission.\n bakers_group = Group.objects.create(name=\"Bakers\")\n access_admin_perm = Permission.objects.get(\n content_type__app_label=\"wagtailadmin\", codename=\"access_admin\"\n )\n bakers_group.permissions.add(access_admin_perm)\n\n # Create the \"Bakery\" Collection and grant \"add\" permission to the Bakers group.\n root = Collection.objects.get(id=get_root_collection_id())\n bakery_collection = root.add_child(instance=Collection(name=\"Bakery\"))\n GroupCollectionPermission.objects.create(\n group=bakers_group,\n collection=bakery_collection,\n permission=Permission.objects.get(\n content_type__app_label=\"wagtailimages\", codename=\"add_image\"\n ),\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 196, "n_words": 49, "vocab_size": 40, "complexity": 1, "nloc": 31, "token_counts": 221, "n_ast_nodes": 176, "n_identifiers": 25, "random_cut": "def test_add_post_duplicate_choose_permission(self):\n \n\n # Create group with access to admin and add permission.\n bakers_group = Group.objects.create(name=\"Bakers\")\n access_admin_perm = Permission.objects.get(\n content_type__app_label=\"wagtailadmin\", codename=\"access_admin\"\n )\n bakers_group.permissions.add(access_admin_perm)\n\n # Create the \"Bakery\" Collection and grant \"add\" permission to the Bakers group.\n root = Collection.objects.get(id=get_root_collection_id())\n bak", "d_id": 16628, "documentation": { "docstring": "\n When a duplicate image is added but the user doesn't have permission to choose the original image,\n the add views lets the user upload it as if it weren't a duplicate.\n ", "n_words": 31, "vocab_size": 25, "n_whitespaces": 53, "language": "en" } }, { "id": 216561, "commit_id": "d8305bfaa7b98d898f5963b01ca75f277c266322", "repo": "salt", "path": "salt/modules/napalm_mod.py", "file_name": "napalm_mod.py", "fun_name": 
"netmiko_commands", "commit_message": "Deprecated netmiko_conn and pyeapi_conn in napalm_mod.py as these function should not be called from the CLI", "code": "def netmiko_commands(*commands, **kwargs):\n \n conn = _netmiko_conn(**kwargs)\n ret = []\n for cmd in commands:\n ret.append(conn.send_command(cmd))\n return ret\n\n\n@proxy_napalm_wrap", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "@proxy_napalm_wrap", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 38, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 39, "n_ast_nodes": 70, "n_identifiers": 10, "random_cut": "def netmiko_commands(*commands, **kwargs):\n \n conn = _netmiko_conn(**kwargs)\n ret = []\n for cmd in commands:\n ret.append(conn.send_command(cmd))", "d_id": 54638, "documentation": { "docstring": "\n .. versionadded:: 2019.2.0\n\n Invoke one or more commands to be executed on the remote device, via Netmiko.\n Returns a list of strings, with the output from each command.\n\n commands\n A list of commands to be executed.\n\n expect_string\n Regular expression pattern to use for determining end of output.\n If left blank will default to being based on router prompt.\n\n delay_factor: ``1``\n Multiplying factor used to adjust delays (default: ``1``).\n\n max_loops: ``500``\n Controls wait time in conjunction with delay_factor. Will default to be\n based upon self.timeout.\n\n auto_find_prompt: ``True``\n Whether it should try to auto-detect the prompt (default: ``True``).\n\n strip_prompt: ``True``\n Remove the trailing router prompt from the output (default: ``True``).\n\n strip_command: ``True``\n Remove the echo of the command from the output (default: ``True``).\n\n normalize: ``True``\n Ensure the proper enter is sent at end of command (default: ``True``).\n\n use_textfsm: ``False``\n Process command output through TextFSM template (default: ``False``).\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' napalm.netmiko_commands 'show version' 'show interfaces'\n ", "n_words": 157, "vocab_size": 106, "n_whitespaces": 287, "language": "en" } }, { "id": 100464, "commit_id": "aa39234538a8f83e6aa2b60b8275a570e8876ac2", "repo": "faceswap", "path": "plugins/train/model/original.py", "file_name": "original.py", "fun_name": "decoder", "commit_message": "Update all Keras Imports to be conditional (#1214)\n\n* Remove custom keras importer\r\n\r\n* first round keras imports fix\r\n\r\n* launcher.py: Remove KerasFinder references\r\n\r\n* 2nd round keras imports update (lib and extract)\r\n\r\n* 3rd round keras imports update (train)\r\n\r\n* remove KerasFinder from tests\r\n\r\n* 4th round keras imports update (tests)", "code": "def decoder(self, side):\r\n \r\n input_ = Input(shape=(8, 8, 512))\r\n var_x = input_\r\n var_x = UpscaleBlock(256, activation=\"leakyrelu\")(var_x)\r\n var_x = UpscaleBlock(128, activation=\"leakyrelu\")(var_x)\r\n var_x = UpscaleBlock(64, activation=\"leakyrelu\")(var_x)\r\n var_x = Conv2DOutput(3, 5, name=f\"face_out_{side}\")(var_x)\r\n outputs = [var_x]\r\n\r\n if self.learn_mask:\r\n var_y = input_\r\n var_y = UpscaleBlock(256, activation=\"leakyrelu\")(var_y)\r\n var_y = UpscaleBlock(128, activation=\"leakyrelu\")(var_y)\r\n var_y = UpscaleBlock(64, activation=\"leakyrelu\")(var_y)\r\n var_y = Conv2DOutput(1, 5, name=f\"mask_out_{side}\")(var_y)\r\n outputs.append(var_y)\r\n return KerasModel(input_, outputs=outputs, name=f\"decoder_{side}\")\r\n\r", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 194, "n_words": 58, "vocab_size": 29, "complexity": 2, "nloc": 16, "token_counts": 168, "n_ast_nodes": 283, "n_identifiers": 16, "random_cut": "def decoder(self, side):\r\n \r\n input_ = Input(shape=(8, 8, 512))\r\n var_x = input_\r\n var_x = UpscaleBlock(256, activation=\"leakyrelu\")(var_x)\r\n var_x = UpscaleBlock(128, activation=\"leakyrelu\")(var_x)\r\n var_x = UpscaleBlock(64, activation=\"leakyrelu\")(var_x)\r\n var_x = Conv2DOutput(3, 5, name=f\"face_out_{side}\")(var_x)\r\n outputs = [var_x]\r\n\r\n if self.learn_mask:\r\n var_y = input_\r\n var_y = UpscaleBlock(256, activation=\"leakyrelu\")(var_y)\r\n var_y = UpscaleBlock(128, activation=\"leakyrelu\")(var_y)\r\n var_y = UpscaleBlock(64, activation=\"leakyrelu\")(var_y)\r\n var_y = Conv2DOutput(1, 5, name=f\"mask_out_{side}\")(var_y)\r\n outputs.append(var_y)\r\n return KerasModel(input_, outputs=outputs, name=f\"dec", "d_id": 19938, "documentation": { "docstring": " The original Faceswap Decoder Network.\r\n\r\n The decoders for the original model have separate weights for each side \"A\" and \"B\", so two\r\n instances are created in :func:`build_model`, one for each side.\r\n\r\n Parameters\r\n ----------\r\n side: str\r\n Either `\"a` or `\"b\"`. This is used for naming the decoder model.\r\n\r\n Returns\r\n -------\r\n :class:`keras.models.Model`\r\n The Keras decoder model. 
This will be called twice, once for each side.\r\n ", "n_words": 63, "vocab_size": 49, "n_whitespaces": 149, "language": "en" } }, { "id": 83758, "commit_id": "6331a314d464f9c49a612023a5969e5d7b8e00a0", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_users_getting_add_peer_event", "commit_message": "Correctly hyphenate “non-”.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_users_getting_add_peer_event(self) -> None:\n \n streams_to_sub = [\"multi_user_stream\"]\n othello = self.example_user(\"othello\")\n cordelia = self.example_user(\"cordelia\")\n iago = self.example_user(\"iago\")\n orig_user_ids_to_subscribe = [self.test_user.id, othello.id]\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()),\n )\n\n new_user_ids_to_subscribe = [iago.id, cordelia.id]\n events: List[Mapping[str, Any]] = []\n with self.tornado_redirected_to_list(events, expected_num_events=5):\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()),\n )\n\n add_peer_events = [event for event in events if event[\"event\"].get(\"op\") == \"peer_add\"]\n (add_peer_event,) = add_peer_events\n\n self.assertEqual(add_peer_event[\"event\"][\"type\"], \"subscription\")\n self.assertEqual(add_peer_event[\"event\"][\"op\"], \"peer_add\")\n event_sent_to_ids = add_peer_event[\"users\"]\n for user_id in new_user_ids_to_subscribe:\n # Make sure new users subscribed to stream is not in\n # peer_add event recipient list\n self.assertNotIn(user_id, event_sent_to_ids)\n for old_user in orig_user_ids_to_subscribe:\n # Check non-new users are in peer_add event recipient list.\n self.assertIn(old_user, event_sent_to_ids)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 382, "n_words": 101, "vocab_size": 73, "complexity": 5, "nloc": 31, "token_counts": 228, "n_ast_nodes": 378, "n_identifiers": 34, "random_cut": "def test_users_getting_add_peer_event(self) -> None:\n \n streams_to_sub = [\"multi_user_stream\"]\n othello = self.example_user(\"othello\")\n cordelia = self.example_user(\"cordelia\")\n iago = self.example_user(\"iago\")\n orig_user_ids_to_subscribe = [self.test_user.id, othello.id]\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()),\n )\n\n new_user_ids_to_subscribe = [iago.id, cordelia.id]\n events: List[Mapping[str, Any]] = []\n with self.tornado_redirected_to_list(events, expected_num_events=5):\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()),\n )\n\n add_peer_events = [event for event in events if event[\"event\"].get(\"op\") == \"peer_add\"]\n (add_peer_event,) = add_peer_events\n\n self.assertEqual(add_peer_event[\"event\"][\"type\"], \"subscription\")\n self.a", "d_id": 17720, "documentation": { "docstring": "\n Check users getting add_peer_event is correct\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 158164, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "show_trace_2d", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version 
(#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def show_trace_2d(f, results):\n \n d2l.set_figsize()\n d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = d2l.meshgrid(d2l.arange(-5.5, 1.0, 0.1),\n d2l.arange(-3.0, 1.0, 0.1))\n d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n d2l.plt.xlabel('x1')\n d2l.plt.ylabel('x2')\n\nd2l.DATA_HUB['airfoil'] = (d2l.DATA_URL + 'airfoil_self_noise.dat',\n '76e5be1548fd8222e5074cf0faae75edff8cf93f')\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 100, "n_words": 29, "vocab_size": 27, "complexity": 1, "nloc": 8, "token_counts": 113, "n_ast_nodes": 193, "n_identifiers": 19, "random_cut": "def show_trace_2d(f, results):\n \n d2l.set_figsize()\n d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = d2", "d_id": 37343, "documentation": { "docstring": "Show the trace of 2D variables during optimization.\n\n Defined in :numref:`subsec_gd-learningrate`", "n_words": 11, 
"vocab_size": 11, "n_whitespaces": 13, "language": "en" } }, { "id": 133242, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/tests/test_torch_2.py", "file_name": "test_torch_2.py", "fun_name": "test_dataset", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_dataset(ray_start_4_cpus, use_local):\n \n\n model_creator = mlp_identity.model_creator\n optimizer_creator = mlp_identity.optimizer_creator\n dataset_creator = mlp_identity.dataset_creator\n\n DatasetOperator = TrainingOperator.from_creators(\n model_creator=model_creator,\n optimizer_creator=optimizer_creator,\n loss_creator=nn.MSELoss,\n )\n\n trainer = TorchTrainer(\n training_operator_cls=DatasetOperator,\n use_local=use_local,\n num_workers=2,\n )\n\n dataset = dataset_creator()\n for i in range(5):\n trainer.train(dataset=dataset, num_steps=100)\n\n x = mlp_identity.to_mat(0.5)\n prediction = float(trainer.get_model()(x)[0][0])\n assert 0.4 <= prediction <= 0.6\n trainer.shutdown()\n\n\n@pytest.mark.parametrize(\"use_local\", [True, False])", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"use_local\", [True, False])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 141, "n_words": 51, "vocab_size": 41, "complexity": 2, "nloc": 21, "token_counts": 130, "n_ast_nodes": 216, "n_identifiers": 31, "random_cut": "def test_dataset(ray_start_4_cpus, use_local):\n \n\n model_creator = mlp_identity.model_creator\n optimizer_creator = mlp_identity.optimizer_creator\n dataset_creator = mlp_identity.dataset_creator\n\n DatasetOperator = TrainingOperator.from_creators(\n model_creator=model_creator,\n optimizer_creator=optimizer_creator,\n loss_creator=nn.MSELoss,\n )\n\n trainer = TorchTrainer(\n training_operator_cls=DatasetOperator,\n use_local=use_local,\n num_workers=2,\n )\n\n dataset = dataset_creator()\n for i in range(5):\n trainer.train(dataset=dataset, num_steps=100)\n\n x = mlp_identity.to_mat(0.5)\n prediction = float(trainer.get_model()(x)[0][0])\n assert 0.4 <= prediction <= 0.6\n trainer.shutdown(", "d_id": 29963, "documentation": { "docstring": "\n This test tries training the mlp_identity example. 
We check the accuracy of\n the model as an all inclusive way of ensuring that we are properly sharding\n and iterating over the entire dataset (instead of repeating the first set\n of points for example).\n ", "n_words": 42, "vocab_size": 35, "n_whitespaces": 58, "language": "en" } }, { "id": 9114, "commit_id": "db307ffb12d6ba1f8eaeeafd29ee6d4a3fd6fa97", "repo": "insightface", "path": "parsing/dml_csr/loss/lovasz_softmax.py", "file_name": "lovasz_softmax.py", "fun_name": "lovasz_softmax_flat", "commit_message": "Create lovasz_softmax.py", "code": "def lovasz_softmax_flat(probas, labels, classes='present', weighted=None):\n \n if probas.numel() == 0:\n # only void pixels, the gradients should be 0\n return probas * 0.\n C = probas.size(1)\n losses = []\n class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes\n for c in class_to_sum:\n fg = (labels == c).float() # foreground for class c\n if (classes is 'present' and fg.sum() == 0):\n continue\n if C == 1:\n if len(classes) > 1:\n raise ValueError('Sigmoid output possible only with 1 class')\n class_pred = probas[:, 0]\n else:\n class_pred = probas[:, c]\n errors = (Variable(fg) - class_pred).abs()\n errors_sorted, perm = torch.sort(errors, 0, descending=True)\n perm = perm.data\n fg_sorted = fg[perm]\n if weighted is not None:\n losses.append(weighted[c]*torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))\n else:\n losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))\n return mean(losses)\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 302, "n_words": 115, "vocab_size": 83, "complexity": 9, "nloc": 25, "token_counts": 226, "n_ast_nodes": 365, "n_identifiers": 33, "random_cut": "def lovasz_softmax_flat(probas, labels, classes='present', weighted=None):\n \n if probas.numel() == 0:\n # only void pixels, the gradients should be 0\n return probas * 0.\n C = probas.size(1)\n losses = []\n class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes\n for c in class_to_sum:\n fg = (labels == c).float() # foreground for class c\n if (classes is 'present' and fg.sum() == 0):\n continue\n if C == 1:\n if len(classes) > 1:\n raise ValueError('Sigmoid output possible only with 1 class')\n class_pred = probas[:, 0]\n else:\n class_pred = probas[:, c]\n errors = (Variable(fg) - class_pred).abs()\n errors_sorted, perm = torch.sort(errors, 0, descending=True)\n perm = perm.data\n fg_sorted = fg[perm]\n if weighted is not None:\n losses.append(wei", "d_id": 1551, "documentation": { "docstring": "\n Multi-class Lovasz-Softmax loss\n probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)\n labels: [P] Tensor, ground truth labels (between 0 and C - 1)\n classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.\n ", "n_words": 45, "vocab_size": 39, "n_whitespaces": 67, "language": "en" } }, { "id": 10924, "commit_id": "13edc16d806fb5d77a6849551178ccc75937f25f", "repo": "jina", "path": "jina/parsers/orchestrate/runtimes/distributed.py", "file_name": "distributed.py", "fun_name": "mixin_distributed_feature_parser", "commit_message": "refactor: rename pod to deployment (#4230)\n\n* refactor: rename pod to deployment\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: undo daemon mistake\r\n\r\n* refactor: leftover cleanup\r\n\r\n* fix: more test fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: 
more fixes\r\n\r\n* fix: more tests\r\n\r\n* fix: fix more tests\r\n\r\n* refactor: fix more tests\r\n\r\n* refactor: more tests fixes\r\n\r\n* refactor: rename pea to pod\r\n\r\n* refactor: adjust docs\r\n\r\n* refactor: complete pea renaming\r\n\r\n* refactor: more fixes\r\n\r\n* fix: pea_type in k8s yamls\r\n\r\n* fix: adjust pod args name\r\n\r\n* refactor: rename peapods parser folder\r\n\r\n* fix: da init\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def mixin_distributed_feature_parser(parser):\n \n\n gp = add_arg_group(parser, title='Distributed')\n\n gp.add_argument(\n '--quiet-remote-logs',\n action='store_true',\n default=False,\n help='Do not display the streaming of remote logs on local console',\n )\n\n gp.add_argument(\n '--upload-files',\n type=str,\n nargs='*',\n metavar='FILE',\n help=,\n )\n\n gp.add_argument(\n '--disable-remote',\n action='store_true',\n default=False,\n help='If set, remote pod invocation is avoided. This is used by pods created by JinaD'\n if _SHOW_ALL_ARGS\n else argparse.SUPPRESS,\n )\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 182, "n_words": 53, "vocab_size": 44, "complexity": 2, "nloc": 33, "token_counts": 83, "n_ast_nodes": 141, "n_identifiers": 16, "random_cut": "def mixin_distributed_feature_parser(parser):\n \n\n gp = add_arg_group(parser, title='Distributed')\n\n gp.add_argument(\n '--quiet-remote-logs',\n action='store_true',\n default=False,\n help='Do not display the streaming of remote logs on local console',\n )\n\n gp.add_argument(\n '--upload-files',\n type=str,\n nargs='*',\n metavar='FILE',\n help=,\n )\n\n gp.add_argument(\n ", "d_id": 1999, "documentation": { "docstring": "Mixing in arguments required by :class:`BaseDeployment` into the given parser.\n :param parser: the parser instance to which we add arguments\n \nThe files on the host to be uploaded to the remote\nworkspace. This can be useful when your Deployment has more\nfile dependencies beyond a single YAML file, e.g.\nPython files, data files.\n\nNote,\n- currently only flatten structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.\n- by default, `--uses` YAML file is always uploaded.\n- uploaded files are by default isolated across the runs. 
To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.\n", "n_words": 121, "vocab_size": 90, "n_whitespaces": 119, "language": "en" } }, { "id": 263858, "commit_id": "83193a1897232e133966d15e30758a149de50407", "repo": "pyinstaller", "path": "PyInstaller/depend/analysis.py", "file_name": "analysis.py", "fun_name": "get_bootstrap_modules", "commit_message": "utils: remove compile_py_files helper\n\nThe only remaining use is in `PYZ.__init__`, and that can be\nreplaced with a loop that uses the new `compile_pymodule` helper.\n\nThis change, however, requires `get_boostrap_modules()` helper\nfrom `PyInstaller.depend˙ to return paths to source `.py`\nfiles instead of non-existing `.pyc` files (the old\n`compile_py_files` helper went to great lengths to convert\nthese back to source file names...).", "code": "def get_bootstrap_modules():\n \n # Import 'struct' modules to get real paths to module file names.\n mod_struct = __import__('struct')\n # Basic modules necessary for the bootstrap process.\n loader_mods = TOC()\n loaderpath = os.path.join(HOMEPATH, 'PyInstaller', 'loader')\n # On some platforms (Windows, Debian/Ubuntu) '_struct' and zlib modules are built-in modules (linked statically)\n # and thus does not have attribute __file__. 'struct' module is required for reading Python bytecode from\n # executable. 'zlib' is required to decompress this bytecode.\n for mod_name in ['_struct', 'zlib']:\n mod = __import__(mod_name) # C extension.\n if hasattr(mod, '__file__'):\n mod_file = os.path.abspath(mod.__file__)\n if os.path.basename(os.path.dirname(mod_file)) == 'lib-dynload':\n # Divert extensions originating from python's lib-dynload directory, to match behavior of #5604.\n mod_name = os.path.join('lib-dynload', mod_name)\n loader_mods.append((mod_name, mod_file, 'EXTENSION'))\n # NOTE:These modules should be kept simple without any complicated dependencies.\n loader_mods += [\n ('struct', os.path.abspath(mod_struct.__file__), 'PYMODULE'),\n ('pyimod01_os_path', os.path.join(loaderpath, 'pyimod01_os_path.py'), 'PYMODULE'),\n ('pyimod02_archive', os.path.join(loaderpath, 'pyimod02_archive.py'), 'PYMODULE'),\n ('pyimod03_importers', os.path.join(loaderpath, 'pyimod03_importers.py'), 'PYMODULE'),\n ('pyimod04_ctypes', os.path.join(loaderpath, 'pyimod04_ctypes.py'), 'PYMODULE'),\n ('pyiboot01_bootstrap', os.path.join(loaderpath, 'pyiboot01_bootstrap.py'), 'PYSOURCE'),\n ]\n return loader_mods\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 317, "n_words": 155, "vocab_size": 116, "complexity": 4, "nloc": 20, "token_counts": 216, "n_ast_nodes": 372, "n_identifiers": 19, "random_cut": "def get_bootstrap_modules():\n \n # Import 'struct' modules to get real paths to module file names.\n mod_struct = __import__('struct')\n # Basic modules necessary for the bootstrap process.\n loader_mods = TOC()\n loaderpath = os.path.join(HOMEPATH, 'PyInstaller', 'loader')\n # On some platforms (Windows, Debian/Ubuntu) '_struct' and zlib modules are built-in modules (linked statically)\n # and thus does not have attribute __file__. 'struct' module is required for reading Python bytecode from\n # executable. 
'zlib' is required to decompress this bytecode.\n for mod_name in ['_struct', 'zlib']:\n mod = __import__(mod_name) # C extension.\n if hasattr(mod, '__file__'):\n mod_file = os.path.abspath(mod.__file__)\n if os.path.basename(os.path.dirname(mod_file)) == 'lib-dynload':\n # Divert extensions originating from python's lib-dynload directory, to match behavior of #5604.\n mod_name = os.path.join('lib-dynload', mod_name)\n loader_mods.append((mod_name, mod_file, 'EXTEN", "d_id": 77473, "documentation": { "docstring": "\n Get TOC with the bootstrapping modules and their dependencies.\n :return: TOC with modules\n ", "n_words": 13, "vocab_size": 10, "n_whitespaces": 23, "language": "en" } }, { "id": 203823, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/db/backends/postgis/adapter.py", "file_name": "adapter.py", "fun_name": "getquoted", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def getquoted(self):\n \n if self.is_geometry:\n # Psycopg will figure out whether to use E'\\\\000' or '\\000'.\n return b\"%s(%s)\" % (\n b\"ST_GeogFromWKB\" if self.geography else b\"ST_GeomFromEWKB\",\n self._adapter.getquoted(),\n )\n else:\n # For rasters, add explicit type cast to WKB string.\n return b\"'%s'::raster\" % self.ewkb.encode()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 147, "n_words": 41, "vocab_size": 36, "complexity": 3, "nloc": 8, "token_counts": 48, "n_ast_nodes": 81, "n_identifiers": 7, "random_cut": "def getquoted(self):\n ", "d_id": 50544, "documentation": { "docstring": "\n Return a properly quoted string for use in PostgreSQL/PostGIS.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 100650, "commit_id": "0d23714875f81ddabdbe8f4e40bef6e5f29eeb19", "repo": "faceswap", "path": "scripts/extract.py", "file_name": "extract.py", "fun_name": "_set_skip_list", "commit_message": "bugfix: extract - stop progress bar from going over max value", "code": "def _set_skip_list(self) -> None:\n \n if self._skip_num == 1 and not self._alignments.data:\n logger.debug(\"No frames to be skipped\")\n return\n skip_list = []\n for idx, filename in enumerate(self._images.file_list):\n if idx % self._skip_num != 0:\n logger.trace(\"Adding image '%s' to skip list due to extract_every_n = %s\",\n filename, self._skip_num)\n skip_list.append(idx)\n # Items may be in the alignments file if skip-existing[-faces] is selected\n elif os.path.basename(filename) in self._alignments.data:\n self._existing_count += 1\n logger.trace(\"Removing image: '%s' due to previously existing\", filename)\n skip_list.append(idx)\n if self._existing_count != 0:\n logger.info(\"Skipping %s frames due to skip_existing/skip_existing_faces.\",\n self._existing_count)\n logger.debug(\"Adding skip list: %s\", skip_list)\n self._images.add_skip_list(skip_list)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 331, "n_words": 90, "vocab_size": 66, "complexity": 7, "nloc": 25, "token_counts": 142, "n_ast_nodes": 236, "n_identifiers": 21, "random_cut": "def _set_skip_list(self) -> None:\n \n if self._skip_num == 1 and not self._alignments.data:\n logger.debug(\"No frames to be skipped\")\n return\n skip_list = []\n for idx, filename in enumerate(self._images.file_list):\n if idx % self._skip_num != 0:\n logger.trace(\"Adding image 
'%s' to skip list due to extract_every_n = %s\",\n filename, self._skip_num)\n skip_list.append", "d_id": 20112, "documentation": { "docstring": " Add the skip list to the image loader\n\n Checks against `extract_every_n` and the existence of alignments data (can exist if\n `skip_existing` or `skip_existing_faces` has been provided) and compiles a list of frame\n indices that should not be processed, providing these to :class:`lib.image.ImagesLoader`.\n ", "n_words": 42, "vocab_size": 36, "n_whitespaces": 71, "language": "en" } }, { "id": 59781, "commit_id": "8ac2498a0203d3ccb9070d30d7b3a0c475afab92", "repo": "prefect", "path": "tests/conftest.py", "file_name": "conftest.py", "fun_name": "caplog", "commit_message": "Update logging setup to support incremental configuration (#7569)", "code": "def caplog(caplog):\n \n\n config = setup_logging()\n\n for name, logger_config in config[\"loggers\"].items():\n if not logger_config.get(\"propagate\", True):\n logger = get_logger(name)\n logger.handlers.append(caplog.handler)\n\n yield caplog\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 61, "n_words": 20, "vocab_size": 19, "complexity": 3, "nloc": 7, "token_counts": 54, "n_ast_nodes": 94, "n_identifiers": 12, "random_cut": "def caplog(caplog):\n \n\n config = setup_logging()\n\n for name, logg", "d_id": 11947, "documentation": { "docstring": "\n Overrides caplog to apply to all of our loggers that do not propagate and\n consequently would not be captured by caplog.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 31, "language": "en" } }, { "id": 101266, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "tools/manual/faceviewer/viewport.py", "file_name": "viewport.py", "fun_name": "_show_mesh", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def _show_mesh(self, mesh_ids, face_index, detected_face, top_left):\n \n state = \"normal\" if (self._tk_vars[\"selected_editor\"].get() != \"Mask\" or\n self._optional_annotations[\"mesh\"]) else \"hidden\"\n kwargs = dict(polygon=dict(fill=\"\", width=2, outline=self._canvas.control_colors[\"Mesh\"]),\n line=dict(fill=self._canvas.control_colors[\"Mesh\"], width=2))\n\n edited = (self._tk_vars[\"edited\"].get() and\n self._tk_vars[\"selected_editor\"].get() not in (\"Mask\", \"View\"))\n landmarks = self._viewport.get_landmarks(self.frame_index,\n face_index,\n detected_face,\n top_left,\n edited)\n for key, kwarg in kwargs.items():\n for idx, mesh_id in enumerate(mesh_ids[key]):\n self._canvas.coords(mesh_id, *landmarks[key][idx].flatten())\n self._canvas.itemconfig(mesh_id, state=state, **kwarg)\n self._canvas.addtag_withtag(f\"active_mesh_{key}\", mesh_id)\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 413, "n_words": 57, "vocab_size": 49, "complexity": 6, "nloc": 17, "token_counts": 212, "n_ast_nodes": 340, "n_identifiers": 34, "random_cut": "def _show_mesh(self, mesh_ids, face_index, detected_face, top_left):\n \n state = \"normal\" if (self._tk_vars[\"selected_editor\"].get() != \"Mask\" or\n self._optional_annotations[\"mesh\"]) else 
\"hidden\"\n kwargs = dict(polygon=dict(fill=\"\", width=2, outline=self._canvas.control_colors[\"Mesh\"]),\n line=dict(fill=self._canvas.control_colors[\"Mesh\"], width=2))\n\n edi", "d_id": 20685, "documentation": { "docstring": " Display the mesh annotation for the given face, at the given location.\n\n Parameters\n ----------\n mesh_ids: dict\n Dictionary containing the `polygon` and `line` tkinter canvas identifiers that make up\n the mesh for the given face\n face_index: int\n The face index within the frame for the given face\n detected_face: :class:`~lib.align.DetectedFace`\n The detected face object that contains the landmarks for generating the mesh\n top_left: tuple\n The (x, y) top left co-ordinates of the mesh's bounding box\n ", "n_words": 73, "vocab_size": 49, "n_whitespaces": 178, "language": "en" } }, { "id": 144666, "commit_id": "48adb6f7bb335b28fb0fb0d1190bd6c5dfc8ddfa", "repo": "ray", "path": "python/ray/serve/deployment_state.py", "file_name": "deployment_state.py", "fun_name": "_get_curr_status", "commit_message": "[serve] Introduce DeploymentStatus, poll for statuses instead of using async goals (#22121)", "code": "def _get_curr_status(self) -> Tuple[DeploymentStatusInfo, bool]:\n \n # TODO(edoakes): we could make this more efficient in steady-state by\n # having a \"healthy\" flag that gets flipped if an update or replica\n # failure happens.\n\n target_version = self._target_version\n target_replica_count = self._target_replicas\n\n all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING])\n running_at_target_version_replica_cnt = self._replicas.count(\n states=[ReplicaState.RUNNING], version=target_version\n )\n\n failed_to_start_count = self._replica_constructor_retry_counter\n failed_to_start_threshold = min(\n MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, target_replica_count * 3\n )\n\n # Got to make a call to complete current deploy() goal after\n # start failure threshold reached, while we might still have\n # pending replicas in current goal.\n if (\n failed_to_start_count >= failed_to_start_threshold\n and failed_to_start_threshold != 0\n ):\n if running_at_target_version_replica_cnt > 0:\n # At least one RUNNING replica at target state, partial\n # success; We can stop tracking constructor failures and\n # leave it to the controller to fully scale to target\n # number of replicas and only return as completed once\n # reached target replica count\n self._replica_constructor_retry_counter = -1\n else:\n return (\n DeploymentStatusInfo(\n status=DeploymentStatus.FAILED,\n message=(\n \"The Deployment constructor failed \"\n f\"{failed_to_start_count} times in a row. 
See \"\n \"logs for details.\"\n ),\n ),\n False,\n )\n\n # If we have pending ops, the current goal is *not* ready.\n if (\n self._replicas.count(\n states=[\n ReplicaState.STARTING,\n ReplicaState.UPDATING,\n ReplicaState.RECOVERING,\n ReplicaState.STOPPING,\n ]\n )\n == 0\n ):\n # Check for deleting.\n if target_replica_count == 0 and all_running_replica_cnt == 0:\n return DeploymentStatusInfo(status=DeploymentStatus.UPDATING), True\n\n # Check for a non-zero number of deployments.\n elif target_replica_count == running_at_target_version_replica_cnt:\n return DeploymentStatusInfo(status=DeploymentStatus.RUNNING), False\n\n return (\n DeploymentStatusInfo(\n status=DeploymentStatus.UPDATING,\n message=(\n f\"Running replicas of target version: \"\n f\"{running_at_target_version_replica_cnt}, target \"\n \"replicas: {target_replica_count}\"\n ),\n ),\n False,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1143, "n_words": 248, "vocab_size": 151, "complexity": 8, "nloc": 66, "token_counts": 216, "n_ast_nodes": 356, "n_identifiers": 30, "random_cut": "def _get_curr_status(self) -> Tuple[DeploymentStatusInfo, bool]:\n \n # TODO(edoakes): we could make this more efficient in steady-state by\n # having a \"healthy\" flag that gets flipped if an update or replica\n # failure happens.\n\n target_version = self._target_version\n target_replica_count = self._target_replicas\n\n all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING])\n running_at_target_version_replica_cnt = self._replicas.count(\n states=[ReplicaState.RUNNING], version=target_version\n )\n\n failed_to_start_count = self._replica_constructor_retry_counter\n failed_to_start_threshold = min(\n MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_CO", "d_id": 33279, "documentation": { "docstring": "Get the current deployment status.\n\n Checks the difference between the target vs. running replica count for\n the target version.\n\n TODO(edoakes): we should report the status as FAILED if replicas are\n repeatedly failing health checks. 
Need a reasonable heuristic here.\n\n Returns:\n (DeploymentStatusInfo, was_deleted)\n ", "n_words": 42, "vocab_size": 37, "n_whitespaces": 95, "language": "en" } }, { "id": 322996, "commit_id": "93cae49c0c572b5c1ac972759140fbe924b0374d", "repo": "PaddleNLP", "path": "examples/model_interpretation/task/transformer.py", "file_name": "transformer.py", "fun_name": "generate_square_subsequent_mask", "commit_message": "Add NLP model interpretation (#1752)\n\n* upload NLP interpretation\r\n\r\n* fix problems and relocate project\r\n\r\n* remove abandoned picture\r\n\r\n* remove abandoned picture\r\n\r\n* fix dead link in README\r\n\r\n* fix dead link in README\r\n\r\n* fix code style problems\r\n\r\n* fix CR round 1\r\n\r\n* remove .gitkeep files\r\n\r\n* fix code style\r\n\r\n* fix file encoding problem\r\n\r\n* fix code style\r\n\r\n* delete duplicated files due to directory rebuild\r\n\r\n* fix CR round 2\r\n\r\n* fix code style\r\n\r\n* fix ernie tokenizer\r\n\r\n* fix code style\r\n\r\n* fix problem from CR round 1\r\n\r\n* fix bugs\r\n\r\n* fix README\r\n\r\n* remove duplicated files\r\n\r\n* deal with diff of old and new tokenizer results\r\n\r\n* fix CR round 4\r\n\r\n* fix code style\r\n\r\n* add missing dependence\r\n\r\n* fix broken import path\r\n\r\n* move some data file to cloud\r\n\r\n* MRC upper case to lower case\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: binlinquge \r\nCo-authored-by: Guo Sheng ", "code": "def generate_square_subsequent_mask(self, length):\n \n return paddle.tensor.triu(\n (paddle.ones(\n (length, length), dtype=paddle.get_default_dtype()) * -np.inf),\n 1)\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 63, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 43, "n_ast_nodes": 67, "n_identifiers": 11, "random_cut": "def generate_square_subsequent_mask(self, length):\n \n return paddle.tensor.triu(\n (paddle.ones(\n (lengt", "d_id": 118325, "documentation": { "docstring": "\n Generate a square mask for the sequence. The mask ensures that the\n predictions for position i can depend only on the known outputs at\n positions less than i.\n\n Parameters:\n length (int|Tensor): The length of sequence.\n\n Returns:\n Tensor: Generated square mask according to the given length.\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.nn.layer.transformer import Transformer\n length = 5\n d_model, n_head, dim_feedforward = 8, 4, 64\n transformer_paddle = Transformer(\n d_model, n_head, dim_feedforward=dim_feedforward)\n mask = transformer_paddle.generate_square_subsequent_mask(length)\n print(mask)\n\n # [[ 0. -inf -inf -inf -inf]\n # [ 0. 0. -inf -inf -inf]\n # [ 0. 0. 0. -inf -inf]\n # [ 0. 0. 0. 0. -inf]\n # [ 0. 0. 0. 0. 
0.]]\n\n ", "n_words": 110, "vocab_size": 64, "n_whitespaces": 417, "language": "en" } }, { "id": 60728, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py", "file_name": "package_finder.py", "fun_name": "find_requirement", "commit_message": "upd; format", "code": "def find_requirement(self, req, upgrade):\n # type: (InstallRequirement, bool) -> Optional[InstallationCandidate]\n \n hashes = req.hashes(trust_internet=False)\n best_candidate_result = self.find_best_candidate(\n req.name, specifier=req.specifier, hashes=hashes,\n )\n best_candidate = best_candidate_result.best_candidate\n\n installed_version = None # type: Optional[_BaseVersion]\n if req.satisfied_by is not None:\n installed_version = parse_version(req.satisfied_by.version)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 118, "n_words": 37, "vocab_size": 30, "complexity": 11, "nloc": 55, "token_counts": 214, "n_ast_nodes": 106, "n_identifiers": 15, "random_cut": "def find_requirement(self, req, upgrade):\n # type: (InstallRequirement, bool) -> Optional[InstallationCandidate]\n \n hashes = req.hashes(trust_internet=False)\n best_candidate_result = self.find_best_candidate(\n req.name, specifier=req.specifier, hashes=hashes,\n )\n best_candidate = best_candidate_result.best_candidate\n\n ", "d_id": 12265, "documentation": { "docstring": "Try to find a Link matching req\n\n Expects req, an InstallRequirement and upgrade, a boolean\n Returns a InstallationCandidate if found,\n Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 53, "language": "en" } }, { "id": 20307, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/formatters/html.py", "file_name": "html.py", "fun_name": "wrap", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def wrap(self, source, outfile):\n \n if self.wrapcode:\n return self._wrap_div(self._wrap_pre(self._wrap_code(source)))\n else:\n return self._wrap_div(self._wrap_pre(source))\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 54, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 46, "n_ast_nodes": 75, "n_identifiers": 8, "random_cut": "def wrap(self, source, outfile):\n \n if s", "d_id": 3317, "documentation": { "docstring": "\n Wrap the ``source``, which is a generator yielding\n individual lines, in custom generators. See docstring\n for `format`. 
Can be overridden.\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 49, "language": "en" } }, { "id": 65846, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/education/api.py", "file_name": "api.py", "fun_name": "get_assessment_criteria", "commit_message": "style: format code with black", "code": "def get_assessment_criteria(course):\n\t\n\treturn frappe.get_all(\n\t\t\"Course Assessment Criteria\",\n\t\tfields=[\"assessment_criteria\", \"weightage\"],\n\t\tfilters={\"parent\": course},\n\t\torder_by=\"idx\",\n\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 6, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 34, "n_ast_nodes": 72, "n_identifiers": 8, "random_cut": "def get_assessment_criteria(course):\n\t\n\treturn frappe.get_all(\n\t\t\"Course Assessment Criteria\",\n\t\tfields=[\"assessment_criteria\", \"weightage\"],\n\t\tfilt", "d_id": 14035, "documentation": { "docstring": "Returns Assessmemt Criteria and their Weightage from Course Master.\n\n\t:param Course: Course\n\t", "n_words": 12, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 216002, "commit_id": "9354c15e0818715d055242d14b1308643a6918d7", "repo": "salt", "path": "salt/modules/mount.py", "file_name": "mount.py", "fun_name": "rm_filesystems", "commit_message": "Convert Py 2'isms to Python 3, and add tests for set_filesystems on AIX", "code": "def rm_filesystems(name, device, config=\"/etc/filesystems\"):\n \n modified = False\n view_lines = []\n\n if \"AIX\" not in __grains__[\"kernel\"]:\n return modified\n\n criteria = _FileSystemsEntry(name=name, dev=device)\n try:\n fsys_filedict = _filesystems(config, False)\n for fsys_view in fsys_filedict.items():\n try:\n if criteria.match(fsys_view):\n modified = True\n else:\n view_lines.append(fsys_view)\n\n except _FileSystemsEntry.ParseError:\n view_lines.append(fsys_view)\n\n except OSError as exc:\n raise CommandExecutionError(\"Couldn't read from {}: {}\".format(config, exc))\n\n if modified:\n try:\n with salt.utils.files.fopen(config, \"wb\") as ofile:\n for fsys_view in view_lines:\n entry = fsys_view[1]\n list_strgs = _FileSystemsEntry.dict_to_list_lines(entry)\n ofile.writelines(salt.utils.data.encode(list_strgs))\n except OSError as exc:\n raise CommandExecutionError(\"Couldn't write to {}: {}\".format(config, exc))\n except Exception as exc:\n raise CommandExecutionError(\"rm_filesystems error exception {exc}\")\n\n return modified\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 379, "n_words": 93, "vocab_size": 59, "complexity": 10, "nloc": 30, "token_counts": 194, "n_ast_nodes": 327, "n_identifiers": 33, "random_cut": "def rm_filesystems(name, device, config=\"/etc/filesystems\"):\n \n modified = False\n view_lines = []\n\n if \"AIX\" not in __grains__[\"kernel\"]:\n return modified\n\n criteria = _FileSystemsEntry(name=name, dev=device)\n try:\n fsys_filedict = _filesystems(config, False)\n for fsys_view in fsys_filedict.items():\n try:\n if criteria.match(fsys_view):\n modified = True\n else:\n view_lines.append(fsys_view)\n\n except _FileSystemsEntry.ParseError:\n view_lines.append(fsys_view)\n\n except OSError as exc:\n raise CommandExecutionError(\"Couldn't read from {}: {}\".format(config, exc))\n\n if modified:\n try:\n with 
salt.utils.files.fopen(config, \"wb\") as ofile:\n for fsys_view in view_lines:\n entry = fsys_view[1]\n list_strgs = _FileSystemsEntry.dict_to_list_lines(entry)\n ofile.writelines(salt.utils.data.encode(list_strgs))\n except OSError as exc:\n raise CommandExecutionError(\"Couldn't write to {}: {}\".format(config, exc))\n except Exception as exc:\n raise CommandExecutionError(\"rm_filesystems error exception {exc}\")\n\n return modified\n", "d_id": 54316, "documentation": { "docstring": "\n .. versionadded:: 2018.3.3\n\n Remove the mount point from the filesystems\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' mount.rm_filesystems /mnt/foo /dev/sdg\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 43, "language": "en" } }, { "id": 176622, "commit_id": "de1d00f20e0bc14f1cc911b3486e50225a8fa168", "repo": "networkx", "path": "networkx/generators/classic.py", "file_name": "classic.py", "fun_name": "complete_graph", "commit_message": "Adjust the usage of nodes_or_number decorator (#5599)\n\n* recorrect typo in decorators.py\r\n\r\n* Update tests to show troubles in current code\r\n\r\n* fix troubles with usage of nodes_or_number\r\n\r\n* fix typo\r\n\r\n* remove nodes_or_number where that makes sense\r\n\r\n* Reinclude nodes_or_numbers and add some tests for nonstandard usage\r\n\r\n* fix typowq\r\n\r\n* hopefully final tweaks (no behavior changes\r\n\r\n* Update test_classic.py\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def complete_graph(n, create_using=None):\n \n _, nodes = n\n G = empty_graph(nodes, create_using)\n if len(nodes) > 1:\n if G.is_directed():\n edges = itertools.permutations(nodes, 2)\n else:\n edges = itertools.combinations(nodes, 2)\n G.add_edges_from(edges)\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 87, "n_words": 29, "vocab_size": 22, "complexity": 3, "nloc": 10, "token_counts": 68, "n_ast_nodes": 110, "n_identifiers": 14, "random_cut": "def complete_graph(n, create_using=None):\n \n _, nodes = n\n G = empty_graph(nodes, create_using)\n if len(nodes) > 1:\n if G.is_directed():\n edges = itertools.permutations(nodes, 2)\n else:\n edges = itertools.combinations(nodes, 2)\n G.add_edges_from(edges)\n return G\n\n", "d_id": 42004, "documentation": { "docstring": "Return the complete graph `K_n` with n nodes.\n\n A complete graph on `n` nodes means that all pairs\n of distinct nodes have an edge connecting them.\n\n Parameters\n ----------\n n : int or iterable container of nodes\n If n is an integer, nodes are from range(n).\n If n is a container of nodes, those nodes appear in the graph.\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. 
If graph instance, then cleared before populated.\n\n Examples\n --------\n >>> G = nx.complete_graph(9)\n >>> len(G)\n 9\n >>> G.size()\n 36\n >>> G = nx.complete_graph(range(11, 14))\n >>> list(G.nodes())\n [11, 12, 13]\n >>> G = nx.complete_graph(4, nx.DiGraph())\n >>> G.is_directed()\n True\n\n ", "n_words": 106, "vocab_size": 76, "n_whitespaces": 186, "language": "en" } }, { "id": 261735, "commit_id": "754bd5245aa46b89a1d686a3326c2b853012ff3e", "repo": "scikit-learn", "path": "sklearn/pipeline.py", "file_name": "pipeline.py", "fun_name": "fit_predict", "commit_message": "MAINT validate parameters of Pipeline (#25133)", "code": "def fit_predict(self, X, y=None, **fit_params):\n \n self._validate_params()\n fit_params_steps = self._check_fit_params(**fit_params)\n Xt = self._fit(X, y, **fit_params_steps)\n\n fit_params_last_step = fit_params_steps[self.steps[-1][0]]\n with _print_elapsed_time(\"Pipeline\", self._log_message(len(self.steps) - 1)):\n y_pred = self.steps[-1][1].fit_predict(Xt, y, **fit_params_last_step)\n return y_pred\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 89, "n_words": 29, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 101, "n_ast_nodes": 159, "n_identifiers": 16, "random_cut": "def fit_predict(self, X, y=None, **fit_params):\n \n self._validate_params()\n fit_params_steps = self._check_fit_params(**fit_params)\n Xt = self._fit(X, y, **fit_params_steps)\n\n fit_params_last_step = fit_params_steps[self.steps[-1][0]]\n with _print_elapsed_time(\"Pipeline\", self._log_message(len(self.steps) - 1)):\n y_pred = self.steps[-1][1].fit_predict(Xt, y, **fit_params_last_step)\n r", "d_id": 76967, "documentation": { "docstring": "Transform the data, and apply `fit_predict` with the final estimator.\n\n Call `fit_transform` of each transformer in the pipeline. The\n transformed data are finally passed to the final estimator that calls\n `fit_predict` method. Only valid if the final estimator implements\n `fit_predict`.\n\n Parameters\n ----------\n X : iterable\n Training data. Must fulfill input requirements of first step of\n the pipeline.\n\n y : iterable, default=None\n Training targets. Must fulfill label requirements for all steps\n of the pipeline.\n\n **fit_params : dict of string -> object\n Parameters passed to the ``fit`` method of each step, where\n each parameter name is prefixed such that parameter ``p`` for step\n ``s`` has key ``s__p``.\n\n Returns\n -------\n y_pred : ndarray\n Result of calling `fit_predict` on the final estimator.\n ", "n_words": 118, "vocab_size": 79, "n_whitespaces": 297, "language": "en" } }, { "id": 111358, "commit_id": "a322d6d5f2f85c2da6cded4fcd6143d41b5a9e96", "repo": "spaCy", "path": "spacy/tests/pipeline/test_entity_ruler.py", "file_name": "test_entity_ruler.py", "fun_name": "test_issue4849", "commit_message": "Add SpanRuler component (#9880)\n\n* Add SpanRuler component\r\n\r\nAdd a `SpanRuler` component similar to `EntityRuler` that saves a list\r\nof matched spans to `Doc.spans[spans_key]`. 
The matches from the token\r\nand phrase matchers are deduplicated and sorted before assignment but\r\nare not otherwise filtered.\r\n\r\n* Update spacy/pipeline/span_ruler.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Fix cast\r\n\r\n* Add self.key property\r\n\r\n* Use number of patterns as length\r\n\r\n* Remove patterns kwarg from init\r\n\r\n* Update spacy/tests/pipeline/test_span_ruler.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Add options for spans filter and setting to ents\r\n\r\n* Add `spans_filter` option as a registered function'\r\n* Make `spans_key` optional and if `None`, set to `doc.ents` instead of\r\n`doc.spans[spans_key]`.\r\n\r\n* Update and generalize tests\r\n\r\n* Add test for setting doc.ents, fix key property type\r\n\r\n* Fix typing\r\n\r\n* Allow independent doc.spans and doc.ents\r\n\r\n* If `spans_key` is set, set `doc.spans` with `spans_filter`.\r\n* If `annotate_ents` is set, set `doc.ents` with `ents_fitler`.\r\n * Use `util.filter_spans` by default as `ents_filter`.\r\n * Use a custom warning if the filter does not work for `doc.ents`.\r\n\r\n* Enable use of SpanC.id in Span\r\n\r\n* Support id in SpanRuler as Span.id\r\n\r\n* Update types\r\n\r\n* `id` can only be provided as string (already by `PatternType`\r\ndefinition)\r\n\r\n* Update all uses of Span.id/ent_id in Doc\r\n\r\n* Rename Span id kwarg to span_id\r\n\r\n* Update types and docs\r\n\r\n* Add ents filter to mimic EntityRuler overwrite_ents\r\n\r\n* Refactor `ents_filter` to take `entities, spans` args for more\r\n filtering options\r\n* Give registered filters more descriptive names\r\n* Allow registered `filter_spans` filter\r\n (`spacy.first_longest_spans_filter.v1`) to take any number of\r\n `Iterable[Span]` objects as args so it can be used for spans filter\r\n or ents filter\r\n\r\n* Implement future entity ruler as span ruler\r\n\r\nImplement a compatible `entity_ruler` as `future_entity_ruler` using\r\n`SpanRuler` as the underlying component:\r\n* Add `sort_key` and `sort_reverse` to allow the sorting behavior to be\r\n customized. (Necessary for the same sorting/filtering as in\r\n `EntityRuler`.)\r\n* Implement `overwrite_overlapping_ents_filter` and\r\n `preserve_existing_ents_filter` to support\r\n `EntityRuler.overwrite_ents` settings.\r\n* Add `remove_by_id` to support `EntityRuler.remove` functionality.\r\n* Refactor `entity_ruler` tests to parametrize all tests to test both\r\n `entity_ruler` and `future_entity_ruler`\r\n* Implement `SpanRuler.token_patterns` and `SpanRuler.phrase_patterns`\r\n properties.\r\n\r\nAdditional changes:\r\n\r\n* Move all config settings to top-level attributes to avoid duplicating\r\n settings in the config vs. `span_ruler/cfg`. 
(Also avoids a lot of\r\n casting.)\r\n\r\n* Format\r\n\r\n* Fix filter make method name\r\n\r\n* Refactor to use same error for removing by label or ID\r\n\r\n* Also provide existing spans to spans filter\r\n\r\n* Support ids property\r\n\r\n* Remove token_patterns and phrase_patterns\r\n\r\n* Update docstrings\r\n\r\n* Add span ruler docs\r\n\r\n* Fix types\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Move sorting into filters\r\n\r\n* Check for all tokens in seen tokens in entity ruler filters\r\n\r\n* Remove registered sort key\r\n\r\n* Set Token.ent_id in a backwards-compatible way in Doc.set_ents\r\n\r\n* Remove sort options from API docs\r\n\r\n* Update docstrings\r\n\r\n* Rename entity ruler filters\r\n\r\n* Fix and parameterize scoring\r\n\r\n* Add id to Span API docs\r\n\r\n* Fix typo in API docs\r\n\r\n* Include explicit labeled=True for scorer\r\n\r\nCo-authored-by: Sofie Van Landeghem ", "code": "def test_issue4849(entity_ruler_factory):\n nlp = English()\n patterns = [\n {\"label\": \"PERSON\", \"pattern\": \"joe biden\", \"id\": \"joe-biden\"},\n {\"label\": \"PERSON\", \"pattern\": \"bernie sanders\", \"id\": \"bernie-sanders\"},\n ]\n ruler = nlp.add_pipe(\n entity_ruler_factory,\n name=\"entity_ruler\",\n config={\"phrase_matcher_attr\": \"LOWER\"},\n )\n ruler.add_patterns(patterns)\n text = \n # USING 1 PROCESS\n count_ents = 0\n for doc in nlp.pipe([text], n_process=1):\n count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])\n assert count_ents == 2\n # USING 2 PROCESSES\n if isinstance(get_current_ops, NumpyOps):\n count_ents = 0\n for doc in nlp.pipe([text], n_process=2):\n count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])\n assert count_ents == 2\n\n\n@pytest.mark.issue(5918)\n@pytest.mark.parametrize(\"entity_ruler_factory\", ENTITY_RULERS)", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "@pytest.mark.issue(5918)\n@pytest.mark.parametrize(\"entity_ruler_factory\", ENTITY_RULERS)", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 205, "n_words": 94, "vocab_size": 56, "complexity": 8, "nloc": 25, "token_counts": 166, "n_ast_nodes": 313, "n_identifiers": 27, "random_cut": "def test_issue4849(entity_ruler_factory):\n nlp = English()\n patterns = [\n {\"label\": \"PERSON\", \"pattern\": \"joe biden\", \"id\": \"joe-biden\"},\n {\"label\": \"PERSON\", \"pattern\": \"bernie sanders\", \"id\": \"bernie-sanders\"},\n ]\n ruler = nlp.add_pipe(\n entity_ruler_factory,\n name=\"entity_ruler\",\n config={\"phrase_matcher_attr\": \"LOWER\"},\n )\n ruler.add_patterns(patterns)\n text = \n # USING 1 PROCESS\n count_ents = 0\n for doc in nlp.pipe([text], n_process=1):\n count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])\n asser", "d_id": 24386, "documentation": { "docstring": "\n The left is starting to take aim at Democratic front-runner Joe Biden.\n Sen. 
Bernie Sanders joined in her criticism: \"There is no 'middle ground' when it comes to climate policy.\"\n ", "n_words": 30, "vocab_size": 28, "n_whitespaces": 40, "language": "en" } }, { "id": 264576, "commit_id": "bbdeae0ed9bcc06fb96ffa2970272e1a3447448c", "repo": "netbox", "path": "netbox/netbox/api/viewsets/__init__.py", "file_name": "__init__.py", "fun_name": "get_serializer_context", "commit_message": "Move CustomFieldModelViewSet functionality into NetBoxModelViewSet", "code": "def get_serializer_context(self):\n \n context = super().get_serializer_context()\n\n if hasattr(self.queryset.model, 'custom_fields'):\n content_type = ContentType.objects.get_for_model(self.queryset.model)\n context.update({\n 'custom_fields': content_type.custom_fields.all(),\n })\n\n return context\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 93, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 8, "token_counts": 60, "n_ast_nodes": 103, "n_identifiers": 14, "random_cut": "def get_serializer_context(self):\n \n context = super().get_serializer_context()\n\n if hasattr(self.queryset.model, 'custom_fields'):\n content_type = ContentType.objects.get_for_model(self.queryset.model)\n context.update({\n 'custom_fields': content_type.custom_fields.all(),\n ", "d_id": 77757, "documentation": { "docstring": "\n For models which support custom fields, populate the `custom_fields` context.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 223398, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/util.py", "file_name": "util.py", "fun_name": "execute", "commit_message": "add python 3.10.4 for windows", "code": "def execute (func, args, msg=None, verbose=0, dry_run=0):\n \n if msg is None:\n msg = \"%s%r\" % (func.__name__, args)\n if msg[-2:] == ',)': # correct for singleton tuple\n msg = msg[0:-2] + ')'\n\n log.info(msg)\n if not dry_run:\n func(*args)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 87, "n_words": 36, "vocab_size": 31, "complexity": 4, "nloc": 8, "token_counts": 72, "n_ast_nodes": 120, "n_identifiers": 9, "random_cut": "def execute (func, args, msg=None, verbose=0, dry_run=0):\n \n if msg is None:\n msg = \"%s%r\" % (func.__name__, args)\n if msg[-2:] == ',)': # correct for singleton tuple\n msg = msg[0:-2] + ')'\n\n log.info(msg)\n if not dry_run:\n ", "d_id": 56885, "documentation": { "docstring": "Perform some action that affects the outside world (eg. by\n writing to the filesystem). Such actions are special because they\n are disabled by the 'dry_run' flag. 
This method takes care of all\n that bureaucracy for you; all you have to do is supply the\n function to call and an argument tuple for it (to embody the\n \"external action\" being performed), and an optional message to\n print.\n ", "n_words": 66, "vocab_size": 52, "n_whitespaces": 90, "language": "en" } }, { "id": 272035, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/feature_column/dense_features.py", "file_name": "dense_features.py", "fun_name": "call", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def call(self, features, cols_to_output_tensors=None, training=None):\n \n if training is None:\n training = backend.learning_phase()\n if not isinstance(features, dict):\n raise ValueError(\n \"We expected a dictionary here. Instead we got: \", features\n )\n transformation_cache = (\n tf.__internal__.feature_column.FeatureTransformationCache(features)\n )\n output_tensors = []\n for column in self._feature_columns:\n with backend.name_scope(column.name):\n try:\n tensor = column.get_dense_tensor(\n transformation_cache,\n self._state_manager,\n training=training,\n )\n except TypeError:\n tensor = column.get_dense_tensor(\n transformation_cache, self._state_manager\n )\n processed_tensors = self._process_dense_tensor(column, tensor)\n if cols_to_output_tensors is not None:\n cols_to_output_tensors[column] = processed_tensors\n output_tensors.append(processed_tensors)\n return self._verify_and_concat_tensors(output_tensors)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 462, "n_words": 74, "vocab_size": 55, "complexity": 6, "nloc": 28, "token_counts": 148, "n_ast_nodes": 233, "n_identifiers": 28, "random_cut": "def call(self, features, cols_to_output_tensors=None, training=None):\n \n if training is None:\n training = backend.learning_phase()\n if not isinstance(features, dict):\n raise ValueError(\n \"We expected a dictionary here. Instead we got: \", features\n )\n transformation_cache = (\n tf.__internal__.feature_column.FeatureTransformationCache(features)\n )\n output_tens", "d_id": 80946, "documentation": { "docstring": "Returns a dense tensor corresponding to the `feature_columns`.\n\n Example usage:\n\n >>> t1 = tf.feature_column.embedding_column(\n ... tf.feature_column.categorical_column_with_hash_bucket(\"t1\", 2),\n ... dimension=8)\n >>> t2 = tf.feature_column.numeric_column('t2')\n >>> feature_layer = tf.compat.v1.keras.layers.DenseFeatures([t1, t2])\n >>> features = {\"t1\": tf.constant([\"a\", \"b\"]), \"t2\": tf.constant([1, 2])}\n >>> dense_tensor = feature_layer(features, training=True)\n\n Args:\n features: A mapping from key to tensors. `FeatureColumn`s look up via\n these keys. For example `numeric_column('price')` will look at 'price'\n key in this dict. Values can be a `SparseTensor` or a `Tensor` depends\n on corresponding `FeatureColumn`.\n cols_to_output_tensors: If not `None`, this will be filled with a dict\n mapping feature columns to output tensors created.\n training: Python boolean or None, indicating whether to the layer is being\n run in training mode. This argument is passed to the call method of any\n `FeatureColumn` that takes a `training` argument. For example, if a\n `FeatureColumn` performed dropout, the column could expose a `training`\n argument to control whether the dropout should be applied. 
If `None`,\n defaults to `tf.keras.backend.learning_phase()`.\n\n\n Returns:\n A `Tensor` which represents input layer of a model. Its shape\n is (batch_size, first_layer_dimension) and its dtype is `float32`.\n first_layer_dimension is determined based on given `feature_columns`.\n\n Raises:\n ValueError: If features are not a dictionary.\n ", "n_words": 191, "vocab_size": 134, "n_whitespaces": 443, "language": "en" } }, { "id": 167600, "commit_id": "f538568afc2c76c2d738d32e3544cf9fe6742960", "repo": "pandas", "path": "pandas/compat/pickle_compat.py", "file_name": "pickle_compat.py", "fun_name": "patch_pickle", "commit_message": "TYP: misc return type annotations (#47558)", "code": "def patch_pickle() -> Iterator[None]:\n \n orig_loads = pkl.loads\n try:\n setattr(pkl, \"loads\", loads)\n yield\n finally:\n setattr(pkl, \"loads\", orig_loads)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 10, "token_counts": 36, "n_ast_nodes": 64, "n_identifiers": 6, "random_cut": "def patch_pickle() -> Iterator[None]:\n \n orig_loads = pkl.loads\n try:\n setattr(pkl, \"loads\", loads)\n yield\n finally:\n setattr(pkl, \"loads\", orig_loads)\n", "d_id": 40053, "documentation": { "docstring": "\n Temporarily patch pickle to use our unpickler.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 46791, "commit_id": "4ffd4f09532fceb67675fce4c1f5cd383eff992e", "repo": "airflow", "path": "dev/breeze/src/airflow_breeze/utils/run_utils.py", "file_name": "run_utils.py", "fun_name": "get_filesystem_type", "commit_message": "Prepare Breeze2 for prime time :) (#22713)\n\nThis is a review and clean-up for all the parameters and\r\ncommands for Breeze2 in order to prepare it for being\r\nused by the contribugors.\r\n\r\nThere are various small fixes here and there, removal\r\nof duplicated code, refactoring and moving code around\r\nas well as cleanup and review all the parameters used\r\nfor all implemented commands.\r\n\r\nThe parameters, default values and their behaviours were\r\nupdated to match \"new\" life of Breeze rather than old\r\none.\r\n\r\nSome improvements are made to the autocomplete and\r\nclick help messages printed. Full list of choices is\r\nalways displayed, parameters are groups according to\r\ntheir target audience, and they were sorted according\r\nto importance and frequency of use.\r\n\r\nVarious messages have been colourised according to their\r\nmeaning - warnings as yellow, errors as red and\r\ninformational messages as bright_blue.\r\n\r\nThe `dry-run` option has been added to just show what\r\nwould have been run without actually running some\r\npotentially \"write\" commands (read commands are still\r\nexecuted) so that you can easily verify and manually\r\ncopy and execute the commands with option to modify\r\nthem before. 
The `dry_run` and `verbose` options are\r\nnow used for all commands.\r\n\r\nThe \"main\" command now runs \"shell\" by default similarly\r\nas the original Breeze.\r\n\r\nAll \"shortcut\" parameters have been standardized - i.e\r\ncommon options (verbose/dry run/help) have one and all\r\ncommon flags that are likely to be used often have an\r\nassigned shortcute.\r\n\r\nThe \"stop\" and \"cleanup\" command have been added\r\nas they are necessary for average user to complete the\r\nregular usage cycle.\r\n\r\nDocumentation for all the important methods have been\r\nupdated.", "code": "def get_filesystem_type(filepath):\n \n # We import it locally so that click autocomplete works\n import psutil\n\n root_type = \"unknown\"\n for part in psutil.disk_partitions():\n if part.mountpoint == '/':\n root_type = part.fstype\n continue\n if filepath.startswith(part.mountpoint):\n return part.fstype\n\n return root_type\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 100, "n_words": 35, "vocab_size": 28, "complexity": 4, "nloc": 10, "token_counts": 49, "n_ast_nodes": 87, "n_identifiers": 9, "random_cut": "def get_filesystem_type(filepath):\n \n # We import it locally so that click autocomplete wor", "d_id": 8995, "documentation": { "docstring": "\n Determine the type of filesystem used - we might want to use different parameters if tmpfs is used.\n :param filepath: path to check\n :return: type of filesystem\n ", "n_words": 27, "vocab_size": 23, "n_whitespaces": 40, "language": "en" } }, { "id": 176480, "commit_id": "f6755ffa00211b523c6c0bec5398bc6c3c43c8b1", "repo": "networkx", "path": "networkx/algorithms/similarity.py", "file_name": "similarity.py", "fun_name": "panther_similarity", "commit_message": "Update black (#5438)\n\n* CI: sync up black dev requirements version with precommit\r\n\r\n* Run black\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None):\n r\n import numpy as np\n\n num_nodes = G.number_of_nodes()\n if num_nodes < k:\n warnings.warn(\n f\"Number of nodes is {num_nodes}, but requested k is {k}. 
\"\n \"Setting k to number of nodes.\"\n )\n k = num_nodes\n # According to [1], they empirically determined\n # a good value for ``eps`` to be sqrt( 1 / |E| )\n if eps is None:\n eps = np.sqrt(1.0 / G.number_of_edges())\n\n inv_node_map = {name: index for index, name in enumerate(G.nodes)}\n node_map = np.array(G)\n\n # Calculate the sample size ``R`` for how many paths\n # to randomly generate\n t_choose_2 = math.comb(path_length, 2)\n sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta)))\n index_map = {}\n _ = list(\n generate_random_paths(\n G, sample_size, path_length=path_length, index_map=index_map\n )\n )\n S = np.zeros(num_nodes)\n\n inv_sample_size = 1 / sample_size\n\n source_paths = set(index_map[source])\n\n # Calculate the path similarities\n # between ``source`` (v) and ``node`` (v_j)\n # using our inverted index mapping of\n # vertices to paths\n for node, paths in index_map.items():\n # Only consider paths where both\n # ``node`` and ``source`` are present\n common_paths = source_paths.intersection(paths)\n S[inv_node_map[node]] = len(common_paths) * inv_sample_size\n\n # Retrieve top ``k`` similar\n # Note: the below performed anywhere from 4-10x faster\n # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]``\n top_k_unsorted = np.argpartition(S, -k)[-k:]\n top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1]\n\n # Add back the similarity scores\n top_k_sorted_names = map(lambda n: node_map[n], top_k_sorted)\n top_k_with_val = dict(zip(top_k_sorted_names, S[top_k_sorted]))\n\n # Remove the self-similarity\n top_k_with_val.pop(source, None)\n return top_k_with_val\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 447, "n_words": 240, "vocab_size": 164, "complexity": 5, "nloc": 79, "token_counts": 300, "n_ast_nodes": 479, "n_identifiers": 56, "random_cut": "def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None):\n r\n import numpy as np\n\n num_nodes = G.number_of_nodes()\n if num_nodes < k:\n warnings.warn(\n f\"Number of nodes is {num_nodes}, but requested k is {k}. 
\"\n \"Setting k to number of nodes.\"\n )\n k = num_nodes\n # According to [1], they empirically determined\n # a good value for ``eps`` to be sqrt( 1 / |E| )\n if eps is None:\n eps = np.sqrt(1.0 / G.number_of_edges())\n\n inv_node_map = {name: index for index, name in enumerate(G.nodes)}\n node_map = np.array(G)\n\n # Calculate the sample size ``R`` for how many paths\n # to randomly generate\n t_choose_2 = math.comb(path_length, 2)\n sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta)))\n index_map = {}\n _ = list(\n generate_random_paths(\n G, sample_size, path_length=path_length, index_map=index_map\n )\n )\n S = np.zeros(num_nodes)\n\n inv_sample_size = 1 / sample_size\n\n source_paths = set(index_map[source])\n\n # Calculate the path similarities\n # between ``source`` (v) and ``node`` (v_j)\n # using our inverted index mapping of\n # vertices to paths\n for node, paths in index_map.items():\n # Only consider paths where both\n # ``node`` and ``source`` are present\n common_paths = source_paths.intersection(paths)\n S[inv_node_map[node]] = len(common_paths) * inv_sample_size\n\n # Retrieve top ``k`` similar\n # Note: the below performed anywhere from 4-10x faster\n # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]``\n top_k_unsorted = np.argpartition(S, -k)[-k:]\n top_k_sorte", "d_id": 41928, "documentation": { "docstring": "Returns the Panther similarity of nodes in the graph `G` to node ``v``.\n\n Panther is a similarity metric that says \"two objects are considered\n to be similar if they frequently appear on the same paths.\" [1]_.\n\n Parameters\n ----------\n G : NetworkX graph\n A NetworkX graph\n source : node\n Source node for which to find the top `k` similar other nodes\n k : int (default = 5)\n The number of most similar nodes to return\n path_length : int (default = 5)\n How long the randomly generated paths should be (``T`` in [1]_)\n c : float (default = 0.5)\n A universal positive constant used to scale the number\n of sample random paths to generate.\n delta : float (default = 0.1)\n The probability that the similarity $S$ is not an epsilon-approximation to (R, phi),\n where $R$ is the number of random paths and $\\phi$ is the probability\n that an element sampled from a set $A \\subseteq D$, where $D$ is the domain.\n eps : float or None (default = None)\n The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore,\n if no value is provided, the recommended computed value will be used.\n\n Returns\n -------\n similarity : dictionary\n Dictionary of nodes to similarity scores (as floats). Note:\n the self-similarity (i.e., ``v``) will not be included in\n the returned dictionary.\n\n Examples\n --------\n >>> G = nx.star_graph(10)\n >>> sim = nx.panther_similarity(G, 0)\n\n References\n ----------\n .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.\n Panther: Fast top-k similarity search on large networks.\n In Proceedings of the ACM SIGKDD International Conference\n on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).\n Association for Computing Machinery. 
https://doi.org/10.1145/2783258.2783267.\n ", "n_words": 275, "vocab_size": 173, "n_whitespaces": 479, "language": "en" } }, { "id": 262017, "commit_id": "ff7c3858389ba250f761d76592cb060ac6be05c0", "repo": "TTS", "path": "TTS/tts/utils/text/phonemizers/base.py", "file_name": "base.py", "fun_name": "_phonemize_preprocess", "commit_message": "Fix BasePhonemizer", "code": "def _phonemize_preprocess(self, text) -> Tuple[List[str], List]:\n \n text = text.strip()\n if self._keep_puncs:\n # a tuple (text, punctuation marks)\n return self._punctuator.strip_to_restore(text)\n return [self._punctuator.strip(text)], []\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 72, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 12, "token_counts": 53, "n_ast_nodes": 85, "n_identifiers": 10, "random_cut": "def _phonemize_preprocess(self, text) -> Tuple[List[str], List]:\n \n text = text.strip()\n if self._keep_puncs:\n # a tuple (text, punctuation marks)\n return self._punctuator.strip_to_restore(text)\n return [self._punctuator.strip(text)], []\n", "d_id": 77103, "documentation": { "docstring": "Preprocess the text before phonemization\n\n 1. remove spaces\n 2. remove punctuation\n\n Override this if you need a different behaviour\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 47, "language": "en" } }, { "id": 111596, "commit_id": "1ebe7db07c8dbb1a55dafb09131b1d08242b79c5", "repo": "spaCy", "path": "spacy/cli/_util.py", "file_name": "_util.py", "fun_name": "ensure_pathy", "commit_message": "Support local filesystem remotes for projects (#11762)\n\n* Support local filesystem remotes for projects\r\n\r\n* Fix support for local filesystem remotes for projects\r\n * Use `FluidPath` instead of `Pathy` to support both filesystem and\r\n remote paths\r\n * Create missing parent directories if required for local filesystem\r\n * Add a more general `_file_exists` method to support both `Pathy`,\r\n `Path`, and `smart_open`-compatible URLs\r\n* Add explicit `smart_open` dependency starting with support for\r\n `compression` flag\r\n* Update `pathy` dependency to exclude older versions that aren't\r\n compatible with required `smart_open` version\r\n* Update docs to refer to `Pathy` instead of `smart_open` for project\r\n remotes (technically you can still push to any `smart_open`-compatible\r\n path but you can't pull from them)\r\n* Add tests for local filesystem remotes\r\n\r\n* Update pathy for general BlobStat sorting\r\n\r\n* Add import\r\n\r\n* Remove _file_exists since only Pathy remotes are supported\r\n\r\n* Format CLI docs\r\n\r\n* Clean up merge", "code": "def ensure_pathy(path):\n \n from pathy import Pathy # noqa: F811\n\n return Pathy.fluid(path)\n\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 32, "n_identifiers": 5, "random_cut": "def ensure_pathy(path):\n \n from pathy import Pathy # noqa: F811\n\n return Pathy.fluid(path)\n\n", "d_id": 24448, "documentation": { "docstring": "Temporary helper to prevent importing Pathy globally (which can cause\n slow and annoying Google Cloud warning).", "n_words": 16, "vocab_size": 16, "n_whitespaces": 18, "language": "en" } }, { "id": 203480, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/sites.py", 
"file_name": "sites.py", "fun_name": "index", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def index(self, request, extra_context=None):\n \n app_list = self.get_app_list(request)\n\n context = {\n **self.each_context(request),\n \"title\": self.index_title,\n \"subtitle\": None,\n \"app_list\": app_list,\n **(extra_context or {}),\n }\n\n request.current_app = self.name\n\n return TemplateResponse(\n request, self.index_template or \"admin/index.html\", context\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 147, "n_words": 32, "vocab_size": 27, "complexity": 3, "nloc": 13, "token_counts": 74, "n_ast_nodes": 119, "n_identifiers": 13, "random_cut": "def index(self, request, extra_context=None):\n \n app_list = self.get_app_list(request)\n\n context = {\n **self.each_context(request),\n \"title\": self.index_title,\n \"subtitle\": None,\n \"app_list\": app_list,\n **(extra_context or {}),\n }\n\n ", "d_id": 50400, "documentation": { "docstring": "\n Display the main admin index page, which lists all of the installed\n apps that have been registered in this site.\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 42, "language": "en" } }, { "id": 247955, "commit_id": "3a7e97c7ade17a47517aadc0e9e305a1894119ac", "repo": "synapse", "path": "docker/start.py", "file_name": "start.py", "fun_name": "generate_config_from_template", "commit_message": "Poetry: use locked environment in Docker images (#12385)", "code": "def generate_config_from_template(config_dir, config_path, environ, ownership):\n \n for v in (\"SYNAPSE_SERVER_NAME\", \"SYNAPSE_REPORT_STATS\"):\n if v not in environ:\n error(\n \"Environment variable '%s' is mandatory when generating a config file.\"\n % (v,)\n )\n\n # populate some params from data files (if they exist, else create new ones)\n environ = environ.copy()\n secrets = {\n \"registration\": \"SYNAPSE_REGISTRATION_SHARED_SECRET\",\n \"macaroon\": \"SYNAPSE_MACAROON_SECRET_KEY\",\n }\n\n for name, secret in secrets.items():\n if secret not in environ:\n filename = \"/data/%s.%s.key\" % (environ[\"SYNAPSE_SERVER_NAME\"], name)\n\n # if the file already exists, load in the existing value; otherwise,\n # generate a new secret and write it to a file\n\n if os.path.exists(filename):\n log(\"Reading %s from %s\" % (secret, filename))\n with open(filename) as handle:\n value = handle.read()\n else:\n log(\"Generating a random secret for {}\".format(secret))\n value = codecs.encode(os.urandom(32), \"hex\").decode()\n with open(filename, \"w\") as handle:\n handle.write(value)\n environ[secret] = value\n\n environ[\"SYNAPSE_APPSERVICES\"] = glob.glob(\"/data/appservices/*.yaml\")\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n\n # Convert SYNAPSE_NO_TLS to boolean if exists\n if \"SYNAPSE_NO_TLS\" in environ:\n tlsanswerstring = str.lower(environ[\"SYNAPSE_NO_TLS\"])\n if tlsanswerstring in (\"true\", \"on\", \"1\", \"yes\"):\n environ[\"SYNAPSE_NO_TLS\"] = True\n else:\n if tlsanswerstring in (\"false\", \"off\", \"0\", \"no\"):\n environ[\"SYNAPSE_NO_TLS\"] = False\n else:\n error(\n 'Environment variable \"SYNAPSE_NO_TLS\" found but value \"'\n + tlsanswerstring\n + '\" unrecognized; exiting.'\n )\n\n if \"SYNAPSE_LOG_CONFIG\" not in environ:\n environ[\"SYNAPSE_LOG_CONFIG\"] = config_dir + \"/log.config\"\n\n log(\"Generating synapse config file \" + config_path)\n convert(\"/conf/homeserver.yaml\", config_path, environ)\n\n 
log_config_file = environ[\"SYNAPSE_LOG_CONFIG\"]\n log(\"Generating log config file \" + log_config_file)\n convert(\"/conf/log.config\", log_config_file, environ)\n\n # Hopefully we already have a signing key, but generate one if not.\n args = [\n sys.executable,\n \"-m\",\n \"synapse.app.homeserver\",\n \"--config-path\",\n config_path,\n # tell synapse to put generated keys in /data rather than /compiled\n \"--keys-directory\",\n config_dir,\n \"--generate-keys\",\n ]\n\n if ownership is not None:\n log(f\"Setting ownership on /data to {ownership}\")\n subprocess.check_output([\"chown\", \"-R\", ownership, \"/data\"])\n args = [\"gosu\", ownership] + args\n\n subprocess.check_output(args)\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 858, "n_words": 279, "vocab_size": 174, "complexity": 12, "nloc": 63, "token_counts": 375, "n_ast_nodes": 674, "n_identifiers": 39, "random_cut": "def generate_config_from_template(config_dir, config_path, environ, ownership):\n \n for v in (\"SYNAPSE_SERVER_NAME\", \"SYNAPSE_REPORT_STATS\"):\n if v not in environ:\n error(\n \"Environment variable '%s' is mandatory when generating a config file.\"\n % (v,)\n )\n\n # populate some params from data files (if they exist, else create new ones)\n environ = environ.copy()\n secrets = {\n \"registration\": \"SYNAPSE_REGISTRATION_SHARED_SECRET\",\n \"macaroon\": \"SYNAPSE_MACAROON_SECRET_KEY\",\n }\n\n for name, secret in secrets.items():\n if secret not in environ:\n filename = \"/data/%s.%s.key\" % (environ[\"SYNAPSE_SERVER_NAME\"], name)\n\n # if the file already exists, load in the existing value; otherwise,\n # generate a new secret and write ", "d_id": 72022, "documentation": { "docstring": "Generate a homeserver.yaml from environment variables\n\n Args:\n config_dir (str): where to put generated config files\n config_path (str): where to put the main config file\n environ (dict): environment dictionary\n ownership (str|None): \":\" string which will be used to set\n ownership of the generated configs. 
If None, ownership will not change.\n ", "n_words": 49, "vocab_size": 37, "n_whitespaces": 94, "language": "en" } }, { "id": 122389, "commit_id": "b742b04380ebe2e824403e603924ca505173bf7a", "repo": "jax", "path": "jax/_src/api_util.py", "file_name": "api_util.py", "fun_name": "donation_vector", "commit_message": "Annotate tree_util", "code": "def donation_vector(donate_argnums, args, kwargs) -> Tuple[bool, ...]:\n \n res: List[bool] = []\n for i, arg in enumerate(args):\n donate = bool(i in donate_argnums)\n res.extend((donate,) * tree_structure(arg).num_leaves)\n res.extend((False,) * tree_structure(kwargs).num_leaves)\n return tuple(res)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 40, "n_words": 29, "vocab_size": 26, "complexity": 2, "nloc": 8, "token_counts": 81, "n_ast_nodes": 126, "n_identifiers": 16, "random_cut": "def donation_vector(donate_argnums, args, kwargs) -> Tuple[bool, ...]:\n \n res: List[bool] = []\n for i, arg in enumerate(args):\n donate = bool(i in donate_argnums)\n res.extend((donate,) * tree_structur", "d_id": 27173, "documentation": { "docstring": "Returns a tuple with a boolean value for each leaf in args.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 220514, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/futures.py", "file_name": "futures.py", "fun_name": "set_exception", "commit_message": "add python 3.10.4 for windows", "code": "def set_exception(self, exception):\n \n if self._state != _PENDING:\n raise exceptions.InvalidStateError(f'{self._state}: {self!r}')\n if isinstance(exception, type):\n exception = exception()\n if type(exception) is StopIteration:\n raise TypeError(\"StopIteration interacts badly with generators \"\n \"and cannot be raised into a Future\")\n self._exception = exception\n self._state = _FINISHED\n self.__schedule_callbacks()\n self.__log_traceback = True\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 160, "n_words": 44, "vocab_size": 36, "complexity": 4, "nloc": 12, "token_counts": 70, "n_ast_nodes": 132, "n_identifiers": 15, "random_cut": "def set_exception(self, exception):\n \n if self._state != _PENDING:\n raise exceptions.InvalidStateError(f'{self._state}: {self!r}')\n if isinstance(exception, type):\n exception = exception()\n if type(exception) is StopIteration:\n raise TypeError(\"StopIteration interacts badly with generators \"\n \"and cannot be raised into a Future\")\n self._exception = exception", "d_id": 56024, "documentation": { "docstring": "Mark the future done and set an exception.\n\n If the future is already done when this method is called, raises\n InvalidStateError.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 204002, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/gdal/raster/band.py", "file_name": "band.py", "fun_name": "statistics", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def statistics(self, refresh=False, approximate=False):\n \n # Prepare array with arguments for capi function\n smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()\n stats_args = [\n self._ptr,\n c_int(approximate),\n byref(smin),\n byref(smax),\n byref(smean),\n byref(sstd),\n c_void_p(),\n 
c_void_p(),\n ]\n\n if refresh or self._stats_refresh:\n func = capi.compute_band_statistics\n else:\n # Add additional argument to force computation if there is no\n # existing PAM file to take the values from.\n force = True\n stats_args.insert(2, c_int(force))\n func = capi.get_band_statistics\n\n # Computation of statistics fails for empty bands.\n try:\n func(*stats_args)\n result = smin.value, smax.value, smean.value, sstd.value\n except GDALException:\n result = (None, None, None, None)\n\n self._stats_refresh = False\n\n return result\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 369, "n_words": 98, "vocab_size": 77, "complexity": 4, "nloc": 25, "token_counts": 156, "n_ast_nodes": 241, "n_identifiers": 24, "random_cut": "def statistics(self, refresh=False, approximate=False):\n \n # Prepare array with arguments for capi function\n smin, smax", "d_id": 50606, "documentation": { "docstring": "\n Compute statistics on the pixel values of this band.\n\n The return value is a tuple with the following structure:\n (minimum, maximum, mean, standard deviation).\n\n If approximate=True, the statistics may be computed based on overviews\n or a subset of image tiles.\n\n If refresh=True, the statistics will be computed from the data directly,\n and the cache will be updated where applicable.\n\n For empty bands (where all pixel values are nodata), all statistics\n values are returned as None.\n\n For raster formats using Persistent Auxiliary Metadata (PAM) services,\n the statistics might be cached in an auxiliary file.\n ", "n_words": 93, "vocab_size": 68, "n_whitespaces": 178, "language": "en" } }, { "id": 246367, "commit_id": "546b9c9e648f5e2b25bb7c8350570787ff9befae", "repo": "synapse", "path": "tests/storage/databases/test_state_store.py", "file_name": "test_state_store.py", "fun_name": "test_smaller_request_deduplicated", "commit_message": "Add more tests for in-flight state query duplication. 
(#12033)", "code": "def test_smaller_request_deduplicated(self) -> None:\n \n req1 = ensureDeferred(\n self.state_datastore._get_state_for_group_using_inflight_cache(\n 42, StateFilter.from_types(((\"test.type\", None),))\n )\n )\n self.pump(by=0.1)\n\n # This should have gone to the database\n self.assertEqual(len(self.get_state_group_calls), 1)\n self.assertFalse(req1.called)\n\n req2 = ensureDeferred(\n self.state_datastore._get_state_for_group_using_inflight_cache(\n 42, StateFilter.from_types(((\"test.type\", \"b\"),))\n )\n )\n self.pump(by=0.1)\n\n # No more calls should have gone to the database, because the second\n # request was already in the in-flight cache!\n self.assertEqual(len(self.get_state_group_calls), 1)\n self.assertFalse(req1.called)\n self.assertFalse(req2.called)\n\n groups, sf, d = self.get_state_group_calls[0]\n self.assertEqual(groups, (42,))\n # The state filter is expanded internally for increased cache hit rate,\n # so we the database sees a wider state filter than requested.\n self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER)\n\n # Now we can complete the request\n self._complete_request_fake(groups, sf, d)\n\n self.assertEqual(\n self.get_success(req1),\n {(\"test.type\", \"a\"): \"AAA\", (\"test.type\", \"b\"): \"BBB\"},\n )\n self.assertEqual(self.get_success(req2), {(\"test.type\", \"b\"): \"BBB\"})\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 387, "n_words": 116, "vocab_size": 80, "complexity": 1, "nloc": 37, "token_counts": 224, "n_ast_nodes": 363, "n_identifiers": 22, "random_cut": "def test_smaller_request_deduplicated(self) -> None:\n \n req1 = ensureDeferred(\n self.state_datastore._get_state_for_group_using_inflight_cache(\n 42, StateFilter.from_types(((\"test.type\", None),))\n )\n )\n self.pump(by=0.1)\n\n # This should have gone to the database\n self.assertEqual(len(self.get_state_group_calls), 1)\n self.assertFalse(req1.called)\n\n req2 = ensureDeferred(\n self.state_datastore._get_state_for_group_using_inflight_cache(\n 42, StateFilter.from_types(((\"test.type\", \"b\"),))\n )\n )\n self.pump(by=0.1)\n\n # No more calls should have gone to the database, because the second\n # request was already in the i", "d_id": 71179, "documentation": { "docstring": "\n Tests that duplicate requests for state are deduplicated.\n\n This test:\n - requests some state (state group 42, 'all' state filter)\n - requests a subset of that state, before the first request finishes\n - checks to see that only one database query was made\n - completes the database query\n - checks that both requests see the correct retrieved state\n ", "n_words": 58, "vocab_size": 39, "n_whitespaces": 115, "language": "en" } }, { "id": 143733, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/examples/simulators/sumo/marlenvironment.py", "file_name": "marlenvironment.py", "fun_name": "get_observation", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_observation(self, agent):\n \n speed = 0\n distance = self._config[\"scenario_config\"][\"misc\"][\"max_distance\"]\n if agent in self.simulation.veh_subscriptions:\n speed = round(\n self.simulation.veh_subscriptions[agent][tc.VAR_SPEED] * MS_TO_KMH\n )\n leader = self.simulation.veh_subscriptions[agent][tc.VAR_LEADER]\n if leader: # compatible with traci\n veh, dist = leader\n if veh:\n # compatible 
with libsumo\n distance = round(dist)\n ret = [speed, distance]\n logger.debug(\"Agent %s --> Obs: %s\", agent, pformat(ret))\n return ret\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 232, "n_words": 55, "vocab_size": 40, "complexity": 4, "nloc": 15, "token_counts": 108, "n_ast_nodes": 176, "n_identifiers": 20, "random_cut": "def get_observation(self, agent):\n \n speed = 0\n distance = self._config[\"scenari", "d_id": 33038, "documentation": { "docstring": "\n Returns the observation of a given agent.\n See http://sumo.sourceforge.net/pydoc/traci._simulation.html\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 31, "language": "en" } }, { "id": 108332, "commit_id": "0abe0ce2f2748d1d0383154d045da3609a4b871b", "repo": "matplotlib", "path": "lib/matplotlib/colors.py", "file_name": "colors.py", "fun_name": "register", "commit_message": "Add a registry for color sequences\n\nColor sequences are simply lists of colors, that we store by name in\na registry. The registry is modelled similar to the ColormapRegistry\nto 1) support immutable builtin color sequences and 2) to return copies\nso that one cannot mess with the global definition of the color sequence\nthrough an obtained instance.\n\nFor now, I've made the sequences used for `ListedColormap`s available\nas builtin sequences, but that's open for discussion.\n\nMore usage documentation should be added in the color examples and/or\ntutorials, but I'll wait with that till after the general approval of\nthe structure and API. One common use case will be\n\n```\nplt.rc_params['axes.prop_cycle'] = plt.cycler(color=plt.color_sequences['Pastel1')\n```\n\nCo-authored-by: Elliott Sales de Andrade ", "code": "def register(self, name, color_list):\n \n if name in self._BUILTIN_COLOR_SEQUENCES:\n raise ValueError(f\"{name!r} is a reserved name for a builtin \"\n \"color sequence\")\n\n color_list = list(color_list) # force copy and coerce type to list\n for color in color_list:\n try:\n to_rgba(color)\n except ValueError:\n raise ValueError(\n f\"{color!r} is not a valid color specification\")\n\n self._color_sequences[name] = color_list\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 197, "n_words": 51, "vocab_size": 41, "complexity": 4, "nloc": 12, "token_counts": 58, "n_ast_nodes": 108, "n_identifiers": 10, "random_cut": "def register(self, name, color_list):\n \n if name in self._BUILTIN_COLOR_SEQUENCES:\n raise ValueError(f\"{name!r} is a reserved name for a builtin \"\n \"color sequence\")\n\n color_list = list(color_list) # force copy and coerce type to list\n for color in color_list:\n try:\n to_rgba(color)\n except ValueError:\n raise ValueError(\n f\"{color!r} is not a valid color specification\")\n\n self._color_sequences[name] = color_list\n", "d_id": 23144, "documentation": { "docstring": "\n Register a new color sequence.\n\n The color sequence registry stores a copy of the given *color_list*, so\n that future changes to the original list do not affect the registered\n color sequence. 
Think of this as the registry taking a snapshot\n of *color_list* at registration.\n\n Parameters\n ----------\n name : str\n The name for the color sequence.\n\n color_list : list of colors\n An iterable returning valid Matplotlib colors when iterating over.\n Note however that the returned color sequence will always be a\n list regardless of the input type.\n\n ", "n_words": 86, "vocab_size": 58, "n_whitespaces": 201, "language": "en" } }, { "id": 293911, "commit_id": "816695cc96c19110ccda10431d92160ea6064d32", "repo": "core", "path": "tests/components/recorder/test_history.py", "file_name": "test_history.py", "fun_name": "test_get_states_no_attributes", "commit_message": "Avoid selecting attributes in the history api when `no_attributes` is passed (#68352)", "code": "def test_get_states_no_attributes(hass_recorder):\n \n hass = hass_recorder()\n now, future, states = _setup_get_states(hass)\n for state in states:\n state.attributes = {}\n\n # Get states returns everything before POINT for all entities\n for state1, state2 in zip(\n states,\n sorted(\n history.get_states(hass, future, no_attributes=True),\n key=lambda state: state.entity_id,\n ),\n ):\n assert state1 == state2\n\n # Get states returns everything before POINT for tested entities\n entities = [f\"test.point_in_time_{i % 5}\" for i in range(5)]\n for state1, state2 in zip(\n states,\n sorted(\n history.get_states(hass, future, entities, no_attributes=True),\n key=lambda state: state.entity_id,\n ),\n ):\n assert state1 == state2\n\n # Test get_state here because we have a DB setup\n assert states[0] == history.get_state(\n hass, future, states[0].entity_id, no_attributes=True\n )\n\n time_before_recorder_ran = now - timedelta(days=1000)\n assert history.get_states(hass, time_before_recorder_ran, no_attributes=True) == []\n\n assert (\n history.get_state(hass, time_before_recorder_ran, \"demo.id\", no_attributes=True)\n is None\n )\n\n\n@pytest.mark.parametrize(\n \"attributes, no_attributes, limit\",\n [\n ({\"attr\": True}, False, 5000),\n ({}, True, 5000),\n ({\"attr\": True}, False, 3),\n ({}, True, 3),\n ],\n)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"attributes, no_attributes, limit\",\n [\n ({\"attr\": True}, False, 5000),\n ({}, True, 5000),\n ({\"attr\": True}, False, 3),\n ({}, True, 3),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 362, "n_words": 145, "vocab_size": 85, "complexity": 5, "nloc": 31, "token_counts": 199, "n_ast_nodes": 381, "n_identifiers": 28, "random_cut": "def test_get_states_no_attributes(hass_recorder):\n \n hass = hass_recorder()\n now, future, states = _setup_get_states(hass)\n for state in states:\n state.attributes = {}\n\n # Get states returns everything before POINT for all entities\n for state1, state2 in zip(\n states,\n sorted(\n history.get_states(hass, future, no_attributes=True),\n key=lambda state: state.entity_id,\n ),\n ):\n assert state1 == state2\n\n # Get states returns everything before POINT for tested entities\n entities = [f\"test.point_in_time_{i % 5}", "d_id": 92957, "documentation": { "docstring": "Test getting states without attributes at a specific point in time.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 221098, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "set_until", "commit_message": "add python 3.10.4 for windows", "code": 
"def set_until(self, frame, lineno=None):\n \n # the name \"until\" is borrowed from gdb\n if lineno is None:\n lineno = frame.f_lineno + 1\n self._set_stopinfo(frame, frame, lineno)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 24, "vocab_size": 21, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 54, "n_identifiers": 6, "random_cut": "def set_until(self, frame, lineno=None):\n \n # the name \"until\" is borrowed from gdb\n if lineno is None:\n lineno = frame.f_lineno + 1\n self._set_stopinfo(frame, frame, lineno)\n", "d_id": 56203, "documentation": { "docstring": "Stop when the line with the lineno greater than the current one is\n reached or when returning from current frame.", "n_words": 20, "vocab_size": 16, "n_whitespaces": 26, "language": "en" } }, { "id": 290079, "commit_id": "fee3898f648d4fffdf9dbec748aab2410a0bd227", "repo": "core", "path": "homeassistant/components/rest/switch.py", "file_name": "switch.py", "fun_name": "get_device_state", "commit_message": "Use _attr_is_on in rest (#81305)", "code": "async def get_device_state(self, hass):\n \n websession = async_get_clientsession(hass, self._verify_ssl)\n\n rendered_headers = template.render_complex(self._headers, parse_result=False)\n rendered_params = template.render_complex(self._params)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 43, "n_words": 15, "vocab_size": 13, "complexity": 6, "nloc": 31, "token_counts": 180, "n_ast_nodes": 68, "n_identifiers": 13, "random_cut": "async def get_device_state(self, hass):\n \n websession = async_get_clientsession(hass, self._verify_ssl)\n\n rendered_headers = template.render_complex(self._headers, parse_res", "d_id": 89205, "documentation": { "docstring": "Get the latest data from REST API and update the state.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 131429, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_client_reconnect.py", "file_name": "test_client_reconnect.py", "fun_name": "reset_channel", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def reset_channel(self) -> None:\n \n if self.channel:\n self.channel.close()\n self.channel = grpc.insecure_channel(self.real_addr, options=GRPC_OPTIONS)\n grpc.channel_ready_future(self.channel)\n self.task_servicer.set_channel(self.channel)\n self.data_servicer.set_channel(self.channel)\n self.logs_servicer.set_channel(self.channel)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 15, "vocab_size": 15, "complexity": 2, "nloc": 12, "token_counts": 74, "n_ast_nodes": 121, "n_identifiers": 14, "random_cut": "def reset_channel(self) -> None:\n \n if self.channel:\n self.", "d_id": 29525, "documentation": { "docstring": "\n Manually close and reopen the channel to the real ray server. 
This\n simulates a disconnection between the client and the server.\n ", "n_words": 21, "vocab_size": 16, "n_whitespaces": 43, "language": "en" } }, { "id": 321575, "commit_id": "deb21acdebd77c6dc6d5fe4d8cad75e4ca074138", "repo": "qutebrowser", "path": "tests/end2end/fixtures/quteprocess.py", "file_name": "quteprocess.py", "fun_name": "wait_scroll_pos_changed", "commit_message": "qt6 tests: Fix remaining PyQt5 references", "code": "def wait_scroll_pos_changed(self, x=None, y=None):\n \n __tracebackhide__ = (lambda e:\n e.errisinstance(testprocess.WaitForTimeout))\n if (x is None and y is not None) or (y is None and x is not None):\n raise ValueError(\"Either both x/y or neither must be given!\")\n\n if x is None and y is None:\n point = 'Py*.QtCore.QPoint(*, *)' # not counting 0/0 here\n elif x == '0' and y == '0':\n point = 'Py*.QtCore.QPoint()'\n else:\n point = 'Py*.QtCore.QPoint({}, {})'.format(x, y)\n self.wait_for(category='webview',\n message='Scroll position changed to ' + point)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 221, "n_words": 78, "vocab_size": 54, "complexity": 9, "nloc": 13, "token_counts": 107, "n_ast_nodes": 184, "n_identifiers": 15, "random_cut": "def wait_scroll_pos_changed(self, x=None, y=None):\n \n __tracebackhide__ = (lambda e:\n e.errisinstance(testprocess.WaitForTimeout))\n if (x is None and y is not None) or (y is None and x is not None):\n raise ValueError(\"Either both x/y or neither must be given!\")\n\n if x is None and y is None:\n point = 'Py*.QtCore.QPoint(*, *)' # not counting 0/0 here\n elif x == '0' and y == '0':\n ", "d_id": 117805, "documentation": { "docstring": "Wait until a \"Scroll position changed\" message was found.\n\n With QtWebEngine, on older Qt versions which lack\n QWebEnginePage.scrollPositionChanged, this also skips the test.\n ", "n_words": 23, "vocab_size": 23, "n_whitespaces": 44, "language": "en" } }, { "id": 135586, "commit_id": "9fab504fe776f96fecf85e12ea006264cbe92f4a", "repo": "ray", "path": "python/ray/data/tests/test_dataset_tfrecords.py", "file_name": "test_dataset_tfrecords.py", "fun_name": "test_write_tfrecords", "commit_message": "[Datasets] Add writer for TFRecords. (#29448)\n\nThis PR enables users to write TFRecords from datasets.\r\n\r\nIn particular, the master branch already includes an API for reading TFRecords from datasets. 
Users have requested the ability to write these datasets back to TFRecords.", "code": "def test_write_tfrecords(ray_start_regular_shared, tmp_path):\n \n\n import tensorflow as tf\n\n # The dataset we will write to a .tfrecords file.\n ds = ray.data.from_items(\n [\n # Row one.\n {\n \"int_item\": 1,\n \"int_list\": [2, 2, 3],\n \"float_item\": 1.0,\n \"float_list\": [2.0, 3.0, 4.0],\n \"bytes_item\": b\"abc\",\n \"bytes_list\": [b\"abc\", b\"1234\"],\n },\n # Row two.\n {\n \"int_item\": 2,\n \"int_list\": [3, 3, 4],\n \"float_item\": 2.0,\n \"float_list\": [2.0, 2.0, 3.0],\n \"bytes_item\": b\"def\",\n \"bytes_list\": [b\"def\", b\"1234\"],\n },\n ]\n )\n\n # The corresponding tf.train.Example that we would expect to read\n # from this dataset.\n\n expected_records = [\n # Record one (corresponding to row one).\n tf.train.Example(\n features=tf.train.Features(\n feature={\n \"int_item\": tf.train.Feature(\n int64_list=tf.train.Int64List(value=[1])\n ),\n \"int_list\": tf.train.Feature(\n int64_list=tf.train.Int64List(value=[2, 2, 3])\n ),\n \"float_item\": tf.train.Feature(\n float_list=tf.train.FloatList(value=[1.0])\n ),\n \"float_list\": tf.train.Feature(\n float_list=tf.train.FloatList(value=[2.0, 3.0, 4.0])\n ),\n \"bytes_item\": tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[b\"abc\"])\n ),\n \"bytes_list\": tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[b\"abc\", b\"1234\"])\n ),\n }\n )\n ),\n # Record two (corresponding to row two).\n tf.train.Example(\n features=tf.train.Features(\n feature={\n \"int_item\": tf.train.Feature(\n int64_list=tf.train.Int64List(value=[2])\n ),\n \"int_list\": tf.train.Feature(\n int64_list=tf.train.Int64List(value=[3, 3, 4])\n ),\n \"float_item\": tf.train.Feature(\n float_list=tf.train.FloatList(value=[2.0])\n ),\n \"float_list\": tf.train.Feature(\n float_list=tf.train.FloatList(value=[2.0, 2.0, 3.0])\n ),\n \"bytes_item\": tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[b\"def\"])\n ),\n \"bytes_list\": tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[b\"def\", b\"1234\"])\n ),\n }\n )\n ),\n ]\n\n # Perform the test.\n # Write the dataset to a .tfrecords file.\n ds.write_tfrecords(tmp_path)\n\n # Read the Examples back out from the .tfrecords file.\n # This follows the offical TFRecords tutorial:\n # https://www.tensorflow.org/tutorials/load_data/tfrecord#reading_a_tfrecord_file_2\n\n filenames = sorted(os.listdir(tmp_path))\n filepaths = [os.path.join(tmp_path, filename) for filename in filenames]\n raw_dataset = tf.data.TFRecordDataset(filepaths)\n\n tfrecords = []\n for raw_record in raw_dataset:\n example = tf.train.Example()\n example.ParseFromString(raw_record.numpy())\n tfrecords.append(example)\n\n assert tfrecords == expected_records\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 1453, "n_words": 231, "vocab_size": 127, "complexity": 3, "nloc": 82, "token_counts": 590, "n_ast_nodes": 885, "n_identifiers": 40, "random_cut": "def test_write_tfrecords(ray_start_regular_shared, tmp_path):\n \n\n import tensorflow as tf\n\n # The dataset we will write to a .tfrecords file.\n ds = ray.data.from_items(\n [\n # Row one.\n {\n \"int_item\": 1,\n \"int_list\": [2, 2, 3],\n \"float_item\": 1.0,\n \"float_list\": [2.0, 3.0, 4.0],\n \"bytes_item\": b\"abc\",\n \"bytes_list\": [b\"abc\", b\"1234\"],\n },\n # Row two.\n {\n \"int_item\": 2,\n \"int_list\": [3, 3, 4],\n \"float_item\": 2.0,\n \"float_list\": [2.0, 
2.0, 3.0],\n \"bytes_item\": b\"def\",\n \"bytes_list\": [b\"def\", b\"1234\"],\n },\n ]\n )\n\n # The corresponding tf.train.Example that we would expect to read\n # from this dataset.\n\n expected_records = [\n # Record one (corresponding to row one).\n tf.train.Example(\n features=tf.train.Features(\n feature={\n \"int_item\": tf.train.Feature(\n int64_list=tf.train.Int64List(value=[1])\n ),\n \"int_list\": tf.train.Feature(\n int64_list=tf.train.Int64List(value=[2, 2, 3])\n ),\n \"float_item\": tf.train.Feature(\n float_list=tf.train.FloatList(value=[1.0])\n ),\n \"float_l", "d_id": 30665, "documentation": { "docstring": "Test that write_tfrecords writes TFRecords correctly.\n\n Test this by writing a Dataset to a TFRecord (function under test),\n reading it back out into a tf.train.Example,\n and checking that the result is analogous to the original Dataset.\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 48, "language": "en" } }, { "id": 179373, "commit_id": "cc0cff893f9d7d472788adc2510c123967b384fe", "repo": "gradio", "path": "test/test_processing_utils.py", "file_name": "test_processing_utils.py", "fun_name": "test_float_conversion_dtype", "commit_message": "Format The Codebase\n- black formatting\n- isort formatting", "code": "def test_float_conversion_dtype(self):\n \n\n x = np.array([-1, 1])\n # Test all combinations of dtypes conversions\n dtype_combin = np.array(\n np.meshgrid(\n OutputPreprocessing.float_dtype_list,\n OutputPreprocessing.float_dtype_list,\n )\n ).T.reshape(-1, 2)\n\n for dtype_in, dtype_out in dtype_combin:\n x = x.astype(dtype_in)\n y = gr.processing_utils._convert(x, dtype_out)\n assert y.dtype == np.dtype(dtype_out)\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 165, "n_words": 38, "vocab_size": 33, "complexity": 2, "nloc": 12, "token_counts": 87, "n_ast_nodes": 137, "n_identifiers": 19, "random_cut": "def test_float_conversion_dtype(self):\n \n\n x = np.a", "d_id": 42946, "documentation": { "docstring": "Test any convertion from a float dtype to an other.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 30395, "commit_id": "de31601550e5b6b243f7a00b2bc82300f43f2d9d", "repo": "spotify-downloader", "path": "spotdl/console/web.py", "file_name": "web.py", "fun_name": "fix_mime_types", "commit_message": "fix: broken mimetypes #1540", "code": "def fix_mime_types():\n \n # Known to be problematic when Visual Studio is installed:\n # \n # https://github.com/spotDL/spotify-downloader/issues/1540\n mimetypes.add_type(\"application/javascript\", \".js\")\n # Not known to be problematic, but used by spotDL:\n mimetypes.add_type(\"text/css\", \".css\")\n mimetypes.add_type(\"image/svg+xml\", \".svg\")\n mimetypes.add_type(\"text/html\", \".html\")\n\n\n@app.server.websocket(\"/api/ws\")", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "@app.server.websocket(\"/api/ws\")", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 61, "n_words": 35, "vocab_size": 30, "complexity": 1, "nloc": 5, "token_counts": 37, "n_ast_nodes": 97, "n_identifiers": 6, "random_cut": "def fix_mime_types():\n \n # Known to be problematic when Visual Studio is installed:\n # \n # https://github.com/spotDL/spotify-downloader/issues/1540\n mimetypes.add_type(\"application/javascript\", \".js\")\n # Not known to be problematic, but used by spotDL:\n mimetypes.add_type(\"text/css\", \".css\")\n 
mimetypes.add_type(\"image/", "d_id": 5544, "documentation": { "docstring": "Fix incorrect entries in the `mimetypes` registry.\n On Windows, the Python standard library's `mimetypes` reads in\n mappings from file extension to MIME type from the Windows\n registry. Other applications can and do write incorrect values\n to this registry, which causes `mimetypes.guess_type` to return\n incorrect values, which causes spotDL to fail to render on\n the frontend.\n This method hard-codes the correct mappings for certain MIME\n types that are known to be either used by TensorBoard or\n problematic in general.\n ", "n_words": 78, "vocab_size": 58, "n_whitespaces": 108, "language": "en" } }, { "id": 101382, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "scripts/convert.py", "file_name": "convert.py", "fun_name": "_get_threads", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def _get_threads(self) -> MultiThread:\n \n # TODO Check if multiple threads actually speeds anything up\n save_queue = queue_manager.get_queue(\"convert_out\")\n patch_queue = queue_manager.get_queue(\"patch\")\n return MultiThread(self._converter.process, patch_queue, save_queue,\n thread_count=self._pool_processes, name=\"patch\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 87, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 11, "token_counts": 47, "n_ast_nodes": 80, "n_identifiers": 12, "random_cut": "def _get_threads(self) -> MultiThread:\n \n # TODO Check if multiple threads actually speeds anything up\n save_queue = queue_manager.get_queue(\"convert_out\")\n patch_queue = queue_manager.get_queue(\"patch\")\n return MultiThread(self._converter.process, patch_queue, save_queue,\n thread_count=self._pool_p", "d_id": 20797, "documentation": { "docstring": " Get the threads for patching the converted faces onto the frames.\n\n Returns\n :class:`lib.multithreading.MultiThread`\n The threads that perform the patching of swapped faces onto the output frames\n ", "n_words": 26, "vocab_size": 18, "n_whitespaces": 59, "language": "en" } }, { "id": 222788, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/register.py", "file_name": "register.py", "fun_name": "verify_metadata", "commit_message": "add python 3.10.4 for windows", "code": "def verify_metadata(self):\n \n # send the info to the server and report the result\n (code, result) = self.post_to_server(self.build_post_data('verify'))\n log.info('Server response (%s): %s', code, result)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 51, "n_words": 23, "vocab_size": 20, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 58, "n_identifiers": 8, "random_cut": "def verify_metadata(self):\n \n # send the info to the server and report the result\n (code, result) = self.post_to_server(self.build_post_da", "d_id": 56740, "documentation": { "docstring": " Send the metadata to the package index server to be checked.\n ", "n_words": 11, "vocab_size": 9, "n_whitespaces": 19, "language": "en" } }, { "id": 83381, "commit_id": "4b9770e270823b7ed2bbbeda0e4450f0ba6a288b", "repo": "zulip", 
"path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_subscriptions_add_for_principal_invite_only", "commit_message": "stream_settings: Show stream privacy & description in stream events.\n\nProvide stream privacy and description in stream notification events\nwhen stream is created.\nIn function \"send_messages_for_new_subscribers\" for when stream is\ncreated, put policy name and description of the stream.\n\nFixes #21004", "code": "def test_subscriptions_add_for_principal_invite_only(self) -> None:\n \n invitee = self.example_user(\"iago\")\n current_streams = self.get_streams(invitee)\n invite_streams = self.make_random_stream_names(current_streams)\n self.assert_adding_subscriptions_for_principal(\n invitee.id,\n invitee.realm,\n invite_streams,\n invite_only=True,\n policy_name=\"Private, protected history\",\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 119, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 14, "token_counts": 55, "n_ast_nodes": 90, "n_identifiers": 13, "random_cut": "def test_subscriptions_add_for_principal_invite_only(self) -> None:\n \n invitee = self.example_user(\"iago\")\n current_streams = self.get_streams(invitee)\n invite_streams = self.make_random_stream_names(current_streams)\n self.assert_adding_subscriptions_for_principal(\n invitee.id,\n invitee.realm,\n invite_streams,\n invite_only=True,\n ", "d_id": 17668, "documentation": { "docstring": "\n You can subscribe other people to invite only streams.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 6328, "commit_id": "c9b6f4dfa32631c320d122ad07f09013769d9d5d", "repo": "ludwig", "path": "ludwig/features/feature_utils.py", "file_name": "feature_utils.py", "fun_name": "get_module_dict_key_from_name", "commit_message": "Enable feature names with periods in them. 
(#1787)\n\n* Enable feature names with periods in them.\r\n\r\n* Simplify new unit test.", "code": "def get_module_dict_key_from_name(name):\n \n key = name.replace(\".\", \"__ludwig_punct_period__\")\n return key + FEATURE_NAME_SUFFIX\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 19, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 38, "n_identifiers": 5, "random_cut": "def get_module_dict_key_from_name(name):\n \n key = name.replace(\".\", \"__ludwig_punct_peri", "d_id": 960, "documentation": { "docstring": "Returns a key that's guaranteed to be compatible with torch.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 154641, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": "modin/test/storage_formats/hdk/test_internals.py", "file_name": "test_internals.py", "fun_name": "test_hdk_import", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def test_hdk_import(import_strategy, has_other_engines):\n \n\n remove_other_engines = \n\n if not has_other_engines:\n import_strategy = f\"{remove_other_engines}\\n{import_strategy}\"\n\n res = subprocess.run(\n [sys.executable, \"-c\", import_strategy],\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n\n if res.returncode != 0:\n pytest.fail(str(res.stderr))\n\n\n@pytest.mark.parametrize(\n \"import_strategy, expected_to_fail\",\n [\n pytest.param(\n ,\n True,\n id=\"import_pydbe_first-pyarrow_gandiva_second\",\n ),\n pytest.param(\n ,\n False,\n id=\"import_pyarrow_gandiva_first-pydbe_second\",\n ),\n ],\n)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"import_strategy, expected_to_fail\",\n [\n pytest.param(\n \"\"\"\nfrom modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker\nimport pyarrow.gandiva\n\"\"\",\n True,\n id=\"import_pydbe_first-pyarrow_gandiva_second\",\n ),\n pytest.param(\n \"\"\"\nimport pyarrow.gandiva\nfrom modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker\n\"\"\",\n False,\n id=\"import_pyarrow_gandiva_first-pydbe_second\",\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 196, "n_words": 41, "vocab_size": 34, "complexity": 3, "nloc": 15, "token_counts": 66, "n_ast_nodes": 182, "n_identifiers": 20, "random_cut": "def test_hdk_import(import_strategy, has_other_engines):\n \n\n remove_other_engines = \n\n if not has_other_engines:\n import_strategy = f\"{remove_oth", "d_id": 36121, "documentation": { "docstring": "\n Test import of HDK engine.\n\n The import of DbWorker requires to set special dlopen flags which make it then\n incompatible to import some other libraries further (like ``pyarrow.gandiva``).\n This test verifies that it's not the case when a user naturally imports Modin\n with HDK engine.\n\n Parameters\n ----------\n import_strategy : str\n There are several scenarios of how a user can import Modin with HDK engine:\n configure Modin first to use HDK engine and then import ``modin.pandas`` or vice versa.\n This parameters holds a python code, implementing one of these scenarios.\n has_other_engines : bool\n The problem with import may appear depending on whether other engines are\n installed. 
This parameter indicates whether to remove modules for\n non-hdk engines before the test.\n\n Notes\n -----\n The failed import flow may cause segfault, which causes to crash the pytest itself.\n This makes us to run the test in a separate process and check its exit-code to\n decide the success of the test.\n \nimport sys\nsys.modules['ray'] = None\nsys.modules['dask'] = None\n\nfrom modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker\nimport pyarrow.gandiva\n\nimport pyarrow.gandiva\nfrom modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker\n", "n_words": 176, "vocab_size": 115, "n_whitespaces": 257, "language": "en" } }, { "id": 130289, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/util.py", "file_name": "util.py", "fun_name": "_iter_tree_entries_next", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _iter_tree_entries_next(root_full, dir_rel, memo, on_error, follow_links):\n \n dir_full = os.path.join(root_full, dir_rel)\n dir_real = os.path.realpath(dir_full)\n\n # Remember each encountered ancestor directory and its canonical\n # (real) path. If a canonical path is encountered more than once,\n # recursion has occurred.\n if dir_real not in memo:\n memo[dir_real] = dir_rel\n else:\n raise RecursionError(\n real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel\n )\n\n for node_name in os.listdir(dir_full):\n node_rel = os.path.join(dir_rel, node_name)\n node_full = os.path.join(root_full, node_rel)\n\n # Inspect child node.\n try:\n node_lstat = os.lstat(node_full)\n except OSError as e:\n if on_error is not None:\n on_error(e)\n continue\n\n if stat.S_ISLNK(node_lstat.st_mode):\n # Child node is a link, inspect the target node.\n is_link = True\n try:\n node_stat = os.stat(node_full)\n except OSError as e:\n if on_error is not None:\n on_error(e)\n continue\n else:\n is_link = False\n node_stat = node_lstat\n\n if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):\n # Child node is a directory, recurse into it and yield its\n # descendant files.\n yield TreeEntry(node_name, node_rel, node_lstat, node_stat)\n\n for entry in _iter_tree_entries_next(\n root_full, node_rel, memo, on_error, follow_links\n ):\n yield entry\n\n elif stat.S_ISREG(node_stat.st_mode) or is_link:\n # Child node is either a file or an unfollowed link, yield it.\n yield TreeEntry(node_name, node_rel, node_lstat, node_stat)\n\n # NOTE: Make sure to remove the canonical (real) path of the directory\n # from the ancestors memo once we are done with it. This allows the\n # same directory to appear multiple times. If this is not done, the\n # second occurrence of the directory will be incorrectly interpreted\n # as a recursion. See .\n del memo[dir_real]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 665, "n_words": 240, "vocab_size": 140, "complexity": 14, "nloc": 38, "token_counts": 249, "n_ast_nodes": 396, "n_identifiers": 33, "random_cut": "def _iter_tree_entries_next(root_full, dir_rel, memo, on_error, follow_links):\n \n dir_full = os.path.join(root_full, dir_rel)\n dir_real = os.path.realpath(dir_full)\n\n # Remember each encountered ancestor directory and its canonical\n # (real) path. 
If a canonical path is encountered more than once,\n # recursion has occurred.\n if dir_real not in memo:\n ", "d_id": 29214, "documentation": { "docstring": "\n Scan the directory for all descendant files.\n\n *root_full* (:class:`str`) the absolute path to the root directory.\n\n *dir_rel* (:class:`str`) the path to the directory to scan relative to\n *root_full*.\n\n *memo* (:class:`dict`) keeps track of ancestor directories\n encountered. Maps each ancestor real path (:class:`str`) to relative\n path (:class:`str`).\n\n *on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n optionally is the error handler for file-system exceptions.\n\n *follow_links* (:class:`bool`) is whether to walk symbolic links that\n resolve to directories.\n\n Yields each entry (:class:`.TreeEntry`).\n ", "n_words": 74, "vocab_size": 52, "n_whitespaces": 114, "language": "en" } }, { "id": 196223, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/util.py", "file_name": "util.py", "fun_name": "_remove_gens", "commit_message": "Updated import locations", "code": "def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None):\n \n from sympy.combinatorics.perm_groups import _orbit\n base_len = len(base)\n degree = strong_gens[0].size\n if strong_gens_distr is None:\n strong_gens_distr = _distribute_gens_by_base(base, strong_gens)\n if basic_orbits is None:\n basic_orbits = []\n for i in range(base_len):\n basic_orbit = _orbit(degree, strong_gens_distr[i], base[i])\n basic_orbits.append(basic_orbit)\n strong_gens_distr.append([])\n res = strong_gens[:]\n for i in range(base_len - 1, -1, -1):\n gens_copy = strong_gens_distr[i][:]\n for gen in strong_gens_distr[i]:\n if gen not in strong_gens_distr[i + 1]:\n temp_gens = gens_copy[:]\n temp_gens.remove(gen)\n if temp_gens == []:\n continue\n temp_orbit = _orbit(degree, temp_gens, base[i])\n if temp_orbit == basic_orbits[i]:\n gens_copy.remove(gen)\n res.remove(gen)\n return res\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 318, "n_words": 88, "vocab_size": 59, "complexity": 9, "nloc": 26, "token_counts": 201, "n_ast_nodes": 308, "n_identifiers": 24, "random_cut": "def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None):\n \n from sympy.combinatorics.perm_groups import _orbit\n base_len = len(base)\n degree = strong_gens[0].size\n if strong_gens_distr is None:\n strong_gens_distr = _distribute_gens_by_base(base, strong_gens)\n if basic_orbits is None:\n basic_orbits = []\n for i in range(base_len):\n basic_orbit = _orbit(degree, strong_gens_distr[i], base[i])\n basic_orbits.append(basic_orbit)\n strong_gens_distr.append([])\n res = strong_gens[:]\n for i in range(base_len - 1, -1, -1):\n gens_copy = strong_gens_distr[i][:]\n for gen in strong_gens_distr[i]:\n if gen not in strong_gens_distr[i + 1]:\n temp_gens = gens_copy[:]\n temp_gens.remove(gen)\n if temp_gens == []:\n continue\n temp_orbit = _orbit(degree, temp_gens,", "d_id": 47723, "documentation": { "docstring": "\n Remove redundant generators from a strong generating set.\n\n Parameters\n ==========\n\n ``base`` - a base\n ``strong_gens`` - a strong generating set relative to ``base``\n ``basic_orbits`` - basic orbits\n ``strong_gens_distr`` - strong generators distributed by membership in basic\n stabilizers\n\n Returns\n =======\n\n A strong generating set with respect to ``base`` which is a subset of\n 
``strong_gens``.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import SymmetricGroup\n >>> from sympy.combinatorics.util import _remove_gens\n >>> from sympy.combinatorics.testutil import _verify_bsgs\n >>> S = SymmetricGroup(15)\n >>> base, strong_gens = S.schreier_sims_incremental()\n >>> new_gens = _remove_gens(base, strong_gens)\n >>> len(new_gens)\n 14\n >>> _verify_bsgs(S, base, new_gens)\n True\n\n Notes\n =====\n\n This procedure is outlined in [1],p.95.\n\n References\n ==========\n\n .. [1] Holt, D., Eick, B., O'Brien, E.\n \"Handbook of computational group theory\"\n\n ", "n_words": 115, "vocab_size": 79, "n_whitespaces": 219, "language": "en" } }, { "id": 204505, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/files/uploadedfile.py", "file_name": "uploadedfile.py", "fun_name": "from_dict", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def from_dict(cls, file_dict):\n \n return cls(\n file_dict[\"filename\"],\n file_dict[\"content\"],\n file_dict.get(\"content-type\", \"text/plain\"),\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 64, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 31, "n_ast_nodes": 54, "n_identifiers": 4, "random_cut": "def from_dict(cls, file_dict):\n \n return cls(\n file_dict[\"filename\"],\n file_dict[\"content\"],\n file_dict.get(\"con", "d_id": 50758, "documentation": { "docstring": "\n Create a SimpleUploadedFile object from a dictionary with keys:\n - filename\n - content-type\n - content\n ", "n_words": 15, "vocab_size": 12, "n_whitespaces": 60, "language": "en" } }, { "id": 207730, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_change_list_sorting_callable", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_change_list_sorting_callable(self):\n \n response = self.client.get(\n reverse(\"admin:admin_views_article_changelist\"), {\"o\": 2}\n )\n self.assertContentBefore(\n response,\n \"Oldest content\",\n \"Middle content\",\n \"Results of sorting on callable are out of order.\",\n )\n self.assertContentBefore(\n response,\n \"Middle content\",\n \"Newest content\",\n \"Results of sorting on callable are out of order.\",\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 189, "n_words": 41, "vocab_size": 23, "complexity": 1, "nloc": 16, "token_counts": 51, "n_ast_nodes": 92, "n_identifiers": 7, "random_cut": "def test_change_list_sorting_callable(self):\n \n response = self.client.get(\n reverse(\"admin:admin_views_article_changelist\"), {\"o\": 2}\n )\n self.assertContentBefore", "d_id": 52073, "documentation": { "docstring": "\n Ensure we can sort on a list_display field that is a callable\n (column 2 is callable_year in ArticleAdmin)\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 101374, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "scripts/convert.py", "file_name": "convert.py", "fun_name": "pre_encode", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to 
Dataclass", "code": "def pre_encode(self) -> Optional[Callable[[np.ndarray], List[bytes]]]:\n \n dummy = np.zeros((20, 20, 3), dtype=\"uint8\")\n test = self._writer.pre_encode(dummy)\n retval: Optional[Callable[[np.ndarray],\n List[bytes]]] = None if test is None else self._writer.pre_encode\n logger.debug(\"Writer pre_encode function: %s\", retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 107, "n_words": 32, "vocab_size": 27, "complexity": 2, "nloc": 9, "token_counts": 91, "n_ast_nodes": 138, "n_identifiers": 16, "random_cut": "def pre_encode(self) -> Optional[Callable[[np.ndarray], List[bytes]]]:\n \n dummy = np.zeros((20, 20, 3), dtype=\"uint8\")\n test = self._writer.pre_encode(dummy)\n retval: O", "d_id": 20789, "documentation": { "docstring": " python function: Selected writer's pre-encode function, if it has one,\n otherwise ``None`` ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 20, "language": "en" } }, { "id": 115431, "commit_id": "fc9776d9b342f873cbb3f36fd39955b9e1ea6f76", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/sqlite_handler/sqlite_handler.py", "file_name": "sqlite_handler.py", "fun_name": "disconnect", "commit_message": "added connection_args and connection_args_example dicts", "code": "def disconnect(self):\r\n \r\n\r\n if self.is_connected is False:\r\n return\r\n\r\n self.connection.close()\r\n self.is_connected = False\r\n return self.is_connected\r\n\r", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 59, "n_words": 13, "vocab_size": 10, "complexity": 2, "nloc": 6, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 5, "random_cut": "def disconnect(self):\r\n \r\n\r\n ", "d_id": 25459, "documentation": { "docstring": "\r\n Close any existing connections.\r\n ", "n_words": 4, "vocab_size": 4, "n_whitespaces": 19, "language": "en" } }, { "id": 133434, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/workflow/step_executor.py", "file_name": "step_executor.py", "fun_name": "resolve", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def resolve(self) -> Tuple[List, Dict]:\n \n objects_mapping = []\n for obj_ref in self.workflow_outputs:\n obj, ref = _resolve_object_ref(obj_ref.ref)\n objects_mapping.append(obj)\n\n workflow_ref_mapping = _resolve_dynamic_workflow_refs(self.workflow_refs)\n\n with serialization_context.workflow_args_resolving_context(\n objects_mapping, workflow_ref_mapping\n ):\n # reconstruct input arguments under correct serialization context\n flattened_args: List[Any] = ray.get(self.args)\n\n # dereference arguments like Ray remote functions\n flattened_args = [\n ray.get(a) if isinstance(a, ObjectRef) else a for a in flattened_args\n ]\n return signature.recover_args(flattened_args)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 196, "n_words": 60, "vocab_size": 49, "complexity": 4, "nloc": 30, "token_counts": 103, "n_ast_nodes": 164, "n_identifiers": 27, "random_cut": "def resolve(self) -> Tuple[List, Dict]:\n \n objects_mapping = []\n for obj_ref in self.workflow_outputs:\n obj, ref = _resolve_object_ref(obj_ref.ref)\n objects_mapping.append(o", "d_id": 30029, "documentation": { 
"docstring": "\n This function resolves the inputs for the code inside\n a workflow step (works on the callee side). For outputs from other\n workflows, we resolve them into object instances inplace.\n\n For each ObjectRef argument, the function returns both the ObjectRef\n and the object instance. If the ObjectRef is a chain of nested\n ObjectRefs, then we resolve it recursively until we get the\n object instance, and we return the *direct* ObjectRef of the\n instance. This function does not resolve ObjectRef\n inside another object (e.g. list of ObjectRefs) to give users some\n flexibility.\n\n Returns:\n Instances of arguments.\n ", "n_words": 94, "vocab_size": 62, "n_whitespaces": 190, "language": "en" } }, { "id": 270768, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_maybe_create_attribute", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _maybe_create_attribute(self, name, default_value):\n \n if not hasattr(self, name):\n self.__setattr__(name, default_value)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 3, "token_counts": 27, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def _maybe_create_attribute(self, name, default_value):\n ", "d_id": 80573, "documentation": { "docstring": "Create the attribute with the default value if it hasn't been created.\n\n This is useful for fields that is used for tracking purpose,\n _trainable_weights, or _layers. Note that user could create a layer subclass\n and assign an internal field before invoking the Layer.__init__(), the\n __setattr__() need to create the tracking fields and __init__() need to not\n override them.\n\n Args:\n name: String, the name of the attribute.\n default_value: Object, the default value of the attribute.\n ", "n_words": 74, "vocab_size": 53, "n_whitespaces": 141, "language": "en" } }, { "id": 177527, "commit_id": "979d54acba7c3d372c93d44c6c149700608ce8b0", "repo": "networkx", "path": "networkx/classes/digraph.py", "file_name": "digraph.py", "fun_name": "add_nodes_from", "commit_message": "doc: update documentation when providing an iterator over current graph to add/remove_edges_from. 
(#6268)\n\n* doc for add_edges_from\r\n\r\n* doc for digraph\r\n\r\n* doc for multigraph\r\n\r\n* multigraph.add_nodes_from returns keylist\r\n\r\n* update docs for graph - edges\r\n\r\n* doc update: graph.add_nodes_from\r\n\r\n* doc update: graph.remove_nodes_from\r\n\r\n* doc update: graph.add_edges_from\r\n\r\n* doc update: rewording for graph.add_edges_from\r\n\r\n* doc update: graph.add_weighted_edges_from rewording\r\n\r\n* doc update: digraph updated as graph\r\n\r\n* doc update: digraph minor sync\r\n\r\n* doc update: multigraph same as graph\r\n\r\n* Update graph.py\r\n\r\n* Update digraph.py\r\n\r\n* Update multigraph.py", "code": "def add_nodes_from(self, nodes_for_adding, **attr):\n \n for n in nodes_for_adding:\n try:\n newnode = n not in self._node\n newdict = attr\n except TypeError:\n n, ndict = n\n newnode = n not in self._node\n newdict = attr.copy()\n newdict.update(ndict)\n if newnode:\n if n is None:\n raise ValueError(\"None cannot be a node\")\n self._succ[n] = self.adjlist_inner_dict_factory()\n self._pred[n] = self.adjlist_inner_dict_factory()\n self._node[n] = self.node_attr_dict_factory()\n self._node[n].update(newdict)\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 283, "n_words": 56, "vocab_size": 37, "complexity": 5, "nloc": 17, "token_counts": 118, "n_ast_nodes": 191, "n_identifiers": 17, "random_cut": "def add_nodes_from(self, nodes_for_adding, **attr):\n \n for n in nodes_for_adding:\n try:\n newnode = n not in self._node\n newdict = attr\n except Type", "d_id": 42421, "documentation": { "docstring": "Add multiple nodes.\n\n Parameters\n ----------\n nodes_for_adding : iterable container\n A container of nodes (list, dict, set, etc.).\n OR\n A container of (node, attribute dict) tuples.\n Node attributes are updated using the attribute dict.\n attr : keyword arguments, optional (default= no attributes)\n Update attributes for all nodes in nodes.\n Node attributes specified in nodes as a tuple take\n precedence over attributes specified via keyword arguments.\n\n See Also\n --------\n add_node\n\n Notes\n -------\n When adding nodes from an iterator over the graph you are changing,\n a `RuntimeError` can be raised with message:\n `RuntimeError: dictionary changed size during iteration`. This\n happens when the graph's underlying dictionary is modified during\n iteration. To avoid this error, evaluate the iterator into a separate\n object, e.g. 
by using `list(iterator_of_nodes)`, and pass this\n object to `G.add_nodes_from`.\n\n Examples\n --------\n >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc\n >>> G.add_nodes_from(\"Hello\")\n >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])\n >>> G.add_nodes_from(K3)\n >>> sorted(G.nodes(), key=str)\n [0, 1, 2, 'H', 'e', 'l', 'o']\n\n Use keywords to update specific node attributes for every node.\n\n >>> G.add_nodes_from([1, 2], size=10)\n >>> G.add_nodes_from([3, 4], weight=0.4)\n\n Use (node, attrdict) tuples to update attributes for specific nodes.\n\n >>> G.add_nodes_from([(1, dict(size=11)), (2, {\"color\": \"blue\"})])\n >>> G.nodes[1][\"size\"]\n 11\n >>> H = nx.Graph()\n >>> H.add_nodes_from(G.nodes(data=True))\n >>> H.nodes[1][\"size\"]\n 11\n\n Evaluate an iterator over a graph if using it to modify the same graph\n\n >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)])\n >>> # wrong way - will raise RuntimeError\n >>> # G.add_nodes_from(n + 1 for n in G.nodes)\n >>> # correct way\n >>> G.add_nodes_from(list(n + 1 for n in G.nodes))\n ", "n_words": 260, "vocab_size": 173, "n_whitespaces": 632, "language": "en" } }, { "id": 72246, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_workflows.py", "file_name": "test_workflows.py", "fun_name": "test_submitted_email_notifications_sent", "commit_message": "Reformat with black", "code": "def test_submitted_email_notifications_sent(self):\n \n self.login(self.submitter)\n self.submit()\n\n self.assertEqual(len(mail.outbox), 4)\n\n task_submission_emails = [\n email for email in mail.outbox if \"task\" in email.subject\n ]\n task_submission_emailed_addresses = [\n address for email in task_submission_emails for address in email.to\n ]\n workflow_submission_emails = [\n email for email in mail.outbox if \"workflow\" in email.subject\n ]\n workflow_submission_emailed_addresses = [\n address for email in workflow_submission_emails for address in email.to\n ]\n\n self.assertEqual(len(task_submission_emails), 3)\n # the moderator is in the Group assigned to the GroupApproval task, so should get an email\n self.assertIn(self.moderator.email, task_submission_emailed_addresses)\n self.assertIn(self.moderator2.email, task_submission_emailed_addresses)\n # with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a task email\n self.assertIn(self.superuser.email, task_submission_emailed_addresses)\n # the submitter triggered this workflow update, so should not get an email\n self.assertNotIn(self.submitter.email, task_submission_emailed_addresses)\n\n self.assertEqual(len(workflow_submission_emails), 1)\n # the moderator should not get a workflow email\n self.assertNotIn(self.moderator.email, workflow_submission_emailed_addresses)\n self.assertNotIn(self.moderator2.email, workflow_submission_emailed_addresses)\n # with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a workflow email\n self.assertIn(self.superuser.email, workflow_submission_emailed_addresses)\n # as the submitter was the triggering user, the submitter should not get an email notification\n self.assertNotIn(self.submitter.email, workflow_submission_emailed_addresses)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 393, "n_words": 153, "vocab_size": 64, "complexity": 9, "nloc": 26, "token_counts": 214, "n_ast_nodes": 335, "n_identifiers": 22, "random_cut": "def 
test_submitted_email_notifications_sent(self):\n \n self.login(self.submitter)\n self.submit()\n\n self.assertEqual(len(mail.outbox), 4)\n\n task_submission_emails = [\n email for email in mail.outbox if \"task\" in email.subject\n ]\n task_submission_emailed_addresses = [\n address for email in task_submission_emails for address in email.to\n ]\n workflow_submission_emails = [\n email for email in mail.outbox if \"workflow\" in email.subject\n ]\n workflow_submission_emailed_addresses = [\n address for email in workflow_submission_emails for address in email.t", "d_id": 15860, "documentation": { "docstring": "Test that 'submitted' notifications for WorkflowState and TaskState are both sent correctly", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 35810, "commit_id": "d83d22f578276e9f201b0b3b0f8f9bd68e86c133", "repo": "transformers", "path": "src/transformers/models/maskformer/modeling_maskformer.py", "file_name": "modeling_maskformer.py", "fun_name": "forward", "commit_message": "Maskformer (#15682)\n\n* maskformer\r\n\r\n* conflicts\r\n\r\n* conflicts\r\n\r\n* minor fixes\r\n\r\n* feature extractor test fix\r\n\r\nrefactor MaskFormerLoss following conversation\r\n\r\nMaskFormer related types should not trigger a module time import error\r\n\r\nmissed one\r\n\r\nremoved all the types that are not used\r\n\r\nupdate config mapping\r\n\r\nminor updates in the doc\r\n\r\nresolved conversation that doesn't need a discussion\r\n\r\nminor changes\r\n\r\nresolved conversations\r\n\r\nfixed DetrDecoder\r\n\r\n* minor changes\r\n\r\nminor changes\r\n\r\nfixed mdx file\r\n\r\ntest feature_extractor return types\r\n\r\nfunctional losses -> classes\r\n\r\nremoved the return type test for the feature extractor\r\n\r\nminor changes + style + quality\r\n\r\n* conflicts?\r\n\r\n* rebase master\r\n\r\n* readme\r\n\r\n* added missing files\r\n\r\n* deleded poolformers test that where in the wrong palce\r\n\r\n* CI\r\n\r\n* minor changes\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* resolved conversations\r\n\r\n* minor changes\r\n\r\n* conversations\r\n\r\n[Unispeech] Fix slow tests (#15818)\r\n\r\n* remove soundfile old way of loading audio\r\n\r\n* Adapt slow test\r\n\r\n[Barthez Tokenizer] Fix saving (#15815)\r\n\r\n[TFXLNet] Correct tf xlnet generate (#15822)\r\n\r\n* [TFXLNet] Correct tf xlnet\r\n\r\n* adapt test comment\r\n\r\nFix the push run (#15807)\r\n\r\nFix semantic segmentation pipeline test (#15826)\r\n\r\nFix dummy_inputs() to dummy_inputs in symbolic_trace doc (#15776)\r\n\r\nAdd model specific output classes to PoolFormer model docs (#15746)\r\n\r\n* Added model specific output classes to poolformer docs\r\n\r\n* Fixed Segformer typo in Poolformer docs\r\n\r\nAdding the option to return_timestamps on pure CTC ASR models. 
(#15792)\r\n\r\n* Adding the option to return_timestamps on pure CTC ASR models.\r\n\r\n* Remove `math.prod` which was introduced in Python 3.8\r\n\r\n* int are not floats.\r\n\r\n* Reworking the PR to support \"char\" vs \"word\" output.\r\n\r\n* Fixup!\r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Quality.\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\nHFTracer.trace should use/return self.graph to be compatible with torch.fx.Tracer (#15824)\r\n\r\nFix tf.concatenate + test past_key_values for TF models (#15774)\r\n\r\n* fix wrong method name tf.concatenate\r\n\r\n* add tests related to causal LM / decoder\r\n\r\n* make style and quality\r\n\r\n* clean-up\r\n\r\n* Fix TFBertModel's extended_attention_mask when past_key_values is provided\r\n\r\n* Fix tests\r\n\r\n* fix copies\r\n\r\n* More tf.int8 -> tf.int32 in TF test template\r\n\r\n* clean-up\r\n\r\n* Update TF test template\r\n\r\n* revert the previous commit + update the TF test template\r\n\r\n* Fix TF template extended_attention_mask when past_key_values is provided\r\n\r\n* Fix some styles manually\r\n\r\n* clean-up\r\n\r\n* Fix ValueError: too many values to unpack in the test\r\n\r\n* Fix more: too many values to unpack in the test\r\n\r\n* Add a comment for extended_attention_mask when there is past_key_values\r\n\r\n* Fix TFElectra extended_attention_mask when past_key_values is provided\r\n\r\n* Add tests to other TF models\r\n\r\n* Fix for TF Electra test: add prepare_config_and_inputs_for_decoder\r\n\r\n* Fix not passing training arg to lm_head in TFRobertaForCausalLM\r\n\r\n* Fix tests (with past) for TF Roberta\r\n\r\n* add testing for pask_key_values for TFElectra model\r\n\r\nCo-authored-by: ydshieh \r\n\r\n[examples/summarization and translation] fix readme (#15833)\r\n\r\nAdd ONNX Runtime quantization for text classification notebook (#15817)\r\n\r\nRe-enable doctests for the quicktour (#15828)\r\n\r\n* Re-enable doctests for the quicktour\r\n\r\n* Re-enable doctests for task_summary (#15830)\r\n\r\n* Remove &\r\n\r\nFramework split model report (#15825)\r\n\r\nAdd TFConvNextModel (#15750)\r\n\r\n* feat: initial implementation of convnext in tensorflow.\r\n\r\n* fix: sample code for the classification model.\r\n\r\n* chore: added checked for from the classification model.\r\n\r\n* chore: set bias initializer in the classification head.\r\n\r\n* chore: updated license terms.\r\n\r\n* chore: removed ununsed imports\r\n\r\n* feat: enabled argument during using drop_path.\r\n\r\n* chore: replaced tf.identity 
with layers.Activation(linear).\r\n\r\n* chore: edited default checkpoint.\r\n\r\n* fix: minor bugs in the initializations.\r\n\r\n* partial-fix: tf model errors for loading pretrained pt weights.\r\n\r\n* partial-fix: call method updated\r\n\r\n* partial-fix: cross loading of weights (4x3 variables to be matched)\r\n\r\n* chore: removed unneeded comment.\r\n\r\n* removed playground.py\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: renaming TFConvNextStage conv and layer norm layers\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* add: tests for convnext.\r\n\r\n* fix: integration tester class.\r\n\r\n* fix: issues mentioned in pr feedback (round 1).\r\n\r\n* fix: how output_hidden_states arg is propoagated inside the network.\r\n\r\n* feat: handling of arg for pure cnn models.\r\n\r\n* chore: added a note on equal contribution in model docs.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* feat: encapsulation for the convnext trunk.\r\n\r\n* Fix variable naming; Test-related corrections; Run make fixup\r\n\r\n* chore: added Joao as a contributor to convnext.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: corrected copyright year and added comment on NHWC.\r\n\r\n* chore: fixed the black version and ran formatting.\r\n\r\n* chore: ran make style.\r\n\r\n* chore: removed from_pt argument from test, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: tests in the convnext subclass, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: moved convnext test to the correct location\r\n\r\n* fix: locations for the test file of convnext.\r\n\r\n* fix: convnext tests.\r\n\r\n* chore: applied sgugger's suggestion for dealing w/ output_attentions.\r\n\r\n* chore: added comments.\r\n\r\n* chore: applied updated quality enviornment style.\r\n\r\n* chore: applied formatting with quality enviornment.\r\n\r\n* chore: revert to the previous tests/test_modeling_common.py.\r\n\r\n* chore: revert to the original test_modeling_common.py\r\n\r\n* chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py\r\n\r\n* fix: tests for convnext.\r\n\r\n* chore: removed output_attentions argument from convnext config.\r\n\r\n* chore: revert to the earlier tf utils.\r\n\r\n* fix: output shapes of the hidden states\r\n\r\n* chore: removed unnecessary comment\r\n\r\n* chore: reverting to the right test_modeling_tf_common.py.\r\n\r\n* Styling nits\r\n\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger \r\n\r\n* minor changes\r\n\r\n* doc fix in feature extractor\r\n\r\n* doc\r\n\r\n* typose\r\n\r\n* removed detr logic from config\r\n\r\n* removed detr logic from config\r\n\r\n* removed num_labels\r\n\r\n* small fix in the config\r\n\r\n* auxilary -> auxiliary\r\n\r\n* make style\r\n\r\n* some test is failing\r\n\r\n* fix a weird char in config prevending doc-builder\r\n\r\n* retry to fix the doc-builder issue\r\n\r\n* make style\r\n\r\n* new try to fix the doc builder\r\n\r\n* CI\r\n\r\n* change weights to facebook\r\n\r\nCo-authored-by: NielsRogge 
<48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger ", "code": "def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]:\n \n indices: List[Tuple[np.array]] = []\n\n preds_masks = masks_queries_logits\n preds_probs = class_queries_logits.softmax(dim=-1)\n # downsample all masks in one go -> save memory\n mask_labels = nn.functional.interpolate(mask_labels, size=preds_masks.shape[-2:], mode=\"nearest\")\n # iterate through batch size\n for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels):\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -pred_probs[:, labels]\n # flatten spatial dimension \"q h w -> q (h w)\"\n num_queries, height, width = pred_mask.shape\n pred_mask_flat = pred_mask.view(num_queries, height * width) # [num_queries, H*W]\n # same for target_mask \"c h w -> c (h w)\"\n num_channels, height, width = target_mask.shape\n target_mask_flat = target_mask.view(num_channels, height * width) # [num_total_labels, H*W]\n # compute the focal loss between each mask pairs -> shape [NUM_QUERIES, CLASSES]\n cost_mask = pair_wise_sigmoid_focal_loss(pred_mask_flat, target_mask_flat)\n # Compute the dice loss betwen each mask pairs -> shape [NUM_QUERIES, CLASSES]\n cost_dice = pair_wise_dice_loss(pred_mask_flat, target_mask_flat)\n # final cost matrix\n cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice\n # do the assigmented using the hungarian algorithm in scipy\n assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())\n indices.append(assigned_indices)\n\n # It could be stacked in one tensor\n matched_indices = [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices\n ]\n return matched_indices\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 535, "n_words": 229, "vocab_size": 152, "complexity": 3, "nloc": 44, "token_counts": 243, "n_ast_nodes": 376, "n_identifiers": 51, "random_cut": "def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]:\n \n indices: List[Tuple[np.array]] = []\n\n preds_masks = masks_queries_logits\n preds_probs = class_queries_logits.softmax(dim=-1)\n # downsample all masks in one go -> save memory\n mask_labels = nn.functional.interpolate(mask_labels, size=preds_masks.shape[-2:], mode=\"nearest\")\n # iterate through batch size\n for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels):\n # Compute the classification cost. 
Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -pred_probs[:, labels]\n # flatten spatial dimension \"q h w -> q (h w)\"\n num_queries, height, width = pred_mask.shape\n pred_mask_flat = pred_mask.view(num_queries, height * width) # [num_queries, H*W]\n # same for target_mask \"c h w -> c (h w)\"\n num_channels, height, width = target_mask.shape\n target_mask_flat = target_mask.view(num_channels, height * width) # [num_total_labels, H*W]\n # compute the focal loss between each mask pairs -> shape [NUM_QUERIES, CLASSES]\n cost_mask = pair_wise_sigmoid_focal_loss(pred_mask_flat, target_mask_flat)\n # Compute the dice loss betwen each mask pairs -> shape [NUM_QUERIES, CLASSES]\n cost_dice = pair_wise_dice_loss(pred_mask_flat, target_mask_flat)\n # final cost matrix\n cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice\n # do the assigmented using the hungarian algorithm in scipy\n assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())\n indices.append(assigned_indices)\n\n ", "d_id": 6543, "documentation": { "docstring": "Performs the matching\n\n Params:\n masks_queries_logits (`torch.Tensor`):\n A tensor` of dim `batch_size, num_queries, num_classes` with the\n classification logits.\n class_queries_logits (`torch.Tensor`):\n A tensor` of dim `batch_size, num_queries, height, width` with the\n predicted masks.\n\n class_labels (`torch.Tensor`):\n A tensor` of dim `num_target_boxes` (where num_target_boxes is the number\n of ground-truth objects in the target) containing the class labels.\n mask_labels (`torch.Tensor`):\n A tensor` of dim `num_target_boxes, height, width` containing the target\n masks.\n\n Returns:\n `List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected labels (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes).\n ", "n_words": 114, "vocab_size": 67, "n_whitespaces": 374, "language": "en" } }, { "id": 81681, "commit_id": "c59bbdecdbdd920c5d3d298d691129c6bbc94c5e", "repo": "awx", "path": "awx/main/models/unified_jobs.py", "file_name": "unified_jobs.py", "fun_name": "cancel_dispatcher_process", "commit_message": "Refactor canceling to work through messaging and signals, not database\n\nIf canceled attempted before, still allow attempting another cancel\nin this case, attempt to send the sigterm signal again.\nKeep clicking, you might help!\n\nReplace other cancel_callbacks with sigterm watcher\n adapt special inventory mechanism for this too\n\nGet rid of the cancel_watcher method with exception in main thread\n\nHandle academic case of sigterm race condition\n\nProcess cancelation as control signal\n\nFully connect cancel method and run_dispatcher to control\n\nNever transition workflows directly to canceled, add logs", "code": "def cancel_dispatcher_process(self):\n \n if not self.celery_task_id:\n return\n canceled = []\n try:\n # Use control and reply mechanism to cancel and obtain confirmation\n timeout = 5\n canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])\n except socket.timeout:\n logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')\n except Exception:\n 
logger.exception(\"error encountered when checking task status\")\n return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 174, "n_words": 58, "vocab_size": 49, "complexity": 4, "nloc": 12, "token_counts": 71, "n_ast_nodes": 136, "n_identifiers": 14, "random_cut": "def cancel_dispatcher_process(self):\n \n if not self.celery_task_id:\n return\n canceled = []\n try:\n # Use control and reply mechanism to cancel and obtain confirmation\n timeout = 5\n canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])\n except socket.timeout:\n logger.error(f'could not reach dispatcher on {self.controller_node} within {timeou", "d_id": 17245, "documentation": { "docstring": "Returns True if dispatcher running this job acknowledged request and sent SIGTERM", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 154573, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_mangle_index_names", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def _mangle_index_names(cls, names):\n \n return [\n f\"__index__{i}_{'__None__' if n is None else n}\"\n for i, n in enumerate(names)\n ]\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 22, "n_ast_nodes": 54, "n_identifiers": 6, "random_cut": "def _mangle_index_names(cls, names):\n \n return [\n f\"__index__{i", "d_id": 36083, "documentation": { "docstring": "\n Return mangled index names for index labels.\n\n Mangled names are used for index columns because index\n labels cannot always be used as HDK table column\n names. E.e. label can be a non-string value or an\n unallowed string (empty strings, etc.) 
for a table column\n name.\n\n Parameters\n ----------\n names : list of str\n Index labels.\n\n Returns\n -------\n list of str\n Mangled names.\n ", "n_words": 61, "vocab_size": 43, "n_whitespaces": 175, "language": "en" } }, { "id": 67221, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/gstr_1/gstr_1.py", "file_name": "gstr_1.py", "fun_name": "get_b2cs_json", "commit_message": "style: format code with black", "code": "def get_b2cs_json(data, gstin):\n\tcompany_state_number = gstin[0:2]\n\n\tout = []\n\tfor d in data:\n\t\tif not d.get(\"place_of_supply\"):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\n\t\t\t\t\t\n\t\t\t\t).format(frappe.bold(\"Place Of Supply\"))\n\t\t\t)\n\n\t\tpos = d.get(\"place_of_supply\").split(\"-\")[0]\n\t\ttax_details = {}\n\n\t\trate = d.get(\"rate\", 0)\n\t\ttax = flt((d[\"taxable_value\"] * rate) / 100.0, 2)\n\n\t\tif company_state_number == pos:\n\t\t\ttax_details.update({\"camt\": flt(tax / 2.0, 2), \"samt\": flt(tax / 2.0, 2)})\n\t\telse:\n\t\t\ttax_details.update({\"iamt\": tax})\n\n\t\tinv = {\n\t\t\t\"sply_ty\": \"INTRA\" if company_state_number == pos else \"INTER\",\n\t\t\t\"pos\": pos,\n\t\t\t\"typ\": d.get(\"type\"),\n\t\t\t\"txval\": flt(d.get(\"taxable_value\"), 2),\n\t\t\t\"rt\": rate,\n\t\t\t\"iamt\": flt(tax_details.get(\"iamt\"), 2),\n\t\t\t\"camt\": flt(tax_details.get(\"camt\"), 2),\n\t\t\t\"samt\": flt(tax_details.get(\"samt\"), 2),\n\t\t\t\"csamt\": flt(d.get(\"cess_amount\"), 2),\n\t\t}\n\n\t\tif d.get(\"type\") == \"E\" and d.get(\"ecommerce_gstin\"):\n\t\t\tinv.update({\"etin\": d.get(\"ecommerce_gstin\")})\n\n\t\tout.append(inv)\n\n\treturn out\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 69, "n_words": 101, "vocab_size": 76, "complexity": 7, "nloc": 34, "token_counts": 291, "n_ast_nodes": 495, "n_identifiers": 21, "random_cut": "def get_b2cs_json(data, gstin):\n\tcompany_state_number = gstin[0:2]\n\n\tout = []\n\tfor d in data:\n\t\tif not d.get(\"place_of_supply\"):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\n\t\t\t\t\t\n\t\t\t\t).format(frappe.bold(\"Place Of Supply\"))\n\t\t\t)\n\n\t\tpos = d.get(\"place_of_supply\").split(\"-\")[0]\n\t\ttax_details = {}\n\n\t\trate = d.get(\"rate\", 0)\n\t\ttax = flt((d[\"taxable_value\"] * rate) / 100.0, 2)\n\n\t\tif company_state_number == pos:\n\t\t\ttax_details.update({\"camt\": flt(tax / 2.0, 2), \"samt\": flt(tax / 2.0, 2)})\n\t\telse:\n\t\t\ttax_details.update({\"iamt\": tax})\n\n\t\tinv = {\n\t\t\t\"sply_ty\": \"INTRA\" if company_state_number == pos else \"INTER\",\n\t\t\t\"pos\": pos,\n\t\t\t\"typ\": d.get(\"type\"),\n\t\t\t\"txval\": flt(d.get(\"taxable_value\"), 2),\n\t\t\t\"rt\": rate,\n\t\t\t\"iamt\": flt(tax_details.get(\"iamt\"), 2),\n\t\t\t\"camt\": flt(tax_details.get(\"camt\"), 2),\n\t\t\t\"samt\": flt(tax_details.get(\"samt\"), 2),\n\t\t\t\"csamt\": flt(d.get(\"cess_amount\"), 2),\n\t\t}\n\n\t\tif d.get(\"type\") == \"E\" and d.get(\"ecommerce_gstin\"):\n\t\t\tinv.upda", "d_id": 14444, "documentation": { "docstring": "{0} not entered in some invoices.\n\t\t\t\tPlease update and try again", "n_words": 11, "vocab_size": 11, "n_whitespaces": 9, "language": "en" } }, { "id": 109189, "commit_id": "c739787b88f6bf36f5ac5603b84773287bcd98b7", "repo": "matplotlib", "path": "lib/matplotlib/tri/triinterpolate.py", "file_name": "triinterpolate.py", "fun_name": "_roll_vectorized", "commit_message": "Clean up code in tri", "code": "def 
_roll_vectorized(M, roll_indices, axis):\n \n assert axis in [0, 1]\n ndim = M.ndim\n assert ndim == 3\n ndim_roll = roll_indices.ndim\n assert ndim_roll == 1\n sh = M.shape\n r, c = sh[-2:]\n assert sh[0] == roll_indices.shape[0]\n vec_indices = np.arange(sh[0], dtype=np.int32)\n\n # Builds the rolled matrix\n M_roll = np.empty_like(M)\n if axis == 0:\n for ir in range(r):\n for ic in range(c):\n M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]\n else: # 1\n for ir in range(r):\n for ic in range(c):\n M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]\n return M_roll\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 201, "n_words": 89, "vocab_size": 51, "complexity": 6, "nloc": 20, "token_counts": 177, "n_ast_nodes": 266, "n_identifiers": 20, "random_cut": "def _roll_vectorized(M, roll_indices, axis):\n \n assert axis in [0, 1]\n ndim = M.ndim\n assert ndim == 3\n ndim_roll = roll_indices.ndim\n assert ndim_roll == 1\n sh = M.shape\n r, c = sh[-2:]\n assert sh[0] == roll_indices.shape[0]\n vec_indices = np.arange(sh[0], dtype=np.int32)\n\n # Builds the rolled matrix\n M_roll = np.empty_like(M)\n if axis == 0:\n for ir in range(r):\n for ic in range(c):\n M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]\n else: # 1\n for ir in range(r)", "d_id": 23469, "documentation": { "docstring": "\n Roll an array of matrices along *axis* (0: rows, 1: columns) according to\n an array of indices *roll_indices*.\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 28, "language": "en" } }, { "id": 6463, "commit_id": "f277e3bff51842c93f99187605dfaf19b5790b29", "repo": "ludwig", "path": "tests/integration_tests/test_visualization_api.py", "file_name": "test_visualization_api.py", "fun_name": "test_calibration_1_vs_all_vis_api", "commit_message": "Added lightweight preprocessor for categorical features (#1761)\n\n* Added lightweight preprocessor for categorical features\r\n\r\n* Fix visualization tests.\r\n\r\n* Get the number of classes from metadata instead of figuring it out on the fly from ground truth labels.\r\n\r\nCo-authored-by: Justin Zhao ", "code": "def test_calibration_1_vs_all_vis_api(experiment_to_use):\n \n experiment = experiment_to_use\n probabilities = experiment.probabilities\n viz_outputs = (\"pdf\", \"png\")\n with TemporaryDirectory() as tmpvizdir:\n for viz_output in viz_outputs:\n vis_output_pattern_pdf = os.path.join(tmpvizdir, f\"*.{viz_output}\")\n visualize.calibration_1_vs_all(\n [probabilities, probabilities],\n experiment.ground_truth,\n experiment.ground_truth_metadata,\n experiment.output_feature_name,\n top_n_classes=[6],\n labels_limit=0,\n model_namess=[\"Model1\", \"Model2\"],\n output_directory=tmpvizdir,\n file_format=viz_output,\n )\n figure_cnt = glob.glob(vis_output_pattern_pdf)\n assert 7 == len(figure_cnt)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 256, "n_words": 44, "vocab_size": 40, "complexity": 2, "nloc": 20, "token_counts": 110, "n_ast_nodes": 178, "n_identifiers": 25, "random_cut": "def test_calibration_1_vs_all_vis_api(experiment_to_use):\n \n experiment = experiment_to_use\n probabilities = experiment.probabilities\n viz_outputs = (\"pdf\", \"png\")\n with TemporaryDirectory() as tmpvizdir:\n for viz_output in viz_outputs:\n vis_output_pattern_pdf = os.path.join(tmpvizdir, f\"*.{viz_output}\")\n 
visualize.calibration_1_vs_all(\n [probabilities, probabilities],\n experiment.ground_truth,\n experiment.ground_truth_metadata,\n experiment.output_feature_name,\n top_n_classes=[6],\n labels_limit=0,\n model_namess=[\"Model1\", \"Model2\"],\n output_directory=tmpvizdir,\n file_format=", "d_id": 999, "documentation": { "docstring": "Ensure pdf and png figures can be saved via visualization API call.\n\n :param experiment_to_use: Object containing trained model and results to\n test visualization\n :return: None\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 41, "language": "en" } }, { "id": 247872, "commit_id": "33ebee47e4e96a2b6fdf72091769e59034dc550f", "repo": "synapse", "path": "tests/rest/admin/test_server_notice.py", "file_name": "test_server_notice.py", "fun_name": "test_send_server_notice_delete_room", "commit_message": "Remove redundant `get_success` calls in test code (#12346)\n\nThere are a bunch of places we call get_success on an immediate value, which is unnecessary. Let's rip them out, and remove the redundant functionality in get_success and friends.", "code": "def test_send_server_notice_delete_room(self) -> None:\n \n # user has no room memberships\n self._check_invite_and_join_status(self.other_user, 0, 0)\n\n # send first message\n channel = self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content={\n \"user_id\": self.other_user,\n \"content\": {\"msgtype\": \"m.text\", \"body\": \"test msg one\"},\n },\n )\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n\n # user has one invite\n invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)\n first_room_id = invited_rooms[0].room_id\n\n # user joins the room and is member now\n self.helper.join(\n room=first_room_id, user=self.other_user, tok=self.other_user_token\n )\n self._check_invite_and_join_status(self.other_user, 0, 1)\n\n # get messages\n messages = self._sync_and_get_messages(first_room_id, self.other_user_token)\n self.assertEqual(len(messages), 1)\n self.assertEqual(messages[0][\"content\"][\"body\"], \"test msg one\")\n self.assertEqual(messages[0][\"sender\"], \"@notices:test\")\n\n # shut down and purge room\n self.get_success(\n self.room_shutdown_handler.shutdown_room(first_room_id, self.admin_user)\n )\n self.get_success(self.pagination_handler.purge_room(first_room_id))\n\n # user is not member anymore\n self._check_invite_and_join_status(self.other_user, 0, 0)\n\n # It doesn't really matter what API we use here, we just want to assert\n # that the room doesn't exist.\n summary = self.get_success(self.store.get_room_summary(first_room_id))\n # The summary should be empty since the room doesn't exist.\n self.assertEqual(summary, {})\n\n # invalidate cache of server notices room_ids\n # if server tries to send to a cached room_id it gives an error\n self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()\n\n # send second message\n channel = self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content={\n \"user_id\": self.other_user,\n \"content\": {\"msgtype\": \"m.text\", \"body\": \"test msg two\"},\n },\n )\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n\n # user has one invite\n invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)\n second_room_id = invited_rooms[0].room_id\n\n # user joins the room and is member now\n self.helper.join(\n room=second_room_id, user=self.other_user, tok=self.other_user_token\n )\n 
self._check_invite_and_join_status(self.other_user, 0, 1)\n\n # get message\n messages = self._sync_and_get_messages(second_room_id, self.other_user_token)\n\n self.assertEqual(len(messages), 1)\n self.assertEqual(messages[0][\"content\"][\"body\"], \"test msg two\")\n self.assertEqual(messages[0][\"sender\"], \"@notices:test\")\n # second room has new ID\n self.assertNotEqual(first_room_id, second_room_id)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 800, "n_words": 240, "vocab_size": 122, "complexity": 1, "nloc": 55, "token_counts": 443, "n_ast_nodes": 715, "n_identifiers": 42, "random_cut": "def test_send_server_notice_delete_room(self) -> None:\n \n # user has no room memberships\n self._check_invite_and_join_status(self.other_user, 0, 0)\n\n # send first message\n channel = self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content={\n \"user_id\": self.other_user,\n \"content\": {\"msgtype\": \"m.text\", \"body\": \"test msg one\"},\n },\n )\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n\n # user has one invite\n invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)\n first_room_id = invited_rooms[0].room_id\n\n # user joins the room and is member now\n self.helper.join(\n room=first_room_id, user=self.other_user, tok=self.other_user_token\n )\n self._check_invite_and_join_status(self.other_user, 0, 1)\n\n # get messages\n messages = self._sync_and_get_messages(first_room_id, self.other_user_token)\n self.assertEqual(len(messages), 1)\n self.assertEqual(messages[0][\"content\"][\"body\"], \"test msg one\")\n self.assertEqual(messages[0][\"sender\"], \"@notices:test\")\n\n # shut down and purge room\n self.get_success(\n self.room_shutdown_handler.shutdown_room(first_room_id, self.admin_user)\n )\n self.get_success(self.pagination_handler.purge_room(first_room_id))\n\n # user is not member anymore\n self._check_invite_and_join_status(self.other_user, 0, 0)\n\n # It doesn't really matter what API we use here, we just want to assert\n # that the room doesn't exist.\n summary = self.get_success(self.store.get_room_summary(first_room_id))\n # The summary should be empty since the room doesn't exist.\n self.assertEqual(summary, {})\n\n # invalidate cache of server notices room_ids\n # if server tries to send to a cached room_id it gives an error\n self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all()\n\n # send second message\n channel = self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content={\n \"user_id\": self.other_user,\n \"content\": {\"msgtype\": \"m.text\", \"body\": \"test msg two\"},\n },\n )\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n\n ", "d_id": 71968, "documentation": { "docstring": "\n Tests that the user get server notice in a new room\n after the first server notice room was deleted.\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 41, "language": "en" } }, { "id": 195244, "commit_id": "b1acb681207559da56a787ba96e16f0e23697d92", "repo": "ParlAI", "path": "projects/bb3/agents/utils.py", "file_name": "utils.py", "fun_name": "is_request_failed_response", "commit_message": "Patch 8322 (#4709)\n\n* add dafetymix teacher\r\n\r\n* safety_mix teacher\r\n\r\n* safety_mix teacher pos and neg teachers\r\n\r\n* add tests for teacher\r\n\r\n* add license info\r\n\r\n* improvement\r\n\r\n* add task list\r\n\r\n* add 
task list and lint\r\n\r\n* add init.py\r\n\r\n* adding some patch to director\r\n\r\n* seeker changes\r\n\r\n* th\r\n\r\n* 3\r\n\r\n* jing\r\n\r\n* changes\r\n\r\n* z and r\r\n\r\n* remove .opts\r\n\r\n* fix docs\r\n\r\n* add contrractions\r\n\r\n* lint\r\n\r\nCo-authored-by: Dexter Ju \r\nCo-authored-by: Jing Xu ", "code": "def is_request_failed_response(resp):\n \n return len(\n resp.get('failures', [])\n ) > 0 or APIUtils.METASEQ_FAIL_MESSAGE_TEXT in resp.get('text', '')\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 61, "n_identifiers": 6, "random_cut": "def is_request_failed_response(resp):\n \n return len(\n resp.get('failures', [])\n ) > 0 or APIUtils.METASEQ_FAIL_MESSAGE_TEXT in resp.get('text', '')\n", "d_id": 47232, "documentation": { "docstring": "\n Whether the requests to Metaseq worker have failed.\n\n It checks this based on the existences of the failure reasons as they get\n accumulated in `_make_request` functionn calls.\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 56, "language": "en" } }, { "id": 108776, "commit_id": "cf995d1304bfa7f660e7158b5121a46e54f869f2", "repo": "matplotlib", "path": "lib/matplotlib/patches.py", "file_name": "patches.py", "fun_name": "draw", "commit_message": "Remove ineffective exclusion of Arcs without parent Axes.\n\nThe `if not hasattr(self, 'axes'): raise RuntimeError(...)` check was\nineffectual, as artists now always have an Axes attribute, which can\njust be None for some artists. In fact, small Arcs are drawn just fine\nwithout a parent Axes; e.g.\n```\nfrom pylab import *\nfrom matplotlib.patches import *\nfig = figure()\nfig.add_artist(Ellipse((.2, .2), .1, .3, angle=45)) # for comparison\nfig.add_artist(Arc((.2, .2), .1, .3, angle=45, theta1=0, theta2=45))\n```\nworks just fine. Remove the check, and adjust the docs accordingly.\n\nOn the other hand, large arcs *did* previously fail,\nbut that occurred a bit further down, when computing\n`transforms.BboxTransformTo(self.axes.bbox)` (`self.axes` is None -->\nAttributeError). Fix that by using the figure bbox in that case (as the\npoint is to limit the drawing to the unclipped area, which is the whole\nfigure for Arcs without a parent Axes).", "code": "def draw(self, renderer):\n \n if not self.get_visible():\n return\n\n self._recompute_transform()\n\n width = self.convert_xunits(self.width)\n height = self.convert_yunits(self.height)\n\n # If the width and height of ellipse are not equal, take into account\n # stretching when calculating angles to draw between", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 96, "n_words": 36, "vocab_size": 31, "complexity": 11, "nloc": 50, "token_counts": 404, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def draw(self, renderer):\n \n if not self.get_visibl", "d_id": 23337, "documentation": { "docstring": "\n Draw the arc to the given *renderer*.\n\n Notes\n -----\n Ellipses are normally drawn using an approximation that uses\n eight cubic Bezier splines. The error of this approximation\n is 1.89818e-6, according to this unverified source:\n\n Lancaster, Don. 
*Approximating a Circle or an Ellipse Using\n Four Bezier Cubic Splines.*\n\n https://www.tinaja.com/glib/ellipse4.pdf\n\n There is a use case where very large ellipses must be drawn\n with very high accuracy, and it is too expensive to render the\n entire ellipse with enough segments (either splines or line\n segments). Therefore, in the case where either radius of the\n ellipse is large enough that the error of the spline\n approximation will be visible (greater than one pixel offset\n from the ideal), a different technique is used.\n\n In that case, only the visible parts of the ellipse are drawn,\n with each visible arc using a fixed number of spline segments\n (8). The algorithm proceeds as follows:\n\n 1. The points where the ellipse intersects the axes (or figure)\n bounding box are located. (This is done by performing an inverse\n transformation on the bbox such that it is relative to the unit\n circle -- this makes the intersection calculation much easier than\n doing rotated ellipse intersection directly.)\n\n This uses the \"line intersecting a circle\" algorithm from:\n\n Vince, John. *Geometry for Computer Graphics: Formulae,\n Examples & Proofs.* London: Springer-Verlag, 2005.\n\n 2. The angles of each of the intersection points are calculated.\n\n 3. Proceeding counterclockwise starting in the positive\n x-direction, each of the visible arc-segments between the\n pairs of vertices are drawn using the Bezier arc\n approximation technique implemented in `.Path.arc`.\n ", "n_words": 258, "vocab_size": 160, "n_whitespaces": 541, "language": "en" } }, { "id": 261701, "commit_id": "5e25f8e06dcf853d4079dbefa40e3e9558a1d976", "repo": "scikit-learn", "path": "sklearn/utils/_param_validation.py", "file_name": "_param_validation.py", "fun_name": "validate_parameter_constraints", "commit_message": "MAINT Make param validation more lenient towards downstream dependencies (#25088)", "code": "def validate_parameter_constraints(parameter_constraints, params, caller_name):\n \n for param_name, param_val in params.items():\n # We allow parameters to not have a constraint so that third party estimators\n # can inherit from sklearn estimators without having to necessarily use the\n # validation tools.\n if param_name not in parameter_constraints:\n continue\n\n constraints = parameter_constraints[param_name]\n\n if constraints == \"no_validation\":\n continue\n\n constraints = [make_constraint(constraint) for constraint in constraints]\n\n for constraint in constraints:\n if constraint.is_satisfied_by(param_val):\n # this constraint is satisfied, no need to check further.\n break\n else:\n # No constraint is satisfied, raise with an informative message.\n\n # Ignore constraints that we don't want to expose in the error message,\n # i.e. options that are for internal purpose or not officially supported.\n constraints = [\n constraint for constraint in constraints if not constraint.hidden\n ]\n\n if len(constraints) == 1:\n constraints_str = f\"{constraints[0]}\"\n else:\n constraints_str = (\n f\"{', '.join([str(c) for c in constraints[:-1]])} or\"\n f\" {constraints[-1]}\"\n )\n\n raise ValueError(\n f\"The {param_name!r} parameter of {caller_name} must be\"\n f\" {constraints_str}. 
Got {param_val!r} instead.\"\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 25, "n_whitespaces": 519, "n_words": 160, "vocab_size": 105, "complexity": 11, "nloc": 32, "token_counts": 137, "n_ast_nodes": 261, "n_identifiers": 18, "random_cut": "def validate_parameter_constraints(parameter_constraints, params, caller_name):\n \n for param_name, param_val in params.items():\n # We allow parameters to not have a constraint so that third party estimators\n # can inherit from sklearn estimators without having to necessarily use the\n # validation tools.\n if param_name not in parameter_constraints:\n continue\n\n constraints = parameter_constraints[param_name]\n\n if constraints == \"no_validation\":\n continue\n\n constraints = [make_constraint(constraint) for constraint in constraints]\n\n for constraint in constraints:\n if constraint.is_satisfied_by(param_val):\n # this constraint is satisfied, no need to check further.\n break\n else:\n ", "d_id": 76944, "documentation": { "docstring": "Validate types and values of given parameters.\n\n Parameters\n ----------\n parameter_constraints : dict or {\"no_validation\"}\n If \"no_validation\", validation is skipped for this parameter.\n\n If a dict, it must be a dictionary `param_name: list of constraints`.\n A parameter is valid if it satisfies one of the constraints from the list.\n Constraints can be:\n - an Interval object, representing a continuous or discrete range of numbers\n - the string \"array-like\"\n - the string \"sparse matrix\"\n - the string \"random_state\"\n - callable\n - None, meaning that None is a valid value for the parameter\n - any type, meaning that any instance of this type is valid\n - an Options object, representing a set of elements of a given type\n - a StrOptions object, representing a set of strings\n - the string \"boolean\"\n - the string \"verbose\"\n - the string \"cv_object\"\n - the string \"missing_values\"\n - a HasMethods object, representing method(s) an object must have\n - a Hidden object, representing a constraint not meant to be exposed to the user\n\n params : dict\n A dictionary `param_name: param_value`. The parameters to validate against the\n constraints.\n\n caller_name : str\n The name of the estimator or function or method that called this function.\n ", "n_words": 195, "vocab_size": 103, "n_whitespaces": 367, "language": "en" } }, { "id": 263844, "commit_id": "5b2ab7067ba954bd7950a79ed31e5ee177ff3f43", "repo": "pyinstaller", "path": "PyInstaller/building/build_main.py", "file_name": "build_main.py", "fun_name": "_get_module_collection_mode", "commit_message": "building & hooks: implement module collection mode setting\n\nImplement a mechanism for controlling the collection mode of\nmodules and packages, with granularity ranging from top-level\npackages to individual sub-modules. 
Therefore, the hooks can\nnow specify whether the hooked package should be collected as\nbyte-compiled .pyc modules into embedded PYZ archive (the\ndefault behavior), or as source .py files collected as external\ndata files (without corresponding modules in the PYZ archive).\n\nThe latter option should let us avoid unnecessary .pyc module\ncollection when the source files are required by the code, or\nwork around the situations where having a .pyc module in\nPYZ archive causes issues due to FrozenImporter's incompatibility\nwith sys.path manipulation that some packages attempt to perform.\n\nThis feature adds a new optional global hook variable, called\n`module_collection_mode`. The value can be either a string\n(\"py\" or \"pyc\") or a dictionary of module names and setting\nstrings.\n\nIn the case of a string, the setting affects the hooked module\nor a package, and is applied recursively to all sub-packages and\nsub-modules, unless another hook overrides it.\n\nThe dictionary setting allows a hook to specify different settings\nfor the package and it subpackages, or even different settings\nfor other packages.\n\nA corresponding `set_module_collection_mode` method has been\nadded to the `hook_api` object for adjusting the collection\nmode from within the `hook()` function.\n\nThe `Analysis` object can now also be passed a dictionary via\nan optional `module_collection_mode` argument; the corresponding\nsettings are applied last, which allows advanced users to both\nsupplement and override the settings made by the hooks.", "code": "def _get_module_collection_mode(mode_dict, name):\n \n mode = 'pyc' # Default mode\n\n # No settings available - return default.\n if not mode_dict:\n return mode\n\n # Search the parent modules/packages in top-down fashion, and take the last given setting. 
This ensures that\n # a setting given for the top-level package is recursively propagated to all its subpackages and submodules,\n # but also allows individual sub-modules to override the setting again.\n name_parts = name.split('.')\n for i in range(len(name_parts)):\n modlevel = \".\".join(name_parts[:i + 1])\n modlevel_mode = mode_dict.get(modlevel, None)\n if modlevel_mode is not None:\n mode = modlevel_mode\n\n return mode\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 162, "n_words": 92, "vocab_size": 64, "complexity": 4, "nloc": 11, "token_counts": 71, "n_ast_nodes": 125, "n_identifiers": 13, "random_cut": "def _get_module_collection_mode(mode_dict, name):\n \n mode = 'pyc' # Default mode\n\n # No settings available - return default.\n if not mode_dict:\n return mode\n\n # Search the parent modules/pa", "d_id": 77461, "documentation": { "docstring": "\n Determine the module/package collection mode for the given module name , based on the provided collection\n mode settings dictionary.\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 29, "language": "en" } }, { "id": 276928, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/kernelized_utils.py", "file_name": "kernelized_utils.py", "fun_name": "exact_gaussian_kernel", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def exact_gaussian_kernel(x, y, stddev):\n r\n x_aligned, y_aligned = _align_matrices(x, y)\n diff_squared_l2_norm = tf.reduce_sum(\n tf.math.squared_difference(x_aligned, y_aligned), 2\n )\n return tf.exp(-diff_squared_l2_norm / (2 * stddev * stddev))\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 46, "n_words": 25, "vocab_size": 23, "complexity": 1, "nloc": 29, "token_counts": 56, "n_ast_nodes": 85, "n_identifiers": 13, "random_cut": "def exact_gaussian_kernel(x, y, stddev):\n r\n x_aligned, y_aligned = _align_matrices(x, y)", "d_id": 81781, "documentation": { "docstring": "Computes exact Gaussian kernel value(s) for tensors x and y and stddev.\n\n The Gaussian kernel for vectors u, v is defined as follows:\n K(u, v) = exp(-||u-v||^2 / (2* stddev^2))\n where the norm is the l2-norm. x, y can be either vectors or matrices. If they\n are vectors, they must have the same dimension. If they are matrices, they\n must have the same number of columns. In the latter case, the method returns\n (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and\n v is a row from y.\n\n Args:\n x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim].\n y: a tensor of rank 1 or 2. 
It's shape should be either [dim] or [n, dim].\n stddev: The width of the Gaussian kernel.\n\n Returns:\n A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix\n of shape (m, n) with entries K(u, v) (where K is the Gaussian kernel) for\n all (u,v) pairs where u, v are rows from x and y respectively.\n\n Raises:\n ValueError: if the shapes of x, y are not compatible.\n ", "n_words": 196, "vocab_size": 107, "n_whitespaces": 273, "language": "en" } }, { "id": 265867, "commit_id": "ed2f7f12369fe0b8f2dc2a5910840c928126a1b8", "repo": "netbox", "path": "netbox/extras/models/models.py", "file_name": "models.py", "fun_name": "enqueue_job", "commit_message": "Job scheduling review changes", "code": "def enqueue_job(cls, func, name, obj_type, user, schedule_at=None, *args, **kwargs):\n \n job_result: JobResult = cls.objects.create(\n name=name,\n obj_type=obj_type,\n user=user,\n job_id=uuid.uuid4()\n )\n\n queue = django_rq.get_queue(\"default\")\n\n if schedule_at:\n job_result.status = JobResultStatusChoices.STATUS_SCHEDULED\n job_result.scheduled_time = schedule_at\n job_result.save()\n\n queue.enqueue_at(schedule_at, func, job_id=str(job_result.job_id), job_result=job_result, **kwargs)\n else:\n queue.enqueue(func, job_id=str(job_result.job_id), job_result=job_result, **kwargs)\n\n return job_result\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 190, "n_words": 42, "vocab_size": 35, "complexity": 2, "nloc": 16, "token_counts": 132, "n_ast_nodes": 199, "n_identifiers": 27, "random_cut": "def enqueue_job(cls, func, name, obj_type, user, schedule_at=None, *args, **kwargs):\n \n job_result: JobResult = cls.objects.create(\n name=name,\n obj_type=obj_type,\n user=user,\n job_id=uuid.uuid4()\n )\n\n queue = django_rq.get_queue(\"default\")\n\n if schedule_at:\n job_result.status = JobResultStatusChoices.STATUS_SCHEDULED\n job_result.scheduled_time = schedule_at\n job_result.save()\n\n queue.enqueue_at(schedule_at, func, job_id=str(job_result.job_id), job_result=job_result, **kwargs)\n else:\n queue.enqueue(func, job_id=str(job_result.job_id), job_result=job_result, **kwargs)\n\n return job_result\n\n", "d_id": 78221, "documentation": { "docstring": "\n Create a JobResult instance and enqueue a job using the given callable\n\n func: The callable object to be enqueued for execution\n name: Name for the JobResult instance\n obj_type: ContentType to link to the JobResult instance obj_type\n user: User object to link to the JobResult instance\n schedule_at: Schedule the job to be executed at the passed date and time\n args: additional args passed to the callable\n kwargs: additional kargs passed to the callable\n ", "n_words": 72, "vocab_size": 39, "n_whitespaces": 136, "language": "en" } }, { "id": 183357, "commit_id": "efd4273a4ca8282b677e43f4732013e60926753b", "repo": "textual", "path": "src/textual/dom.py", "file_name": "dom.py", "fun_name": "text_style", "commit_message": "auto sizing", "code": "def text_style(self) -> Style:\n \n\n # TODO: Feels like there may be opportunity for caching here.\n\n style = Style()\n for node in reversed(self.ancestors):\n style += node.styles.text_style\n\n return style\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 27, "vocab_size": 24, "complexity": 2, "nloc": 14, "token_counts": 32, "n_ast_nodes": 55, "n_identifiers": 8, 
"random_cut": "def text_style(self) -> Style:\n \n\n # TODO: Feels like there may be opportunity for caching here.\n\n style = Style()\n for node in reversed(self.an", "d_id": 44160, "documentation": { "docstring": "Get the text style object.\n\n A widget's style is influenced by its parent. For instance if a widgets background has an alpha,\n then its parent's background color will show through. Additionally, widgets will inherit their\n parent's text style (i.e. bold, italic etc).\n\n Returns:\n Style: Rich Style object.\n ", "n_words": 47, "vocab_size": 38, "n_whitespaces": 93, "language": "en" } }, { "id": 203482, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/sites.py", "file_name": "sites.py", "fun_name": "get_app_list", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_app_list(self, request):\n \n app_dict = self._build_app_dict(request)\n\n # Sort the apps alphabetically.\n app_list = sorted(app_dict.values(), key=lambda x: x[\"name\"].lower())\n\n # Sort the models alphabetically within each app.\n for app in app_list:\n app[\"models\"].sort(key=lambda x: x[\"name\"])\n\n return app_list\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 94, "n_words": 34, "vocab_size": 28, "complexity": 2, "nloc": 6, "token_counts": 64, "n_ast_nodes": 110, "n_identifiers": 13, "random_cut": "def get_app_list(self, request):\n \n app_dict = self._build_app_dict(request)\n\n # Sort the apps alphabetically.", "d_id": 50401, "documentation": { "docstring": "\n Return a sorted list of all the installed apps that have been\n registered in this site.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 222558, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/ccompiler.py", "file_name": "ccompiler.py", "fun_name": "find_library_file", "commit_message": "add python 3.10.4 for windows", "code": "def find_library_file (self, dirs, lib, debug=0):\n \n raise NotImplementedError\n\n # -- Filename generation methods -----------------------------------\n\n # The default implementation of the filename generating methods are\n # prejudiced towards the Unix/DOS/Windows view of the world:\n # * object files are named by replacing the source file extension\n # (eg. .c/.cpp -> .o/.obj)\n # * library files (shared or static) are named by plugging the\n # library name and extension into a format string, eg.\n # \"lib%s.%s\" % (lib_name, \".a\") for Unix static libraries\n # * executables are named by appending an extension (possibly\n # empty) to the program name: eg. progname + \".exe\" for\n # Windows\n #\n # To reduce redundant code, these methods expect to find\n # several attributes in the current object (presumably defined\n # as class attributes):\n # * src_extensions -\n # list of C/C++ source file extensions, eg. ['.c', '.cpp']\n # * obj_extension -\n # object file extension, eg. '.o' or '.obj'\n # * static_lib_extension -\n # extension for static library files, eg. '.a' or '.lib'\n # * shared_lib_extension -\n # extension for shared library/object files, eg. '.so', '.dll'\n # * static_lib_format -\n # format string for generating static library filenames,\n # eg. 
'lib%s.%s' or '%s.%s'\n # * shared_lib_format\n # format string for generating shared library filenames\n # (probably same as static_lib_format, since the extension\n # is one of the intended parameters to the format string)\n # * exe_extension -\n # extension for executable files, eg. '' or '.exe'\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 431, "n_words": 241, "vocab_size": 128, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 57, "n_identifiers": 6, "random_cut": "def find_library_file (self, dirs, lib, debug=0):\n \n raise NotImplementedError\n\n # -- Filename generation methods -----------------------------------\n\n # The default implementation of the filename generating methods are\n # prejudiced towards the Unix/DOS/Windows view of the world:\n # * object files are named by replacing the source file extension\n # (eg. .c/.cpp -> .o/.obj)\n # * library files (shared or static) are named by plugging the\n # library name and extension into a format string, eg.\n # \"lib%s.%s\" % (lib_name, \".a\") for Unix static libraries\n # * executables are named by appending an extensio", "d_id": 56635, "documentation": { "docstring": "Search the specified list of directories for a static or shared\n library file 'lib' and return the full path to that file. If\n 'debug' true, look for a debugging version (if that makes sense on\n the current platform). Return None if 'lib' wasn't found in any of\n the specified directories.\n ", "n_words": 50, "vocab_size": 41, "n_whitespaces": 87, "language": "en" } }, { "id": 247900, "commit_id": "f0b03186d96305fd44d74a89bf4230beec0c5c31", "repo": "synapse", "path": "tests/storage/databases/main/test_lock.py", "file_name": "test_lock.py", "fun_name": "test_simple_lock", "commit_message": "Add type hints for `tests/unittest.py`. 
(#12347)\n\nIn particular, add type hints for get_success and friends, which are then helpful in a bunch of places.", "code": "def test_simple_lock(self):\n \n # First to acquire this lock, so it should complete\n lock = self.get_success(self.store.try_acquire_lock(\"name\", \"key\"))\n assert lock is not None\n\n # Enter the context manager\n self.get_success(lock.__aenter__())\n\n # Attempting to acquire the lock again fails.\n lock2 = self.get_success(self.store.try_acquire_lock(\"name\", \"key\"))\n self.assertIsNone(lock2)\n\n # Calling `is_still_valid` reports true.\n self.assertTrue(self.get_success(lock.is_still_valid()))\n\n # Drop the lock\n self.get_success(lock.__aexit__(None, None, None))\n\n # We can now acquire the lock again.\n lock3 = self.get_success(self.store.try_acquire_lock(\"name\", \"key\"))\n assert lock3 is not None\n self.get_success(lock3.__aenter__())\n self.get_success(lock3.__aexit__(None, None, None))\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 200, "n_words": 74, "vocab_size": 46, "complexity": 1, "nloc": 12, "token_counts": 138, "n_ast_nodes": 236, "n_identifiers": 13, "random_cut": "def test_simple_lock(self):\n \n # First to acquire this lock, so it should complete\n lock = self.get_success(self.store.try_acquire_lock(\"name\", \"key\"))\n assert lock is not None\n\n # Enter the context manager\n self.get_success(lock.__aenter__())\n\n # Attempting to acquire the lock again fails.\n lock2 = self.get_success(self.store.try_acquire_lock(\"name\", \"key\"))\n self.assertIsNone(lock2)\n\n # Calling `is_still_valid` reports true.\n self.assertTrue(self.get_success(lock.is_still_valid()))\n\n # Drop the lock\n self.get_success(lock.__aexit__(None, None, None))\n\n # We can now acquire the lock again.\n lock3 = self.get_success(self.store.try_acquire_lock(\"name\", \"key\"))\n assert lock3 is not None\n self.get_success(lock3.__aenter__())\n self.get_success(lock3.__aexit__(None, None, None))\n", "d_id": 71988, "documentation": { "docstring": "Test that we can take out a lock and that while we hold it nobody\n else can take it out.\n ", "n_words": 20, "vocab_size": 15, "n_whitespaces": 34, "language": "en" } }, { "id": 109341, "commit_id": "438d30b227b1fef7e8733578f851e76a8e360f24", "repo": "matplotlib", "path": "lib/matplotlib/font_manager.py", "file_name": "font_manager.py", "fun_name": "set_family", "commit_message": "Get rcParams from mpl", "code": "def set_family(self, family):\n \n if family is None:\n family = mpl.rcParams['font.family']\n if isinstance(family, str):\n family = [family]\n self._family = family\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 69, "n_words": 19, "vocab_size": 13, "complexity": 3, "nloc": 6, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 8, "random_cut": "def set_family(self, family):\n \n if family is None:\n family ", "d_id": 23536, "documentation": { "docstring": "\n Change the font family. May be either an alias (generic name\n is CSS parlance), such as: 'serif', 'sans-serif', 'cursive',\n 'fantasy', or 'monospace', a real font name or a list of real\n font names. Real font names are not supported when\n :rc:`text.usetex` is `True`. 
Default: :rc:`font.family`\n ", "n_words": 45, "vocab_size": 37, "n_whitespaces": 90, "language": "en" } }, { "id": 113656, "commit_id": "d68c786ff81bad19c04619d6a999ff34aaa724e7", "repo": "nni", "path": "nni/compression/pytorch/quantization/qat_quantizer.py", "file_name": "qat_quantizer.py", "fun_name": "update_ema", "commit_message": "[Compression] remove pruning v1 & refactor directory (#5228)", "code": "def update_ema(biased_ema, value, decay):\n \n biased_ema = biased_ema * decay + (1 - decay) * value\n return biased_ema\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 26, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 40, "n_identifiers": 4, "random_cut": "def update_ema(biased_ema, value, decay):\n \n ", "d_id": 24992, "documentation": { "docstring": "\n calculate biased stat and unbiased stat in each step using exponential moving average method\n\n Parameters\n ----------\n biased_ema : float\n previous stat value\n value : float\n current stat value\n decay : float\n the weight of previous stat value, larger means smoother curve\n\n Returns\n -------\n float, float\n ", "n_words": 45, "vocab_size": 33, "n_whitespaces": 97, "language": "en" } }, { "id": 195869, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/ntheory/partitions_.py", "file_name": "partitions_.py", "fun_name": "npartitions", "commit_message": "Improved documentation formatting", "code": "def npartitions(n, verbose=False):\n \n n = int(n)\n if n < 0:\n return 0\n if n <= 5:\n return [1, 1, 2, 3, 5, 7][n]\n if '_factor' not in globals():\n _pre()\n # Estimate number of bits in p(n). This formula could be tidied\n pbits = int((\n math.pi*(2*n/3.)**0.5 -\n math.log(4*n))/math.log(10) + 1) * \\\n math.log(10, 2)\n prec = p = int(pbits*1.1 + 100)\n s = fzero\n M = max(6, int(0.24*n**0.5 + 4))\n if M > 10**5:\n raise ValueError(\"Input too big\") # Corresponds to n > 1.7e11\n sq23pi = mpf_mul(mpf_sqrt(from_rational(2, 3, p), p), mpf_pi(p), p)\n sqrt8 = mpf_sqrt(from_int(8), p)\n for q in range(1, M):\n a = _a(n, q, p)\n d = _d(n, q, p, sq23pi, sqrt8)\n s = mpf_add(s, mpf_mul(a, d), prec)\n if verbose:\n print(\"step\", q, \"of\", M, to_str(a, 10), to_str(d, 10))\n # On average, the terms decrease rapidly in magnitude.\n # Dynamically reducing the precision greatly improves\n # performance.\n p = bitcount(abs(to_int(d))) + 50\n return int(to_int(mpf_add(s, fhalf, prec)))\n\n__all__ = ['npartitions']\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 318, "n_words": 158, "vocab_size": 116, "complexity": 7, "nloc": 27, "token_counts": 298, "n_ast_nodes": 450, "n_identifiers": 38, "random_cut": "def npartitions(n, verbose=False):\n \n n = int(n)\n if n < 0:\n return 0\n if n <= 5:\n return [1, 1, 2, 3, 5, 7][n]\n if '_factor' not in globals():\n _pre()\n # Estimate number of bits in p(n). 
This formula could be tidied\n pbits = int((\n math.pi*(2*n/3.)**0.5 -\n math.log(4*n))/math.log(10) + 1) * \\\n math.log(10, 2)\n prec = p = int(pbits*1.1 + 100)\n s = fzero\n M = max(6, int(0.24*n**0.5 + 4))\n if M > 10**5:\n raise ValueError(\"Input too big\") # Corresponds to n > 1.7e11\n sq23pi = mpf_mul(mpf_sqrt(from_rational(2, 3, p), p), mpf_pi(p), p)\n sqrt8 = mpf_sqrt(from_int(8), p)\n for q in range(1, M):\n a = _a(n, q, p)\n d = _d(n, q, p, sq23pi, sqrt8)\n s = mpf_add(s, mpf_mul(a, d), prec)\n if verbose:\n print(\"step\", q, \"of\", M, to_str(a, 10), to_str(d, 10))\n # On average, the terms decrease rapidly in magnitude.\n # Dynamically reducing the precision greatly improves\n # performance.\n p = bitcount(abs(to_int(d))) + 50\n return int(to_int(mpf_add(s, fhalf, prec)))\n\n_", "d_id": 47456, "documentation": { "docstring": "\n Calculate the partition function P(n), i.e. the number of ways that\n n can be written as a sum of positive integers.\n\n P(n) is computed using the Hardy-Ramanujan-Rademacher formula [1]_.\n\n\n The correctness of this implementation has been tested through $10^10$.\n\n Examples\n ========\n\n >>> from sympy.ntheory import npartitions\n >>> npartitions(25)\n 1958\n\n References\n ==========\n\n .. [1] http://mathworld.wolfram.com/PartitionFunctionP.html\n\n ", "n_words": 54, "vocab_size": 49, "n_whitespaces": 94, "language": "en" } }, { "id": 268742, "commit_id": "cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc", "repo": "ansible", "path": "test/lib/ansible_test/_internal/host_profiles.py", "file_name": "host_profiles.py", "fun_name": "build_sleep_command", "commit_message": "ansible-test - Improve container management. (#78550)\n\nSee changelogs/fragments/ansible-test-container-management.yml for details.", "code": "def build_sleep_command(self) -> list[str]:\n \n docker_pull(self.args, self.config.image)\n inspect = docker_image_inspect(self.args, self.config.image)\n\n return ['sh', '-c', f'sleep 60; exec {shlex.join(inspect.cmd)}']\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 15, "token_counts": 46, "n_ast_nodes": 90, "n_identifiers": 13, "random_cut": "def build_sleep_command(self) -> list[str]:\n \n doc", "d_id": 79642, "documentation": { "docstring": "\n Build and return the command to put the container to sleep.\n\n The sleep duration below was selected to:\n\n - Allow enough time to perform necessary operations in the container before waking it.\n - Make the delay obvious if the wake command doesn't run or succeed.\n - Avoid hanging indefinitely or for an unreasonably long time.\n\n NOTE: The container must have a POSIX-compliant default shell \"sh\" with a non-builtin \"sleep\" command.\n ", "n_words": 70, "vocab_size": 56, "n_whitespaces": 126, "language": "en" } }, { "id": 136321, "commit_id": "76cb42c578adf19a70a6b4401098a7a21e0d3b29", "repo": "ray", "path": "rllib/evaluation/tests/test_envs_that_crash.py", "file_name": "test_envs_that_crash.py", "fun_name": "test_crash_only_one_worker_during_sampling_but_ignore", "commit_message": "[RLlib] Fault tolerant and elastic WorkerSets used across RLlib's algorithms (for sampling and evaluation). 
(#30118)", "code": "def test_crash_only_one_worker_during_sampling_but_ignore(self):\n \n config = (\n pg.PGConfig()\n .rollouts(\n num_rollout_workers=2,\n num_envs_per_worker=3,\n # Ignore worker failures (continue with worker #2).\n ignore_worker_failures=True,\n )\n .environment(\n env=CartPoleCrashing,\n env_config={\n # Crash prob=80%.\n \"p_crash\": 0.8,\n # Only crash on worker with index 1.\n \"crash_on_worker_indices\": [1],\n # Make sure nothing happens during pre-checks.\n \"skip_env_checking\": True,\n },\n )\n .debugging(worker_cls=ForwardHealthCheckToEnvWorker)\n )\n # Pre-checking disables, so building the Algorithm is save.\n algo = config.build()\n # Expect some errors being logged here, but in general, should continue\n # as we ignore worker failures.\n algo.train()\n # One worker has been removed -> Only one left.\n self.assertEqual(algo.workers.num_healthy_remote_workers(), 1)\n algo.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 456, "n_words": 94, "vocab_size": 78, "complexity": 1, "nloc": 22, "token_counts": 98, "n_ast_nodes": 164, "n_identifiers": 23, "random_cut": "def test_crash_only_one_worker_during_sampling_but_ignore(self):\n \n config = (\n pg.PGConfig()\n .rollouts(\n num_rollout_workers=2,\n num_envs_per_worker=3,\n # Ignore worker failures (continue with worker #2).\n ignore_worker_failures=True,\n )\n .environment(\n env=CartPoleCrashing,\n env_config={\n # Crash prob=80%.\n \"p_crash\": 0.8,\n # Only crash on worker with index 1.\n \"crash_on_worker_indices\": [1],\n # Make sure nothing happens during pre-checks.\n \"skip_env_checking\": True,\n },\n )\n .debugging(worker_cls=ForwardHealthCheckToEnvWorker)\n )\n # Pre-checking disables, so building the Algorithm is save.\n algo = config.build()\n # Expect some errors being logged here, but in gener", "d_id": 30874, "documentation": { "docstring": "Expect some sub-envs to fail (and not recover), but ignore.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 259225, "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", "repo": "scikit-learn", "path": "sklearn/preprocessing/tests/test_encoders.py", "file_name": "test_encoders.py", "fun_name": "test_ohe_infrequent_three_levels_drop_infrequent_errors", "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST 
Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop):\n \n X_train = np.array([[\"a\"] * 5 + [\"b\"] * 20 + [\"c\"] * 10 + [\"d\"] * 3]).T\n ohe = OneHotEncoder(\n handle_unknown=\"infrequent_if_exist\", sparse=False, max_categories=3, drop=drop\n )\n\n msg = f\"Unable to drop category {drop[0]!r} from feature 0 because it is infrequent\"\n with pytest.raises(ValueError, match=msg):\n ohe.fit(X_train)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 77, "n_words": 45, "vocab_size": 38, "complexity": 1, "nloc": 8, "token_counts": 82, "n_ast_nodes": 149, "n_identifiers": 17, "random_cut": "def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop):\n \n X_train = np.array([[\"a\"] * 5 + [\"b\"] * 20 + [\"c\"] * 10 + [\"d\"] * 3", "d_id": 75660, "documentation": { "docstring": "Test three levels and dropping the infrequent category.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 334185, "commit_id": "95f4256fc905b6e29e5ea0f245dcf88f72a9ddd1", "repo": "diffusers", "path": "utils/check_repo.py", "file_name": "check_repo.py", "fun_name": "is_a_private_model", "commit_message": "upload some cleaning tools", 
"code": "def is_a_private_model(model):\n \n if model in PRIVATE_MODELS:\n return True\n\n # Wrapper, Encoder and Decoder are all privates\n if model.endswith(\"Wrapper\"):\n return True\n if model.endswith(\"Encoder\"):\n return True\n if model.endswith(\"Decoder\"):\n return True\n return False\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 79, "n_words": 30, "vocab_size": 20, "complexity": 5, "nloc": 10, "token_counts": 45, "n_ast_nodes": 82, "n_identifiers": 4, "random_cut": "def is_a_private_model(model):\n \n if model in PRIVATE_MODELS:\n ", "d_id": 120552, "documentation": { "docstring": "Returns True if the model should not be in the main init.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 294292, "commit_id": "23a630e0bcbd2aec6a598a19ebaf2929eba97e5b", "repo": "core", "path": "tests/components/tod/test_binary_sensor.py", "file_name": "test_binary_sensor.py", "fun_name": "test_midnight_turnover_before_midnight_outside_period", "commit_message": "Update Times of the Day tests to use freezegun (#68327)", "code": "async def test_midnight_turnover_before_midnight_outside_period(hass):\n \n config = {\n \"binary_sensor\": [\n {\"platform\": \"tod\", \"name\": \"Night\", \"after\": \"22:00\", \"before\": \"5:00\"}\n ]\n }\n await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.night\")\n assert state.state == STATE_OFF\n\n\n@freeze_time(\"2019-01-10 10:00:00-08:00\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@freeze_time(\"2019-01-10 10:00:00-08:00\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 78, "n_words": 33, "vocab_size": 31, "complexity": 1, "nloc": 10, "token_counts": 62, "n_ast_nodes": 131, "n_identifiers": 10, "random_cut": "async def test_midnight_turnover_before_midnight_outside_period(hass):\n \n config = {\n \"binary_sensor\": [\n {\"platform\": \"tod\", \"name\": \"Night\", \"after\": \"22:00\", \"before\": \"5:00\"}\n ]\n }\n await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"bina", "d_id": 93329, "documentation": { "docstring": "Test midnight turnover setting before midnight outside period.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 130601, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/data/impl/block_list.py", "file_name": "block_list.py", "fun_name": "_check_if_cleared", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _check_if_cleared(self) -> None:\n \n if self._blocks is None:\n raise ValueError(\n \"This Dataset's blocks have been moved, which means that you \"\n \"can no longer use this Dataset.\"\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 94, "n_words": 28, "vocab_size": 27, "complexity": 2, "nloc": 7, "token_counts": 21, "n_ast_nodes": 41, "n_identifiers": 4, "random_cut": "def _check_if_cleared(self) -> None:\n \n if self._blocks is None:\n raise ValueError(\n \"This Dataset's blocks have been moved, which means that you \"\n \"can no longer use this Dataset.\"\n )\n", "d_id": 29327, "documentation": { 
"docstring": "Raise an error if this BlockList has been previously cleared.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 247834, "commit_id": "437a8ed9efdf8f1aefa092d0761076da3ae78100", "repo": "synapse", "path": "tests/rest/client/test_sync.py", "file_name": "test_sync.py", "fun_name": "test_join_leave", "commit_message": "Add a configuration to exclude rooms from sync response (#12310)", "code": "def test_join_leave(self) -> None:\n \n channel = self.make_request(\"GET\", \"/sync\", access_token=self.tok)\n self.assertEqual(channel.code, 200, channel.result)\n\n self.assertNotIn(self.excluded_room_id, channel.json_body[\"rooms\"][\"join\"])\n self.assertIn(self.included_room_id, channel.json_body[\"rooms\"][\"join\"])\n\n self.helper.leave(self.excluded_room_id, self.user_id, tok=self.tok)\n self.helper.leave(self.included_room_id, self.user_id, tok=self.tok)\n\n channel = self.make_request(\n \"GET\",\n \"/sync?since=\" + channel.json_body[\"next_batch\"],\n access_token=self.tok,\n )\n self.assertEqual(channel.code, 200, channel.result)\n\n self.assertNotIn(self.excluded_room_id, channel.json_body[\"rooms\"][\"leave\"])\n self.assertIn(self.included_room_id, channel.json_body[\"rooms\"][\"leave\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 155, "n_words": 38, "vocab_size": 27, "complexity": 1, "nloc": 18, "token_counts": 188, "n_ast_nodes": 301, "n_identifiers": 17, "random_cut": "def test_join_leave(self) -> None:\n \n channel = self.make_request(\"GET\", \"/sync\", access_token=self.tok)\n self.assertEqual(channel.code, 200, channel.result)\n\n self.assertNotIn(self.excluded_room_id, channel.json_body[\"rooms\"][\"join\"])\n self.assertIn(self.included_room_id, channel.json_body[\"rooms\"][\"join\"])\n\n self.helper.leave(self.excluded_room_id, self.user_id, tok=self.tok)\n self.helper.leave(self.included_room_id, self.user_id, tok=self.tok)\n\n channel = self.make_request(\n \"GET\",\n \"/sync?since=\" + channel.json_body[\"next_batch\"],\n access_token=self.tok,\n )\n ", "d_id": 71954, "documentation": { "docstring": "Tests that rooms are correctly excluded from the 'join' and 'leave' sections of\n sync responses.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 29, "language": "en" } }, { "id": 73213, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/tests/test_page_modeladmin.py", "file_name": "test_page_modeladmin.py", "fun_name": "test_short_description_is_used_as_field_label", "commit_message": "Reformat with black", "code": "def test_short_description_is_used_as_field_label(self):\n \n response = self.client.get(\"/admin/modeladmintest/author/inspect/1/\")\n self.assertContains(response, \"Birth information\")\n self.assertNotContains(response, \"author_birth_string\")\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 38, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "def test_short_description_is_used_as_field_label(self):\n ", "d_id": 15992, "documentation": { "docstring": "\n A custom field has been added to the inspect view's `inspect_view_fields` and since\n this field has a `short_description` we expect it to be used as the field's label,\n and not use the name of the 
function.\n ", "n_words": 36, "vocab_size": 29, "n_whitespaces": 65, "language": "en" } }, { "id": 67630, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/item/item.py", "file_name": "item.py", "fun_name": "check_stock_uom_with_bin", "commit_message": "style: format code with black", "code": "def check_stock_uom_with_bin(item, stock_uom):\n\tif stock_uom == frappe.db.get_value(\"Item\", item, \"stock_uom\"):\n\t\treturn\n\n\tref_uom = frappe.db.get_value(\"Stock Ledger Entry\", {\"item_code\": item}, \"stock_uom\")\n\n\tif ref_uom:\n\t\tif cstr(ref_uom) != cstr(stock_uom):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\n\t\t\t\t\t\"Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You will need to create a new Item to use a different Default UOM.\"\n\t\t\t\t).format(item)\n\t\t\t)\n\n\tbin_list = frappe.db.sql(\n\t\t,\n\t\t(item, stock_uom),\n\t\tas_dict=1,\n\t)\n\n\tif bin_list:\n\t\tfrappe.throw(\n\t\t\t_(\n\t\t\t\t\"Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You need to either cancel the linked documents or create a new Item.\"\n\t\t\t).format(item)\n\t\t)\n\n\t# No SLE or documents against item. Bin UOM can be changed safely.\n\tfrappe.db.sql(, (stock_uom, item))\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 103, "n_words": 127, "vocab_size": 84, "complexity": 5, "nloc": 27, "token_counts": 122, "n_ast_nodes": 204, "n_identifiers": 14, "random_cut": "def check_stock_uom_with_bin(item, stock_uom):\n\tif stock_uom == frappe.db.get_value(\"Item\", item, \"stock_uom\"):\n\t\treturn\n\n\tref_uom = frappe.db.get_value(\"Stock Ledger Entry\", {\"item_code\": item}, \"stock_uom\")\n\n\tif ref_uom:\n\t\tif cstr(ref_uom) != cstr(stock_uom):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\n\t\t\t\t\t\"Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You will need to create a new Item to use a different Default UOM.\"\n\t\t\t\t).format(item)\n\t\t\t)\n\n\tbin_list = frappe.db.sql(\n\t\t,\n\t\t(item, stock_uom),\n", "d_id": 14584, "documentation": { "docstring": "\n\t\t\tselect * from tabBin where item_code = %s\n\t\t\t\tand (reserved_qty > 0 or ordered_qty > 0 or indented_qty > 0 or planned_qty > 0)\n\t\t\t\tand stock_uom != %s\n\t\t\tupdate tabBin set stock_uom=%s where item_code=%s", "n_words": 34, "vocab_size": 23, "n_whitespaces": 30, "language": "en" } }, { "id": 90853, "commit_id": "b9f5a910dc841b85f58d46266ec049ae5a7fd305", "repo": "sentry", "path": "src/sentry/runner/commands/repair.py", "file_name": "repair.py", "fun_name": "fix_group_counters", "commit_message": "ref(models): `ActivityType` (#34978)\n\n## Objective:\r\nWe want to separate enum logic from Model logic. This breaks a lot of circular dependencies.", "code": "def fix_group_counters():\n from django.db import connection\n\n click.echo(\"Correcting Group.num_comments counter\")\n cursor = connection.cursor()\n cursor.execute(\n ,\n [ActivityType.NOTE.value],\n )\n\n\n@click.command()\n@click.option(\n \"--with-docs/--without-docs\",\n default=False,\n help=\"Synchronize and repair embedded documentation. 
This \" \"is disabled by default.\",\n)\n@configuration", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@click.command()\n@click.option(\n \"--with-docs/--without-docs\",\n default=False,\n help=\"Synchronize and repair embedded documentation. This \" \"is disabled by default.\",\n)\n@configuration", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 66, "n_words": 33, "vocab_size": 32, "complexity": 1, "nloc": 13, "token_counts": 38, "n_ast_nodes": 109, "n_identifiers": 16, "random_cut": "def fix_group_counters():\n from django.db import connection\n\n click.echo(\"Correcting Group.num_comments counter\")\n cursor = connection.cursor()\n cursor.execute(\n ,\n [ActivityType.NOTE.value],\n )\n\n\n@click.command()\n@click.option(\n \"--with-docs/--without-docs\",\n default=False,\n help=\"Synchronize and repa", "d_id": 18700, "documentation": { "docstring": "\n UPDATE sentry_groupedmessage SET num_comments = (\n SELECT COUNT(*) from sentry_activity\n WHERE type = %s and group_id = sentry_groupedmessage.id\n )\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 59, "language": "en" } }, { "id": 91564, "commit_id": "0d29073264ceea2dd8b1528f98bedb6e6771d383", "repo": "sentry", "path": "src/sentry/search/events/filter.py", "file_name": "filter.py", "fun_name": "get_filter", "commit_message": "fix(metric_alerts): Prevent date fields from being used as filters in metric alerts (#35762)\n\nSince metric alerts are relative alerts based on the current time and the timestamp, it doesn't make\r\nsense to filter on dates within them. There were also cases where people would use relative syntax\r\nlike `-24h`, which would convert to 24 hours before the current time and then store that absolute\r\nvalue in the query. Accepting these filters is confusing because it implies that the relative date\r\nwould be applied to the alert.\r\n\r\nNot sure if overrides is the best way to do this, but it's simple enough to implement this way.", "code": "def get_filter(query=None, params=None, parser_config_overrides=None):\n \n # NOTE: this function assumes project permissions check already happened\n parsed_terms = []\n if query is not None:\n try:\n parsed_terms = parse_search_query(\n query, params=params, config_overrides=parser_config_overrides\n )\n except ParseError as e:\n raise InvalidSearchQuery(f\"Parse error: {e.expr.name} (column {e.column():d})\")\n\n kwargs = {\n \"start\": None,\n \"end\": None,\n \"conditions\": [],\n \"having\": [],\n \"user_id\": None,\n \"organization_id\": None,\n \"team_id\": [],\n \"project_ids\": [],\n \"group_ids\": [],\n \"condition_aggregates\": [],\n \"aliases\": params.get(\"aliases\", {}) if params is not None else {},\n }\n\n projects_to_filter = []\n if any(\n isinstance(term, ParenExpression) or SearchBoolean.is_operator(term)\n for term in parsed_terms\n ):\n (\n condition,\n having,\n found_projects_to_filter,\n group_ids,\n ) = convert_search_boolean_to_snuba_query(parsed_terms, params)\n\n if condition:\n and_conditions = flatten_condition_tree(condition, SNUBA_AND)\n for func in and_conditions:\n kwargs[\"conditions\"].append(convert_function_to_condition(func))\n if having:\n kwargs[\"condition_aggregates\"] = [\n term.key.name for term in parsed_terms if isinstance(term, AggregateFilter)\n ]\n and_having = flatten_condition_tree(having, SNUBA_AND)\n for func in and_having:\n kwargs[\"having\"].append(convert_function_to_condition(func))\n if found_projects_to_filter:\n projects_to_filter = 
list(set(found_projects_to_filter))\n if group_ids is not None:\n kwargs[\"group_ids\"].extend(list(set(group_ids)))\n else:\n projects_to_filter = set()\n for term in parsed_terms:\n if isinstance(term, SearchFilter):\n conditions, found_projects_to_filter, group_ids = format_search_filter(term, params)\n if len(conditions) > 0:\n kwargs[\"conditions\"].extend(conditions)\n if found_projects_to_filter:\n projects_to_filter.update(found_projects_to_filter)\n if group_ids is not None:\n kwargs[\"group_ids\"].extend(group_ids)\n elif isinstance(term, AggregateFilter):\n converted_filter = convert_aggregate_filter_to_snuba_query(term, params)\n kwargs[\"condition_aggregates\"].append(term.key.name)\n if converted_filter:\n kwargs[\"having\"].append(converted_filter)\n projects_to_filter = list(projects_to_filter)\n\n # Keys included as url params take precedent if same key is included in search\n # They are also considered safe and to have had access rules applied unlike conditions\n # from the query string.\n if params:\n for key in (\"start\", \"end\"):\n kwargs[key] = params.get(key, None)\n if \"user_id\" in params:\n kwargs[\"user_id\"] = params[\"user_id\"]\n if \"organization_id\" in params:\n kwargs[\"organization_id\"] = params[\"organization_id\"]\n if \"team_id\" in params:\n kwargs[\"team_id\"] = params[\"team_id\"]\n # OrganizationEndpoint.get_filter() uses project_id, but eventstore.Filter uses project_ids\n if \"project_id\" in params:\n if projects_to_filter:\n kwargs[\"project_ids\"] = projects_to_filter\n else:\n kwargs[\"project_ids\"] = params[\"project_id\"]\n if \"environment\" in params:\n term = SearchFilter(SearchKey(\"environment\"), \"=\", SearchValue(params[\"environment\"]))\n kwargs[\"conditions\"].append(convert_search_filter_to_snuba_query(term))\n if \"group_ids\" in params:\n kwargs[\"group_ids\"] = to_list(params[\"group_ids\"])\n # Deprecated alias, use `group_ids` instead\n if ISSUE_ID_ALIAS in params:\n kwargs[\"group_ids\"] = to_list(params[\"issue.id\"])\n\n return eventstore.Filter(**kwargs)\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1158, "n_words": 307, "vocab_size": 185, "complexity": 32, "nloc": 87, "token_counts": 568, "n_ast_nodes": 980, "n_identifiers": 53, "random_cut": "def get_filter(query=None, params=None, parser_config_overrides=None):\n \n # NOTE: this function assumes project permissions check already happened\n ", "d_id": 18769, "documentation": { "docstring": "\n Returns an eventstore filter given the search text provided by the user and\n URL params\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 25, "language": "en" } }, { "id": 21956, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/chardet/universaldetector.py", "file_name": "universaldetector.py", "fun_name": "reset", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def reset(self):\n \n self.result = {\"encoding\": None, \"confidence\": 0.0, \"language\": None}\n self.done = False\n self._got_data = False\n self._has_win_bytes = False\n self._input_state = InputState.PURE_ASCII\n self._last_char = b\"\"\n if self._esc_charset_prober:\n self._esc_charset_prober.reset()\n if self._utf1632_prober:\n self._utf1632_prober.reset()\n for prober in self._charset_probers:\n prober.reset()\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 139, "n_words": 36, "vocab_size": 28, "complexity": 4, "nloc": 13, "token_counts": 89, "n_ast_nodes": 147, "n_identifiers": 14, "random_cut": "def reset(self):\n \n self.result = {\"en", "d_id": 4094, "documentation": { "docstring": "\n Reset the UniversalDetector and all of its probers back to their\n initial states. This is called by ``__init__``, so you only need to\n call this directly in between analyses of different documents.\n ", "n_words": 32, "vocab_size": 30, "n_whitespaces": 62, "language": "en" } }, { "id": 320322, "commit_id": "01d070b882ef9027bef9a046852c2060119edd5d", "repo": "paperless-ngx", "path": "src/paperless/tests/test_settings.py", "file_name": "test_settings.py", "fun_name": "test_redis_socket_parsing", "commit_message": "Adds a layer to translate between differing formats of socket based Redis URLs", "code": "def test_redis_socket_parsing(self):\n \n\n for input, expected in [\n (None, (\"redis://localhost:6379\", \"redis://localhost:6379\")),\n (\n \"redis+socket:///run/redis/redis.sock\",\n (\n \"redis+socket:///run/redis/redis.sock\",\n \"unix:///run/redis/redis.sock\",\n ),\n ),\n (\n \"unix:///run/redis/redis.sock\",\n (\n \"redis+socket:///run/redis/redis.sock\",\n \"unix:///run/redis/redis.sock\",\n ),\n ),\n ]:\n result = _parse_redis_url(input)\n self.assertTupleEqual(expected, result)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 294, "n_words": 30, "vocab_size": 20, "complexity": 2, "nloc": 20, "token_counts": 62, "n_ast_nodes": 103, "n_identifiers": 7, "random_cut": "def test_redis_socket_parsing(self):\n \n\n for input, expected in [\n (None, (\"redis://localhost:6379\", \"redis://localhost:6379\")),\n (\n \"redis+socket:///run/redis/redis.sock\",\n (\n \"redis+socket:///run/redis/redis.sock\",\n \"unix:///run/redis/redis.sock\",\n ),\n ),\n (\n \"unix:///run/redis/redis.sock\",\n (\n \"redis+socket:///run/redis/redis.sock\",\n \"unix:///run/redis/redis.sock\",\n ),\n ),\n ]:\n result = _parse_redis_url(input)\n self.assertTupleEqua", "d_id": 117128, "documentation": { "docstring": "\n GIVEN:\n - Various Redis connection URI formats\n WHEN:\n - The URI is parsed\n THEN:\n - Socket based URIs are translated\n - Non-socket URIs are unchanged\n - None provided uses default\n ", "n_words": 30, "vocab_size": 23, "n_whitespaces": 114, "language": "en" } }, { "id": 251156, "commit_id": "e83ec8390ad6be6a86cfcfc57bce14cb8861bf32", "repo": "mitmproxy", "path": "mitmproxy/http.py", "file_name": "http.py", "fun_name": "path_components", "commit_message": "`pyupgrade --py39-plus **/*.py`", "code": "def path_components(self) -> tuple[str, ...]:\n \n path = urllib.parse.urlparse(self.url).path\n # This needs to be a tuple so that it's immutable.\n # Otherwise, this would fail silently:\n # request.path_components.append(\"foo\")\n 
return tuple(url.unquote(i) for i in path.split(\"/\") if i)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 79, "n_words": 35, "vocab_size": 33, "complexity": 3, "nloc": 7, "token_counts": 48, "n_ast_nodes": 81, "n_identifiers": 12, "random_cut": "def path_components(self) -> tuple[str, ...]:\n \n path = urllib.parse.urlparse(self.url).path\n # This needs to be a tuple so that it's immutable.\n # Otherwise, this would fail silently:\n # request.path_comp", "d_id": 73612, "documentation": { "docstring": "\n The URL's path components as a tuple of strings.\n Components are unquoted.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 261349, "commit_id": "0003ee0492e420783fd9aa665903d9d489736369", "repo": "scikit-learn", "path": "sklearn/utils/__init__.py", "file_name": "__init__.py", "fun_name": "gen_batches", "commit_message": "DOC Ensure that gen_batches passes numpydoc validation (#24609)", "code": "def gen_batches(n, batch_size, *, min_batch_size=0):\n \n if not isinstance(batch_size, numbers.Integral):\n raise TypeError(\n \"gen_batches got batch_size=%s, must be an integer\" % batch_size\n )\n if batch_size <= 0:\n raise ValueError(\"gen_batches got batch_size=%s, must be positive\" % batch_size)\n start = 0\n for _ in range(int(n // batch_size)):\n end = start + batch_size\n if end + min_batch_size > n:\n continue\n yield slice(start, end)\n start = end\n if start < n:\n yield slice(start, n)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 164, "n_words": 68, "vocab_size": 46, "complexity": 6, "nloc": 16, "token_counts": 91, "n_ast_nodes": 155, "n_identifiers": 15, "random_cut": "def gen_batches(n, batch_size, *, min_batch_size=0):\n \n if not isinstance(batch_size, numbers.Integral):\n raise TypeError(\n \"gen_batches got batch_size=%s, must be an integer\" % batch_size\n )\n if batch_size <= 0:\n raise ValueError(\"gen_batches got batch_size=%s, must be positive\" % batch_size)\n start = 0\n for _ in range(int(n // batch_size)):\n end = start + batch_size\n if end + min_batch_size > n:\n continue\n yield slice(start, end)\n start = end\n if start < n:\n yield slice(start, n)\n\n", "d_id": 76769, "documentation": { "docstring": "Generator to create slices containing `batch_size` elements from 0 to `n`.\n\n The last slice may contain less than `batch_size` elements, when\n `batch_size` does not divide `n`.\n\n Parameters\n ----------\n n : int\n Size of the sequence.\n batch_size : int\n Number of elements in each batch.\n min_batch_size : int, default=0\n Minimum number of elements in each batch.\n\n Yields\n ------\n slice of `batch_size` elements\n\n See Also\n --------\n gen_even_slices: Generator to create n_packs slices going up to n.\n\n Examples\n --------\n >>> from sklearn.utils import gen_batches\n >>> list(gen_batches(7, 3))\n [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]\n >>> list(gen_batches(6, 3))\n [slice(0, 3, None), slice(3, 6, None)]\n >>> list(gen_batches(2, 3))\n [slice(0, 2, None)]\n >>> list(gen_batches(7, 3, min_batch_size=0))\n [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]\n >>> list(gen_batches(7, 3, min_batch_size=2))\n [slice(0, 3, None), slice(3, 7, None)]\n ", "n_words": 131, "vocab_size": 71, "n_whitespaces": 233, "language": "en" } }, { "id": 245868, "commit_id": 
"79c8295801acedee0cbdbf128a01b9fe162646b0", "repo": "mmdetection", "path": "tests/test_models/test_dense_heads/test_condinst_head.py", "file_name": "test_condinst_head.py", "fun_name": "test_condinst_maskhead_loss", "commit_message": "[Feature]: Support Condinst (#9223)\n\n* [Feature]: support condinst for instance segmentation\r\n\r\n* update\r\n\r\n* update\r\n\r\n* update\r\n\r\n* fix config name and add test unit\r\n\r\n* fix squeeze error\r\n\r\n* add README and chang mask to poly", "code": "def test_condinst_maskhead_loss(self):\n \n s = 256\n img_metas = [{\n 'img_shape': (s, s, 3),\n 'pad_shape': (s, s, 3),\n 'scale_factor': 1,\n }]\n condinst_bboxhead = CondInstBboxHead(\n num_classes=4,\n in_channels=1,\n feat_channels=1,\n stacked_convs=1,\n norm_cfg=None)\n\n mask_feature_head = _fake_mask_feature_head()\n condinst_maskhead = CondInstMaskHead(\n mask_feature_head=mask_feature_head,\n loss_mask=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n eps=5e-6,\n loss_weight=1.0))\n\n # Fcos head expects a multiple levels of features per image\n feats = []\n for i in range(len(condinst_bboxhead.strides)):\n feats.append(\n torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))\n feats = tuple(feats)\n cls_scores, bbox_preds, centernesses, param_preds =\\\n condinst_bboxhead.forward(feats)\n\n # Test that empty ground truth encourages the network to\n # predict background\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.empty((0, 4))\n gt_instances.labels = torch.LongTensor([])\n gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)\n\n _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,\n centernesses, param_preds,\n [gt_instances], img_metas)\n # When truth is empty then all mask loss\n # should be zero for random inputs\n positive_infos = condinst_bboxhead.get_positive_infos()\n mask_outs = condinst_maskhead.forward(feats, positive_infos)\n empty_gt_mask_losses = condinst_maskhead.loss_by_feat(\n *mask_outs, [gt_instances], img_metas, positive_infos)\n loss_mask = empty_gt_mask_losses['loss_mask']\n self.assertEqual(loss_mask, 0, 'mask loss should be zero')\n\n # When truth is non-empty then all cls, box loss and centerness loss\n # should be nonzero for random inputs\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.Tensor(\n [[23.6667, 23.8757, 238.6326, 151.8874]])\n gt_instances.labels = torch.LongTensor([2])\n gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)\n\n _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,\n centernesses, param_preds,\n [gt_instances], img_metas)\n positive_infos = condinst_bboxhead.get_positive_infos()\n mask_outs = condinst_maskhead.forward(feats, positive_infos)\n one_gt_mask_losses = condinst_maskhead.loss_by_feat(\n *mask_outs, [gt_instances], img_metas, positive_infos)\n loss_mask = one_gt_mask_losses['loss_mask']\n self.assertGreater(loss_mask, 0, 'mask loss should be nonzero')\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 917, "n_words": 228, "vocab_size": 134, "complexity": 2, "nloc": 56, "token_counts": 412, "n_ast_nodes": 641, "n_identifiers": 55, "random_cut": "def test_condinst_maskhead_loss(self):\n \n s = 256\n img_metas = [{\n 'img_shape': (s, s, 3),\n 'pad_shape': (s, s, 3),\n 'scale_factor': 1,\n }]\n condinst_bboxhead = CondInstBboxHead(\n num_classes=4,\n in_channels=1,\n feat_channels=1,\n stacked_convs=1,\n norm_cfg=None)\n\n 
mask_feature_head = _fake_mask_feature_head()\n condinst_maskhead = CondInstMaskHead(\n mask_feature_head=mask_feature_head,\n loss_mask=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n eps=5e-6,\n loss_weight=1.0))\n\n # Fcos head expects a multiple levels of features per image\n feats = []\n for i in range(len(condinst_bboxhead.strides)):\n feats.append(\n torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))\n feats = t", "d_id": 70917, "documentation": { "docstring": "Tests condinst maskhead loss when truth is empty and non-empty.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 181643, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/feature_transformers_tests.py", "file_name": "feature_transformers_tests.py", "fun_name": "test_ContinuousSelector_2", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_ContinuousSelector_2():\n \n cs = ContinuousSelector(threshold=5, svd_solver='randomized')\n X_transformed = cs.transform(iris_data[0:16, :])\n assert_equal(X_transformed.shape[1],3)\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 23, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 43, "n_ast_nodes": 71, "n_identifiers": 10, "random_cut": "def test_ContinuousSelector_2():\n \n cs = ContinuousSelector(threshold=5, svd_solver='randomized')\n X_transformed = cs.transform(iris_da", "d_id": 43431, "documentation": { "docstring": "Assert that ContinuousSelector works as expected with threshold=5.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 207807, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_add_view", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_add_view(self):\n \n add_dict = {\n \"title\": \"Døm ikke\",\n \"content\": \"
<p>great article</p>
\",\n \"date_0\": \"2008-03-18\",\n \"date_1\": \"10:54:39\",\n \"section\": self.s1.pk,\n }\n # Change User should not have access to add articles\n self.client.force_login(self.changeuser)\n # make sure the view removes test cookie\n self.assertIs(self.client.session.test_cookie_worked(), False)\n response = self.client.get(reverse(\"admin:admin_views_article_add\"))\n self.assertEqual(response.status_code, 403)\n # Try POST just to make sure\n post = self.client.post(reverse(\"admin:admin_views_article_add\"), add_dict)\n self.assertEqual(post.status_code, 403)\n self.assertEqual(Article.objects.count(), 3)\n self.client.get(reverse(\"admin:logout\"))\n\n # View User should not have access to add articles\n self.client.force_login(self.viewuser)\n response = self.client.get(reverse(\"admin:admin_views_article_add\"))\n self.assertEqual(response.status_code, 403)\n # Try POST just to make sure\n post = self.client.post(reverse(\"admin:admin_views_article_add\"), add_dict)\n self.assertEqual(post.status_code, 403)\n self.assertEqual(Article.objects.count(), 3)\n # Now give the user permission to add but not change.\n self.viewuser.user_permissions.add(\n get_perm(Article, get_permission_codename(\"add\", Article._meta))\n )\n response = self.client.get(reverse(\"admin:admin_views_article_add\"))\n self.assertEqual(response.context[\"title\"], \"Add article\")\n self.assertContains(response, \"Add article | Django site admin\")\n self.assertContains(\n response, ''\n )\n post = self.client.post(\n reverse(\"admin:admin_views_article_add\"), add_dict, follow=False\n )\n self.assertEqual(post.status_code, 302)\n self.assertEqual(Article.objects.count(), 4)\n article = Article.objects.latest(\"pk\")\n response = self.client.get(\n reverse(\"admin:admin_views_article_change\", args=(article.pk,))\n )\n self.assertContains(\n response,\n '
<li class=\"success\">The article “Døm ikke” was added successfully.</li>
  • ',\n )\n article.delete()\n self.client.get(reverse(\"admin:logout\"))\n\n # Add user may login and POST to add view, then redirect to admin root\n self.client.force_login(self.adduser)\n addpage = self.client.get(reverse(\"admin:admin_views_article_add\"))\n change_list_link = '›
    Articles' % reverse(\n \"admin:admin_views_article_changelist\"\n )\n self.assertNotContains(\n addpage,\n change_list_link,\n msg_prefix=\"User restricted to add permission is given link to change list view in breadcrumbs.\",\n )\n post = self.client.post(reverse(\"admin:admin_views_article_add\"), add_dict)\n self.assertRedirects(post, self.index_url)\n self.assertEqual(Article.objects.count(), 4)\n self.assertEqual(len(mail.outbox), 2)\n self.assertEqual(mail.outbox[0].subject, \"Greetings from a created object\")\n self.client.get(reverse(\"admin:logout\"))\n\n # The addition was logged correctly\n addition_log = LogEntry.objects.all()[0]\n new_article = Article.objects.last()\n article_ct = ContentType.objects.get_for_model(Article)\n self.assertEqual(addition_log.user_id, self.adduser.pk)\n self.assertEqual(addition_log.content_type_id, article_ct.pk)\n self.assertEqual(addition_log.object_id, str(new_article.pk))\n self.assertEqual(addition_log.object_repr, \"Døm ikke\")\n self.assertEqual(addition_log.action_flag, ADDITION)\n self.assertEqual(addition_log.get_change_message(), \"Added.\")\n\n # Super can add too, but is redirected to the change list view\n self.client.force_login(self.superuser)\n addpage = self.client.get(reverse(\"admin:admin_views_article_add\"))\n self.assertContains(\n addpage,\n change_list_link,\n msg_prefix=\"Unrestricted user is not given link to change list view in breadcrumbs.\",\n )\n post = self.client.post(reverse(\"admin:admin_views_article_add\"), add_dict)\n self.assertRedirects(post, reverse(\"admin:admin_views_article_changelist\"))\n self.assertEqual(Article.objects.count(), 5)\n self.client.get(reverse(\"admin:logout\"))\n\n # 8509 - if a normal user is already logged in, it is possible\n # to change user into the superuser without error\n self.client.force_login(self.joepublicuser)\n # Check and make sure that if user expires, data still persists\n self.client.force_login(self.superuser)\n # make sure the view removes test cookie\n self.assertIs(self.client.session.test_cookie_worked(), False)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 1100, "n_words": 342, "vocab_size": 189, "complexity": 1, "nloc": 85, "token_counts": 718, "n_ast_nodes": 1201, "n_identifiers": 62, "random_cut": "def test_add_view(self):\n \n add_dict = {\n \"title\": \"Døm ikke\",\n \"content\": \"
<p>great article</p>
    \",\n \"date_0\": \"2008-03-18\",\n \"date_1\": \"10:54:39\",\n \"section\": self.s1.pk,\n }\n # Change User should not have access to add articles\n self.client.force_login(self.changeuser)\n # make sure the view removes test cookie\n self.assertIs(self.client.session.test_cookie_worked(), False)\n response = self.client.get(reverse(\"admin:admin_views_article_add\"))\n self.assertEqual(response.status_code, 403)\n # Try POST just to make sure\n post = self.client.post(reverse(\"admin:admin_views_article_add\"), add_dict)\n self.assertEqual(post.status_code, 403)\n self.assertEqual(Article.objects.count(), 3)\n self.client.get(reverse(\"admin:logout\"))\n\n # View User should not have access to add articles\n self.client.force_login(self.viewuser)\n response = self.client.get(reverse(\"admin:admin_views_article_add\"))\n self.assertEqual(response.status_code, 403)\n # Try POST just to make sure\n post = self.client.post(reverse(\"admin:admin_views_article_add\"), add_dict)\n self.assertEqual(post.status_code, 403)\n self.assertEqual(Article.objects.count(), 3)\n # Now give the user permission to add but not change.\n self.viewuser.user_permissions.add(\n get_perm(Article, get_permission_codename(\"add\", Article._meta))\n )\n response = self.client.get(reverse(\"admin:admin_views_article_add\"))\n self.assertEqual(response.context[\"title\"], \"Add article\")\n self.assertContains(response, \"Add article | Django site admin\")\n self.assertContains(\n response, ''\n )\n post = self.client.post(\n reverse(\"admin:admin_views_article_add\"), add_dict, follow=False\n )\n self.assertEqual(post.status_code, 302)\n self.assertEqual(Article.objects.count(), 4)\n article = Article.objects.latest(\"pk\")\n response = self.client.get(\n reverse(\"admin:admin_views_article_change\", args=(article.pk,))\n )\n self.assertContains(\n response,\n '
<li class=\"success\">The article “Døm ikke” was added successfully.</li>
  • ',\n )\n article.delete()\n self.client.get(reverse(\"admin:logout\"))\n\n # Add user may login and POST to add view, then redirect to admin root\n self.client.force_login(self.adduser)\n addpage = self.client.get(reverse(\"admin:admin_views_article_add\"))\n change_list_li", "d_id": 52111, "documentation": { "docstring": "Test add view restricts access and actually adds items.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 266595, "commit_id": "43e55db20821a1341d21ffa1e4e7e6185b244105", "repo": "ansible", "path": "lib/ansible/galaxy/collection/__init__.py", "file_name": "__init__.py", "fun_name": "install", "commit_message": "ansible-galaxy - add signature verification of the MANIFEST.json (#76681)\n\n* ansible-galaxy collection install|verify:\r\n\r\n - Support verifying the origin of the MANIFEST.json when the Galaxy server has provided signatures.\r\n - Allow supplemental signatures to use during verification on the CLI/requirements file.\r\n\r\n* ansible-galaxy collection install:\r\n\r\n - Support disabling signature verification. This silences the warning provided by ansible-galaxy if the Galaxy server provided signatures it cannot use because no keyring is configured.\r\n - Store Galaxy server metadata alongside installed collections for provenance. This is used by 'ansible-galaxy collection verify --offline'.\r\n\r\n* Add unit tests for method that gets signatures from a Galaxy server\r\n\r\n* Add integration tests for user-provided signature sources\r\n\r\n- Test CLI option combinations\r\n- Test installing collections with valid/invalid signature sources\r\n- Test disabling GPG verification when installing collections\r\n- Test verifying collections with valid/invalid signature sources\r\n\r\n* Make signature verification advisory-by-default if signatures are provided by the Galaxy server\r\n\r\n- Make the default keyring None\r\n- Warn if the keyring is None but the Galaxy server provided signatures\r\n- Error if the keyring is None but the user supplied signatures\r\n- Error if the keyring is not None but is invalid\r\n\r\n* changelog\r\n\r\n* add ansible-galaxy user documentation for new options\r\n\r\nCo-authored-by: Matt Martz \r\nCo-authored-by: Sviatoslav Sydorenko \r\nCo-authored-by: Martin Krizek \r\nCo-authored-by: Sandra McCann \r\nCo-authored-by: Andy Mott \r\nCo-authored-by: John R Barker ", "code": "def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?\n # type: (Candidate, str, ConcreteArtifactsManager) -> None\n \n b_artifact_path = (\n artifacts_manager.get_artifact_path if collection.is_concrete_artifact\n else artifacts_manager.get_galaxy_artifact_path\n )(collection)\n\n collection_path = os.path.join(path, collection.namespace, collection.name)\n b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')\n display.display(\n u\"Installing '{coll!s}' to '{path!s}'\".\n format(coll=to_text(collection), path=collection_path),\n )\n\n if os.path.exists(b_collection_path):\n shutil.rmtree(b_collection_path)\n\n if collection.is_dir:\n install_src(collection, b_artifact_path, b_collection_path, artifacts_manager)\n else:\n install_artifact(\n b_artifact_path,\n b_collection_path,\n artifacts_manager._b_working_directory,\n collection.signatures,\n artifacts_manager.keyring\n )\n if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):\n write_source_metadata(\n collection,\n b_collection_path,\n artifacts_manager\n )\n\n display.display(\n '{coll!s} was 
installed successfully'.\n format(coll=to_text(collection)),\n )\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 315, "n_words": 76, "vocab_size": 62, "complexity": 6, "nloc": 33, "token_counts": 170, "n_ast_nodes": 262, "n_identifiers": 34, "random_cut": "def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?\n # type: (Candidate, str, ConcreteArtifactsManager) -> None\n \n b_artifact_path = (\n artifacts_manager.get_artifact_path if collection.is_concrete_artifact\n else artifacts_manager.get_galaxy_artifact_path\n )(collection)\n\n collection_path = os.path.join(path, collection.namespace, collection.name)\n b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')\n display.display(\n u\"Installing '{coll!s}' to '{path!s}'\".\n format(coll=to_text(collection), path=collection_path),\n )\n\n if os.path.exists(b_collection_path):\n shutil.rmtree(b_collection_path)\n\n if collection.is_dir:\n install_src(collection, b_artifact_path, b_collection_path, artifacts_manager)\n else:\n install_artifact(\n b_artifact_path,\n b_collection_path,\n artifacts_manager._b_working_directory,\n collection.signatures,\n artifacts_manager.keyring\n )\n if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):\n write_source_metadata(\n collection,\n b_collection_path,", "d_id": 78490, "documentation": { "docstring": "Install a collection under a given path.\n\n :param collection: Collection to be installed.\n :param path: Collection dirs layout path.\n :param artifacts_manager: Artifacts manager.\n ", "n_words": 23, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 208492, "commit_id": "23276ac4770f380ce1d5808950dd412a35594af1", "repo": "ipython", "path": "IPython/core/magics/code.py", "file_name": "code.py", "fun_name": "edit", "commit_message": "Fix EncodingWarning on Python 3.10", "code": "def edit(self, parameter_s='',last_call=['','']):\n \n opts,args = self.parse_options(parameter_s,'prxn:')\n\n try:\n filename, lineno, is_temp = self._find_edit_target(self.shell, \n args, opts, last_call)\n except MacroToEdit as e:\n self._edit_macro(args, e.args[0])\n return\n except InteractivelyDefined as e:\n print(\"Editing In[%i]\" % e.index)\n args = str(e.index)\n filename, lineno, is_temp = self._find_edit_target(self.shell, \n args, opts, last_call)\n if filename is None:\n # nothing was found, warnings have already been issued,\n # just give up.\n return\n\n if is_temp:\n self._knowntemps.add(filename)\n elif (filename in self._knowntemps):\n is_temp = True\n\n\n # do actual editing here\n print('Editing...', end=' ')\n sys.stdout.flush()\n filepath = Path(filename)\n try:\n # Quote filenames that may have spaces in them when opening\n # the editor\n quoted = filename = str(filepath.absolute())\n if \" \" in quoted:\n quoted = \"'%s'\" % quoted\n self.shell.hooks.editor(quoted, lineno)\n except TryNext:\n warn('Could not open editor')\n return\n\n # XXX TODO: should this be generalized for all string vars?\n # For now, this is special-cased to blocks created by cpaste\n if args.strip() == \"pasted_block\":\n self.shell.user_ns[\"pasted_block\"] = filepath.read_text(encoding='utf-8')\n\n if 'x' in opts: # -x prevents actual execution\n print()\n else:\n print('done. 
Executing edited code...')\n with preserve_keys(self.shell.user_ns, '__file__'):\n if not is_temp:\n self.shell.user_ns['__file__'] = filename\n if 'r' in opts: # Untranslated IPython code\n source = filepath.read_text(encoding='utf-8')\n self.shell.run_cell(source, store_history=False)\n else:\n self.shell.safe_execfile(filename, self.shell.user_ns,\n self.shell.user_ns)\n\n if is_temp:\n try:\n return filepath.read_text(encoding='utf-8')\n except IOError as msg:\n if Path(msg.filename) == filepath:\n warn('File not found. Did you forget to save?')\n return\n else:\n self.shell.showtraceback()\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1014, "n_words": 214, "vocab_size": 142, "complexity": 15, "nloc": 54, "token_counts": 364, "n_ast_nodes": 634, "n_identifiers": 45, "random_cut": "def edit(self, parameter_s='',last_call=['','']):\n \n opts,args = self.parse_options(parameter_s,'prxn:')\n\n try:\n filename, lineno, is_temp = self._find_edit_target(self.shell, \n args, opts, last_call)\n except MacroToEdit as e:\n self._edit_macro(args, e.args[0])\n return\n except InteractivelyDefined as e:\n print(\"Editing In[%i]\" % e.index)\n args = str(e.index)\n filename, lineno, is_temp = self._find_edit_target(self.shell, \n args, opts, last_call)\n if filename is None:\n # nothing was found, warnings have already been issued,\n # just give up.\n return\n\n if is_temp:\n self._knowntemps.add(filename)\n elif (filename in self._knowntemps):\n is_temp = True\n\n\n # do actual editing here\n print('Editing...', end=' ')\n sys.stdout.flush()\n filepath = Path(filename)\n try:\n # Quote filenames that may have spaces in them when opening\n # the editor\n quoted = filename = str(filepath.absolute())\n if \" \" in quoted:\n quoted = \"'%s'\" % quoted\n self.shell.hook", "d_id": 52353, "documentation": { "docstring": "Bring up an editor and execute the resulting code.\n\n Usage:\n %edit [options] [args]\n\n %edit runs IPython's editor hook. The default version of this hook is\n set to call the editor specified by your $EDITOR environment variable.\n If this isn't found, it will default to vi under Linux/Unix and to\n notepad under Windows. See the end of this docstring for how to change\n the editor hook.\n\n You can also set the value of this editor via the\n ``TerminalInteractiveShell.editor`` option in your configuration file.\n This is useful if you wish to use a different editor from your typical\n default with IPython (and for Windows users who typically don't set\n environment variables).\n\n This command allows you to conveniently edit multi-line code right in\n your IPython session.\n\n If called without arguments, %edit opens up an empty editor with a\n temporary file and will execute the contents of this file when you\n close it (don't forget to save it!).\n\n\n Options:\n\n -n : open the editor at a specified line number. By default,\n the IPython editor hook uses the unix syntax 'editor +N filename', but\n you can configure this by providing your own modified hook if your\n favorite editor supports line-number specifications with a different\n syntax.\n\n -p: this will call the editor with the same data as the previous time\n it was used, regardless of how long ago (in your current session) it\n was.\n\n -r: use 'raw' input. This option only applies to input taken from the\n user's history. 
By default, the 'processed' history is used, so that\n magics are loaded in their transformed version to valid Python. If\n this option is given, the raw input as typed as the command line is\n used instead. When you exit the editor, it will be executed by\n IPython's own processor.\n\n -x: do not execute the edited code immediately upon exit. This is\n mainly useful if you are editing programs which need to be called with\n command line arguments, which you can then do using %run.\n\n\n Arguments:\n\n If arguments are given, the following possibilities exist:\n\n - If the argument is a filename, IPython will load that into the\n editor. It will execute its contents with execfile() when you exit,\n loading any code in the file into your interactive namespace.\n\n - The arguments are ranges of input history, e.g. \"7 ~1/4-6\".\n The syntax is the same as in the %history magic.\n\n - If the argument is a string variable, its contents are loaded\n into the editor. You can thus edit any string which contains\n python code (including the result of previous edits).\n\n - If the argument is the name of an object (other than a string),\n IPython will try to locate the file where it was defined and open the\n editor at the point where it is defined. You can use `%edit function`\n to load an editor exactly at the point where 'function' is defined,\n edit it and have the file be executed automatically.\n\n - If the object is a macro (see %macro for details), this opens up your\n specified editor with a temporary file containing the macro's data.\n Upon exit, the macro is reloaded with the contents of the file.\n\n Note: opening at an exact line is only supported under Unix, and some\n editors (like kedit and gedit up to Gnome 2.8) do not understand the\n '+NUMBER' parameter necessary for this feature. Good editors like\n (X)Emacs, vi, jed, pico and joe all do.\n\n After executing your code, %edit will return as output the code you\n typed in the editor (except when it was an existing file). This way\n you can reload the code in further invocations of %edit as a variable,\n via _ or Out[], where is the prompt number of\n the output.\n\n Note that %edit is also available through the alias %ed.\n\n This is an example of creating a simple function inside the editor and\n then modifying it. First, start up the editor::\n\n In [1]: edit\n Editing... done. Executing edited code...\n Out[1]: 'def foo():\\\\n print \"foo() was defined in an editing\n session\"\\\\n'\n\n We can then call the function foo()::\n\n In [2]: foo()\n foo() was defined in an editing session\n\n Now we edit foo. IPython automatically loads the editor with the\n (temporary) file where foo() was previously defined::\n\n In [3]: edit foo\n Editing... done. Executing edited code...\n\n And if we call foo() again we get the modified version::\n\n In [4]: foo()\n foo() has now been changed!\n\n Here is an example of how to edit a code snippet successive\n times. First we call the editor::\n\n In [5]: edit\n Editing... done. Executing edited code...\n hello\n Out[5]: \"print 'hello'\\\\n\"\n\n Now we call it again with the previous output (stored in _)::\n\n In [6]: edit _\n Editing... done. Executing edited code...\n hello world\n Out[6]: \"print 'hello world'\\\\n\"\n\n Now we call it with the output #8 (stored in _8, also as Out[8])::\n\n In [7]: edit _8\n Editing... done. 
Executing edited code...\n hello again\n Out[7]: \"print 'hello again'\\\\n\"\n\n\n Changing the default editor hook:\n\n If you wish to write your own editor hook, you can put it in a\n configuration file which you load at startup time. The default hook\n is defined in the IPython.core.hooks module, and you can use that as a\n starting example for further modifications. That file also has\n general instructions on how to set a new hook for use once you've\n defined it.", "n_words": 882, "vocab_size": 385, "n_whitespaces": 1675, "language": "en" } }, { "id": 186299, "commit_id": "6f82ad9c4a2e17812a68d3c76d7eae89aee3a515", "repo": "textual", "path": "src/textual/strip.py", "file_name": "strip.py", "fun_name": "cell_length", "commit_message": "adds Strip primitive", "code": "def cell_length(self) -> int:\n \n # Done on demand and cached, as this is an O(n) operation\n if self._cell_length is None:\n self._cell_length = Segment.get_line_length(self._segments)\n return self._cell_length\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 25, "vocab_size": 22, "complexity": 2, "nloc": 5, "token_counts": 31, "n_ast_nodes": 53, "n_identifiers": 7, "random_cut": "def cell_length(self) -> int:\n \n # Done on demand and cached, as this is an O(n) operation", "d_id": 45438, "documentation": { "docstring": "Get the number of cells required to render this object.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 320933, "commit_id": "5616a99eff34f7074641d1391ed77d6b4b743529", "repo": "qutebrowser", "path": "tests/unit/mainwindow/test_messageview.py", "file_name": "test_messageview.py", "fun_name": "test_show_message_twice", "commit_message": "Add a MessageInfo data class\n\nPreparation for #7246", "code": "def test_show_message_twice(view):\n \n view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))\n view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))\n assert len(view._messages) == 1\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 22, "n_words": 10, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 49, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def test_show_message_twice(view):\n \n view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))\n view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))\n assert len(view._messages) == 1\n\n", "d_id": 117445, "documentation": { "docstring": "Show the same message twice -> only one should be shown.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 82438, "commit_id": "c1290c9ff89cb00caa5469129fd527e9d82cd820", "repo": "django-cms", "path": "cms/utils/plugins.py", "file_name": "plugins.py", "fun_name": "create_default_plugins", "commit_message": "ci: Added codespell (#7355)\n\nCo-authored-by: Christian Clauss \r\n\r\n* ci: codespell config taken from #7292", "code": "def create_default_plugins(request, placeholders, template, lang):\n \n from cms.api import add_plugin\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 4, "nloc": 11, "token_counts": 87, "n_ast_nodes": 28, "n_identifiers": 8, "random_cut": "def 
create_default_plugins(request, placeholders, template, lang):\n \n from cms.api import add_plug", "d_id": 17401, "documentation": { "docstring": "\n Create all default plugins for the given ``placeholders`` if they have\n a \"default_plugins\" configuration value in settings.\n return all plugins, children, grandchildren (etc.) created\n ", "n_words": 24, "vocab_size": 23, "n_whitespaces": 37, "language": "en" } }, { "id": 242745, "commit_id": "ee85e387bab535e2339b9d3cd1ab87c61d23af15", "repo": "Pillow", "path": "src/PIL/Jpeg2KImagePlugin.py", "file_name": "Jpeg2KImagePlugin.py", "fun_name": "_parse_jp2_header", "commit_message": "Remove redundant parentheses", "code": "def _parse_jp2_header(fp):\n \n\n # Find the JP2 header box\n reader = BoxReader(fp)\n header = None\n mimetype = None\n while reader.has_next_box():\n tbox = reader.next_box_type()\n\n if tbox == b\"jp2h\":\n header = reader.read_boxes()\n break\n elif tbox == b\"ftyp\":\n if reader.read_fields(\">4s\")[0] == b\"jpx \":\n mimetype = \"image/jpx\"\n\n size = None\n mode = None\n bpc = None\n nc = None\n dpi = None # 2-tuple of DPI info, or None\n\n while header.has_next_box():\n tbox = header.next_box_type()\n\n if tbox == b\"ihdr\":\n height, width, nc, bpc = header.read_fields(\">IIHB\")\n size = (width, height)\n if nc == 1 and (bpc & 0x7F) > 8:\n mode = \"I;16\"\n elif nc == 1:\n mode = \"L\"\n elif nc == 2:\n mode = \"LA\"\n elif nc == 3:\n mode = \"RGB\"\n elif nc == 4:\n mode = \"RGBA\"\n elif tbox == b\"res \":\n res = header.read_boxes()\n while res.has_next_box():\n tres = res.next_box_type()\n if tres == b\"resc\":\n vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(\">HHHHBB\")\n hres = _res_to_dpi(hrcn, hrcd, hrce)\n vres = _res_to_dpi(vrcn, vrcd, vrce)\n if hres is not None and vres is not None:\n dpi = (hres, vres)\n break\n\n if size is None or mode is None:\n raise SyntaxError(\"Malformed JP2 header\")\n\n return size, mode, mimetype, dpi\n\n\n##\n# Image plugin for JPEG2000 images.\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 658, "n_words": 198, "vocab_size": 101, "complexity": 20, "nloc": 46, "token_counts": 285, "n_ast_nodes": 476, "n_identifiers": 30, "random_cut": "def _parse_jp2_header(fp):\n \n\n # Find the JP2 header box\n reader = BoxReader(fp)\n header = None\n mimetype = None\n while reader.has_next_box():\n tbox = reader.next_box_type()\n\n if tbox == b\"jp2h\":\n header = reader.read_boxes()\n break\n elif tbox == b\"ftyp\":\n if reader.read_fields(\">4s\")[0] == b\"jpx \":\n mimetype = \"image/jpx\"\n\n size = None\n mode = None\n bpc = None\n nc = None\n dpi = None # 2-tuple of DPI info, or None\n\n while header.has_next_box():\n tbox = header.next_box_type()\n\n if tbox == b\"ihdr\":\n height, width, nc, bpc = header.read_fields(\">IIHB\")\n ", "d_id": 69908, "documentation": { "docstring": "Parse the JP2 header box to extract size, component count,\n color space information, and optionally DPI information,\n returning a (size, mode, mimetype, dpi) tuple.", "n_words": 24, "vocab_size": 23, "n_whitespaces": 29, "language": "en" } }, { "id": 248145, "commit_id": "96e0cdbc5af0563ee805ec4e588e1df14899af66", "repo": "synapse", "path": "tests/storage/databases/main/test_events_worker.py", "file_name": "test_events_worker.py", "fun_name": "_populate_events", "commit_message": "Add a consistency check on events read from the database (#12620)\n\nI've seen a few errors which can only 
plausibly be explained by the calculated\r\nevent id for an event being different from the ID of the event in the\r\ndatabase. It should be cheap to check this, so let's do so and raise an\r\nexception.", "code": "def _populate_events(self) -> None:\n \n self.get_success(\n self.store.db_pool.simple_upsert(\n \"rooms\",\n {\"room_id\": self.room_id},\n {\"room_version\": RoomVersions.V4.identifier},\n )\n )\n\n self.event_ids: List[str] = []\n for idx in range(20):\n event_json = {\n \"type\": f\"test {idx}\",\n \"room_id\": self.room_id,\n }\n event = make_event_from_dict(event_json, room_version=RoomVersions.V4)\n event_id = event.event_id\n self.get_success(\n self.store.db_pool.simple_upsert(\n \"events\",\n {\"event_id\": event_id},\n {\n \"event_id\": event_id,\n \"room_id\": self.room_id,\n \"topological_ordering\": idx,\n \"stream_ordering\": idx,\n \"type\": event.type,\n \"processed\": True,\n \"outlier\": False,\n },\n )\n )\n self.get_success(\n self.store.db_pool.simple_upsert(\n \"event_json\",\n {\"event_id\": event_id},\n {\n \"room_id\": self.room_id,\n \"json\": json.dumps(event_json),\n \"internal_metadata\": \"{}\",\n \"format_version\": EventFormatVersions.V3,\n },\n )\n )\n self.event_ids.append(event_id)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 774, "n_words": 78, "vocab_size": 55, "complexity": 2, "nloc": 54, "token_counts": 208, "n_ast_nodes": 351, "n_identifiers": 26, "random_cut": "def _populate_events(self) -> None:\n \n self.get_success(\n self.store.db_pool.simple_upsert(\n \"rooms\",\n {\"room_id\": self.room_id},\n {\"room_version\": RoomVersions.V4.identifier},\n )\n )\n\n self.event_ids: List[str] = []\n for idx in range(20):\n event_json = {\n \"type\": f\"test {idx}\",\n \"room_id\": self.room_id,\n }\n event = make_event_from_dict(event_json, room_version=RoomVersions.V4)\n event_id = event.event_id\n self.get_success(\n self.store.db_pool.simple_upsert(\n \"events\",\n {\"event_id\": event_id},\n {\n \"event_id\": event_id,\n \"room_id\": self.room_id,\n \"topological_ordering\": idx,\n \"stream_ordering\": idx,\n \"type\": event.type,\n \"processed\": True,\n \"outlier\": False,\n },\n )\n )\n self.get_success(\n self.store.db_pool.simple_upsert(\n \"event_json\",\n {\"event_id\": event_id},\n {\n \"room_id\": self.room_id,\n ", "d_id": 72125, "documentation": { "docstring": "Ensure that there are test events in the database.\n\n When testing with the in-memory SQLite database, all the events are lost during\n the simulated outage.\n\n To ensure consistency between `room_id`s and `event_id`s before and after the\n outage, rows are built and inserted manually.\n\n Upserts are used to handle the non-SQLite case where events are not lost.\n ", "n_words": 56, "vocab_size": 43, "n_whitespaces": 98, "language": "en" } }, { "id": 88840, "commit_id": "2ab29056f48c587870e0897a4feaef9ac7fd3b53", "repo": "sentry", "path": "src/sentry/tasks/weekly_reports.py", "file_name": "weekly_reports.py", "fun_name": "check_if_ctx_is_empty", "commit_message": "fix(weekly-email): skip organization report if report is empty (#41620)\n\nThis PR adds logic to skip sending the organization weekly report if\r\nthere is no context for any of the projects", "code": "def check_if_ctx_is_empty(ctx):\n \n return all(check_if_project_is_empty(project_ctx) for project_ctx in ctx.projects.values())\n\n\n# The entry point. 
This task is scheduled to run every week.\n@instrumented_task(\n name=\"sentry.tasks.weekly_reports.schedule_organizations\",\n queue=\"reports.prepare\",\n max_retries=5,\n acks_late=True,\n)", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@instrumented_task(\n name=\"sentry.tasks.weekly_reports.schedule_organizations\",\n queue=\"reports.prepare\",\n max_retries=5,\n acks_late=True,\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 41, "n_words": 26, "vocab_size": 26, "complexity": 2, "nloc": 2, "token_counts": 24, "n_ast_nodes": 74, "n_identifiers": 12, "random_cut": "def check_if_ctx_is_empty(ctx):\n \n return all(check_if_project_is_empty(project_ctx) for project_ctx in ctx.projects.values", "d_id": 18455, "documentation": { "docstring": "\n Check if the context is empty. If it is, we don't want to send an email.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 23, "language": "en" } }, { "id": 223797, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/message.py", "file_name": "message.py", "fun_name": "__delitem__", "commit_message": "add python 3.10.4 for windows", "code": "def __delitem__(self, name):\n \n name = name.lower()\n newheaders = []\n for k, v in self._headers:\n if k.lower() != name:\n newheaders.append((k, v))\n self._headers = newheaders\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 84, "n_words": 23, "vocab_size": 20, "complexity": 3, "nloc": 7, "token_counts": 52, "n_ast_nodes": 85, "n_identifiers": 9, "random_cut": "def __delitem__(self, name):\n \n name = name.lower()\n newheaders = []\n for k, v in self._headers:\n if k.lower() != name:\n newheaders.append((k, v))\n self._headers = newheade", "d_id": 57070, "documentation": { "docstring": "Delete all occurrences of a header, if present.\n\n Does not raise an exception if the header is missing.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 32, "language": "en" } }, { "id": 142807, "commit_id": "0959f44b6fc217a4f2766ed46a721eb79b067b2c", "repo": "ray", "path": "python/ray/tune/execution/placement_groups.py", "file_name": "placement_groups.py", "fun_name": "update_status", "commit_message": "[tune/structure] Introduce execution package (#26015)\n\nExecution-specific packages are moved to tune.execution.\r\n\r\nCo-authored-by: Xiaowei Jiang ", "code": "def update_status(self):\n \n self.cleanup()\n ready = True\n while ready:\n # Use a loop as `ready` might return futures one by one\n ready, _ = ray.wait(list(self._staging_futures.keys()), timeout=0)\n\n for ready_fut in ready:\n self.handle_ready_future(ready_fut)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 106, "n_words": 30, "vocab_size": 27, "complexity": 3, "nloc": 7, "token_counts": 51, "n_ast_nodes": 86, "n_identifiers": 13, "random_cut": "def update_status(self):\n \n self.cleanup()\n ready = True\n while ready:\n # Use a loop as `ready` might return futures one by one\n ready, _ = ray.wait(list(self._staging_futures.keys()), timeout=0)\n\n ", "d_id": 32804, "documentation": { "docstring": "Update placement group status.\n\n Moves ready placement groups from `self._staging` to\n `self._ready`.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 63968, "commit_id": 
"0faa116f9799f6d921ce8868a8f8eac1756ae008", "repo": "erpnext", "path": "erpnext/patches/v13_0/trim_whitespace_from_serial_nos.py", "file_name": "trim_whitespace_from_serial_nos.py", "fun_name": "execute", "commit_message": "fix(patch): serial no whitespace trimming\n\nold data can contain trailing/leading whitespace which doesn't work well\nwith code to find last SLE for serial no.", "code": "def execute():\n\tbroken_sles = frappe.db.sql(,\n\t\t\t(\n\t\t\t\t\" %\", # leading whitespace\n\t\t\t\t\"% \", # trailing whitespace\n\t\t\t\t\"%\\n %\", # leading whitespace on newline\n\t\t\t\t\"% \\n%\", # trailing whitespace on newline\n\t\t\t),\n\t\t\tas_dict=True,\n\t\t)\n\n\tfrappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sles)\n\n\tif not broken_sles:\n\t\treturn\n\n\tbroken_serial_nos = set()\n\n\tfor sle in broken_sles:\n\t\tserial_no_list = get_serial_nos(sle.serial_no)\n\t\tcorrect_sr_no = \"\\n\".join(serial_no_list)\n\n\t\tif correct_sr_no == sle.serial_no:\n\t\t\tcontinue\n\n\t\tfrappe.db.set_value(\"Stock Ledger Entry\", sle.name, \"serial_no\", correct_sr_no, update_modified=False)\n\t\tbroken_serial_nos.update(serial_no_list)\n\n\tif not broken_serial_nos:\n\t\treturn\n\n\tbroken_sr_no_records = [sr[0] for sr in frappe.db.sql(, (list(broken_serial_nos),)\n\t\t\t\t\t\t\t)]\n\n\tfrappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sr_no_records)\n\n\tpatch_savepoint = \"serial_no_patch\"\n\tfor serial_no in broken_sr_no_records:\n\t\ttry:\n\t\t\tfrappe.db.savepoint(patch_savepoint)\n\t\t\tsn = frappe.get_doc(\"Serial No\", serial_no)\n\t\t\tsn.update_serial_no_reference()\n\t\t\tsn.db_update()\n\t\texcept Exception:\n\t\t\tfrappe.db.rollback(save_point=patch_savepoint)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 72, "n_words": 101, "vocab_size": 70, "complexity": 8, "nloc": 46, "token_counts": 198, "n_ast_nodes": 335, "n_identifiers": 32, "random_cut": "def execute():\n\tbroken_sles = frappe.db.sql(,\n\t\t\t(\n\t\t\t\t\" %\", # leading whitespace\n\t\t\t\t\"% \", # trailing whitespace\n\t\t\t\t\"%\\n %\", # leading whitespace on newline\n\t\t\t\t\"% \\n%\", # trailing whitespace on newline\n\t\t\t),\n\t\t\tas_dict=True,\n\t\t)\n\n\tfrappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sles)\n\n\tif not broken_sles:\n\t\treturn\n\n\tbroken_serial_nos = set()\n\n\tfor sle in broken_sles:\n\t\tserial_no_list = get_serial_nos(sle.serial_no)\n\t\tcorrect_sr_no = \"\\n\".join(serial_no_list)\n\n\t\tif correct_sr_no == s", "d_id": 13542, "documentation": { "docstring": "\n\t\t\tselect name, serial_no\n\t\t\tfrom `tabStock Ledger Entry`\n\t\t\twhere\n\t\t\t\tis_cancelled = 0\n\t\t\t\tand (serial_no like %s or serial_no like %s or serial_no like %s or serial_no like %s)\n\t\t\t\n\t\t\t\t\t\t\tselect name\n\t\t\t\t\t\t\tfrom `tabSerial No`\n\t\t\t\t\t\t\twhere status='Active'\n\t\t\t\t\t\t\t\tand coalesce(purchase_document_type, '') = ''\n\t\t\t\t\t\t\t\tand name in %s ", "n_words": 43, "vocab_size": 25, "n_whitespaces": 34, "language": "en" } }, { "id": 248247, "commit_id": "d38d242411b8910dfacde1e61fd3a0ec5cbcaa66", "repo": "synapse", "path": "tests/config/test_cache.py", "file_name": "test_cache.py", "fun_name": "test_individual_caches_from_environ", "commit_message": "Reload cache factors from disk on SIGHUP (#12673)", "code": "def test_individual_caches_from_environ(self):\n \n config = {}\n self.config._environ = {\n \"SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER\": \"2\",\n 
\"SYNAPSE_NOT_CACHE\": \"BLAH\",\n }\n self.config.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.config.resize_all_caches()\n\n self.assertEqual(dict(self.config.cache_factors), {\"something_or_other\": 2.0})\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 91, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 70, "n_ast_nodes": 121, "n_identifiers": 11, "random_cut": "def test_individual_caches_from_environ(self):\n \n config = {}\n self.config._environ = {\n \"SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER\": \"2\",\n \"SYNAPSE_NOT_CACHE\": \"BLAH\",\n }\n self.config.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.config.resize_all_caches()\n\n self.assertEqual(dict(self.config.cache_factors), {\"something_or_oth", "d_id": 72178, "documentation": { "docstring": "\n Individual cache factors will be loaded from the environment.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 42688, "commit_id": "6bbe015905bd2709e621455d9f71a78b374d1337", "repo": "airflow", "path": "kubernetes_tests/test_kubernetes_pod_operator.py", "file_name": "test_kubernetes_pod_operator.py", "fun_name": "test_already_checked_on_success", "commit_message": "Use \"remote\" pod when patching KPO pod as \"checked\" (#23676)\n\nWhen patching as \"checked\", we have to use the current version of the pod otherwise we may get an error when trying to patch it, e.g.:\r\n\r\n```\r\nOperation cannot be fulfilled on pods \\\"test-kubernetes-pod-db9eedb7885c40099dd40cd4edc62415\\\": the object has been modified; please apply your changes to the latest version and try again\"\r\n```\r\n\r\nThis error would not cause a failure of the task, since errors in `cleanup` are suppressed. However, it would fail to patch.\r\n\r\nI believe one scenario when the pod may be updated is when retrieving xcom, since the sidecar is terminated after extracting the value.\r\n\r\nConcerning some changes in the tests re the \"already_checked\" label, it was added to a few \"expected pods\" recently, when we changed it to patch even in the case of a successful pod.\r\n\r\nSince we are changing the \"patch\" code to patch with the latest read on the pod that we have (i.e. using the `remote_pod` variable), and no longer the pod object stored on `k.pod`, the label no longer shows up in those tests (that's because in k.pod isn't actually a read of the remote pod, but just happens to get mutated in the patch function before it is used to actually patch the pod).\r\n\r\nFurther, since the `remote_pod` is a local variable, we can't observe it in tests. So we have to read the pod using k8s api. _But_, our \"find pod\" function excludes \"already checked\" pods! 
So we have to make this configurable.\r\n\r\nSo, now we have a proper integration test for the \"already_checked\" behavior (there was already a unit test).", "code": "def test_already_checked_on_success(self):\n \n pod_name = \"test-\" + str(random.randint(0, 1000000))\n k = KubernetesPodOperator(\n namespace='default',\n image=\"ubuntu:16.04\",\n cmds=[\"bash\", \"-cx\"],\n arguments=[\"echo 10\"],\n labels={\"foo\": \"bar\"},\n name=pod_name,\n task_id=\"task\" + self.get_current_task_name(),\n in_cluster=False,\n do_xcom_push=False,\n is_delete_operator_pod=False,\n )\n context = create_context(k)\n k.execute(context)\n actual_pod = k.find_pod('default', context, exclude_checked=False)\n actual_pod = self.api_client.sanitize_for_serialization(actual_pod)\n assert actual_pod['metadata']['labels']['already_checked'] == 'True'\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 216, "n_words": 43, "vocab_size": 37, "complexity": 1, "nloc": 19, "token_counts": 131, "n_ast_nodes": 221, "n_identifiers": 27, "random_cut": "def test_already_checked_on_success(self):\n \n pod_name = \"t", "d_id": 7690, "documentation": { "docstring": "\n When ``is_delete_operator_pod=False``, pod should have 'already_checked'\n label, whether pod is successful or not.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 35, "language": "en" } }, { "id": 65338, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/sales_register/sales_register.py", "file_name": "sales_register.py", "fun_name": "get_invoice_cc_wh_map", "commit_message": "style: format code with black", "code": "def get_invoice_cc_wh_map(invoice_list):\n\tsi_items = frappe.db.sql(\n\t\t\n\t\t% \", \".join([\"%s\"] * len(invoice_list)),\n\t\ttuple(inv.name for inv in invoice_list),\n\t\tas_dict=1,\n\t)\n\n\tinvoice_cc_wh_map = {}\n\tfor d in si_items:\n\t\tif d.cost_center:\n\t\t\tinvoice_cc_wh_map.setdefault(d.parent, frappe._dict()).setdefault(\"cost_center\", []).append(\n\t\t\t\td.cost_center\n\t\t\t)\n\n\t\tif d.warehouse:\n\t\t\tinvoice_cc_wh_map.setdefault(d.parent, frappe._dict()).setdefault(\"warehouse\", []).append(\n\t\t\t\td.warehouse\n\t\t\t)\n\n\treturn invoice_cc_wh_map\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 23, "n_words": 40, "vocab_size": 31, "complexity": 5, "nloc": 20, "token_counts": 124, "n_ast_nodes": 201, "n_identifiers": 20, "random_cut": "def get_invoice_cc_wh_map(invoice_list):\n\tsi_items = frappe.db.sql(\n\t\t\n\t\t% \", \".join([\"%s\"] * len(invoice_list)),\n\t\ttuple(inv.name fo", "d_id": 13860, "documentation": { "docstring": "select parent, cost_center, warehouse\n\t\tfrom `tabSales Invoice Item` where parent in (%s)\n\t\tand (ifnull(cost_center, '') != '' or ifnull(warehouse, '') != '')", "n_words": 22, "vocab_size": 19, "n_whitespaces": 19, "language": "en" } }, { "id": 123800, "commit_id": "86ac3025edb83ce49d563b6787df4fc6ca305ce6", "repo": "sqlmap", "path": "lib/core/common.py", "file_name": "common.py", "fun_name": "parseSqliteTableSchema", "commit_message": "Improving SQLite table schema parsing (#2678)", "code": "def parseSqliteTableSchema(value):\n \n\n retVal = False\n\n value = extractRegexResult(r\"(?s)\\((?P.+)\\)\", value)\n\n if value:\n table = {}\n columns = OrderedDict()\n\n value = re.sub(r\"\\(.+?\\)\", \"\", value).strip()\n\n for match in 
re.finditer(r\"(?:\\A|,)\\s*(([\\\"'`]).+?\\2|\\w+)(?:\\s+(INT|INTEGER|TINYINT|SMALLINT|MEDIUMINT|BIGINT|UNSIGNED BIG INT|INT2|INT8|INTEGER|CHARACTER|VARCHAR|VARYING CHARACTER|NCHAR|NATIVE CHARACTER|NVARCHAR|TEXT|CLOB|LONGTEXT|BLOB|NONE|REAL|DOUBLE|DOUBLE PRECISION|FLOAT|REAL|NUMERIC|DECIMAL|BOOLEAN|DATE|DATETIME|NUMERIC)\\b)?\", decodeStringEscape(value), re.I):\n column = match.group(1).strip(match.group(2) or \"\")\n if re.search(r\"(?i)\\A(CONSTRAINT|PRIMARY|UNIQUE|CHECK|FOREIGN)\\b\", column.strip()):\n continue\n retVal = True\n\n columns[column] = match.group(3) or \"TEXT\"\n\n table[safeSQLIdentificatorNaming(conf.tbl, True)] = columns\n kb.data.cachedColumns[conf.db] = table\n\n return retVal\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 175, "n_words": 59, "vocab_size": 43, "complexity": 6, "nloc": 16, "token_counts": 146, "n_ast_nodes": 260, "n_identifiers": 24, "random_cut": "def parseSqliteTableSchema(value):\n \n\n retVal = False\n\n value = extractRegexResult(r\"(?s)\\((?P.+)\\)\", value)\n\n if value:\n table = {}\n columns = OrderedDict()\n\n value = re.sub(r\"\\(.+?\\)\", \"\", value).strip()\n\n for match in re.finditer(r\"(?:\\A|,)\\s*(([\\\"'`]).+?\\2|\\w+)(?:\\s+(INT|INTEGER|TINYINT|SMALLINT|MEDIUMINT|BIGINT|UNSIGNED BIG INT|INT2|INT8|INTEGER|CHARACTER|VARCHAR|VARYING CHARACTER|NCHAR|NATIVE CHARACTER|NVARCHAR|TEXT|CLOB|LONGTEXT|BLOB|NONE|REAL|DOUBLE|DOUBLE PRECISION|FLOAT|REAL|NUMERIC|DECIMAL|BOOLEAN|DATE|DATETIME|NUMERIC)\\b)?\", decodeStringEscape(value), re.I):\n column = match.group(1).strip(match.group(2) or \"\")\n if re.search(r\"(?i)\\A(CONSTRAINT|PRIMARY|UNIQUE|CHECK|FOREIGN)\\b\", column.strip()):\n continue\n retVal = Tr", "d_id": 27449, "documentation": { "docstring": "\n Parses table column names and types from specified SQLite table schema\n\n >>> kb.data.cachedColumns = {}\n >>> parseSqliteTableSchema(\"CREATE TABLE users(\\\\n\\\\t\\\\tid INTEGER,\\\\n\\\\t\\\\tname TEXT\\\\n);\")\n True\n >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('id', 'INTEGER'), ('name', 'TEXT'))\n True\n >>> parseSqliteTableSchema(\"CREATE TABLE dummy(`foo bar` BIGINT, \\\\\"foo\\\\\" VARCHAR, 'bar' TEXT)\");\n True\n >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('foo bar', 'BIGINT'), ('foo', 'VARCHAR'), ('bar', 'TEXT'))\n True\n >>> parseSqliteTableSchema(\"CREATE TABLE suppliers(\\\\n\\\\tsupplier_id INTEGER PRIMARY KEY DESC,\\\\n\\\\tname TEXT NOT NULL\\\\n);\");\n True\n >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('supplier_id', 'INTEGER'), ('name', 'TEXT'))\n True\n >>> parseSqliteTableSchema(\"CREATE TABLE country_languages (\\\\n\\\\tcountry_id INTEGER NOT NULL,\\\\n\\\\tlanguage_id INTEGER NOT NULL,\\\\n\\\\tPRIMARY KEY (country_id, language_id),\\\\n\\\\tFOREIGN KEY (country_id) REFERENCES countries (country_id) ON DELETE CASCADE ON UPDATE NO ACTION,\\\\tFOREIGN KEY (language_id) REFERENCES languages (language_id) ON DELETE CASCADE ON UPDATE NO ACTION);\");\n True\n >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('country_id', 'INTEGER'), ('language_id', 'INTEGER'))\n True\n ", "n_words": 119, "vocab_size": 69, "n_whitespaces": 177, "language": "en" } }, { "id": 223532, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "get_phrase", "commit_message": "add 
python 3.10.4 for windows", "code": "def get_phrase(value):\n \n phrase = Phrase()\n try:\n token, value = get_word(value)\n phrase.append(token)\n except errors.HeaderParseError:\n phrase.defects.append(errors.InvalidHeaderDefect(\n \"phrase does not start with word\"))\n while value and value[0] not in PHRASE_ENDS:\n if value[0]=='.':\n phrase.append(DOT)\n phrase.defects.append(errors.ObsoleteHeaderDefect(\n \"period in 'phrase'\"))\n value = value[1:]\n else:\n try:\n token, value = get_word(value)\n except errors.HeaderParseError:\n if value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n phrase.defects.append(errors.ObsoleteHeaderDefect(\n \"comment found without atom\"))\n else:\n raise\n phrase.append(token)\n return phrase, value\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 334, "n_words": 64, "vocab_size": 41, "complexity": 7, "nloc": 26, "token_counts": 149, "n_ast_nodes": 253, "n_identifiers": 16, "random_cut": "def get_phrase(value):\n \n phrase = Phrase()\n try:\n token, value = get_word(value)\n phrase.append(token)\n except errors.HeaderParseError:\n phrase.defects.append(errors.InvalidHeaderDefect(\n \"phrase does not start with word\"))\n while value and value[0] not in PHRASE_ENDS:\n if value[0]=='.':\n phrase.append(DOT)\n phrase.defects.append(errors.ObsoleteHeaderDefect(\n \"period in 'phrase'\"))\n ", "d_id": 56955, "documentation": { "docstring": " phrase = 1*word / obs-phrase\n obs-phrase = word *(word / \".\" / CFWS)\n\n This means a phrase can be a sequence of words, periods, and CFWS in any\n order as long as it starts with at least one word. If anything other than\n words is detected, an ObsoleteHeaderDefect is added to the token's defect\n list. 
We also accept a phrase that starts with CFWS followed by a dot;\n this is registered as an InvalidHeaderDefect, since it is not supported by\n even the obsolete grammar.\n\n ", "n_words": 84, "vocab_size": 63, "n_whitespaces": 115, "language": "en" } }, { "id": 299285, "commit_id": "66551e6fcbd063e53c13adc8a6462b8e00ce1450", "repo": "core", "path": "tests/components/cast/test_media_player.py", "file_name": "test_media_player.py", "fun_name": "test_group_media_states", "commit_message": "Add state buffering to media_player and use it in cast (#70802)", "code": "async def test_group_media_states(hass, mz_mock):\n \n entity_id = \"media_player.speaker\"\n reg = er.async_get(hass)\n\n info = get_fake_chromecast_info()\n\n chromecast, _ = await async_setup_media_player_cast(hass, info)\n _, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks(\n chromecast, mz_mock\n )\n\n connection_status = MagicMock()\n connection_status.status = \"CONNECTED\"\n conn_status_cb(connection_status)\n await hass.async_block_till_done()\n\n state = hass.states.get(entity_id)\n assert state is not None\n assert state.name == \"Speaker\"\n assert state.state == \"off\"\n assert entity_id == reg.async_get_entity_id(\"media_player\", \"cast\", str(info.uuid))\n\n group_media_status = MagicMock(images=None)\n player_media_status = MagicMock(images=None)\n\n # Player has no state, group is buffering -> Should report 'buffering'\n group_media_status.player_state = \"BUFFERING\"\n group_media_status_cb(str(FakeGroupUUID), group_media_status)\n await hass.async_block_till_done()\n state = hass.states.get(entity_id)\n assert state.state == \"buffering\"\n\n # Player has no state, group is playing -> Should report 'playing'\n group_media_status.player_state = \"PLAYING\"\n group_media_status_cb(str(FakeGroupUUID), group_media_status)\n await hass.async_block_till_done()\n state = hass.states.get(entity_id)\n assert state.state == \"playing\"\n\n # Player is paused, group is playing -> Should report 'paused'\n player_media_status.player_state = None\n player_media_status.player_is_paused = True\n media_status_cb(player_media_status)\n await hass.async_block_till_done()\n await hass.async_block_till_done()\n state = hass.states.get(entity_id)\n assert state.state == \"paused\"\n\n # Player is in unknown state, group is playing -> Should report 'playing'\n player_media_status.player_state = \"UNKNOWN\"\n media_status_cb(player_media_status)\n await hass.async_block_till_done()\n state = hass.states.get(entity_id)\n assert state.state == \"playing\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 311, "n_words": 172, "vocab_size": 76, "complexity": 1, "nloc": 41, "token_counts": 275, "n_ast_nodes": 474, "n_identifiers": 33, "random_cut": "async def test_group_media_states(hass, mz_mock):\n \n entity_id = \"media_player.speaker\"\n reg = er.async_get(hass)\n\n info = get_fake_chromecast_info()\n\n chromecast, _ = await async_setup_media_player_cast(hass, info)\n _, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks(\n chromecast, mz_mock\n )\n\n connection_status = MagicMock()\n connection_status.status = \"CONNECTED\"\n conn_status_cb(connection_status)\n await hass.async_block_till_done()\n\n state = hass.states.get(entity_id)\n assert state is not None\n assert state.name == \"Speaker\"\n assert state.state == \"off\"\n assert entity_id == 
reg.async_get_entity_id(\"media_player\", \"cast\", str(info.uuid))\n\n group_media_status = MagicMock(images=None)\n player_media_status = MagicMock(images=None)\n\n # Player has no state, group is buffering -> Should report 'buffering'\n group_media_status.player_state = \"BUFFERING\"\n group_media_status_cb(str(FakeGroupUUID), group_media_status)\n await hass.async_block_till_done()\n state = hass.states.get(entity_id)\n assert state.state == \"buffering\"\n\n # Player has no state, group is playing -> Should report 'playing'\n group_media_status.player_state = \"PLAYING\"\n ", "d_id": 98219, "documentation": { "docstring": "Test media states are read from group if entity has no state.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 70904, "commit_id": "a0ef2477a68f2deb83cdc9a0bb709cb644be028b", "repo": "wagtail", "path": "wagtail/core/tests/test_blocks.py", "file_name": "test_blocks.py", "fun_name": "test_deserialize", "commit_message": "Improve asserts in wagtail.\n\nThese improvements were based on flake8-assertive, which compiled an extensive\nlist of patterns to replace with more precise assertions. This should make\nthe error messages better in case of failures.", "code": "def test_deserialize(self):\n \n block = blocks.PageChooserBlock()\n christmas_page = Page.objects.get(slug='christmas')\n\n self.assertEqual(block.to_python(christmas_page.id), christmas_page)\n\n # None should deserialize to None\n self.assertIsNone(block.to_python(None))\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 51, "n_ast_nodes": 88, "n_identifiers": 14, "random_cut": "def test_deserialize(self):\n \n block = blocks.PageChooserBlock()\n christmas_page = Page.objects.get(slug='christmas')\n\n self.assertEqual(block.to_python(christmas_page.id), christmas_page)\n\n # None should deserialize to None\n sel", "d_id": 15578, "documentation": { "docstring": "The serialized value of a PageChooserBlock (an ID) should deserialize to a Page object", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 269492, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "_is_current_explicit_device", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _is_current_explicit_device(device_type):\n \n device_type = device_type.upper()\n if device_type not in [\"CPU\", \"GPU\"]:\n raise ValueError('`device_type` should be either \"CPU\" or \"GPU\".')\n device = _get_current_tf_device()\n return device is not None and device.device_type == device_type.upper()\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 31, "vocab_size": 26, "complexity": 3, "nloc": 6, "token_counts": 48, "n_ast_nodes": 85, "n_identifiers": 6, "random_cut": "def _is_current_explicit_device(device_type):\n \n device_type ", "d_id": 80124, "documentation": { "docstring": "Check if the current device is explicitly set on the device type specified.\n\n Args:\n device_type: A string containing `GPU` or `CPU` (case-insensitive).\n\n Returns:\n A boolean indicating if the current device scope is explicitly set on the\n device type.\n\n Raises:\n ValueError: If the 
`device_type` string indicates an unsupported device.\n ", "n_words": 48, "vocab_size": 33, "n_whitespaces": 88, "language": "en" } }, { "id": 266828, "commit_id": "12865139472f0a2fa95b94983dcedb4d57e93b10", "repo": "ansible", "path": "lib/ansible/module_utils/common/parameters.py", "file_name": "parameters.py", "fun_name": "_return_datastructure_name", "commit_message": "module_utils - Fix type hinting issues.", "code": "def _return_datastructure_name(obj):\n \n if isinstance(obj, (text_type, binary_type)):\n if obj:\n yield to_native(obj, errors='surrogate_or_strict')\n return\n elif isinstance(obj, Mapping):\n for element in obj.items():\n for subelement in _return_datastructure_name(element[1]):\n yield subelement\n elif is_iterable(obj):\n for element in obj:\n for subelement in _return_datastructure_name(element):\n yield subelement\n elif obj is None or isinstance(obj, bool):\n # This must come before int because bools are also ints\n return\n elif isinstance(obj, tuple(list(integer_types) + [float])):\n yield to_native(obj, nonstring='simplerepr')\n else:\n raise TypeError('Unknown parameter type: %s' % (type(obj)))\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 212, "n_words": 72, "vocab_size": 49, "complexity": 12, "nloc": 19, "token_counts": 136, "n_ast_nodes": 222, "n_identifiers": 20, "random_cut": "def _return_datastructure_name(obj):\n \n if isinstance(obj, (text_type, binary_type)):\n if obj:\n yield to_native(obj, errors='surrogate_or_strict')\n return\n elif isinstance(obj, Mapping):\n for element in obj.items():\n for subelement in _return_datastructure_name(element[1]):\n ", "d_id": 78610, "documentation": { "docstring": " Return native stringified values from datastructures.\n\n For use with removing sensitive values pre-jsonification.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 16, "language": "en" } }, { "id": 151751, "commit_id": "afc00bc30a94abd64fee000535e66287fd91595f", "repo": "freqtrade", "path": "freqtrade/rpc/api_server/ws/message_stream.py", "file_name": "message_stream.py", "fun_name": "__aiter__", "commit_message": "log warning if channel too far behind, add docstrings to message stream", "code": "async def __aiter__(self):\n \n waiter = self._waiter\n while True:\n # Shield the future from being cancelled by a task waiting on it\n message, ts, waiter = await asyncio.shield(waiter)\n yield message, ts\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 84, "n_words": 30, "vocab_size": 27, "complexity": 2, "nloc": 5, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 8, "random_cut": "async def __aiter__(self):\n \n waiter = self._waiter\n while True:\n # Shield the future from being cancelled by a task waiting on it\n message, t", "d_id": 35130, "documentation": { "docstring": "\n Iterate over the messages in the message stream\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 23, "language": "en" } }, { "id": 109448, "commit_id": "c73f4c455514cf5422d27bf38c93250de8316b21", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "get_gridspec", "commit_message": "Merge SubplotBase into AxesBase.", "code": "def get_gridspec(self):\n \n return self._subplotspec.get_gridspec() if self._subplotspec else None\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 2, "nloc": 2, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 3, "random_cut": "def get_gridspec(self):\n ", "d_id": 23593, "documentation": { "docstring": "Return the `.GridSpec` associated with the subplot, or None.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 48856, "commit_id": "33c9d1a8e4d7fd0a023819e27ba6c3819cda6b4b", "repo": "PaddleHub", "path": "modules/image/classification/esnet_x0_5_imagenet/model.py", "file_name": "model.py", "fun_name": "ESNet_x0_5", "commit_message": "add clas modules", "code": "def ESNet_x0_5(pretrained=False, use_ssld=False, **kwargs):\n \n model = ESNet(scale=0.5, stages_pattern=MODEL_STAGES_PATTERN[\"ESNet\"], **kwargs)\n return model\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 20, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 37, "n_ast_nodes": 56, "n_identifiers": 9, "random_cut": "def ESNet_x0_5(pretrained=False, use_ssld=False, **kwargs):\n \n mo", "d_id": 9622, "documentation": { "docstring": "\n ESNet_x0_5\n Args:\n pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.\n If str, means the path of the pretrained model.\n use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.\n Returns:\n model: nn.Layer. Specific `ESNet_x0_5` model depends on args.\n ", "n_words": 40, "vocab_size": 35, "n_whitespaces": 93, "language": "en" } }, { "id": 37515, "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_rjieba", "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", "code": "def require_rjieba(test_case):\n \n return unittest.skipUnless(is_rjieba_available(), \"test requires rjieba\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def require_rjieba(test_case):\n ", "d_id": 6820, "documentation": { "docstring": "\n Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 22, "language": "en" } }, { "id": 109257, "commit_id": "5af97515b3823b2efa1961253a11e2d77df88637", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "get_yaxis", "commit_message": "Add discouraged admonitions\n\nThe [*Discouraged*] prefix in the summary line is added in analogy to\nthe [*Deprecated*] prefix we add automatically. 
We do this so that\nthese \"labels\" are prominently visible also in summary overviews of\nthe functions in the docs.\n\nSince we rarely discourage whole functions, for now I just do this\nmanually.", "code": "def get_yaxis(self):\n \n return self.yaxis\n\n get_xgridlines = _axis_method_wrapper(\"xaxis\", \"get_gridlines\")\n get_xticklines = _axis_method_wrapper(\"xaxis\", \"get_ticklines\")\n get_ygridlines = _axis_method_wrapper(\"yaxis\", \"get_gridlines\")\n get_yticklines = _axis_method_wrapper(\"yaxis\", \"get_ticklines\")\n\n # Adding and tracking artists\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 54, "n_words": 25, "vocab_size": 18, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 84, "n_identifiers": 8, "random_cut": "def get_yaxis(self):\n \n return self.yaxis\n\n get_xgridlines = _axis_method_wrapper(\"xaxis\", \"get_gridlines\")\n get_xticklines = _axis_method_wrapper(\"xaxis\", \"g", "d_id": 23495, "documentation": { "docstring": "\n [*Discouraged*] Return the YAxis instance.\n\n .. admonition:: Discouraged\n\n The use of this function is discouraged. You should instead\n directly access the attribute ``ax.yaxis``.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 67, "language": "en" } }, { "id": 269510, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "disable_tf_random_generator", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def disable_tf_random_generator():\n \n global _USE_GENERATOR_FOR_RNG\n _USE_GENERATOR_FOR_RNG = False\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 16, "n_words": 7, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 20, "n_identifiers": 2, "random_cut": "def disable_tf_random_generator():\n ", "d_id": 80141, "documentation": { "docstring": "Disable the `tf.random.Generator` as the RNG for Keras.\n\n See `tf.keras.backend.experimental.is_tf_random_generator_enabled` for more\n details.\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 22, "language": "en" } }, { "id": 171994, "commit_id": "5a372d892a8c45a2442ab1e744aea3241d2c26a8", "repo": "pandas", "path": "pandas/tests/io/parser/test_parse_dates.py", "file_name": "test_parse_dates.py", "fun_name": "test_parse_timezone", "commit_message": "API: default to stdlib timezone objects for fixed-offsets (#49677)\n\n* API: default to stdlib timezone objects for fixed-offsets\r\n\r\n* update docstrings\r\n\r\n* flesh out whatsnew\r\n\r\n* handle strings\r\n\r\n* skip on windows", "code": "def test_parse_timezone(all_parsers):\n # see gh-22256\n parser = all_parsers\n data = \n result = parser.read_csv(StringIO(data), parse_dates=[\"dt\"])\n\n dti = DatetimeIndex(\n list(\n date_range(\n start=\"2018-01-04 09:01:00\",\n end=\"2018-01-04 09:05:00\",\n freq=\"1min\",\n tz=timezone(timedelta(minutes=540)),\n )\n ),\n freq=None,\n )\n expected_data = {\"dt\": dti, \"val\": [23350, 23400, 23400, 23400, 23400]}\n\n expected = DataFrame(expected_data)\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\n@pytest.mark.parametrize(\n \"date_string\",\n [\"32/32/2019\", \"02/30/2019\", \"13/13/2019\", \"13/2019\", \"a3/11/2018\", \"10/11/2o17\"],\n)", "url": "https://github.com/pandas-dev/pandas.git", 
"language": "Python", "ast_errors": "@skip_pyarrow\n@pytest.mark.parametrize(\n \"date_string\",\n [\"32/32/2019\", \"02/30/2019\", \"13/13/2019\", \"13/2019\", \"a3/11/2018\", \"10/11/2o17\"],\n)", "n_ast_errors": 1, "ast_levels": 18, "n_whitespaces": 187, "n_words": 54, "vocab_size": 45, "complexity": 1, "nloc": 23, "token_counts": 103, "n_ast_nodes": 215, "n_identifiers": 28, "random_cut": "def test_parse_timezone(all_parsers):\n # see gh-22256\n parser = all_parsers\n data = \n result = parser.read_csv(StringIO(data), parse_dates=[\"dt\"])\n\n dti = DatetimeIndex(\n list(\n date_range(\n start=\"2018-01-04 09:01:00\",\n end=\"2018-01-04 09:05:00\",\n freq=\"1min\",\n tz=timezone(timedelta(minutes=540)),\n )\n ),\n freq=None,\n )\n expected_data = {\"dt\": dti, \"val\": [23350, 23400, 23400, 23400, 23400]}\n\n expected = DataFrame(expected_data)\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\n@pytest.mark.parametrize(\n \"d", "d_id": 40744, "documentation": { "docstring": "dt,val\n 2018-01-04 09:01:00+09:00,23350\n 2018-01-04 09:02:00+09:00,23400\n 2018-01-04 09:03:00+09:00,23400\n 2018-01-04 09:04:00+09:00,23400\n 2018-01-04 09:05:00+09:00,23400", "n_words": 11, "vocab_size": 7, "n_whitespaces": 75, "language": "en" } }, { "id": 251340, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/connection.py", "file_name": "connection.py", "fun_name": "alpn_proto_negotiated", "commit_message": "make it black!", "code": "def alpn_proto_negotiated(self) -> Optional[bytes]: # pragma: no cover\n \n warnings.warn(\n \"Connection.alpn_proto_negotiated is deprecated, use Connection.alpn instead.\",\n DeprecationWarning,\n )\n return self.alpn\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 70, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 8, "random_cut": "def alpn_proto_negotiated(self) -> Optional[bytes]: # pragma: no cover\n \n warnings.warn(\n \"Connection.alpn_proto_negotiated is deprecated, use Connection.alpn instead.\",\n DeprecationWarnin", "d_id": 73690, "documentation": { "docstring": "*Deprecated:* An outdated alias for Connection.alpn.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 314495, "commit_id": "10dc38e0ec27f7bef990ee431459342f9c3c52b4", "repo": "core", "path": "homeassistant/components/velux/cover.py", "file_name": "cover.py", "fun_name": "device_class", "commit_message": "Adjust CoverEntity property type hints in components (#73943)\n\n* Adjust CoverEntity property type hints in components\r\n\r\n* Revert changes to rflink\r\n\r\n* Revert changes to wilight", "code": "def device_class(self) -> CoverDeviceClass:\n \n if isinstance(self.node, Awning):\n return CoverDeviceClass.AWNING\n if isinstance(self.node, Blind):\n return CoverDeviceClass.BLIND\n if isinstance(self.node, GarageDoor):\n return CoverDeviceClass.GARAGE\n if isinstance(self.node, Gate):\n return CoverDeviceClass.GATE\n if isinstance(self.node, RollerShutter):\n return CoverDeviceClass.SHUTTER\n if isinstance(self.node, Window):\n return CoverDeviceClass.WINDOW\n return CoverDeviceClass.WINDOW\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 158, "n_words": 36, "vocab_size": 19, "complexity": 7, "nloc": 15, "token_counts": 96, "n_ast_nodes": 148, 
"n_identifiers": 17, "random_cut": "def device_class(self) -> CoverDeviceClass:\n \n if isinstance(self.node, Awning):\n return CoverDeviceClass.AWNING\n if isinstance", "d_id": 113101, "documentation": { "docstring": "Define this cover as either awning, blind, garage, gate, shutter or window.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 284522, "commit_id": "4f692f26e6f3935b7454e8b448838c8a87b98f23", "repo": "OpenBBTerminal", "path": "openbb_terminal/economy/investingcom_model.py", "file_name": "investingcom_model.py", "fun_name": "get_yieldcurve", "commit_message": "Feature/yieldcurve (#1734)\n\n* Adds yield curves\r\n\r\n* Adds yield curves for several countries\r\n\r\n* Adds yield curves for several countries\r\n\r\n* Adds yield curves for several countries\r\n\r\n* Adds yield curves for several countries\r\n\r\n* Adds yield curves for several countries\r\n\r\n* ycrv plots by default\r\n\r\n* Limits source choices and renames raw columns\r\n\r\n* Fix test\r\n\r\n* Fix test\r\n\r\n* lint\r\n\r\nCo-authored-by: Jeroen Bouma \r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: James Maslek ", "code": "def get_yieldcurve(country) -> pd.DataFrame:\n \n\n data = investpy.bonds.get_bonds_overview(country)\n data.drop(columns=data.columns[0], axis=1, inplace=True)\n data.rename(\n columns={\n \"name\": \"Tenor\",\n \"last\": \"Current\",\n \"last_close\": \"Previous\",\n \"high\": \"High\",\n \"low\": \"Low\",\n \"change\": \"Change\",\n \"change_percentage\": \"% Change\",\n },\n inplace=True,\n )\n return data\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 148, "n_words": 32, "vocab_size": 31, "complexity": 1, "nloc": 23, "token_counts": 85, "n_ast_nodes": 152, "n_identifiers": 13, "random_cut": "def get_yieldcurve(country) -> pd.DataFrame:\n \n\n data = investpy.bonds.get_bonds", "d_id": 84774, "documentation": { "docstring": "Get country yield curve [Source: Investing.com]\n\n Returns\n -------\n pd.DataFrame\n Country yield curve\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 31, "language": "en" } }, { "id": 291554, "commit_id": "5d4c4a1293b00e864f6ad202fbc565388d613e71", "repo": "core", "path": "tests/components/alexa/test_capabilities.py", "file_name": "test_capabilities.py", "fun_name": "test_report_humidifier_humidity_state", "commit_message": "Add humidifier support for Alexa (#81329)", "code": "async def test_report_humidifier_humidity_state(hass):\n \n hass.states.async_set(\n \"humidifier.dry\",\n \"on\",\n {\n \"friendly_name\": \"Humidifier dry\",\n \"supported_features\": 0,\n \"humidity\": 25,\n \"min_humidity\": 20,\n \"max_humidity\": 90,\n },\n )\n hass.states.async_set(\n \"humidifier.wet\",\n \"on\",\n {\n \"friendly_name\": \"Humidifier wet\",\n \"supported_features\": 0,\n \"humidity\": 80,\n \"min_humidity\": 20,\n \"max_humidity\": 90,\n },\n )\n properties = await reported_properties(hass, \"humidifier.dry\")\n properties.assert_equal(\"Alexa.RangeController\", \"rangeValue\", 25)\n\n properties = await reported_properties(hass, \"humidifier.wet\")\n properties.assert_equal(\"Alexa.RangeController\", \"rangeValue\", 80)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 246, "n_words": 53, "vocab_size": 33, "complexity": 1, 
"nloc": 27, "token_counts": 112, "n_ast_nodes": 204, "n_identifiers": 7, "random_cut": "async def test_report_humidifier_humidity_state(hass):\n \n hass.states.async_set(\n \"humidifier.dry\",\n \"on\",\n {\n \"friendly_name\": \"Humidifier dry\",\n \"supported_features\": 0,\n \"humidity\": 25,\n \"min_humidity\": 20,\n \"max_humidity\": 90,\n },\n )\n ", "d_id": 90659, "documentation": { "docstring": "Test PercentageController, PowerLevelController reports humidifier humidity correctly.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 264125, "commit_id": "fa1e28e860c4bdb3e585a968bd248a2ac666e1f6", "repo": "netbox", "path": "netbox/extras/models/customfields.py", "file_name": "customfields.py", "fun_name": "serialize", "commit_message": "Initial work on #7006", "code": "def serialize(self, value):\n \n if self.type == CustomFieldTypeChoices.TYPE_OBJECT and value is not None:\n return value.pk\n return value\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 48, "n_words": 16, "vocab_size": 14, "complexity": 3, "nloc": 4, "token_counts": 28, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def serialize(self, value):\n \n if self.type == CustomFieldTypeChoices.TYPE_OBJECT and value is not None:\n return value.pk\n ", "d_id": 77612, "documentation": { "docstring": "\n Prepare a value for storage as JSON data.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 100290, "commit_id": "23d92c1f0d83ce1cdcc51480cfe37af074a981b3", "repo": "faceswap", "path": "tools/manual/detected_faces.py", "file_name": "detected_faces.py", "fun_name": "revert_to_saved", "commit_message": "Bugfixes\n - Sort - Fix rare help-text parsing bug\n - Manual - Fix issue where frame count is incorrect when een > 1 used on extract", "code": "def revert_to_saved(self, frame_index):\n \n if frame_index not in self._updated_frame_indices:\n logger.debug(\"Alignments not amended. Returning\")\n return\n logger.verbose(\"Reverting alignments for frame_index %s\", frame_index)\n print(frame_index)\n print(len(self._sorted_frame_names))\n alignments = self._alignments.data[self._sorted_frame_names[frame_index]][\"faces\"]\n faces = self._frame_faces[frame_index]\n\n reset_grid = self._add_remove_faces(alignments, faces)\n\n for detected_face, face in zip(faces, alignments):\n detected_face.from_alignment(face, with_thumb=True)\n detected_face.load_aligned(None, force=True)\n _ = detected_face.aligned.average_distance # cache the distances\n\n self._updated_frame_indices.remove(frame_index)\n if not self._updated_frame_indices:\n self._tk_unsaved.set(False)\n\n if reset_grid:\n self._tk_face_count_changed.set(True)\n else:\n self._tk_edited.set(True)\n self._globals.tk_update.set(True)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 246, "n_words": 59, "vocab_size": 47, "complexity": 5, "nloc": 22, "token_counts": 172, "n_ast_nodes": 278, "n_identifiers": 34, "random_cut": "def revert_to_saved(self, frame_index):\n \n if frame_index not in self._updated_frame_indices:\n logger.debug(\"Alignments not amended. 
Returning\")\n return\n logger.verbose(\"Reverting alignments for frame_index %s\", frame", "d_id": 19791, "documentation": { "docstring": " Revert the frame's alignments to their saved version for the given frame index.\n\n Parameters\n ----------\n frame_index: int\n The frame that should have their faces reverted to their saved version\n ", "n_words": 29, "vocab_size": 22, "n_whitespaces": 69, "language": "en" } }, { "id": 186787, "commit_id": "6e1696ba32ef8c1162bb0cd85df5a22113952828", "repo": "certbot", "path": "tools/finish_release.py", "file_name": "finish_release.py", "fun_name": "parse_args", "commit_message": "Add Signed Windows Installer Workflow (#9076)\n\n* Add Code Signing action for Windows Installer\r\n\r\n* Clean up variable names and input\r\n\r\n* Amend and add to documentation per PR guidelines\r\n\r\n* Update tools/finish_release.py\r\n\r\nCo-authored-by: Brad Warren \r\n\r\n* Update tools/finish_release.py\r\n\r\nAmend typo\r\n\r\nCo-authored-by: Brad Warren \r\n\r\n* Amend release script for better work flow\r\n\r\n- SCP commands to upload and download unsigned & signed installers from CSS\r\n\r\n* Collapse spaces\r\n\r\n* Update tools/finish_release.py\r\n\r\nCo-authored-by: Brad Warren \r\n\r\n* Create new windows signer function\r\n\r\n* Update Windows Installer Script\r\n\r\n- Update change log\r\n- add new function for signing and document\r\n- @TODO Streammline SSH session\r\n\r\n* Remove Azure and Github release methods\r\n\r\n- Methods moved to CSS\r\n- Reduced to a ssh function that triggers the process on a CSS\r\n\r\n* Amend Chnagelog and Remove Unneeded Deps\r\n\r\n* Update tools/finish_release.py\r\n\r\nCo-authored-by: Brad Warren \r\n\r\n* Add Verison Fetch Function\r\n\r\n- For the purpose of snap releases\r\n- Add back package to dev extras for function\r\n\r\n* Chaneg path in ssh command\r\n\r\n* Amend release script\r\n\r\n* Amend the ssh command for CSS\r\n\r\n* Update tools/finish_release.py\r\n\r\nCo-authored-by: Brad Warren \r\n\r\n* Update script with proper path and subprocess call\r\n\r\n* Update ssh command\r\n\r\n* Correct typo in path\r\n\r\n* Fix typo in path\r\n\r\n* Update certbot/CHANGELOG.md\r\n\r\nCo-authored-by: ohemorange \r\n\r\n* Remove missed conflict text\r\n\r\nCo-authored-by: Brad Warren \r\nCo-authored-by: ohemorange ", "code": "def parse_args(args):\n \n # Use the file's docstring for the help text and don't let argparse reformat it.\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--css', type=str, required=True, help='hostname of code signing server')\n group = parser.add_mutually_exclusive_group()\n # We use 'store_false' and a destination related to the other type of\n # artifact to cause the flag being set to disable publishing of the other\n # artifact. 
This makes using the parsed arguments later on a little simpler\n # and cleaner.\n group.add_argument('--snaps-only', action='store_false', dest='publish_windows',\n help='Skip publishing other artifacts and only publish the snaps')\n group.add_argument('--windows-only', action='store_false', dest='publish_snaps',\n help='Skip publishing other artifacts and only publish the Windows installer')\n return parser.parse_args(args)\n\n ", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 224, "n_words": 102, "vocab_size": 71, "complexity": 1, "nloc": 10, "token_counts": 90, "n_ast_nodes": 160, "n_identifiers": 18, "random_cut": "def parse_args(args):\n \n # Use the file's docstring for the help text and don't let argparse reformat it.\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--css', type=str, required=True, help='hostname of code signing server')\n group = parser.add_mutually_exclusive_group()\n # We use 'store_false' and a destination related to the other type of\n # artifact to cause the flag bein", "d_id": 45621, "documentation": { "docstring": "Parse command line arguments.\n\n :param args: command line arguments with the program name removed. This is\n usually taken from sys.argv[1:].\n :type args: `list` of `str`\n\n :returns: parsed arguments\n :rtype: argparse.Namespace\n\n ", "n_words": 30, "vocab_size": 26, "n_whitespaces": 52, "language": "en" } }, { "id": 197437, "commit_id": "9a3ffc6781bd44c47cf49e128ef154389c32876a", "repo": "sympy", "path": "sympy/physics/vector/fieldfunctions.py", "file_name": "fieldfunctions.py", "fun_name": "is_conservative", "commit_message": "Some pep8 cleanup of sympy.physics.vector.", "code": "def is_conservative(field):\n \n\n # Field is conservative irrespective of frame\n # Take the first frame in the result of the separate method of Vector\n if field == Vector(0):\n return True\n frame = list(field.separate())[0]\n return curl(field, frame).simplify() == Vector(0)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 62, "n_words": 37, "vocab_size": 28, "complexity": 2, "nloc": 5, "token_counts": 45, "n_ast_nodes": 77, "n_identifiers": 8, "random_cut": "def is_conservative(field):\n \n\n # Field is conservative irrespective of frame\n # Take the first frame ", "d_id": 48545, "documentation": { "docstring": "\n Checks if a field is conservative.\n\n Parameters\n ==========\n\n field : Vector\n The field to check for conservative property\n\n Examples\n ========\n\n >>> from sympy.physics.vector import ReferenceFrame\n >>> from sympy.physics.vector import is_conservative\n >>> R = ReferenceFrame('R')\n >>> is_conservative(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z)\n True\n >>> is_conservative(R[2] * R.y)\n False\n\n ", "n_words": 46, "vocab_size": 36, "n_whitespaces": 96, "language": "en" } }, { "id": 223020, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/support.py", "file_name": "support.py", "fun_name": "fixup_build_ext", "commit_message": "add python 3.10.4 for windows", "code": "def fixup_build_ext(cmd):\n \n if os.name == 'nt':\n cmd.debug = sys.executable.endswith('_d.exe')\n elif sysconfig.get_config_var('Py_ENABLE_SHARED'):\n # To further add to the shared builds fun on Unix, we can't just add\n # 
library_dirs to the Extension() instance because that doesn't get\n # plumbed through to the final compiler command.\n runshared = sysconfig.get_config_var('RUNSHARED')\n if runshared is None:\n cmd.library_dirs = ['.']\n else:\n if sys.platform == 'darwin':\n cmd.library_dirs = []\n else:\n name, equals, value = runshared.partition('=')\n cmd.library_dirs = [d for d in value.split(os.pathsep) if d]\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 213, "n_words": 77, "vocab_size": 57, "complexity": 7, "nloc": 13, "token_counts": 102, "n_ast_nodes": 182, "n_identifiers": 19, "random_cut": "def fixup_build_ext(cmd):\n \n if os.name == 'nt':\n cmd.debug = sys.executable.endswith('_d.exe')\n elif sysconfig.get_config_var('Py_ENABLE_SHARED'):\n # To further add to the shared builds fun on Unix, we can't just add\n # library_dirs to the Extension() instance because that doesn't get\n # plumbed through to the final compiler command.\n runshared = sysconfig.get_config_var('RUNSHARED')\n if runshared is None:\n cmd.library_dirs = ['.']\n else:\n ", "d_id": 56852, "documentation": { "docstring": "Function needed to make build_ext tests pass.\n\n When Python was built with --enable-shared on Unix, -L. is not enough to\n find libpython.so, because regrtest runs in a tempdir, not in the\n source directory where the .so lives.\n\n When Python was built with in debug mode on Windows, build_ext commands\n need their debug attribute set, and it is not done automatically for\n some reason.\n\n This function handles both of these things. Example use:\n\n cmd = build_ext(dist)\n support.fixup_build_ext(cmd)\n cmd.ensure_finalized()\n\n Unlike most other Unix platforms, Mac OS X embeds absolute paths\n to shared libraries into executables, so the fixup is not needed there.\n ", "n_words": 100, "vocab_size": 80, "n_whitespaces": 152, "language": "en" } }, { "id": 67581, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/batch/batch.py", "file_name": "batch.py", "fun_name": "batch_uses_naming_series", "commit_message": "style: format code with black", "code": "def batch_uses_naming_series():\n\t\n\tuse_naming_series = cint(frappe.db.get_single_value(\"Stock Settings\", \"use_naming_series\"))\n\treturn bool(use_naming_series)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 6, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def batch_uses_naming_series():\n\t\n\tuse_naming_series ", "d_id": 14564, "documentation": { "docstring": "\n\tVerify if the Batch is to be named using a naming series\n\t:return: bool\n\t", "n_words": 14, "vocab_size": 14, "n_whitespaces": 12, "language": "en" } }, { "id": 12444, "commit_id": "1b05b842d7a2c851b5de2150591198ad0d9987dc", "repo": "jina", "path": "jina/parsers/orchestrate/runtimes/remote.py", "file_name": "remote.py", "fun_name": "mixin_http_gateway_parser", "commit_message": "refactor: remove unnecessary code (#4865)\n\n* refactor: remove unnecessary code\r\n\r\n* refactor: remove unnecessary code\r\n\r\n* fix: #4866\r\n\r\n* refactor: grpc compression arg\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: #4864\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def mixin_http_gateway_parser(parser=None):\n \n gp = add_arg_group(parser, 
title='HTTP Gateway')\n\n gp.add_argument(\n '--title',\n type=str,\n help='The title of this HTTP server. It will be used in automatics docs such as Swagger UI.',\n )\n\n gp.add_argument(\n '--description',\n type=str,\n help='The description of this HTTP server. It will be used in automatics docs such as Swagger UI.',\n )\n\n gp.add_argument(\n '--cors',\n action='store_true',\n default=False,\n help=,\n )\n\n gp.add_argument(\n '--no-debug-endpoints',\n action='store_true',\n default=False,\n help='If set, `/status` `/post` endpoints are removed from HTTP interface. ',\n )\n\n gp.add_argument(\n '--no-crud-endpoints',\n action='store_true',\n default=False,\n help=,\n )\n\n gp.add_argument(\n '--expose-endpoints',\n type=str,\n help=,\n )\n\n gp.add_argument(\n '--uvicorn-kwargs',\n action=KVAppendAction,\n metavar='KEY: VALUE',\n nargs='*',\n help=,\n )\n\n gp.add_argument(\n '--grpc-server-kwargs',\n action=KVAppendAction,\n metavar='KEY: VALUE',\n nargs='*',\n help=,\n )\n\n gp.add_argument(\n '--ssl-certfile',\n type=str,\n help=,\n dest='ssl_certfile',\n )\n\n gp.add_argument(\n '--ssl-keyfile',\n type=str,\n help=,\n dest='ssl_keyfile',\n )\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 449, "n_words": 110, "vocab_size": 57, "complexity": 1, "nloc": 80, "token_counts": 204, "n_ast_nodes": 346, "n_identifiers": 15, "random_cut": "def mixin_http_gateway_parser(parser=None):\n \n gp = add_arg_group(parser, title='HTTP Gateway')\n\n gp.add_argument(\n '--title',\n type=str,\n help='The title of this HTTP server. It will be used in automatics docs such as Swagger UI.',\n )\n\n gp.add_argument(\n ", "d_id": 2296, "documentation": { "docstring": "Add the options to rest server\n\n :param parser: the parser\n \n If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.\n \n If set, `/index`, `/search`, `/update`, `/delete` endpoints are removed from HTTP interface.\n\n Any executor that has `@requests(on=...)` bind with those values will receive data requests.\n \n A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.\n \nDictionary of kwargs arguments that will be passed to Uvicorn server when starting the server\n\nMore details can be found in Uvicorn docs: https://www.uvicorn.org/settings/\n\n\n Dictionary of kwargs arguments that will be passed to the grpc server when starting the server # todo update\n \n the path to the certificate file\n \n the path to the key file\n ", "n_words": 118, "vocab_size": 75, "n_whitespaces": 211, "language": "en" } }, { "id": 11747, "commit_id": "217a11bb8dc613ed1136b8b541a68e6d53ca4fc1", "repo": "jina", "path": "tests/unit/helloworld/multimodal/test_executors.py", "file_name": "test_executors.py", "fun_name": "test_image_crafter_index", "commit_message": "test: fix tests failing after new docarray patch (#4449)\n\n* test: fix tests failing after new docarray patch\r\n\r\n* test: fix failing tests", "code": "def test_image_crafter_index(encoder_doc_array, tmpdir):\n \n create_test_img(path=str(tmpdir), file_name='1.jpg')\n with Flow().add(uses=ImageCrafter) as f:\n res = f.index(inputs=encoder_doc_array)\n assert len(res) == 1\n doc = res[0]\n assert doc.mime_type == 'image/jpeg'\n assert doc.tensor is not None\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 56, "n_words": 28, 
"vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 71, "n_ast_nodes": 120, "n_identifiers": 19, "random_cut": "def test_image_crafter_index(encoder_doc_array, tmpdir):\n \n create_test_img(path=str(tmpdir), file_name='1.jpg')\n with Flow().add(uses=ImageCrafter) as f:\n res = f.index(inputs=encoder_doc_array)\n as", "d_id": 2104, "documentation": { "docstring": "In this test, we input one ``DocumentArray`` with one ``Document``,\n and the `craft` method in the ``ImageCrafter`` returns chunks.\n In the ``ImageCrafter``, we filtered out all the modalities and only kept `image/jpeg`.\n So the 2 chunks should left only 1 chunk.\n And the tensor value of the ``Document`` is not empty once we finished crafting since\n we converted image uri/datauri to tensor.\n ", "n_words": 62, "vocab_size": 49, "n_whitespaces": 80, "language": "en" } }, { "id": 216492, "commit_id": "52e1d0b8116c86777c85cb6c3d940e2c04a518c4", "repo": "salt", "path": "salt/fileserver/roots.py", "file_name": "roots.py", "fun_name": "find_file", "commit_message": "add __env__ substitution inside file and pillar root paths", "code": "def find_file(path, saltenv=\"base\", **kwargs):\n \n actual_saltenv = saltenv\n if \"env\" in kwargs:\n # \"env\" is not supported; Use \"saltenv\".\n kwargs.pop(\"env\")\n\n path = os.path.normpath(path)\n fnd = {\"path\": \"\", \"rel\": \"\"}\n if os.path.isabs(path):\n return fnd\n if saltenv not in __opts__[\"file_roots\"]:\n if \"__env__\" in __opts__[\"file_roots\"]:\n log.debug(\n \"salt environment '%s' maps to __env__ file_roots directory\", saltenv\n )\n saltenv = \"__env__\"\n else:\n return fnd\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 173, "n_words": 58, "vocab_size": 40, "complexity": 13, "nloc": 38, "token_counts": 256, "n_ast_nodes": 170, "n_identifiers": 13, "random_cut": "def find_file(path, saltenv=\"base\", **kwargs):\n \n actual_saltenv = saltenv\n if \"env\" in kwargs:\n # \"env\" is not supported; Use \"saltenv\".\n kwargs.pop(\"env\")\n\n path = os.path.normpath(path)\n fnd = {\"path\": \"\", \"rel\": \"\"}\n if os.path.isabs(path):\n return fnd\n if saltenv not in __opts__[\"file_roots\"]:\n if \"__env__\" in __opts__[\"file_roots\"]:\n log.debug(\n \"salt environment '%s' maps to __env__ file_roots directory\", saltenv\n )\n ", "d_id": 54609, "documentation": { "docstring": "\n Search the environment for the relative path.\n ", "n_words": 7, "vocab_size": 6, "n_whitespaces": 14, "language": "en" } }, { "id": 258357, "commit_id": "9ebf164cfdfb320503b7161493420c1b0ec577a3", "repo": "haystack", "path": "haystack/nodes/prompt/prompt_node.py", "file_name": "prompt_node.py", "fun_name": "get_prompt_templates", "commit_message": "feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667)\n\nCo-authored-by: ZanSara ", "code": "def get_prompt_templates(cls) -> List[PromptTemplate]:\n \n return list(cls.prompt_templates.values())\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 6, "token_counts": 22, "n_ast_nodes": 38, "n_identifiers": 7, "random_cut": "def get_prompt_templates(cls) -> List[PromptTemplate]:\n \n return list(cls.prompt_templates.values())\n", "d_id": 75226, "documentation": { "docstring": "\n Returns the list of supported prompt templates.\n :return: List of supported prompt 
templates.\n ", "n_words": 13, "vocab_size": 9, "n_whitespaces": 35, "language": "en" } }, { "id": 118187, "commit_id": "b96825c643cb2ce062d80868a5b7824d99bca07f", "repo": "mindsdb", "path": "tests/integration_tests/flows/test_company_independent.py", "file_name": "test_company_independent.py", "fun_name": "test_views", "commit_message": "fix tests", "code": "def test_views(self, postgres_db):\n\n query = \n\n for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:\n self.sql_via_http(\n query.format(f'test_view_{char}', char),\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.OK\n )\n\n tables = self.get_tables_in('mindsdb', cid)\n self.assert_list(\n tables, {\n 'models',\n 'models_versions',\n f'test_view_{char}'\n }\n )\n\n for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:\n response = self.sql_via_http(\n f\"select * from mindsdb.test_view_{char}\",\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.TABLE\n )\n assert len(response['data']) == 50\n\n response = self.sql_via_http(\n f\"DROP VIEW mindsdb.test_view_{char}\",\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.OK\n )\n\n tables = self.get_tables_in('mindsdb', cid)\n self.assert_list(\n tables, {\n 'models',\n 'models_versions'\n }\n )\n\n self.sql_via_http(\n f\"select * from mindsdb.test_view_{char}\",\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.ERROR\n )\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 602, "n_words": 81, "vocab_size": 43, "complexity": 3, "nloc": 54, "token_counts": 200, "n_ast_nodes": 309, "n_identifiers": 21, "random_cut": "def test_views(self, postgres_db):\n\n query = \n\n for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:\n self.sql_via_http(\n query.format(f'test_view_{char}', char),\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.OK\n )\n\n tables = self.get_tables_in('mindsdb', cid)\n self.assert_list(\n tables, {\n 'models',\n 'models_versions',\n f'test_view_{char}'\n }\n )\n\n for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:\n response = self.sql_via_http(\n f\"select * from mindsdb.test_view_{char}\",\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.TABLE\n )\n assert len(response['data']) == 50\n\n response = self.sql_via_http(\n f\"DROP VIEW mindsdb.test_view_{char}\",\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.OK\n )\n\n tables = self.get_tables_in('mindsdb', cid)\n self.assert_list(\n tables, {\n 'models',\n 'models_versions'\n }\n )\n\n self.sql_via_http(\n f\"select * from mindsdb.test_view_{char}\",\n co", "d_id": 26187, "documentation": { "docstring": "\n CREATE VIEW mindsdb.{}\n FROM test_integration_{} (\n select * from rentals limit 50\n )\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 69, "language": "en" } }, { "id": 108653, "commit_id": "740235060519c8330e6e733a10d8795e40e19b54", "repo": "matplotlib", "path": "lib/matplotlib/testing/compare.py", "file_name": "compare.py", "fun_name": "convert", "commit_message": "Support not embedding glyphs in svg mathtests.", "code": "def convert(filename, cache):\n \n path = Path(filename)\n if not path.exists():\n raise IOError(f\"{path} does not exist\")\n if path.suffix[1:] not in converter:\n import pytest\n pytest.skip(f\"Don't know how to convert {path.suffix} files to png\")\n newpath = path.parent / f\"{path.stem}_{path.suffix[1:]}.png\"\n\n # Only convert the file if the destination doesn't already exist or\n # is out of date.\n if not newpath.exists() or newpath.stat().st_mtime < path.stat().st_mtime:\n 
cache_dir = _get_cache_path() if cache else None\n\n if cache_dir is not None:\n _register_conversion_cache_cleaner_once()\n hash_value = get_file_hash(path)\n cached_path = cache_dir / (hash_value + newpath.suffix)\n if cached_path.exists():\n _log.debug(\"For %s: reusing cached conversion.\", filename)\n shutil.copyfile(cached_path, newpath)\n return str(newpath)\n\n _log.debug(\"For %s: converting to png.\", filename)\n convert = converter[path.suffix[1:]]\n if path.suffix == \".svg\":\n contents = path.read_text()\n if 'style=\"font:' in contents:\n # for svg.fonttype = none, we explicitly patch the font search\n # path so that fonts shipped by Matplotlib are found.\n convert = _svg_with_matplotlib_fonts_converter\n convert(path, newpath)\n\n if cache_dir is not None:\n _log.debug(\"For %s: caching conversion result.\", filename)\n shutil.copyfile(newpath, cached_path)\n\n return str(newpath)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 430, "n_words": 155, "vocab_size": 104, "complexity": 11, "nloc": 29, "token_counts": 219, "n_ast_nodes": 397, "n_identifiers": 30, "random_cut": "def convert(filename, cache):\n \n path = Path(filename)\n if not path.exists():\n raise IOError(f\"{path} does not exist\")\n if path.suffix[1:] not in converter:\n import pytest\n pytest.skip(f\"Don't know how to convert {path.suffix} files to png\")\n newpath = path.parent / f\"{path.stem}_{path.suffix[1:]}.png\"\n\n # Only convert the file if the destination doesn't already exist or\n # is out of date.\n if not newpath.exists() or newpath.stat().st_mtime < path.stat().st_mtime:\n cache_dir = _get_cache_path() if cache else None\n\n if cache_dir is not None:\n _register_conversion_cache_cleaner_once()\n hash_value = get_file_hash(path)\n cached_path = cache_dir / (hash_value + newpath.suffix)\n if cached_path.exists():\n _log.debug(\"For %s: reusing cached conversion.\", filename)\n shutil.copyfile(cached_path, newpath)\n return str(newpath)\n\n _log.debug(\"For %s: converting to png.\", filename)\n convert = converter[path.suffix[1:]]\n if path.suffix == \".svg\":\n contents = path.read_text()\n if 'style=\"font:' in contents:\n # for svg.fonttype = none, we explicitly patch the font search\n ", "d_id": 23286, "documentation": { "docstring": "\n Convert the named file to png; return the name of the created file.\n\n If *cache* is True, the result of the conversion is cached in\n `matplotlib.get_cachedir() + '/test_cache/'`. The caching is based on a\n hash of the exact contents of the input file. 
Old cache entries are\n automatically deleted as needed to keep the size of the cache capped to\n twice the size of all baseline images.\n ", "n_words": 67, "vocab_size": 46, "n_whitespaces": 91, "language": "en" } }, { "id": 242234, "commit_id": "f8e4e9c2dd94c6f4789639dd891b8a6d5fb16e14", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "resize", "commit_message": "Added enums", "code": "def resize(self, size, resample=None, box=None, reducing_gap=None):\n \n\n if resample is None:\n type_special = \";\" in self.mode\n resample = Resampling.NEAREST if type_special else Resampling.BICUBIC\n elif resample not in (\n Resampling.NEAREST,\n Resampling.BILINEAR,\n Resampling.BICUBIC,\n Resampling.LANCZOS,\n Resampling.BOX,\n Resampling.HAMMING,\n ):\n message = f\"Unknown resampling filter ({resample}).\"\n\n filters = [\n f\"{filter[1]} ({filter[0]})\"\n for filter in (\n (Resampling.NEAREST, \"Image.Resampling.NEAREST\"),\n (Resampling.LANCZOS, \"Image.Resampling.LANCZOS\"),\n (Resampling.BILINEAR, \"Image.Resampling.BILINEAR\"),\n (Resampling.BICUBIC, \"Image.Resampling.BICUBIC\"),\n (Resampling.BOX, \"Image.Resampling.BOX\"),\n (Resampling.HAMMING, \"Image.Resampling.HAMMING\"),\n )\n ]\n raise ValueError(\n message + \" Use \" + \", \".join(filters[:-1]) + \" or \" + filters[-1]\n )\n\n if reducing_gap is not None and reducing_gap < 1.0:\n raise ValueError(\"reducing_gap must be 1.0 or greater\")\n\n size = tuple(size)\n\n if box is None:\n box = (0, 0) + self.size\n else:\n box = tuple(box)\n\n if self.size == size and box == (0, 0) + self.size:\n return self.copy()\n\n if self.mode in (\"1\", \"P\"):\n resample = Resampling.NEAREST\n\n if self.mode in [\"LA\", \"RGBA\"] and resample != Resampling.NEAREST:\n im = self.convert({\"LA\": \"La\", \"RGBA\": \"RGBa\"}[self.mode])\n im = im.resize(size, resample, box)\n return im.convert(self.mode)\n\n self.load()\n\n if reducing_gap is not None and resample != Resampling.NEAREST:\n factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1\n factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1\n if factor_x > 1 or factor_y > 1:\n reduce_box = self._get_safe_box(size, resample, box)\n factor = (factor_x, factor_y)\n if callable(self.reduce):\n self = self.reduce(factor, box=reduce_box)\n else:\n self = Image.reduce(self, factor, box=reduce_box)\n box = (\n (box[0] - reduce_box[0]) / factor_x,\n (box[1] - reduce_box[1]) / factor_y,\n (box[2] - reduce_box[0]) / factor_x,\n (box[3] - reduce_box[1]) / factor_y,\n )\n\n return self._new(self.im.resize(size, resample, box))\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 985, "n_words": 245, "vocab_size": 134, "complexity": 20, "nloc": 60, "token_counts": 520, "n_ast_nodes": 816, "n_identifiers": 35, "random_cut": "def resize(self, size, resample=None, box=None, reducing_gap=None):\n \n\n if resample is None:\n type_special = \";\" in self.mode\n resample = Resampling.NEAREST if type_special else Resampling.BICUBIC\n elif resample not in (\n Resampling.NEAREST,\n Resampling.BILINEAR,\n Resampling.BICUBIC,\n Resampling.LANCZOS,\n Resampling.BOX,\n Resampling.HAMMING,\n ):\n message = f\"Unknown resampling filter ({resample}).\"\n\n filters = [\n f\"{filter[1]} ({filter[0]})\"\n for filter in (\n (Resampling.NEAREST, \"Image.Resampling.NEAREST\"),\n (Resampling.LANCZOS, \"Image.Resampling.LANCZOS\"),\n (Resampling.BILINEAR, 
\"Image.Resampling.BILINEAR\"),\n (Resampling.BICUBIC, \"Image.Resampling.BICUBIC\"),\n (Resampling.BOX, \"Image.Resampling.BOX\"),\n (Resampling.HAMMING, \"Image.Resampling.HAMMING\"),\n )\n ]\n raise ValueError(\n message + \" Use \" + \", \".join(filters[:-1]) + \" or \" + filters[-1]\n )\n\n if reducing_gap is not None and reducing_gap < 1.0:\n raise ValueError(\"reducing_gap must be 1.0 o", "d_id": 69796, "documentation": { "docstring": "\n Returns a resized copy of this image.\n\n :param size: The requested size in pixels, as a 2-tuple:\n (width, height).\n :param resample: An optional resampling filter. This can be\n one of :py:data:`PIL.Image.Resampling.NEAREST`,\n :py:data:`PIL.Image.Resampling.BOX`,\n :py:data:`PIL.Image.Resampling.BILINEAR`,\n :py:data:`PIL.Image.Resampling.HAMMING`,\n :py:data:`PIL.Image.Resampling.BICUBIC` or\n :py:data:`PIL.Image.Resampling.LANCZOS`.\n If the image has mode \"1\" or \"P\", it is always set to\n :py:data:`PIL.Image.Resampling.NEAREST`.\n If the image mode specifies a number of bits, such as \"I;16\", then the\n default filter is :py:data:`PIL.Image.Resampling.NEAREST`.\n Otherwise, the default filter is\n :py:data:`PIL.Image.Resampling.BICUBIC`. See: :ref:`concept-filters`.\n :param box: An optional 4-tuple of floats providing\n the source image region to be scaled.\n The values must be within (0, 0, width, height) rectangle.\n If omitted or None, the entire source is used.\n :param reducing_gap: Apply optimization by resizing the image\n in two steps. First, reducing the image by integer times\n using :py:meth:`~PIL.Image.Image.reduce`.\n Second, resizing using regular resampling. The last step\n changes size no less than by ``reducing_gap`` times.\n ``reducing_gap`` may be None (no first step is performed)\n or should be greater than 1.0. 
The bigger ``reducing_gap``,\n the closer the result to the fair resampling.\n The smaller ``reducing_gap``, the faster resizing.\n With ``reducing_gap`` greater or equal to 3.0, the result is\n indistinguishable from fair resampling in most cases.\n The default value is None (no optimization).\n :returns: An :py:class:`~PIL.Image.Image` object.\n ", "n_words": 207, "vocab_size": 130, "n_whitespaces": 528, "language": "en" } }, { "id": 102176, "commit_id": "bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d", "repo": "pytorch", "path": "tools/test/test_gen_backend_stubs.py", "file_name": "test_gen_backend_stubs.py", "fun_name": "test_unrecognized_key", "commit_message": "Revert \"Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels\" (#69950)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/69950\n\nThis reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa.\n\nTest Plan: Imported from OSS\n\nReviewed By: albanD\n\nDifferential Revision: D33113545\n\nPulled By: bdhirsh\n\nfbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288", "code": "def test_unrecognized_key(self) -> None:\n yaml_str = \n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, ) # noqa: B950\n\n # if use_out_as_primary is provided, it must be a bool", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 9, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 6, "random_cut": "def test_unrecognized_key(self) -> None:\n yaml_str = \n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, ) # noqa: B950\n\n # if use_out_as_primary is provided, it must ", "d_id": 21491, "documentation": { "docstring": "\\\nbackend: XLA\ncpp_namespace: torch_xla\nsupported:\n- abs\ninvalid_key: invalid_val contains unexpected keys: invalid_key. Only the following keys are supported: backend, cpp_namespace, extra_headers, supported, autograd, full_codegen", "n_words": 26, "vocab_size": 25, "n_whitespaces": 20, "language": "en" } }, { "id": 160521, "commit_id": "d4e11c7a2eb64861275facb076d47ccd135fa28c", "repo": "numpy", "path": "numpy/f2py/capi_maps.py", "file_name": "capi_maps.py", "fun_name": "f2cexpr", "commit_message": "ENH: Support character string arrays\n\nTST: added test for issue #18684\n\nENH: f2py opens files with correct encoding, fixes #635\n\nTST: added test for issue #6308\n\nTST: added test for issue #4519\n\nTST: added test for issue #3425\n\nENH: Implement user-defined hooks support for post-processing f2py data structure. 
Implement character BC hook.\n\nENH: Add support for detecting utf-16 and utf-32 encodings.", "code": "def f2cexpr(expr):\n \n # TODO: support Fortran `len` function with optional kind parameter\n expr = re.sub(r'\\blen\\b', 'f2py_slen', expr)\n return expr\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 38, "n_identifiers": 4, "random_cut": "def f2cexpr(expr):\n \n ", "d_id": 38648, "documentation": { "docstring": "Rewrite Fortran expression as f2py supported C expression.\n\n Due to the lack of a proper expression parser in f2py, this\n function uses a heuristic approach that assumes that Fortran\n arithmetic expressions are valid C arithmetic expressions when\n mapping Fortran function calls to the corresponding C function/CPP\n macros calls.\n\n ", "n_words": 48, "vocab_size": 36, "n_whitespaces": 66, "language": "en" } }, { "id": 101950, "commit_id": "dab823a3eb7a5257cb1e0818ee10ed234d3de97f", "repo": "faceswap", "path": "lib/gui/wrapper.py", "file_name": "wrapper.py", "fun_name": "build_args", "commit_message": "Typing - lib.gui.display_command", "code": "def build_args(self, category, command=None, generate=False):\n \n logger.debug(\"Build cli arguments: (category: %s, command: %s, generate: %s)\",\n category, command, generate)\n command = self.command if not command else command\n script = f\"{category}.py\"\n pathexecscript = os.path.join(self.pathscript, script)\n\n args = [sys.executable] if generate else [sys.executable, \"-u\"]\n args.extend([pathexecscript, command])\n\n cli_opts = get_config().cli_opts\n for cliopt in cli_opts.gen_cli_arguments(command):\n args.extend(cliopt)\n if command == \"train\" and not generate:\n self._get_training_session_info(cliopt)\n\n if not generate:\n args.append(\"-gui\") # Indicate to Faceswap that we are running the GUI\n if generate:\n # Delimit args with spaces\n args = [f'\"{arg}\"' if \" \" in arg and not arg.startswith((\"[\", \"(\"))\n and not arg.endswith((\"]\", \")\")) else arg\n for arg in args]\n logger.debug(\"Built cli arguments: (%s)\", args)\n return args\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 328, "n_words": 108, "vocab_size": 70, "complexity": 12, "nloc": 21, "token_counts": 183, "n_ast_nodes": 309, "n_identifiers": 26, "random_cut": "def build_args(self, category, command=None, generate=False):\n \n logger.debug(\"Build cli arguments: (category: %s, command: %s, generate: %s)\",\n category, command, generate)\n command = self.command if not command else command\n script = f\"{category}.py\"\n pathexecscript = os.path.join(self.pathscript, script)\n\n args = [sys.executable] if generate else [sys.executable, \"-u\"]\n args.extend([pathexecscript, command])", "d_id": 21327, "documentation": { "docstring": " Build the faceswap command and arguments list.\n\n If training, pass the model folder and name to the training\n :class:`lib.gui.analysis.Session` for the GUI.\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 44, "language": "en" } }, { "id": 130365, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/aliyun/utils.py", "file_name": "utils.py", "fun_name": "describe_v_switches", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 
and #21311 for the motivation behind these changes.", "code": "def describe_v_switches(self, vpc_id=None):\n \n request = DescribeVSwitchesRequest()\n if vpc_id is not None:\n request.set_VpcId(vpc_id)\n response = self._send_request(request)\n if response is not None:\n return response.get(\"VSwitches\").get(\"VSwitch\")\n else:\n logging.error(\"Describe VSwitches Failed.\")\n return None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 114, "n_words": 28, "vocab_size": 21, "complexity": 3, "nloc": 10, "token_counts": 63, "n_ast_nodes": 110, "n_identifiers": 11, "random_cut": "def describe_v_switches(self, vpc_id=None):\n \n request = DescribeVSwitchesRequest()\n if vpc_id", "d_id": 29251, "documentation": { "docstring": "Queries one or more VSwitches.\n\n :param vpc_id: The ID of the VPC to which the VSwitch belongs.\n :return: VSwitch list.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 106863, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/__init__.py", "file_name": "__init__.py", "fun_name": "line", "commit_message": "apply black py to all python files", "code": "def line(self, Y, X=None, win=None, env=None, opts=None, update=None, name=None):\n \n if update is not None:\n if update == \"remove\":\n return self.scatter(\n X=None,\n Y=None,\n opts=opts,\n win=win,\n env=env,\n update=update,\n name=name,\n )\n else:\n assert X is not None, \"must specify x-values for line update\"\n assert Y.ndim == 1 or Y.ndim == 2, \"Y should have 1 or 2 dim\"\n assert Y.shape[-1] > 0, \"must plot one line at least\"\n\n if X is not None:\n assert X.ndim == 1 or X.ndim == 2, \"X should have 1 or 2 dim\"\n else:\n X = np.linspace(0, 1, Y.shape[0])\n\n if Y.ndim == 2 and X.ndim == 1:\n X = np.tile(X, (Y.shape[1], 1)).transpose()\n\n assert X.shape == Y.shape, \"X and Y should be the same shape\"\n\n opts = {} if opts is None else opts\n opts[\"markers\"] = opts.get(\"markers\", False)\n opts[\"fillarea\"] = opts.get(\"fillarea\", False)\n opts[\"mode\"] = \"lines+markers\" if opts.get(\"markers\") else \"lines\"\n\n _title2str(opts)\n _assert_opts(opts)\n\n if Y.ndim == 1:\n linedata = np.column_stack((X, Y))\n else:\n linedata = np.column_stack((X.ravel(order=\"F\"), Y.ravel(order=\"F\")))\n\n labels = None\n if Y.ndim == 2:\n labels = np.arange(1, Y.shape[1] + 1)\n labels = np.tile(labels, (Y.shape[0], 1)).ravel(order=\"F\")\n\n return self.scatter(\n X=linedata, Y=labels, opts=opts, win=win, env=env, update=update, name=name\n )\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 611, "n_words": 183, "vocab_size": 101, "complexity": 12, "nloc": 40, "token_counts": 389, "n_ast_nodes": 601, "n_identifiers": 25, "random_cut": "def line(self, Y, X=None, win=None, env=None, opts=None, update=None, name=None):\n \n if update is not None:\n if update == \"re", "d_id": 22482, "documentation": { "docstring": "\n This function draws a line plot. It takes in an `N` or `NxM` tensor\n `Y` that specifies the values of the `M` lines (that connect `N` points)\n to plot. 
It also takes an optional `X` tensor that specifies the\n corresponding x-axis values; `X` can be an `N` tensor (in which case all\n lines will share the same x-axis values) or have the same size as `Y`.\n\n `update` can be used to efficiently update the data of an existing line.\n Use 'append' to append data, 'replace' to use new data, and 'remove' to\n delete the trace that is specified in `name`. If updating a\n single trace, use `name` to specify the name of the trace to be updated.\n Update data that is all NaN is ignored (can be used for masking update).\n Using `update='append'` will create a plot if it doesn't exist\n and append to the existing plot otherwise.\n\n The following `opts` are supported:\n\n - `opts.fillarea` : fill area below line (`boolean`)\n - `opts.markers` : show markers (`boolean`; default = `false`)\n - `opts.markersymbol`: marker symbol (`string`; default = `'dot'`)\n - `opts.markersize` : marker size (`number`; default = `'10'`)\n - `opts.linecolor` : line colors (`np.array`; default = None)\n - `opts.dash` : line dash type (`np.array`; default = None)\n - `opts.legend` : `list` or `tuple` containing legend names\n\n If `update` is specified, the figure will be updated without\n creating a new plot -- this can be used for efficient updating.\n ", "n_words": 237, "vocab_size": 140, "n_whitespaces": 421, "language": "en" } }, { "id": 46519, "commit_id": "352d7f72dd1e21f1522d69b71917142430548d66", "repo": "airflow", "path": "tests/providers/databricks/operators/test_databricks_repos.py", "file_name": "test_databricks_repos.py", "fun_name": "test_delete_with_id", "commit_message": "More operators for Databricks Repos (#22422)", "code": "def test_delete_with_id(self, db_mock_class):\n \n op = DatabricksReposDeleteOperator(task_id=TASK_ID, repo_id=\"123\")\n db_mock = db_mock_class.return_value\n db_mock.delete_repo.return_value = None\n\n op.execute(None)\n\n db_mock_class.assert_called_once_with(\n DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay\n )\n\n db_mock.delete_repo.assert_called_once_with('123')\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 64, "n_ast_nodes": 105, "n_identifiers": 18, "random_cut": "def test_delete_with_id(self, db_mock_class):\n \n op = DatabricksReposDeleteOperator(task_id=TASK_ID, repo_id=\"123\")\n db_mock = db_mock_class.return_value\n db_mock.delete_repo.re", "d_id": 8910, "documentation": { "docstring": "\n Test the execute function using Repo ID.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 86306, "commit_id": "d745edbd591063f2c3241cd1960c361834058823", "repo": "sentry", "path": "tests/sentry/models/test_groupsnooze.py", "file_name": "test_groupsnooze.py", "fun_name": "test_user_rate_reached", "commit_message": "ref(perf issues): Enable ignore in a time period (#39120)\n\nEnable ignoring a performance issue in a time period e.g. 
ignore this\r\nuntil it happens 10x / hr or ignore until 10 users experience it in an\r\nhour.", "code": "def test_user_rate_reached(self):\n \n for i in range(5):\n group = self.store_event(\n data={\n \"fingerprint\": [\"group1\"],\n \"timestamp\": iso_format(before_now(minutes=5 + i)),\n \"tags\": {\"sentry:user\": i},\n },\n project_id=self.project.id,\n ).group\n\n snooze = GroupSnooze.objects.create(group=group, user_count=5, user_window=60)\n assert not snooze.is_valid(test_rates=True)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 182, "n_words": 30, "vocab_size": 29, "complexity": 2, "nloc": 12, "token_counts": 94, "n_ast_nodes": 153, "n_identifiers": 21, "random_cut": "def test_user_rate_reached(self):\n \n for i in range(5):\n group = self.store_event(\n data={\n \"fingerprint\": [\"group1\"],\n \"timestamp\": iso_format(before_now(minutes=5 + i)),\n \"tags\": {\"sentry:user\": i},\n ", "d_id": 18096, "documentation": { "docstring": "Test that ignoring an error issue until it's hit by 10 users in an hour works.", "n_words": 16, "vocab_size": 15, "n_whitespaces": 15, "language": "en" } }, { "id": 222552, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/archive_util.py", "file_name": "archive_util.py", "fun_name": "make_zipfile", "commit_message": "add python 3.10.4 for windows", "code": "def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):\n \n zip_filename = base_name + \".zip\"\n mkpath(os.path.dirname(zip_filename), dry_run=dry_run)\n\n # If zipfile module is not available, try spawning an external\n # 'zip' command.\n if zipfile is None:\n if verbose:\n zipoptions = \"-r\"\n else:\n zipoptions = \"-rq\"\n\n try:\n spawn([\"zip\", zipoptions, zip_filename, base_dir],\n dry_run=dry_run)\n except DistutilsExecError:\n # XXX really should distinguish between \"couldn't find\n # external 'zip' command\" and \"zip failed\".\n raise DistutilsExecError((\"unable to create zip file '%s': \"\n \"could neither import the 'zipfile' module nor \"\n \"find a standalone zip utility\") % zip_filename)\n\n else:\n log.info(\"creating '%s' and adding '%s' to it\",\n zip_filename, base_dir)\n\n if not dry_run:\n try:\n zip = zipfile.ZipFile(zip_filename, \"w\",\n compression=zipfile.ZIP_DEFLATED)\n except RuntimeError:\n zip = zipfile.ZipFile(zip_filename, \"w\",\n compression=zipfile.ZIP_STORED)\n\n with zip:\n if base_dir != os.curdir:\n path = os.path.normpath(os.path.join(base_dir, ''))\n zip.write(path, path)\n log.info(\"adding '%s'\", path)\n for dirpath, dirnames, filenames in os.walk(base_dir):\n for name in dirnames:\n path = os.path.normpath(os.path.join(dirpath, name, ''))\n zip.write(path, path)\n log.info(\"adding '%s'\", path)\n for name in filenames:\n path = os.path.normpath(os.path.join(dirpath, name))\n if os.path.isfile(path):\n zip.write(path, path)\n log.info(\"adding '%s'\", path)\n\n return zip_filename\n\nARCHIVE_FORMATS = {\n 'gztar': (make_tarball, [('compress', 'gzip')], \"gzip'ed tar-file\"),\n 'bztar': (make_tarball, [('compress', 'bzip2')], \"bzip2'ed tar-file\"),\n 'xztar': (make_tarball, [('compress', 'xz')], \"xz'ed tar-file\"),\n 'ztar': (make_tarball, [('compress', 'compress')], \"compressed tar file\"),\n 'tar': (make_tarball, [('compress', None)], \"uncompressed tar file\"),\n 'zip': (make_zipfile, [],\"ZIP file\")\n }\n", "url": "https://github.com/XX-net/XX-Net.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 860, "n_words": 203, "vocab_size": 134, "complexity": 11, "nloc": 41, "token_counts": 290, "n_ast_nodes": 638, "n_identifiers": 34, "random_cut": "def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):\n \n zip_filename = base_name + \".zip\"\n mkpath(os.path.dirname(zip_filename), dry_run=dry_run)\n\n # If zipfile module is not available, try spawning an external\n # 'zip' command.\n if zipfile is None:\n if verbose:\n zipoptions = \"-r\"\n else:\n zipoptions = \"-rq\"\n\n try:\n spawn([\"zip\", zipoptions, zip_filename, base_dir],\n dry_run=dry_run)\n except DistutilsExecError:\n # XXX really should distinguish between \"couldn't find\n # external 'zip' command\" and \"zip failed\".\n raise DistutilsExecError((\"unable to create zip file '%s': \"\n \"could neither import the 'zipfile' module nor \"\n \"find a standalone zip utility\") % zip_filename)\n\n else:\n log.info(\"creating '%s' and adding '%s' to it\",\n zip_filename, base_dir)\n\n if not dry_run:\n try:\n zip = zipfile.ZipFile(zip_filename, \"w\",\n compression=zipfile.ZIP_DEFLATED)\n except RuntimeError:\n zip = zipfile.ZipFile(zip_filename, \"w\",\n compression=zipfile.ZIP_STORED)\n\n ", "d_id": 56631, "documentation": { "docstring": "Create a zip file from all the files under 'base_dir'.\n\n The output zip file will be named 'base_name' + \".zip\". Uses either the\n \"zipfile\" Python module (if available) or the InfoZIP \"zip\" utility\n (if installed and found on the default search path). If neither tool is\n available, raises DistutilsExecError. Returns the name of the output zip\n file.\n ", "n_words": 57, "vocab_size": 47, "n_whitespaces": 78, "language": "en" } }, { "id": 246929, "commit_id": "02d708568b476f2f7716000b35c0adfa4cbd31b3", "repo": "synapse", "path": "tests/rest/client/test_upgrade_room.py", "file_name": "test_upgrade_room.py", "fun_name": "test_power_levels_user_default", "commit_message": "Replace assertEquals and friends with non-deprecated versions. 
(#12092)", "code": "def test_power_levels_user_default(self):\n \n # The other user doesn't have the proper power level.\n channel = self._upgrade_room(self.other_token)\n self.assertEqual(403, channel.code, channel.result)\n\n # Increase the power levels so that this user can upgrade.\n power_levels = self.helper.get_state(\n self.room_id,\n \"m.room.power_levels\",\n tok=self.creator_token,\n )\n power_levels[\"users_default\"] = 100\n self.helper.send_state(\n self.room_id,\n \"m.room.power_levels\",\n body=power_levels,\n tok=self.creator_token,\n )\n\n # The upgrade should succeed!\n channel = self._upgrade_room(self.other_token)\n self.assertEqual(200, channel.code, channel.result)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 224, "n_words": 56, "vocab_size": 39, "complexity": 1, "nloc": 17, "token_counts": 104, "n_ast_nodes": 165, "n_identifiers": 16, "random_cut": "def test_power_levels_user_default(self):\n \n # The other", "d_id": 71413, "documentation": { "docstring": "\n Another user can upgrade the room if the default power level for users is increased.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 30, "language": "en" } }, { "id": 67267, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/uae_vat_201/uae_vat_201.py", "file_name": "uae_vat_201.py", "fun_name": "get_exempt_total", "commit_message": "style: format code with black", "code": "def get_exempt_total(filters):\n\t\n\tconditions = get_conditions(filters)\n\ttry:\n\t\treturn (\n\t\t\tfrappe.db.sql(\n\t\t\t\t.format(\n\t\t\t\t\twhere_conditions=conditions\n\t\t\t\t),\n\t\t\t\tfilters,\n\t\t\t)[0][0]\n\t\t\tor 0\n\t\t)\n\texcept (IndexError, TypeError):\n\t\treturn 0\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 8, "n_words": 22, "vocab_size": 20, "complexity": 3, "nloc": 24, "token_counts": 52, "n_ast_nodes": 84, "n_identifiers": 11, "random_cut": "def get_exempt_total(filters):\n\t\n\tconditions = get_conditions(filters)\n\ttry:\n\t\treturn (\n\t\t\tfrappe.db.sql(\n\t\t\t\t.format(\n\t\t\t\t\twhere_conditions=conditions\n\t\t\t\t),\n\t\t\t\tfilters,\n\t\t\t)[0][0]\n\t\t", "d_id": 14461, "documentation": { "docstring": "Returns the sum of each Sales Invoice Item Amount which is Vat Exempt.\n\t\t\tselect\n\t\t\t\tsum(i.base_amount) as total\n\t\t\tfrom\n\t\t\t\t`tabSales Invoice Item` i inner join `tabSales Invoice` s\n\t\t\ton\n\t\t\t\ti.parent = s.name\n\t\t\twhere\n\t\t\t\ts.docstatus = 1 and i.is_exempt = 1\n\t\t\t\t{where_conditions} ;\n\t\t\t", "n_words": 41, "vocab_size": 36, "n_whitespaces": 32, "language": "en" } }, { "id": 209428, "commit_id": "b26f2283379d3bba48d575c1fffd1c3cdeaf64c2", "repo": "scapy", "path": "scapy/layers/kerberos.py", "file_name": "kerberos.py", "fun_name": "encrypt", "commit_message": "Kerberos update (#3688)\n\n* Kerberos over TCP\r\n\r\n* Kerberos: add FAST & PKCA\r\n\r\n* Many user-friendly improvements\r\n\r\n* RFC3961 crypto\r\n\r\n* Summary, Sessions, Examples, Bugs\r\n\r\n* More tests, _n_fold edge case\r\n\r\n* Ignore potatoe (kerberos tests) from codespell", "code": "def encrypt(self, key, text, confounder=None, key_usage_number=None):\n \n if key_usage_number is None:\n key_usage_number = self.get_usage()[0]\n self.cipher = key.encrypt(key_usage_number, text, confounder=confounder)\n\n\nEncryptionKey = lambda 
**kwargs: ASN1F_SEQUENCE(\n Int32(\"keytype\", 0, explicit_tag=0x0),\n ASN1F_STRING(\"keyvalue\", \"\", explicit_tag=0x1),\n **kwargs\n)\nKerberosFlags = ASN1F_FLAGS\n\n\n_PADATA_TYPES = {\n 1: \"PA-TGS-REQ\",\n 2: \"PA-ENC-TIMESTAMP\",\n 3: \"PA-PW-SALT\",\n 11: \"PA-ETYPE-INFO\",\n 14: \"PA-PK-AS-REQ-OLD\",\n 15: \"PA-PK-AS-REP-OLD\",\n 16: \"PA-PK-AS-REQ\",\n 17: \"PA-PK-AS-REP\",\n 19: \"PA-ETYPE-INFO2\",\n 20: \"PA-SVR-REFERRAL-INFO\",\n 128: \"PA-PAC-REQUEST\",\n 133: \"PA-FX-COOKIE\",\n 134: \"PA-AUTHENTICATION-SET\",\n 135: \"PA-AUTH-SET-SELECTED\",\n 136: \"PA-FX-FAST\",\n 137: \"PA-FX-ERROR\",\n 165: \"PA-SUPPORTED-ENCTYPES\",\n 167: \"PA-PAC-OPTIONS\",\n}\n\n_PADATA_CLASSES = {\n # Filled elsewhere in this file\n}\n\n\n# RFC4120\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 176, "n_words": 86, "vocab_size": 76, "complexity": 2, "nloc": 4, "token_counts": 49, "n_ast_nodes": 274, "n_identifiers": 18, "random_cut": "def encrypt(self, key, text, confounder=None, key_usage_number=None):\n \n if key_usage_number is None:\n key_usage_number = self.get_usage()[0]\n self.cipher = key.encrypt(key_usage_number, text, confounder=confounder)\n\n\nEncryptionKey = lambda **kwargs: ASN1F_SEQUENCE(\n Int32(\"keytype\", 0, explicit_tag=0x0),\n ASN1F_STRING(\"keyvalue\", \"\", explicit_tag=0x1),\n **kwargs\n)\nKerberos", "d_id": 52673, "documentation": { "docstring": "\n Encrypt text and set it into cipher.\n\n :param key: the key to use for encryption\n :param text: the bytes value to encode\n :param confounder: (optional) specify the confounder bytes. Random otherwise\n :param key_usage_number: (optional) specify the key usage number.\n Guessed otherwise\n ", "n_words": 41, "vocab_size": 30, "n_whitespaces": 116, "language": "en" } }, { "id": 130352, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/aliyun/utils.py", "file_name": "utils.py", "fun_name": "create_v_switch", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def create_v_switch(self, vpc_id, zone_id, cidr_block):\n \n request = CreateVSwitchRequest()\n request.set_ZoneId(zone_id)\n request.set_VpcId(vpc_id)\n request.set_CidrBlock(cidr_block)\n response = self._send_request(request)\n if response is not None:\n return response.get(\"VSwitchId\")\n else:\n logging.error(\"create_v_switch vpc_id %s failed.\", vpc_id)\n return None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 114, "n_words": 29, "vocab_size": 26, "complexity": 2, "nloc": 11, "token_counts": 68, "n_ast_nodes": 114, "n_identifiers": 15, "random_cut": "def create_v_switch(self, vpc_id, zone_id, cidr_block):\n \n request = CreateVSwitchRequest()\n request.set_ZoneId(zone_", "d_id": 29238, "documentation": { "docstring": "Create vSwitches to divide the VPC into one or more subnets\n\n :param vpc_id: The ID of the VPC to which the VSwitch belongs.\n :param zone_id: The ID of the zone to which\n the target VSwitch belongs.\n :param cidr_block: The CIDR block of the VSwitch.\n :return:\n ", "n_words": 45, "vocab_size": 27, "n_whitespaces": 103, "language": "en" } }, { "id": 20835, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/rich/style.py", "file_name": 
"style.py", "fun_name": "transparent_background", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def transparent_background(self) -> bool:\n \n return self.bgcolor is None or self.bgcolor.is_default\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 3, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def transparent_background(self) -> bool:\n \n return self.bgcolor is None or self.bgcolor.is_default\n", "d_id": 3579, "documentation": { "docstring": "Check if the style specified a transparent background.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 22757, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "primelib/primelib.py", "file_name": "primelib.py", "fun_name": "isPerfectNumber", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def isPerfectNumber(number):\n \n\n # precondition\n assert isinstance(number, int) and (\n number > 1\n ), \"'number' must been an int and >= 1\"\n\n divisors = getDivisors(number)\n\n # precondition\n assert (\n isinstance(divisors, list)\n and (divisors[0] == 1)\n and (divisors[len(divisors) - 1] == number)\n ), \"Error in help-function getDivisiors(...)\"\n\n # summed all divisors up to 'number' (exclusive), hence [:-1]\n return sum(divisors[:-1]) == number\n\n\n# ------------------------------------------------------------\n\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 118, "n_words": 61, "vocab_size": 47, "complexity": 4, "nloc": 11, "token_counts": 73, "n_ast_nodes": 120, "n_identifiers": 9, "random_cut": "def isPerfectNumber(number):\n \n\n # precondition\n assert isinstance(number, int) and (\n number > 1\n ), \"'number' must been an int and >= 1\"\n\n divisors = getDivisors(number)\n\n # precondition\n ass", "d_id": 4448, "documentation": { "docstring": "\n input: positive integer 'number' > 1\n returns true if 'number' is a perfect number otherwise false.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 26, "language": "en" } }, { "id": 109426, "commit_id": "bc4b0295161db92fe7232eb46ddb97eba287287d", "repo": "matplotlib", "path": "lib/matplotlib/cm.py", "file_name": "cm.py", "fun_name": "_get_cmap", "commit_message": "API: Add pending deprecation to mpl.cm top level functions\n\n- matplotlib.cm.get_cmap\n- matplotlib.cm.register_cmap\n- matplotlib.cm.unregister_cmap\n- matplotlib.pyplot.register_cmap\n\nin preference for working with the ColormapRegistry on the top level module.\n\nCo-authored-by: Greg Lucas ", "code": "def _get_cmap(name=None, lut=None):\n \n if name is None:\n name = mpl.rcParams['image.cmap']\n if isinstance(name, colors.Colormap):\n return name\n 
_api.check_in_list(sorted(_colormaps), name=name)\n if lut is None:\n return _colormaps[name]\n else:\n return _colormaps[name].resampled(lut)\n\n# do it in two steps like this so we can have an un-deprecated version in\n# pyplot.\nget_cmap = _api.deprecated(\n '3.6', pending=True, alternative=\"``matplotlib.colormaps[name]``\"\n)(_get_cmap)\n\n\n@_api.deprecated(\n '3.6',\n pending=True,\n alternative=\"``matplotlib.colormaps.unregister_cmap(name)``\"\n)", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@_api.deprecated(\n '3.6',\n pending=True,\n alternative=\"``matplotlib.colormaps.unregister_cmap(name)``\"\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 108, "n_words": 56, "vocab_size": 43, "complexity": 4, "nloc": 10, "token_counts": 72, "n_ast_nodes": 175, "n_identifiers": 17, "random_cut": "def _get_cmap(name=None, lut=None):\n \n if name is None:\n name = mpl.rcParams['image.cmap']\n if isinstance(name, colors.Colormap):\n retu", "d_id": 23578, "documentation": { "docstring": "\n Get a colormap instance, defaulting to rc values if *name* is None.\n\n Colormaps added with :func:`register_cmap` take precedence over\n built-in colormaps.\n\n Parameters\n ----------\n name : `matplotlib.colors.Colormap` or str or None, default: None\n If a `.Colormap` instance, it will be returned. Otherwise, the name of\n a colormap known to Matplotlib, which will be resampled by *lut*. The\n default, None, means :rc:`image.cmap`.\n lut : int or None, default: None\n If *name* is not already a Colormap instance and *lut* is not None, the\n colormap will be resampled to have *lut* entries in the lookup table.\n\n Returns\n -------\n Colormap\n ", "n_words": 96, "vocab_size": 65, "n_whitespaces": 165, "language": "en" } }, { "id": 221578, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/concurrent/futures/_base.py", "file_name": "_base.py", "fun_name": "cancel", "commit_message": "add python 3.10.4 for windows", "code": "def cancel(self):\n \n with self._condition:\n if self._state in [RUNNING, FINISHED]:\n return False\n\n if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:\n return True\n\n self._state = CANCELLED\n self._condition.notify_all()\n\n self._invoke_callbacks()\n return True\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 127, "n_words": 25, "vocab_size": 18, "complexity": 3, "nloc": 10, "token_counts": 56, "n_ast_nodes": 93, "n_identifiers": 10, "random_cut": "def cancel(self):\n \n with self._condition:\n if self._state in [RUNNING, FINISHED]:\n return False\n\n if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:\n return True\n\n self._state =", "d_id": 56438, "documentation": { "docstring": "Cancel the future if possible.\n\n Returns True if the future was cancelled, False otherwise. 
A future\n cannot be cancelled if it is running or has already completed.\n ", "n_words": 27, "vocab_size": 22, "n_whitespaces": 48, "language": "en" } }, { "id": 32823, "commit_id": "5cd40323684c183c30b34758aea1e877996a7ac9", "repo": "transformers", "path": "src/transformers/utils/hub.py", "file_name": "hub.py", "fun_name": "get_hub_metadata", "commit_message": "Use new huggingface_hub tools for download models (#18438)\n\n* Draft new cached_file\r\n\r\n* Initial draft for config and model\r\n\r\n* Small fixes\r\n\r\n* Fix first batch of tests\r\n\r\n* Look in cache when internet is down\r\n\r\n* Fix last tests\r\n\r\n* Bad black, not fixing all quality errors\r\n\r\n* Make diff less\r\n\r\n* Implement change for TF and Flax models\r\n\r\n* Add tokenizer and feature extractor\r\n\r\n* For compatibility with main\r\n\r\n* Add utils to move the cache and auto-do it at first use.\r\n\r\n* Quality\r\n\r\n* Deal with empty commit shas\r\n\r\n* Deal with empty etag\r\n\r\n* Address review comments", "code": "def get_hub_metadata(url, token=None):\n \n if token is None:\n token = HfFolder.get_token()\n headers = {\"user-agent\": http_user_agent()}\n headers[\"authorization\"] = f\"Bearer {token}\"\n\n r = huggingface_hub.file_download._request_with_retry(\n method=\"HEAD\", url=url, headers=headers, allow_redirects=False\n )\n huggingface_hub.file_download._raise_for_status(r)\n commit_hash = r.headers.get(HUGGINGFACE_HEADER_X_REPO_COMMIT)\n etag = r.headers.get(HUGGINGFACE_HEADER_X_LINKED_ETAG) or r.headers.get(\"ETag\")\n if etag is not None:\n etag = huggingface_hub.file_download._normalize_etag(etag)\n return etag, commit_hash\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 100, "n_words": 46, "vocab_size": 33, "complexity": 4, "nloc": 14, "token_counts": 119, "n_ast_nodes": 200, "n_identifiers": 20, "random_cut": "def get_hub_metadata(url, token=None):\n \n if token is None:\n token = HfFolder.get_token()\n ", "d_id": 5990, "documentation": { "docstring": "\n Returns the commit hash and associated etag for a given url.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 244523, "commit_id": "015f8a9bafe808fbe3db673d629f126a804a9207", "repo": "mmdetection", "path": "mmdet/models/dense_heads/anchor_free_head.py", "file_name": "anchor_free_head.py", "fun_name": "aug_test", "commit_message": "Refactor interface of base dense free head and fcos head", "code": "def aug_test(self, aug_batch_feats, aug_batch_img_metas, rescale=False):\n \n return self.aug_test_bboxes(\n aug_batch_feats, aug_batch_img_metas, rescale=rescale)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 10, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def aug_test(self, aug_batch_feats, aug_batch_img_metas, rescale=False):\n \n return self.aug_test_bboxes(\n aug_batch_feats, aug_batch_img_metas, rescal", "d_id": 70416, "documentation": { "docstring": "Test function with test time augmentation.\n\n Args:\n aug_batch_feats (list[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains features for all images in the batch.\n aug_batch_img_metas (list[list[dict]]): the outer list indicates\n test-time augs (multiscale, flip, etc.) 
and the inner list\n indicates images in a batch. each dict has image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[ndarray]: bbox results of each class\n ", "n_words": 75, "vocab_size": 56, "n_whitespaces": 215, "language": "en" } }, { "id": 130602, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/data/impl/block_list.py", "file_name": "block_list.py", "fun_name": "ensure_schema_for_first_block", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def ensure_schema_for_first_block(self) -> Optional[Union[\"pyarrow.Schema\", type]]:\n \n get_schema = cached_remote_fn(_get_schema)\n try:\n block = next(self.iter_blocks())\n except (StopIteration, ValueError):\n # Dataset is empty (no blocks) or was manually cleared.\n return None\n schema = ray.get(get_schema.remote(block))\n # Set the schema.\n self._metadata[0].schema = schema\n return schema\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 128, "n_words": 39, "vocab_size": 32, "complexity": 2, "nloc": 13, "token_counts": 68, "n_ast_nodes": 113, "n_identifiers": 18, "random_cut": "def ensure_schema_for_first_block(self) -> Optional[Union[\"pyarrow.Schema\", type]]:\n \n get_schema = cached_remote_fn(_get_schema)\n try:\n block = next(self.iter_blocks())\n except (StopIteration, ValueError):\n # Dataset is empty (no blocks) or was manually", "d_id": 29328, "documentation": { "docstring": "Ensure that the schema is set for the first block.\n\n Returns None if the block list is empty.\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 32, "language": "en" } }, { "id": 168984, "commit_id": "ae6dc976d334e791b3e215cf6e63a267675cccbe", "repo": "pandas", "path": "pandas/tests/io/formats/test_format.py", "file_name": "test_format.py", "fun_name": "get_local_am_pm", "commit_message": "BUG: Fixed Unicode decoding error in `Period.strftime` when a locale-specific directive is used (#46405)\n\n* Added test representative of #46319. Should fail on CI\r\n\r\n* Added a gha worker with non utf 8 zh_CN encoding\r\n\r\n* Attempt to fix the encoding so that locale works\r\n\r\n* Added the fix, but not using it for now, until CI is able to reproduce the issue.\r\n\r\n* Crazy idea: maybe simply removing the .utf8 modifier will use the right encoding !\r\n\r\n* Hopefully fixing the locale not available error\r\n\r\n* Now simply generating the locale, not updating the ubuntu one\r\n\r\n* Trying to install the locale without enabling it\r\n\r\n* Stupid mistake\r\n\r\n* Testing the optional locale generator condition\r\n\r\n* Put back all runners\r\n\r\n* Added whatsnew\r\n\r\n* Now using the fix\r\n\r\n* As per code review: moved locale-switching fixture `overridden_locale` to conftest\r\n\r\n* Flake8\r\n\r\n* Added comments on the runner\r\n\r\n* Added a non-utf8 locale in the `it_IT` runner. Added the zh_CN.utf8 locale in the tests\r\n\r\n* Improved readability of fixture `overridden_locale` as per code review\r\n\r\n* Added two comments on default encoding\r\n\r\n* Fixed #46319 by adding a new `char_to_string_locale` function in the `tslibs.util` module, able to decode char* using the current locale.\r\n\r\n* As per code review: modified the test to contain non-utf8 chars. 
Fixed the resulting issue.\r\n\r\n* Split the test in two for clarity\r\n\r\n* Fixed test and flake8 error.\r\n\r\n* Updated whatsnew to ref #46468 . Updated test name\r\n\r\n* Removing wrong whatsnew bullet\r\n\r\n* Nitpick on whatsnew as per code review\r\n\r\n* Fixed build error rst directive\r\n\r\n* Names incorrectly reverted in last merge commit\r\n\r\n* Fixed test_localization so that #46595 can be demonstrated on windows targets (even if today these do not run on windows targets, see #46597)\r\n\r\n* Fixed `tm.set_locale` context manager, it could error and leak when category LC_ALL was used. Fixed #46595\r\n\r\n* Removed the fixture as per code review, and added corresponding parametrization in tests.\r\n\r\n* Dummy mod to trigger CI again\r\n\r\n* reverted dummy mod\r\n\r\n* Attempt to fix the remaining error on the numpy worker\r\n\r\n* Fixed issue in `_from_ordinal`\r\n\r\n* Added asserts to try to understand\r\n\r\n* Reverted debugging asserts and applied fix for numpy repeat from #47670.\r\n\r\n* Fixed the last issue on numpy dev: a TypeError message had changed\r\n\r\n* Code review: Removed `EXTRA_LOC`\r\n\r\n* Code review: removed commented line\r\n\r\n* Code review: reverted out of scope change\r\n\r\n* Code review: reverted out of scope change\r\n\r\n* Fixed unused import\r\n\r\n* Fixed revert mistake\r\n\r\n* Moved whatsnew to 1.6.0\r\n\r\n* Update pandas/tests/io/parser/test_quoting.py\r\n\r\nCo-authored-by: Sylvain MARIE ", "code": "def get_local_am_pm():\n \n am_local = time(1).strftime(\"%p\")\n pm_local = time(13).strftime(\"%p\")\n return am_local, pm_local\n\n\n@pytest.fixture(params=[\"string\", \"pathlike\", \"buffer\"])", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture(params=[\"string\", \"pathlike\", \"buffer\"])", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 25, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 86, "n_identifiers": 8, "random_cut": "def get_local_am_pm():\n \n am_local = time(1).strftime(\"%p\")\n pm_local = time(13).strftime(\"%p\")\n return am_local, pm_local\n\n\n@pytest.fixture(params=[\"string\", \"pa", "d_id": 40362, "documentation": { "docstring": "Return the AM and PM strings returned by strftime in current locale.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 40010, "commit_id": "9d622aca0ce4d2d6a3cbc56079c6978b46219a98", "repo": "dash", "path": "dash/_validate.py", "file_name": "_validate.py", "fun_name": "validate_pages_layout", "commit_message": "update 2 after review", "code": "def validate_pages_layout(module, page):\n try:\n getattr(page, \"layout\")\n except AttributeError:\n raise exceptions.NoLayoutException(\n f\n )\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 49, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 10, "token_counts": 26, "n_ast_nodes": 53, "n_identifiers": 7, "random_cut": "def validate_pages_layout(module, page):\n try:\n getattr(page, \"layout\")\n except AttributeError:\n raise exceptions.NoLayoutException(\n f\n )\n", "d_id": 7309, "documentation": { "docstring": "\n No layout found in {module + \".py\"}\n A variable or a function named \"layout\" is required.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 50, "language": "en" } }, { "id": 130950, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": 
"python/ray/serve/tests/test_autoscaling_policy.py", "file_name": "test_autoscaling_policy.py", "fun_name": "test_upscale_downscale_delay", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_upscale_downscale_delay():\n \n\n upscale_delay_s = 30.0\n downscale_delay_s = 600.0\n\n config = AutoscalingConfig(\n min_replicas=1,\n max_replicas=2,\n target_num_ongoing_requests_per_replica=1,\n upscale_delay_s=30.0,\n downscale_delay_s=600.0,\n )\n\n policy = BasicAutoscalingPolicy(config)\n\n upscale_wait_periods = int(upscale_delay_s / CONTROL_LOOP_PERIOD_S)\n downscale_wait_periods = int(downscale_delay_s / CONTROL_LOOP_PERIOD_S)\n\n overload_requests = [100]\n\n # We should scale up only after enough consecutive scale-up decisions.\n for i in range(upscale_wait_periods):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1\n )\n assert new_num_replicas == 1, i\n\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1\n )\n assert new_num_replicas == 2\n\n no_requests = [0, 0]\n\n # We should scale down only after enough consecutive scale-down decisions.\n for i in range(downscale_wait_periods):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=no_requests, curr_target_num_replicas=2\n )\n assert new_num_replicas == 2, i\n\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=no_requests, curr_target_num_replicas=2\n )\n assert new_num_replicas == 1\n\n # Get some scale-up decisions, but not enough to trigger a scale up.\n for i in range(int(upscale_wait_periods / 2)):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1\n )\n assert new_num_replicas == 1, i\n\n # Interrupt with a scale-down decision.\n policy.get_decision_num_replicas(\n current_num_ongoing_requests=[0], curr_target_num_replicas=1\n )\n\n # The counter should be reset, so it should require `upscale_wait_periods`\n # more periods before we actually scale up.\n for i in range(upscale_wait_periods):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1\n )\n assert new_num_replicas == 1, i\n\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1\n )\n assert new_num_replicas == 2\n\n # Get some scale-down decisions, but not enough to trigger a scale down.\n for i in range(int(downscale_wait_periods / 2)):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=no_requests, curr_target_num_replicas=2\n )\n assert new_num_replicas == 2, i\n\n # Interrupt with a scale-up decision.\n policy.get_decision_num_replicas(\n current_num_ongoing_requests=[100, 100], curr_target_num_replicas=2\n )\n\n # The counter should be reset so it should require `downscale_wait_periods`\n # more periods before we actually scale down.\n for i in range(downscale_wait_periods):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=no_requests, curr_target_num_replicas=2\n )\n assert new_num_replicas == 2, i\n\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=no_requests, curr_target_num_replicas=2\n )\n assert 
new_num_replicas == 1\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 673, "n_words": 278, "vocab_size": 92, "complexity": 7, "nloc": 67, "token_counts": 358, "n_ast_nodes": 534, "n_identifiers": 22, "random_cut": "def test_upscale_downscale_delay():\n \n\n upscale_delay_s = 30.0\n downscale_delay_s = 600.0\n\n config = AutoscalingConfig(\n min_replicas=1,\n max_replicas=2,\n target_num_ongoing_requests_per_replica=1,\n upscale_delay_s=30.0,\n downscale_delay_s=600.0,\n )\n\n policy = BasicAutoscalingPolicy(config)\n\n upscale_wait_periods = int(upscale_delay_s / CONTROL_LOOP_PERIOD_S)\n downscale_wait_periods = int(downscale_delay_s / CONTROL_LOOP_PERIOD_S)\n\n overload_requests = [100]\n\n # We should scale up only after enough consecutive scale-up decisions.\n for i in range(upscale_wait_periods):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1\n )\n assert new_num_replicas == 1, i\n\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1\n )\n assert new_num_replicas == 2\n\n no_requests = [0, 0]\n\n # We should scale down only after enough consecutive scale-down decisions.\n for i in range(downscale_wait_periods):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=no_requests, curr_target_num_replicas=2\n )\n assert new_num_replicas == 2, i\n\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=no_requests, curr_target_num_replicas=2\n )\n assert new_num_replicas == 1\n\n # Get some scale-up decisions, but not enough to trigger a scale up.\n for i in range(int(upscale_wait_periods / 2)):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1\n )\n assert new_num_replicas == 1, i\n\n # Interrupt with a scale-down decision.\n policy.get_decision_num_replicas(\n current_num_ongoing_requests=[0], curr_target_num_replicas=1\n )\n\n # The counter should be reset, so it should require `upscale_wait_periods`\n # more periods before we actually scale up.\n for i in range(upscale_wait_periods):\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1\n )\n assert new_num_replicas == 1, i\n\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_r", "d_id": 29435, "documentation": { "docstring": "Unit test for upscale_delay_s and downscale_delay_s.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 109612, "commit_id": "9d616615417eac104e12f2915f3fe875177bb2e4", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "set_aspect", "commit_message": "Update _base.py", "code": "def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):\n \n if cbook._str_equal(aspect, 'equal'):\n aspect = 1\n if not cbook._str_equal(aspect, 'auto'):\n aspect = float(aspect) # raise ValueError if necessary\n if aspect<0:\n raise ValueError(\"aspect must be positive\")\n\n if share:\n axes = {sibling for name in self._axis_names\n for sibling in self._shared_axes[name].get_siblings(self)}\n else:\n axes = [self]\n\n for ax in axes:\n ax._aspect = aspect\n\n if adjustable is None:\n adjustable = self._adjustable\n 
self.set_adjustable(adjustable, share=share) # Handle sharing.\n\n if anchor is not None:\n self.set_anchor(anchor, share=share)\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 271, "n_words": 77, "vocab_size": 50, "complexity": 10, "nloc": 20, "token_counts": 146, "n_ast_nodes": 232, "n_identifiers": 22, "random_cut": "def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):\n \n if cbook._str_equal(aspect, 'equal'):\n aspect = 1\n if not cbook._str_equal(aspect, 'auto'):\n aspect = float(aspect) # raise ValueError if ", "d_id": 23671, "documentation": { "docstring": "\n Set the aspect ratio of the axes scaling, i.e. y/x-scale.\n\n Parameters\n ----------\n aspect : {'auto', 'equal'} or float\n Possible values:\n\n - 'auto': fill the position rectangle with data.\n - 'equal': same as ``aspect=1``, i.e. same scaling for x and y.\n - *float*: The displayed size of 1 unit in y-data coordinates will\n be *aspect* times the displayed size of 1 unit in x-data\n coordinates; e.g. for ``aspect=2`` a square in data coordinates\n will be rendered with a height of twice its width.\n\n adjustable : None or {'box', 'datalim'}, optional\n If not ``None``, this defines which parameter will be adjusted to\n meet the required aspect. See `.set_adjustable` for further\n details.\n\n anchor : None or str or (float, float), optional\n If not ``None``, this defines where the Axes will be drawn if there\n is extra space due to aspect constraints. The most common way to\n to specify the anchor are abbreviations of cardinal directions:\n\n ===== =====================\n value description\n ===== =====================\n 'C' centered\n 'SW' lower left corner\n 'S' middle of bottom edge\n 'SE' lower right corner\n etc.\n ===== =====================\n\n See `~.Axes.set_anchor` for further details.\n\n share : bool, default: False\n If ``True``, apply the settings to all shared Axes.\n\n See Also\n --------\n matplotlib.axes.Axes.set_adjustable\n Set how the Axes adjusts to achieve the required aspect ratio.\n matplotlib.axes.Axes.set_anchor\n Set the position in case of extra space.\n ", "n_words": 219, "vocab_size": 140, "n_whitespaces": 618, "language": "en" } }, { "id": 202525, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/custom_pk/tests.py", "file_name": "tests.py", "fun_name": "test_pk_attributes", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_pk_attributes(self):\n \n # pk can be used as a substitute for the primary key.\n # The primary key can be accessed via the pk property on the model.\n e = Employee.objects.get(pk=123)\n self.assertEqual(e.pk, 123)\n # Or we can use the real attribute name for the primary key:\n self.assertEqual(e.employee_code, 123)\n\n with self.assertRaisesMessage(\n AttributeError, \"'Employee' object has no attribute 'id'\"\n ):\n e.id\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 144, "n_words": 59, "vocab_size": 44, "complexity": 1, "nloc": 8, "token_counts": 51, "n_ast_nodes": 89, "n_identifiers": 12, "random_cut": "def test_pk_attributes(self):\n \n # pk can be used as a substitute for the primary key.\n # The primary key can be accessed via the pk property on the model.\n e = Employee.objects.get(pk=123)\n self.ass", "d_id": 50145, "documentation": { "docstring": 
"\n pk and attribute name are available on the model\n No default id attribute is added\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 54803, "commit_id": "05b92d7c7f6cf21c5d6033df7242c331fc66b92e", "repo": "prefect", "path": "src/prefect/client.py", "file_name": "client.py", "fun_name": "__aenter__", "commit_message": "Disable lifespan management during logging", "code": "async def __aenter__(self):\n \n if self._closed:\n # httpx.AsyncClient does not allow reuse so we will not either.\n raise RuntimeError(\n \"The client cannot be started again after closing. \"\n \"Retrieve a new client with `get_client()` instead.\"\n )\n\n if self._started:\n # httpx.AsyncClient does not allow reentrancy so we will not either.\n raise RuntimeError(\"The client cannot be started more than once.\")\n\n await self._exit_stack.__aenter__()\n\n # Enter a lifespan context if using an ephemeral application.\n # See https://github.com/encode/httpx/issues/350\n if self._ephemeral_app and self.manage_lifespan:\n self._ephemeral_lifespan = await self._exit_stack.enter_async_context(\n app_lifespan_context(self._ephemeral_app)\n )\n\n # Enter the httpx client's context\n await self._exit_stack.enter_async_context(self._client)\n\n self._started = True\n\n return self\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 294, "n_words": 95, "vocab_size": 65, "complexity": 5, "nloc": 16, "token_counts": 80, "n_ast_nodes": 145, "n_identifiers": 12, "random_cut": "async def __aenter__(self):\n \n if self._closed:\n # httpx.AsyncClient does not allow reuse so we will not either.\n raise RuntimeError(\n \"The client cannot be started again after closing. \"\n ", "d_id": 11152, "documentation": { "docstring": "\n Start the client.\n\n If the client is already started, this will raise an exception.\n\n If the client is already closed, this will raise an exception. Use a new client\n instance instead.\n ", "n_words": 31, "vocab_size": 19, "n_whitespaces": 67, "language": "en" } }, { "id": 138659, "commit_id": "627b9f2e888b05434bb67f547b390409f26538e7", "repo": "ray", "path": "rllib/agents/qmix/qmix.py", "file_name": "qmix.py", "fun_name": "training_iteration", "commit_message": "[RLlib] QMIX training iteration function and new replay buffer API. 
(#24164)", "code": "def training_iteration(self) -> ResultDict:\n \n # Sample n batches from n workers.\n new_sample_batches = synchronous_parallel_sample(\n worker_set=self.workers, concat=False\n )\n\n for batch in new_sample_batches:\n # Update counters.\n self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps()\n self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps()\n # Store new samples in the replay buffer.\n self.local_replay_buffer.add(batch)\n\n # Sample n batches from replay buffer until the total number of timesteps\n # reaches `train_batch_size`.\n train_batch = sample_min_n_steps_from_buffer(\n replay_buffer=self.local_replay_buffer,\n min_steps=self.config[\"train_batch_size\"],\n count_by_agent_steps=self._by_agent_steps,\n )\n if train_batch is None:\n return {}\n\n # Learn on the training batch.\n # Use simple optimizer (only for multi-agent or tf-eager; all other\n # cases should use the multi-GPU optimizer, even if only using 1 GPU)\n if self.config.get(\"simple_optimizer\") is True:\n train_results = train_one_step(self, train_batch)\n else:\n train_results = multi_gpu_train_one_step(self, train_batch)\n\n # TODO: Move training steps counter update outside of `train_one_step()` method.\n # # Update train step counters.\n # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps()\n # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()\n\n # Update target network every `target_network_update_freq` steps.\n cur_ts = self._counters[NUM_ENV_STEPS_SAMPLED]\n last_update = self._counters[LAST_TARGET_UPDATE_TS]\n if cur_ts - last_update >= self.config[\"target_network_update_freq\"]:\n to_update = self.workers.local_worker().get_policies_to_train()\n self.workers.local_worker().foreach_policy_to_train(\n lambda p, pid: pid in to_update and p.update_target()\n )\n self._counters[NUM_TARGET_UPDATES] += 1\n self._counters[LAST_TARGET_UPDATE_TS] = cur_ts\n\n # Update weights and global_vars - after learning on the local worker - on all\n # remote workers.\n global_vars = {\n \"timestep\": self._counters[NUM_ENV_STEPS_SAMPLED],\n }\n # Update remote workers' weights and global vars after learning on local worker.\n with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:\n self.workers.sync_weights(global_vars=global_vars)\n\n # Return all collected metrics for the iteration.\n return train_results\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 665, "n_words": 224, "vocab_size": 139, "complexity": 6, "nloc": 46, "token_counts": 238, "n_ast_nodes": 397, "n_identifiers": 42, "random_cut": "def training_iteration(self) -> ResultDict:\n \n # Sample n batches from n workers.\n new_sample_batches = synchronous_parallel_sample(\n worker_set=self.workers, concat=False\n )\n\n for batch in new_sample_batches:\n # Update counters.\n self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps()\n self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps()\n # Store new samples in the replay buffer.\n self.local_replay_buffer.add(batch)\n\n # Sample n batches from replay buffer until the total number of timesteps\n # reaches `train_batch_size`.\n train_batch = sample_min_n_steps_from_buffer(\n replay_buffer=self.local_replay_buffer,\n min_steps=self.config[\"train_batch_size\"],\n count_by_agent_steps=self._by_agent_steps,\n )\n if train_batch is None:\n return {}\n\n # Learn on the training batch.\n # Use simple optimizer (only for multi-agent or tf-eager; all other\n # 
cases should use the multi-GPU optimizer, even if only using 1 GPU)\n if self.config.get(\"simple_optimizer\") is True:\n train_results = train_one_step(self, train_batch)\n else:\n train_results = multi_gpu_train_one_step(self, train_batch)\n\n # TODO: Move training steps counter update outside of `train_one_step()` method.\n # # Update train step counters.\n # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps()\n # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()\n\n # Update target network every `target_network_update_freq` steps.\n ", "d_id": 31501, "documentation": { "docstring": "QMIX training iteration function.\n\n - Sample n MultiAgentBatches from n workers synchronously.\n - Store new samples in the replay buffer.\n - Sample one training MultiAgentBatch from the replay buffer.\n - Learn on the training batch.\n - Update the target network every `target_network_update_freq` steps.\n - Return all collected training metrics for the iteration.\n\n Returns:\n The results dict from executing the training iteration.\n ", "n_words": 61, "vocab_size": 40, "n_whitespaces": 128, "language": "en" } }, { "id": 249457, "commit_id": "c7b18d9d44c90acfd4ceaec1fa2f8275e03f14af", "repo": "synapse", "path": "scripts-dev/release.py", "file_name": "release.py", "fun_name": "_announce", "commit_message": "Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. (#13483)", "code": "def _announce() -> None:\n \n\n current_version = get_package_version()\n tag_name = f\"v{current_version}\"\n\n click.echo(\n f\n )\n\n if \"rc\" in tag_name:\n click.echo(\n \n )\n else:\n click.echo(\n \n )\n\n\n@cli.command()\n@click.option(\"--gh-token\", envvar=[\"GH_TOKEN\", \"GITHUB_TOKEN\"], required=True)", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "@cli.command()\n@click.option(\"--gh-token\", envvar=[\"GH_TOKEN\", \"GITHUB_TOKEN\"], required=True)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 105, "n_words": 27, "vocab_size": 22, "complexity": 2, "nloc": 31, "token_counts": 42, "n_ast_nodes": 147, "n_identifiers": 11, "random_cut": "def _announce() -> None:\n \n\n current_version = get_package_version()\n tag_name = f\"v{current_version}\"\n\n click.echo(\n f\n", "d_id": 72929, "documentation": { "docstring": "Generate markdown to announce the release.\nHi everyone. 
Synapse {current_version} has just been released.\n\n[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \\\n[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \\\n[debs](https://packages.matrix.org/debian/) | \\\n[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)\nAnnounce the RC in\n- #homeowners:matrix.org (Synapse Announcements)\n- #synapse-dev:matrix.org\nAnnounce the release in\n- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic\n- #synapse:matrix.org (Synapse Admins), bumping the version in the topic\n- #synapse-dev:matrix.org\n- #synapse-package-maintainers:matrix.org\n\nAsk the designated people to do the blog and tweets.", "n_words": 72, "vocab_size": 43, "n_whitespaces": 57, "language": "en" } }, { "id": 212852, "commit_id": "ed2bc288ff17344f6406c49623036620f18e65bb", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "update", "commit_message": "Completed switching all elements over to the new way of handling visiblity", "code": "def update(self, value=None, visible=None):\n \n if not self._widget_was_created(): # if widget hasn't been created yet, then don't allow\n return\n\n if value is not None:\n self._TKOut.output.delete('1.0', tk.END)\n self._TKOut.output.insert(tk.END, value)\n if visible is False:\n self._pack_forget_save_settings(self._TKOut.frame)\n elif visible is True:\n self._pack_restore_settings(self._TKOut.frame)\n\n if visible is not None:\n self._visible = visible\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 154, "n_words": 45, "vocab_size": 32, "complexity": 6, "nloc": 12, "token_counts": 98, "n_ast_nodes": 158, "n_identifiers": 15, "random_cut": "def update(self, value=None, visible=None):\n \n ", "d_id": 53460, "documentation": { "docstring": "\n Changes some of the settings for the Output Element. Must call `Window.Read` or `Window.Finalize` prior\n\n Changes will not be visible in your window until you call window.read or window.refresh.\n\n If you change visibility, your element may MOVE. 
If you want it to remain stationary, use the \"layout helper\"\n function \"pin\" to ensure your element is \"pinned\" to that location in your layout so that it returns there\n when made visible.\n\n :param value: string that will replace current contents of the output area\n :type value: (str)\n :param visible: control visibility of element\n :type visible: (bool)\n ", "n_words": 94, "vocab_size": 67, "n_whitespaces": 171, "language": "en" } }, { "id": 150631, "commit_id": "01232e9a1f8e28e3611e38af3816edb026600767", "repo": "freqtrade", "path": "freqtrade/freqai/prediction_models/RLPredictionModel.py", "file_name": "RLPredictionModel.py", "fun_name": "example", "commit_message": "callback function and TDQN model added", "code": "def example(self):\n \n result = getattr(self, \"_example\", None)\n if result is None:\n # No example batch was found, so get one from the `.train` dataset\n result = next(iter(self.train))\n # And cache it for next time\n self._example = result\n return result", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 111, "n_words": 39, "vocab_size": 32, "complexity": 2, "nloc": 6, "token_counts": 39, "n_ast_nodes": 68, "n_identifiers": 8, "random_cut": "def example(self):\n \n result = getattr(self, \"_example\", None)\n if result is None:\n # No example batch was found, so get one from the `.train` dataset\n result = next(iter(self.train))\n # And cache it for next time\n self._example = result\n return result", "d_id": 34801, "documentation": { "docstring": "Get and cache an example batch of `inputs, labels` for plotting.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 204114, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/measure.py", "file_name": "measure.py", "fun_name": "unit_attname", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def unit_attname(cls, unit_str):\n \n lower = unit_str.lower()\n if unit_str in cls.UNITS:\n return unit_str\n elif lower in cls.UNITS:\n return lower\n elif lower in cls.LALIAS:\n return cls.LALIAS[lower]\n else:\n raise Exception(\n 'Could not find a unit keyword associated with \"%s\"' % unit_str\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 151, "n_words": 39, "vocab_size": 28, "complexity": 4, "nloc": 12, "token_counts": 56, "n_ast_nodes": 93, "n_identifiers": 7, "random_cut": "def unit_attname(cls, unit_str):\n \n lower = unit_str.lower()\n if unit_str in cls.UNITS:\n return unit_str\n elif lower in cls.UNITS:\n return lower\n elif lower in cls.LALIAS:\n return cls.LALIAS[lower]\n else:\n raise Exception(\n 'Could not find a unit keyword associated", "d_id": 50635, "documentation": { "docstring": "\n Retrieve the unit attribute name for the given unit string.\n For example, if the given unit string is 'metre', return 'm'.\n Raise an exception if an attribute cannot be found.\n ", "n_words": 30, "vocab_size": 22, "n_whitespaces": 59, "language": "en" } }, { "id": 135932, "commit_id": "087548031bcf22dd73364b58acb70e61a49f2427", "repo": "ray", "path": "rllib/tests/test_nn_framework_import_errors.py", "file_name": "test_nn_framework_import_errors.py", "fun_name": "test_dont_import_tf_error", "commit_message": "[RLlib] AlgorithmConfigs: Make None a valid value for methods to set properties; Use new `NotProvided` 
singleton, instead, to indicate no changes wanted on that property. (#30020)", "code": "def test_dont_import_tf_error():\n \n # Do not import tf for testing purposes.\n os.environ[\"RLLIB_TEST_NO_TF_IMPORT\"] = \"1\"\n\n config = ppo.PPOConfig().environment(\"CartPole-v1\")\n for _ in framework_iterator(config, frameworks=(\"tf\", \"tf2\")):\n with pytest.raises(ImportError, match=\"However, no installation was found\"):\n config.build()\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 63, "n_words": 30, "vocab_size": 28, "complexity": 2, "nloc": 6, "token_counts": 58, "n_ast_nodes": 108, "n_identifiers": 15, "random_cut": "def test_dont_import_tf_error():\n \n # Do n", "d_id": 30776, "documentation": { "docstring": "Check error being thrown, if tf not installed but configured.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 217655, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/hmac.py", "file_name": "hmac.py", "fun_name": "_current", "commit_message": "add python 3.10.4 for windows", "code": "def _current(self):\n \n if self._hmac:\n return self._hmac\n else:\n h = self._outer.copy()\n h.update(self._inner.digest())\n return h\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 78, "n_words": 13, "vocab_size": 11, "complexity": 2, "nloc": 7, "token_counts": 40, "n_ast_nodes": 69, "n_identifiers": 9, "random_cut": "def _current(self):\n \n if self._hmac:\n return self._hmac\n else:\n h = self._outer.copy()\n ", "d_id": 54870, "documentation": { "docstring": "Return a hash object for the current state.\n\n To be used only internally with digest() and hexdigest().\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 31, "language": "en" } }, { "id": 64064, "commit_id": "f469ec87d94d4639ff4eb99a45496721c4779bf3", "repo": "erpnext", "path": "erpnext/patches/v13_0/delete_old_sales_reports.py", "file_name": "delete_old_sales_reports.py", "fun_name": "delete_links_from_desktop_icons", "commit_message": "fix: broken patches (backport #29067) (#29406)\n\n* chore: patch fixes\r\n\r\n(cherry picked from commit 8b5b146f6d2720587a16f78a8d47840be8dca2b7)\r\n\r\n# Conflicts:\r\n#\terpnext/patches/v13_0/make_homepage_products_website_items.py\r\n\r\n* fix: remove desktop icons while deleting sales reports\r\n\r\n(cherry picked from commit 5f72026cb932d01fc827c382747e996a94b441fd)\r\n\r\n* refactor: dont ignore dangerous exceptions in patches\r\n\r\n(cherry picked from commit 0aa1ea8aeb7757592616bd491de98c69fef08854)\r\n\r\n* fix: make patch kinda idempotent\r\n\r\nwith previous query rerunning would've caused all values to become 0.\r\n\r\n* chore: conflicts\r\n\r\n* fix: check type before patching\r\n\r\nCo-authored-by: Saurabh \r\nCo-authored-by: Ankush Menat ", "code": "def delete_links_from_desktop_icons(report):\n\t\n\tdesktop_icons = frappe.db.get_values(\"Desktop Icon\", {\"_report\": report}, [\"name\"])\n\tfor desktop_icon in desktop_icons:\n\t\tfrappe.delete_doc(\"Desktop Icon\", desktop_icon[0])", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 4, "token_counts": 42, "n_ast_nodes": 73, "n_identifiers": 8, "random_cut": "def 
delete_links_from_desktop_icons(report):\n\t\n\tdesktop_icons = frappe.db.get_values(\"Desktop Icon\", {\"_report\": report}, [\"name\"])\n\tfor desktop_icon in desktop_icons:\n\t\tfrappe.delete_doc(\"Desktop Icon\", desktop_icon[0])", "d_id": 13559, "documentation": { "docstring": " Check for one or multiple Desktop Icons and delete ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 24519, "commit_id": "ddaa2c2552e19635cd6cdf38619f1f176c358f89", "repo": "PaddleOCR", "path": "ppstructure/table/table_master_match.py", "file_name": "table_master_match.py", "fun_name": "get_bboxes_list", "commit_message": "add SLANet", "code": "def get_bboxes_list(end2end_result, structure_master_result):\n \n # end2end\n end2end_xyxy_list = []\n end2end_xywh_list = []\n for end2end_item in end2end_result:\n src_bbox = end2end_item['bbox']\n end2end_xyxy_list.append(src_bbox)\n xywh_bbox = xyxy2xywh(src_bbox)\n end2end_xywh_list.append(xywh_bbox)\n end2end_xyxy_bboxes = np.array(end2end_xyxy_list)\n end2end_xywh_bboxes = np.array(end2end_xywh_list)\n\n # structure master\n src_bboxes = structure_master_result['bbox']\n src_bboxes = remove_empty_bboxes(src_bboxes)\n # structure_master_xywh_bboxes = src_bboxes\n # xyxy_bboxes = xywh2xyxy(src_bboxes)\n # structure_master_xyxy_bboxes = xyxy_bboxes\n structure_master_xyxy_bboxes = src_bboxes\n xywh_bbox = xyxy2xywh(src_bboxes)\n structure_master_xywh_bboxes = xywh_bbox\n\n return end2end_xyxy_bboxes, end2end_xywh_bboxes, structure_master_xywh_bboxes, structure_master_xyxy_bboxes\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 143, "n_words": 64, "vocab_size": 37, "complexity": 2, "nloc": 16, "token_counts": 93, "n_ast_nodes": 159, "n_identifiers": 18, "random_cut": "def get_bboxes_list(end2end_result, structure_master_result):\n \n # end2end\n e", "d_id": 4761, "documentation": { "docstring": "\n This function is use to convert end2end results and structure master results to\n List of xyxy bbox format and List of xywh bbox format\n :param end2end_result: bbox's format is xyxy\n :param structure_master_result: bbox's format is xywh\n :return: 4 kind list of bbox ()\n ", "n_words": 43, "vocab_size": 26, "n_whitespaces": 62, "language": "en" } }, { "id": 20557, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/core.py", "file_name": "core.py", "fun_name": "autoname_elements", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def autoname_elements() -> None:\n \n for name, var in sys._getframe().f_back.f_locals.items():\n if isinstance(var, ParserElement) and not var.customName:\n var.set_name(name)\n\n\ndbl_quoted_string = Combine(\n Regex(r'\"(?:[^\"\\n\\r\\\\]|(?:\"\")|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*') + '\"'\n).set_name(\"string enclosed in double quotes\")\n\nsgl_quoted_string = 
Combine(\n Regex(r\"'(?:[^'\\n\\r\\\\]|(?:'')|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*\") + \"'\"\n).set_name(\"string enclosed in single quotes\")\n\nquoted_string = Combine(\n Regex(r'\"(?:[^\"\\n\\r\\\\]|(?:\"\")|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*') + '\"'\n | Regex(r\"'(?:[^'\\n\\r\\\\]|(?:'')|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*\") + \"'\"\n).set_name(\"quotedString using single or double quotes\")\n\nunicode_string = Combine(\"u\" + quoted_string.copy()).set_name(\"unicode string literal\")\n\n\nalphas8bit = srange(r\"[\\0xc0-\\0xd6\\0xd8-\\0xf6\\0xf8-\\0xff]\")\npunc8bit = srange(r\"[\\0xa1-\\0xbf\\0xd7\\0xf7]\")\n\n# build list of built-in expressions, for future reference if a global default value\n# gets updated\n_builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)]\n\n# backward compatibility names\ntokenMap = token_map\nconditionAsParseAction = condition_as_parse_action\nnullDebugAction = null_debug_action\nsglQuotedString = sgl_quoted_string\ndblQuotedString = dbl_quoted_string\nquotedString = quoted_string\nunicodeString = unicode_string\nlineStart = line_start\nlineEnd = line_end\nstringStart = string_start\nstringEnd = string_end\ntraceParseAction = trace_parse_action\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 145, "n_words": 134, "vocab_size": 87, "complexity": 4, "nloc": 8, "token_counts": 45, "n_ast_nodes": 339, "n_identifiers": 46, "random_cut": "def autoname_elements() -> None:\n \n for name, var in sys._getframe().f_back.f_locals.items():\n if isinstance(var, ParserElement) and not var.customName:\n var.set_name(name)\n\n\ndbl_quoted_string = Combine(\n Regex(r'\"(?:[^\"\\n\\r\\\\]|(?:\"\")|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*') + '\"'\n).set_name(\"string enclosed in double quotes\")\n\nsgl_quoted_string = Combine(\n Regex(r\"'(?:[^'\\n\\r\\\\]|(?:'')|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*\") + \"'\"\n).set_name(\"string enclosed in single quotes\")\n\nquoted_string = Combine(\n Regex(r'\"(?:[^\"\\n\\r\\\\]|(?:\"\")|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*') + '\"'\n | Regex(r\"'(?:[^'\\n\\r\\\\]|(?:'')|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*\") + \"'\"\n).set_name(\"quotedString using single or double quotes\")\n\nunicode_string = Combine(\"u\" + quoted_string.copy()).set_name(\"unicode string literal\")\n\n\nalphas8bit = srange(r\"[\\0xc0-\\0xd6\\0xd8-\\0xf6\\0xf8-\\0xff]\")\npunc8bit = srange(r\"[\\0xa1-\\0xbf\\0xd7\\0xf7]\")\n\n# build list of built-in expressions, for future reference if a global default value\n# gets updated\n_builtin_expr", "d_id": 3424, "documentation": { "docstring": "\n Utility to simplify mass-naming of parser elements, for\n generating railroad diagram with named subdiagrams.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 24, "language": "en" } }, { "id": 102171, "commit_id": "bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d", "repo": "pytorch", "path": "tools/test/test_gen_backend_stubs.py", "file_name": "test_gen_backend_stubs.py", "fun_name": "test_valid_zero_ops_doesnt_require_backend_dispatch_key", "commit_message": "Revert \"Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels\" (#69950)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/69950\n\nThis reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa.\n\nTest Plan: Imported from OSS\n\nReviewed By: albanD\n\nDifferential Revision: D33113545\n\nPulled By: bdhirsh\n\nfbshipit-source-id: 
d6590294662588d36c09662dea65919ad4e1e288", "code": "def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None:\n yaml_str = \n # External codegen on a yaml file with no operators is effectively a no-op,\n # so there's no reason to parse the backend\n self.assert_success_from_gen_backend_stubs(yaml_str)\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 58, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 6, "token_counts": 16, "n_ast_nodes": 32, "n_identifiers": 4, "random_cut": "def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None:\n yaml_str = \n # External codegen on a yaml file with no operators is effectively a no-op,\n ", "d_id": 21486, "documentation": { "docstring": "\\\nbackend: BAD_XLA\ncpp_namespace: torch_xla\nsupported:", "n_words": 6, "vocab_size": 6, "n_whitespaces": 2, "language": "en" } }, { "id": 9995, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "tests/distributed/test_remote_peas/test_remote_peas.py", "file_name": "test_remote_peas.py", "fun_name": "test_pseudo_remote_peas_topologies", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: 
star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* 
feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star 
routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* 
test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "async def test_pseudo_remote_peas_topologies(gateway, head, worker):\n \n worker_port = random_port()\n head_port = random_port()\n port_expose = random_port()\n graph_description = '{\"start-gateway\": [\"pod0\"], \"pod0\": [\"end-gateway\"]}'\n if head == 
'remote':\n pods_addresses = f'{{\"pod0\": [\"{HOST}:{head_port}\"]}}'\n else:\n pods_addresses = f'{{\"pod0\": [\"0.0.0.0:{head_port}\"]}}'\n\n # create a single head pea\n head_pea = _create_head_pea(head, head_port)\n\n # create a single worker pea\n worker_pea = _create_worker_pea(worker, worker_port)\n\n # create a single gateway pea\n gateway_pea = _create_gateway_pea(\n gateway, graph_description, pods_addresses, port_expose\n )\n\n with gateway_pea, worker_pea, head_pea:\n await asyncio.sleep(1.0)\n # this would be done by the Pod, its adding the worker to the head\n activate_msg = ControlRequest(command='ACTIVATE')\n worker_host, worker_port = worker_pea.runtime_ctrl_address.split(':')\n if head == 'remote':\n worker_host = __docker_host__\n\n activate_msg.add_related_entity('worker', worker_host, int(worker_port))\n assert GrpcConnectionPool.send_request_sync(\n activate_msg, head_pea.runtime_ctrl_address\n )\n\n # send requests to the gateway\n c = Client(host='127.0.0.1', port=port_expose, asyncio=True)\n responses = c.post(\n '/', inputs=async_inputs, request_size=1, return_results=True\n )\n response_list = []", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 322, "n_words": 132, "vocab_size": 85, "complexity": 4, "nloc": 33, "token_counts": 210, "n_ast_nodes": 316, "n_identifiers": 41, "random_cut": "async def test_pseudo_remote_peas_topologies(gateway, head, worker):\n \n worker_port = random_port()\n head_port = random_port()\n port_expose = random_port()\n graph_description = '{\"start-gateway\": [\"pod0\"], \"pod0\": [\"end-gateway\"]}'\n if head == 'remote':\n pods_addresses = f'{{\"pod0\": [\"{HOST}:{head_port}\"]}}'\n else:\n pods_addresses = f'{{\"pod0\": [\"0.0.0.0:{head_port}\"]}}'\n\n # create a single head pea\n head_pea = _create_head_pea(head, head_port)\n\n # create a single worker pea\n worker_pea = _create_worker_pea(worker, worker_port)\n\n # create a single gateway pea\n gateway_pea = _create_gateway_pea(\n gateway, graph_description, pods_addresses, port_expose\n )\n\n with gateway_pea, worker_pea, head_pea:\n await asyncio.sleep(1.0)\n # this would be done by the Pod, its adding the worker to the head\n activate_msg = ControlRequest(command='ACTIVATE')\n worker_host, worker_port = worker_pea.runtime_ctrl_address.split(':')\n if head == 'remote':\n worker_host = __docker_host__\n\n activate_msg.add_related_entity('worker', worker_host, int(worker_port))\n assert GrpcConnectionPool.send_request_sync(\n activate_msg, head_pea.runtime_ctrl_address\n )\n\n # send requests to the gateway\n c = Client(host='127.0.0.1', port=port_expose, asyncio=True)\n responses = c.post(\n ", "d_id": 1809, "documentation": { "docstring": "\n g(l)-h(l)-w(l) - works\n g(l)-h(l)-w(r) - works - head connects to worker via localhost\n g(l)-h(r)-w(r) - works - head (inside docker) connects to worker via dockerhost\n g(l)-h(r)-w(l) - doesn't work remote head need remote worker\n g(r)-... 
- doesn't work, as distributed parser not enabled for gateway\n After any 1 failure, segfault\n ", "n_words": 50, "vocab_size": 33, "n_whitespaces": 72, "language": "en" } }, { "id": 9848, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/peapods/peas/__init__.py", "file_name": "__init__.py", "fun_name": "async_wait_start_success", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve 
cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals 
\r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* 
chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* 
feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "async def async_wait_start_success(self):\n \n import asyncio\n\n _timeout = self.args.timeout_ready\n if _timeout <= 0:\n _timeout = None\n else:\n _timeout /= 1e3\n\n timeout_ns = 1e9 * _timeout if _timeout else None\n now = time.time_ns()\n while timeout_ns is None or time.time_ns() - now < timeout_ns:\n\n if self.ready_or_shutdown.event.is_set():\n self._check_failed_to_start()\n self.logger.debug(__ready_msg__)\n return\n else:\n await asyncio.sleep(0.1)\n\n self._fail_start_timeout(_timeout)\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 217, "n_words": 50, "vocab_size": 34, "complexity": 6, "nloc": 17, "token_counts": 102, "n_ast_nodes": 168, "n_identifiers": 19, "random_cut": "async def async_wait_start_success(self):\n \n import asyncio\n\n _timeout = self.args.timeout_ready\n if _timeout <= 0:\n _timeout = None\n else:\n _timeout /= 1e3\n\n timeout_ns = 1e9 * _timeout if _timeout else None\n now = time.time_ns()\n while timeout_ns is None or time.ti", "d_id": 
1728, "documentation": { "docstring": "\n Wait for the `Pea` to start successfully in a non-blocking manner\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 109359, "commit_id": "438d30b227b1fef7e8733578f851e76a8e360f24", "repo": "matplotlib", "path": "lib/matplotlib/offsetbox.py", "file_name": "offsetbox.py", "fun_name": "set_fontsize", "commit_message": "Get rcParams from mpl", "code": "def set_fontsize(self, s=None):\n \n if s is None:\n s = mpl.rcParams[\"legend.fontsize\"]\n\n self.prop = FontProperties(size=s)\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 55, "n_words": 16, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 38, "n_ast_nodes": 64, "n_identifiers": 9, "random_cut": "def set_fontsize(self, s=None):\n \n if s is None:\n s = mpl.rcParams[\"legend.fontsize\"]\n\n self.prop = FontProperties(size=s)\n self.stale = True\n", "d_id": 23550, "documentation": { "docstring": "\n Set the fontsize in points.\n\n If *s* is not given, reset to :rc:`legend.fontsize`.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 132899, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/actor_pool.py", "file_name": "actor_pool.py", "fun_name": "get_next", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_next(self, timeout=None):\n \n if not self.has_next():\n raise StopIteration(\"No more results to get\")\n if self._next_return_index >= self._next_task_index:\n raise ValueError(\n \"It is not allowed to call get_next() after \" \"get_next_unordered().\"\n )\n future = self._index_to_future[self._next_return_index]\n if timeout is not None:\n res, _ = ray.wait([future], timeout=timeout)\n if not res:\n raise TimeoutError(\"Timed out waiting for result\")\n del self._index_to_future[self._next_return_index]\n self._next_return_index += 1\n\n future_key = tuple(future) if isinstance(future, list) else future\n i, a = self._future_to_actor.pop(future_key)\n\n self._return_actor(a)\n return ray.get(future)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 233, "n_words": 71, "vocab_size": 54, "complexity": 6, "nloc": 18, "token_counts": 133, "n_ast_nodes": 218, "n_identifiers": 25, "random_cut": "def get_next(self, timeout=None):\n \n if not s", "d_id": 29858, "documentation": { "docstring": "Returns the next pending result in order.\n\n This returns the next result produced by submit(), blocking for up to\n the specified timeout until it is available.\n\n Returns:\n The next result.\n\n Raises:\n TimeoutError if the timeout is reached.\n\n Examples:\n >>> pool = ActorPool(...)\n >>> pool.submit(lambda a, v: a.double.remote(v), 1)\n >>> print(pool.get_next())\n 2\n ", "n_words": 51, "vocab_size": 41, "n_whitespaces": 159, "language": "en" } }, { "id": 21185, "commit_id": "4b996c0fa85824b323ad9eff3364dbe2213ebb4c", "repo": "pipenv", "path": "pipenv/environment.py", "file_name": "environment.py", "fun_name": "expand_egg_links", "commit_message": "Convert type comments to type annotations", "code": "def expand_egg_links(self) -> None:\n \n prefixes = [\n Path(prefix)\n for prefix in self.base_paths[\"libdirs\"].split(os.pathsep)\n if 
vistir.path.is_in_path(prefix, self.prefix.as_posix())\n ]\n for loc in prefixes:\n if not loc.exists():\n continue\n for pth in loc.iterdir():\n if not pth.suffix == \".egg-link\":\n continue\n contents = [\n vistir.path.normalize_path(line.strip())\n for line in pth.read_text().splitlines()\n ]\n pth.write_text(\"\\n\".join(contents))\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 259, "n_words": 44, "vocab_size": 31, "complexity": 8, "nloc": 21, "token_counts": 120, "n_ast_nodes": 200, "n_identifiers": 26, "random_cut": "def expand_egg_links(self) -> None:\n \n prefixes = [\n Path(prefix)\n for prefix in self.base_paths[\"libdirs\"].split(os.pathsep)\n if vistir.path.is_in_path(prefix, self.prefix.as_posix())\n ]\n for loc in prefixes:\n if not loc.exists():\n continue\n for pth in loc.iterdir():\n if not pth.suffix == \".egg-link\":\n continue\n contents = [\n vistir.path.normalize_path(line.strip())\n for line in pth", "d_id": 3716, "documentation": { "docstring": "\n Expand paths specified in egg-link files to prevent pip errors during\n reinstall\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 83638, "commit_id": "327ff9ea0f5e4712a34d767fee55a549cc1d3f39", "repo": "zulip", "path": "zerver/tests/test_link_embed.py", "file_name": "test_link_embed.py", "fun_name": "test_page_with_og", "commit_message": "preview: Use a dataclass for the embed data.\n\nThis is significantly cleaner than passing around `Dict[str, Any]` all\nof the time.", "code": "def test_page_with_og(self) -> None:\n html = b\n\n parser = OpenGraphParser(html, \"text/html; charset=UTF-8\")\n result = parser.extract_data()\n self.assertEqual(result.title, \"The Rock\")\n self.assertEqual(result.description, \"The Rock film\")\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 14, "token_counts": 46, "n_ast_nodes": 79, "n_identifiers": 10, "random_cut": "def test_page_with_og(self) -> None:\n ", "d_id": 17698, "documentation": { "docstring": "\n \n \n \n \n \n \n \n ", "n_words": 27, "vocab_size": 18, "n_whitespaces": 96, "language": "en" } }, { "id": 196831, "commit_id": "117f9554466e08aa4178137ad65fae1f2d49b340", "repo": "sympy", "path": "sympy/core/expr.py", "file_name": "expr.py", "fun_name": "is_rational_function", "commit_message": "Moved definition of illegal", "code": "def is_rational_function(self, *syms):\n \n if self in _illegal:\n return False\n\n if syms:\n syms = set(map(sympify, syms))\n else:\n syms = self.free_symbols\n if not syms:\n return True\n\n return self._eval_is_rational_function(syms)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 120, "n_words": 26, "vocab_size": 19, "complexity": 4, "nloc": 10, "token_counts": 50, "n_ast_nodes": 83, "n_identifiers": 9, "random_cut": "def is_rational_function(self, *syms):\n \n if self in _illegal:\n return False\n\n if syms:\n syms = set(map(sympify, syms))\n else:\n syms = self.free_symbols\n if not syms:\n return True\n\n return self._eval_is_rational_function(syms)\n", "d_id": 48207, "documentation": { "docstring": "\n Test whether function is a ratio of two polynomials in the given\n symbols, syms. 
When syms is not given, all free symbols will be used.\n The rational function does not have to be in expanded or in any kind of\n canonical form.\n\n This function returns False for expressions that are \"rational\n functions\" with symbolic exponents. Thus, you should be able to call\n .as_numer_denom() and apply polynomial algorithms to the result for\n expressions for which this returns True.\n\n This is not part of the assumptions system. You cannot do\n Symbol('z', rational_function=True).\n\n Examples\n ========\n\n >>> from sympy import Symbol, sin\n >>> from sympy.abc import x, y\n\n >>> (x/y).is_rational_function()\n True\n\n >>> (x**2).is_rational_function()\n True\n\n >>> (x/sin(y)).is_rational_function(y)\n False\n\n >>> n = Symbol('n', integer=True)\n >>> (x**n + 1).is_rational_function(x)\n False\n\n This function does not attempt any nontrivial simplifications that may\n result in an expression that does not appear to be a rational function\n to become one.\n\n >>> from sympy import sqrt, factor\n >>> y = Symbol('y', positive=True)\n >>> a = sqrt(y**2 + 2*y + 1)/y\n >>> a.is_rational_function(y)\n False\n >>> factor(a)\n (y + 1)/y\n >>> factor(a).is_rational_function(y)\n True\n\n See also is_algebraic_expr().\n\n ", "n_words": 182, "vocab_size": 114, "n_whitespaces": 444, "language": "en" } }, { "id": 197838, "commit_id": "af44b30d68265acb25340374b648e198fb5570e7", "repo": "sympy", "path": "sympy/polys/numberfields/primes.py", "file_name": "primes.py", "fun_name": "reduce_alg_num", "commit_message": "Improve `PrimeIdeal` reduction methods.", "code": "def reduce_alg_num(self, a):\n \n elt = self.ZK.parent.element_from_alg_num(a)\n red = self.reduce_element(elt)\n return a.field_element(list(reversed(red.QQ_col.flat())))\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 39, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 47, "n_ast_nodes": 78, "n_identifiers": 14, "random_cut": "def reduce_alg_num(self, a):\n \n elt = self.ZK.parent.element_from_alg_num(a)\n red = self.reduce_element(elt)\n return a.field_element(list(reversed(red.QQ_col.f", "d_id": 48713, "documentation": { "docstring": "\n Reduce an :py:class:`~.AlgebraicNumber` to a \"small representative\"\n modulo this prime ideal.\n\n Parameters\n ==========\n\n elt : :py:class:`~.AlgebraicNumber`\n The element to be reduced.\n\n Returns\n =======\n\n :py:class:`~.AlgebraicNumber`\n The reduced element.\n\n See Also\n ========\n\n reduce_element\n reduce_ANP\n .Submodule.reduce_element\n\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 154, "language": "en" } }, { "id": 322610, "commit_id": "1e2ee01dade0d4076ba98aa613c3eb150c615abb", "repo": "PaddleNLP", "path": "paddlenlp/taskflow/task.py", "file_name": "task.py", "fun_name": "_auto_joiner", "commit_message": "Update Taskflow word_segmentation and ner tasks (#1666)\n\n* Add AutoSplitter & AutoJoiner\r\n\r\n* codestyle fix\r\n\r\n* unify auto joiner\r\n\r\n* add comments\r\n\r\n* add sentence split mode\r\n\r\n* update params\r\n\r\n* add paddle version check\r\n\r\n* add wordtag for word_segmentation\r\n\r\n* add wordtag for word_segmentation\r\n\r\n* add ner-lac and word_segmentation-jieba\r\n\r\n* add return entities only for ner\r\n\r\n* fix ci\r\n\r\n* fix ci\r\n\r\n* fix ci\r\n\r\n* fix ci\r\n\r\n* fix ci\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update 
README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* fix bugs of dataloader\r\n\r\n* remove guard\r\n\r\n* use fast mode for rnn example\r\n\r\n* Update README.md\r\n\r\n* Update README.md", "code": "def _auto_joiner(self, short_results, input_mapping, is_dict=False):\n \n concat_results = []\n elem_type = {} if is_dict else []\n for k, vs in input_mapping.items():\n single_results = elem_type\n for v in vs:\n if len(single_results) == 0:\n single_results = short_results[v]\n elif isinstance(elem_type, list):\n single_results.extend(short_results[v])\n elif isinstance(elem_type, dict):\n for sk in single_results.keys():\n if isinstance(single_results[sk], str):\n single_results[sk] += short_results[v][sk]\n else:\n single_results[sk].extend(short_results[v][sk])\n else:\n raise ValueError(\n \"Invalid element type, the type of results \"\n \"for each element should be list of dict, \"\n \"but {} received.\".format(type(single_results)))\n concat_results.append(single_results)\n return concat_results\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 452, "n_words": 79, "vocab_size": 59, "complexity": 9, "nloc": 23, "token_counts": 159, "n_ast_nodes": 252, "n_identifiers": 24, "random_cut": "def _auto_joiner(self, short_results, input_mapping, is_dict=False):\n \n concat_results = []\n elem_type = {} if is_dict else []\n for k, vs in input_mapping.items():\n single_results = elem_type\n for v in vs:\n if len(single_results) == 0:\n single_results = short_results[v]\n elif isinstance(elem_type, list):\n single_results.extend(short_results[v])\n elif isinstance(elem_type, dict):\n for sk in single_results.keys():\n if isinstance(single_results[sk], str):\n single_results[sk] += short_results[v][sk]\n else:\n single_results[sk].extend(short_results[v][sk])\n else:\n raise ValueError(\n \"Invalid element type, the type of results \"\n \"for each element should be list of dict, \"\n \"but {} received.\".format(t", "d_id": 118204, "documentation": { "docstring": "\n Join the short results automatically and generate the final results to match with the user inputs.\n Args:\n short_results (List[dict] / List[List[str]] / List[str]): input raw texts.\n input_mapping (dict): cutting length.\n is_dict (bool): whether the element type is dict, default to False.\n return:\n short_input_texts (List[str]): the short input texts for model inference.\n ", "n_words": 51, "vocab_size": 42, "n_whitespaces": 124, "language": "en" } }, { "id": 269373, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/applications/efficientnet_weight_update_util.py", "file_name": "efficientnet_weight_update_util.py", "fun_name": "get_variable_names_from_ckpt", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_variable_names_from_ckpt(path_ckpt, use_ema=True):\n \n v_all = tf.train.list_variables(path_ckpt)\n\n # keep name only\n v_name_all = [x[0] for x in v_all]\n\n if use_ema:\n v_name_all = [x for x in v_name_all if \"ExponentialMovingAverage\" in x]\n else:\n v_name_all = [\n x for x in v_name_all if \"ExponentialMovingAverage\" not in x\n ]\n\n # remove util variables used for RMSprop\n v_name_all = [x for x in v_name_all if \"RMS\" not in x]\n return v_name_all\n\n", "url": 
"https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 125, "n_words": 66, "vocab_size": 32, "complexity": 9, "nloc": 11, "token_counts": 80, "n_ast_nodes": 130, "n_identifiers": 9, "random_cut": "def get_variable_names_from_ckpt(path_ckpt, use_ema=True):\n \n v_all = tf.train.list_variables(path_ckpt)\n\n # keep name only\n v_name_all = [x[0] for x in v_all]\n\n if use_ema:\n v_name_all = [x for x in v_name_all if \"ExponentialMovingAverage\" in x]\n else:\n v_name_all = [\n x for x in v_name_all if \"ExponentialMovingAverage\" not in x\n ]\n\n # remove util variables used for RMSprop\n v_", "d_id": 80043, "documentation": { "docstring": "Get list of tensor names from checkpoint.\n\n Args:\n path_ckpt: str, path to the ckpt files\n use_ema: Bool, whether to use ExponentialMovingAverage result or not.\n Returns:\n List of variable names from checkpoint.\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 55, "language": "en" } }, { "id": 198418, "commit_id": "bd9f607176c58dfba01e27c05c2b7d49ff97c901", "repo": "sympy", "path": "sympy/solvers/deutils.py", "file_name": "deutils.py", "fun_name": "ode_order", "commit_message": "Improve loop performance in solvers", "code": "def ode_order(expr, func):\n \n a = Wild('a', exclude=[func])\n if expr.match(a):\n return 0\n\n if isinstance(expr, Derivative):\n if expr.args[0] == func:\n return len(expr.variables)\n else:\n return max(ode_order(arg, func) for arg in expr.args[0].args) + len(expr.variables)\n else:\n return max(ode_order(arg, func) for arg in expr.args)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 103, "n_words": 38, "vocab_size": 26, "complexity": 6, "nloc": 11, "token_counts": 103, "n_ast_nodes": 161, "n_identifiers": 14, "random_cut": "def ode_order(expr, func):\n ", "d_id": 48925, "documentation": { "docstring": "\n Returns the order of a given differential\n equation with respect to func.\n\n This function is implemented recursively.\n\n Examples\n ========\n\n >>> from sympy import Function\n >>> from sympy.solvers.deutils import ode_order\n >>> from sympy.abc import x\n >>> f, g = map(Function, ['f', 'g'])\n >>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +\n ... 
f(x).diff(x), f(x))\n 2\n >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))\n 2\n >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))\n 3\n\n ", "n_words": 67, "vocab_size": 46, "n_whitespaces": 119, "language": "en" } }, { "id": 291932, "commit_id": "cb03db8df4bf8b50945b36a4b0debcaaed1190a8", "repo": "core", "path": "homeassistant/components/discord/notify.py", "file_name": "notify.py", "fun_name": "async_send_message", "commit_message": "Replace discord.py with nextcord (#66540)\n\n* Replace discord.py with nextcord\r\n\r\n* Typing tweak\r\n\r\n* Another pip check decrease :)", "code": "async def async_send_message(self, message, **kwargs):\n \n nextcord.VoiceClient.warn_nacl = False\n discord_bot = nextcord.Client()\n images = None\n embedding = None\n\n if ATTR_TARGET not in kwargs:\n _LOGGER.error(\"No target specified\")\n return None\n\n data = kwargs.get(ATTR_DATA) or {}\n\n embeds: list[nextcord.Embed] = []\n if ATTR_EMBED in data:\n embedding = data[ATTR_EMBED]\n fields = embedding.get(ATTR_EMBED_FIELDS) or []\n\n if embedding:\n embed = nextcord.Embed(**embedding)\n for field in fields:\n embed.add_field(**field)\n if ATTR_EMBED_FOOTER in embedding:\n embed.set_footer(**embedding[ATTR_EMBED_FOOTER])\n if ATTR_EMBED_AUTHOR in embedding:\n embed.set_author(**embedding[ATTR_EMBED_AUTHOR])\n if ATTR_EMBED_THUMBNAIL in embedding:\n embed.set_thumbnail(**embedding[ATTR_EMBED_THUMBNAIL])\n embeds.append(embed)\n\n if ATTR_IMAGES in data:\n images = []\n\n for image in data.get(ATTR_IMAGES, []):\n image_exists = await self.hass.async_add_executor_job(\n self.file_exists, image\n )\n\n if image_exists:\n images.append(image)\n else:\n _LOGGER.warning(\"Image not found: %s\", image)\n\n await discord_bot.login(self.token)\n\n try:\n for channelid in kwargs[ATTR_TARGET]:\n channelid = int(channelid)\n try:\n channel = await discord_bot.fetch_channel(channelid)\n except nextcord.NotFound:\n try:\n channel = await discord_bot.fetch_user(channelid)\n except nextcord.NotFound:\n _LOGGER.warning(\"Channel not found for ID: %s\", channelid)\n continue\n # Must create new instances of File for each channel.\n files = [nextcord.File(image) for image in images] if images else []\n await channel.send(message, files=files, embeds=embeds)\n except (nextcord.HTTPException, nextcord.NotFound) as error:\n _LOGGER.warning(\"Communication error: %s\", error)\n await discord_bot.close()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 866, "n_words": 170, "vocab_size": 102, "complexity": 19, "nloc": 51, "token_counts": 347, "n_ast_nodes": 564, "n_identifiers": 53, "random_cut": "async def async_send_message(self, message, **kwargs):\n \n nextcord.VoiceClient.warn_nacl = False\n discord_bot = nextcord.Client()\n images = None\n embedding = None\n\n if ATTR_TARGET not in kwargs:\n _LOGGER.error(\"No target specified\")\n return None\n\n data = kwargs.get(ATTR_DATA) or {}\n\n embeds: list[nextcord.Embed] = []\n if ATTR_EMBED in data:\n embedding = data[ATTR_EMBED]\n fields = embedding.get(ATTR_EMBED_FIELDS) or []\n\n if embedding:\n embed = nextcord.Embed(**embedding)\n for field in fields:\n embed.add_field(**field)\n if ATTR_EMBED_FOOTER in embedding:\n embed.set_footer(**embedding[ATTR_EMBED_FOOTER])\n if ATTR_EMBED_AUTHOR in embedding:\n embed.set_author(**embedding[ATTR_EMBED_AUTHOR])\n if ATTR_EMBED_THUMBNAIL in embedding:\n embed.set_thumbnail(**embedding[ATTR_EMBED_THUMBNAIL])\n 
embeds.append(embed)\n\n if ATTR_IMAGES in data:\n images = []\n\n for image in data.get(ATTR_IMAGES, []):\n image_exists = await self.hass.async_add_executor_job(\n self.file_exists, image\n )\n\n if image_exists:\n images.", "d_id": 91035, "documentation": { "docstring": "Login to Discord, send message to channel(s) and log out.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 167613, "commit_id": "f538568afc2c76c2d738d32e3544cf9fe6742960", "repo": "pandas", "path": "pandas/conftest.py", "file_name": "conftest.py", "fun_name": "rand_series_with_duplicate_datetimeindex", "commit_message": "TYP: misc return type annotations (#47558)", "code": "def rand_series_with_duplicate_datetimeindex() -> Series:\n \n dates = [\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n ]\n\n return Series(np.random.randn(len(dates)), index=dates)\n\n\n# ----------------------------------------------------------------\n# Scalars\n# ----------------------------------------------------------------\n@pytest.fixture(\n params=[\n (\n Interval(left=0, right=5, inclusive=\"right\"),\n IntervalDtype(\"int64\", inclusive=\"right\"),\n ),\n (\n Interval(left=0.1, right=0.5, inclusive=\"right\"),\n IntervalDtype(\"float64\", inclusive=\"right\"),\n ),\n (Period(\"2012-01\", freq=\"M\"), \"period[M]\"),\n (Period(\"2012-02-01\", freq=\"D\"), \"period[D]\"),\n (\n Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n DatetimeTZDtype(tz=\"US/Eastern\"),\n ),\n (Timedelta(seconds=500), \"timedelta64[ns]\"),\n ]\n)", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture(\n params=[\n (\n Interval(left=0, right=5, inclusive=\"right\"),\n IntervalDtype(\"int64\", inclusive=\"right\"),\n ),\n (\n Interval(left=0.1, right=0.5, inclusive=\"right\"),\n IntervalDtype(\"float64\", inclusive=\"right\"),\n ),\n (Period(\"2012-01\", freq=\"M\"), \"period[M]\"),\n (Period(\"2012-02-01\", freq=\"D\"), \"period[D]\"),\n (\n Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n DatetimeTZDtype(tz=\"US/Eastern\"),\n ),\n (Timedelta(seconds=500), \"timedelta64[ns]\"),\n ]\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 290, "n_words": 78, "vocab_size": 43, "complexity": 1, "nloc": 17, "token_counts": 120, "n_ast_nodes": 360, "n_identifiers": 24, "random_cut": "def rand_series_with_duplicate_datetimeindex() -> Series:\n \n dates = [\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n ]\n\n return Series(np.random.randn(len(dates)), index=dates)\n\n\n# ----------------------------------------------------------------\n# Scalars\n# ----------------------------------------------------------------\n@pytest.fixture(\n params=[\n (\n Interval(left=0, right=5, inclusive=\"right\"),\n IntervalDtype(\"int64\", inclusive=\"right\"),\n ),\n (\n Interval(lef", "d_id": 40065, "documentation": { "docstring": "\n Fixture for Series with a DatetimeIndex that has duplicates.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 60770, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": 
".venv/lib/python3.8/site-packages/pip/_internal/locations/base.py", "file_name": "base.py", "fun_name": "get_major_minor_version", "commit_message": "upd; format", "code": "def get_major_minor_version():\n # type: () -> str\n \n return \"{}.{}\".format(*sys.version_info)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 31, "n_identifiers": 4, "random_cut": "def get_major_minor_version():\n # typ", "d_id": 12285, "documentation": { "docstring": "\n Return the major-minor version of the current Python as a string, e.g.\n \"3.7\" or \"3.10\".\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 25, "language": "en" } }, { "id": 244731, "commit_id": "9d7511d8c35df1f9c13b17eb770136859bf370be", "repo": "mmdetection", "path": "tests/test_models/test_dense_heads/test_ssd_head.py", "file_name": "test_ssd_head.py", "fun_name": "test_ssd_head_loss", "commit_message": "Update SSD and PISA-SSD model config", "code": "def test_ssd_head_loss(self):\n \n s = 300\n img_metas = [{\n 'img_shape': (s, s, 3),\n 'scale_factor': 1,\n }]\n cfg = Config(\n dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.,\n ignore_iof_thr=-1,\n gt_max_assign_all=False),\n sampler=dict(type='PseudoSampler'),\n smoothl1_beta=1.,\n allowed_border=-1,\n pos_weight=-1,\n neg_pos_ratio=3,\n debug=False))\n ssd_head = SSDHead(\n num_classes=4,\n in_channels=(1, 1, 1, 1, 1, 1),\n stacked_convs=1,\n feat_channels=1,\n use_depthwise=True,\n anchor_generator=dict(\n type='SSDAnchorGenerator',\n scale_major=False,\n input_size=s,\n basesize_ratio_range=(0.15, 0.9),\n strides=[8, 16, 32, 64, 100, 300],\n ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),\n train_cfg=cfg)\n\n # SSD head expects a multiple levels of features per image\n feats = (\n torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))\n for stride in ssd_head.prior_generator.strides)\n cls_scores, bbox_preds = ssd_head.forward(feats)\n\n # Test that empty ground truth encourages the network to\n # predict background\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.empty((0, 4))\n gt_instances.labels = torch.LongTensor([])\n\n empty_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances],\n img_metas)\n # When there is no truth, cls_loss and box_loss should all be zero.\n empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n self.assertEqual(\n empty_cls_loss.item(), 0,\n 'there should be no cls loss when there are no true boxes')\n self.assertEqual(\n empty_box_loss.item(), 0,\n 'there should be no box loss when there are no true boxes')\n\n # When truth is non-empty then both cls and box loss\n # should be nonzero for random inputs\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.Tensor(\n [[23.6667, 23.8757, 238.6326, 151.8874]])\n gt_instances.labels = torch.LongTensor([2])\n\n one_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances],\n img_metas)\n onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n self.assertGreater(onegt_cls_loss.item(), 0,\n 'cls loss should be non-zero')\n self.assertGreater(onegt_box_loss.item(), 0,\n 'box loss should be non-zero')\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1066, "n_words": 232, "vocab_size": 154, "complexity": 2, "nloc": 64, "token_counts": 471, "n_ast_nodes": 677, "n_identifiers": 61, "random_cut": "def test_ssd_head_loss(self):\n \n s = 300\n img_metas = [{\n 'img_shape': (s, s, 3),\n 'scale_factor': 1,\n }]\n cfg = Config(\n dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.,\n ignore_iof_thr=-1,\n gt_max_assign_all=False),\n sampler=dict(type='PseudoSampler'),\n smoothl1_beta=1.,\n allowed_border=-1,\n pos_weight=-1,\n neg_pos_ratio=3,\n debug=False))\n ssd_head = SSDHe", "d_id": 70501, "documentation": { "docstring": "Tests ssd head loss when truth is empty and non-empty.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 100671, "commit_id": "a9908b46f77dc66ac7efe7100ea0eed4b1f2b460", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_legacy_check", "commit_message": "Alignments tool - Replace 'extract-large' with 'min-size'", "code": "def _legacy_check(self) -> None:\n \n if self._min_size > 0 or self._arguments.extract_every_n != 1:\n logger.warning(\"This alignments file was generated with the legacy extraction method.\")\n logger.warning(\"You should run this extraction job, but with 'min_size' set to 0 and \"\n \"'extract-every-n' set to 1 to update the alignments file.\")\n logger.warning(\"You can then re-run this extraction job with your chosen options.\")\n sys.exit(0)\n\n maskers = [\"components\", \"extended\"]\n nn_masks = [mask for mask in list(self._alignments.mask_summary) if mask not in maskers]\n logtype = logger.warning if nn_masks else logger.info\n logtype(\"This alignments file was created with the legacy extraction method and will be \"\n \"updated.\")\n logtype(\"Faces will be extracted using the new method and landmarks based masks will be \"\n \"regenerated.\")\n if nn_masks:\n logtype(\"However, the NN based masks '%s' will be cropped to the legacy extraction \"\n \"method, so you may want to run the mask tool to regenerate these \"\n \"masks.\", \"', '\".join(nn_masks))\n self._mask_pipeline = Extractor(None, None, maskers, multiprocess=True)\n self._mask_pipeline.launch()\n # Update alignments versioning\n self._alignments._version = _VERSION # pylint:disable=protected-access\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 394, "n_words": 160, "vocab_size": 103, "complexity": 7, "nloc": 26, "token_counts": 143, "n_ast_nodes": 256, "n_identifiers": 24, "random_cut": "def _legacy_check(self) -> None:\n \n if self._min_size > 0 or self._arguments.extract_every_n != 1:\n logger.warning(\"This alignments file was generated with the legacy extraction method.\")\n logger.warning(\"You should run this extraction job, but with 'min_size' set to 0 and \"\n \"'extract-every-n' set to 1 to update the alignments file.\")\n logger.warning(\"You can then re-run this extraction job with your chosen options.\")\n sys.exit(0)\n\n maskers = [\"components\", \"extended\"]\n nn_masks = [mask for mask in list(self._alignments.mask_summary) if mask not in maskers]\n logtype = logger.warning if nn_masks else logger.info\n logtype(\"This alignments file was created with the legacy extraction method and will be \"\n \"updated.\")\n logtype(\"Faces will be extracted using the new method and landmarks based masks will be \"\n \"regenerated.\")\n if 
nn_masks:\n logtype(\"However, the NN based masks '%s' will be cropped to the legacy extraction \"\n \"method, so you may want to run the mask tool to regenerate these \"\n \"masks.\", \"', '\".join(nn_masks))\n self._mask_pipeline = Extractor(None, None, maskers, multiproce", "d_id": 20129, "documentation": { "docstring": " Check whether the alignments file was created with the legacy extraction method.\n\n If so, force user to re-extract all faces if any options have been specified, otherwise\n raise the appropriate warnings and set the legacy options.\n ", "n_words": 36, "vocab_size": 32, "n_whitespaces": 58, "language": "en" } }, { "id": 196688, "commit_id": "9ad8ab9fe58051cf11626ba6654852fcfec60147", "repo": "sympy", "path": "sympy/stats/crv_types.py", "file_name": "crv_types.py", "fun_name": "FisherZ", "commit_message": "Documentation cleanup 5", "code": "def FisherZ(name, d1, d2):\n r\n\n return rv(name, FisherZDistribution, (d1, d2))\n\n#-------------------------------------------------------------------------------\n# Frechet distribution ---------------------------------------------------------\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 61, "token_counts": 24, "n_ast_nodes": 36, "n_identifiers": 6, "random_cut": "def FisherZ(name, d1, d2):\n r\n\n return rv(name, FisherZDistribution, (d1, d2))\n\n#-------------------------------------------------------------------------------\n# Frechet distribution -----", "d_id": 48106, "documentation": { "docstring": "\n Create a Continuous Random Variable with an Fisher's Z distribution.\n\n Explanation\n ===========\n\n The density of the Fisher's Z distribution is given by\n\n .. math::\n f(x) := \\frac{2d_1^{d_1/2} d_2^{d_2/2}} {\\mathrm{B}(d_1/2, d_2/2)}\n \\frac{e^{d_1z}}{\\left(d_1e^{2z}+d_2\\right)^{\\left(d_1+d_2\\right)/2}}\n\n\n .. TODO - What is the difference between these degrees of freedom?\n\n Parameters\n ==========\n\n d1 : `d_1 > 0`\n Degree of freedom.\n d2 : `d_2 > 0`\n Degree of freedom.\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import FisherZ, density\n >>> from sympy import Symbol, pprint\n\n >>> d1 = Symbol(\"d1\", positive=True)\n >>> d2 = Symbol(\"d2\", positive=True)\n >>> z = Symbol(\"z\")\n\n >>> X = FisherZ(\"x\", d1, d2)\n\n >>> D = density(X)(z)\n >>> pprint(D, use_unicode=False)\n d1 d2\n d1 d2 - -- - --\n -- -- 2 2\n 2 2 / 2*z \\ d1*z\n 2*d1 *d2 *\\d1*e + d2/ *e\n -----------------------------------------\n /d1 d2\\\n B|--, --|\n \\2 2 /\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution\n .. 
[2] http://mathworld.wolfram.com/Fishersz-Distribution.html\n\n ", "n_words": 145, "vocab_size": 98, "n_whitespaces": 459, "language": "en" } }, { "id": 37529, "commit_id": "18df440709f1b19d1c5617c0d987c5ff8fd0915d", "repo": "transformers", "path": "src/transformers/trainer_pt_utils.py", "file_name": "trainer_pt_utils.py", "fun_name": "find_batch_size", "commit_message": "Replace dict/BatchEncoding instance checks by Mapping (#17014)\n\n* Replace dict/BatchEncoding instance checks by Mapping\r\n\r\n* Typo", "code": "def find_batch_size(tensors):\n \n if isinstance(tensors, (list, tuple)):\n for t in tensors:\n result = find_batch_size(t)\n if result is not None:\n return result\n elif isinstance(tensors, Mapping):\n for key, value in tensors.items():\n result = find_batch_size(value)\n if result is not None:\n return result\n elif isinstance(tensors, torch.Tensor):\n return tensors.shape[0] if len(tensors.shape) >= 1 else None\n elif isinstance(tensors, np.ndarray):\n return tensors.shape[0] if len(tensors.shape) >= 1 else None\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 177, "n_words": 60, "vocab_size": 31, "complexity": 11, "nloc": 15, "token_counts": 126, "n_ast_nodes": 192, "n_identifiers": 17, "random_cut": "def find_batch_size(tensors):\n \n if isinstance(tensors, (list, tuple)):\n for t in tensors:\n result = find_batch_size(t)\n if result is not None:\n return result\n elif isinstance(tensors, Mapping):\n for key, value in tensors.items():\n result = find_batch_size(value)\n if result is not None:\n return result\n elif isinstance(tensors, torch.Tensor):\n return tensors.shape[0] if len(tensor", "d_id": 6828, "documentation": { "docstring": "\n Find the first dimension of a tensor in a nested list/tuple/dict of tensors.\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 20, "language": "en" } }, { "id": 37632, "commit_id": "1ac698744c4dbdf1495d303246d08ffacdf4f5b8", "repo": "transformers", "path": "src/transformers/models/yolos/feature_extraction_yolos.py", "file_name": "feature_extraction_yolos.py", "fun_name": "post_process_segmentation", "commit_message": "Add YOLOS (#16848)\n\n* First draft\r\n\r\n* Add YolosForObjectDetection\r\n\r\n* Make forward pass work\r\n\r\n* Add mid position embeddings\r\n\r\n* Add interpolation of position encodings\r\n\r\n* Add expected values\r\n\r\n* Add YOLOS to tests\r\n\r\n* Add integration test\r\n\r\n* Support tiny model as well\r\n\r\n* Support all models in conversion script\r\n\r\n* Remove mid_pe_size attribute\r\n\r\n* Make more tests pass\r\n\r\n* Add model to README and fix config\r\n\r\n* Add copied from statements\r\n\r\n* Rename base_model_prefix to vit\r\n\r\n* Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP\r\n\r\n* Apply suggestions from code review\r\n\r\n* Apply more suggestions from code review\r\n\r\n* Convert remaining checkpoints\r\n\r\n* Improve docstrings\r\n\r\n* Add YolosFeatureExtractor\r\n\r\n* Add feature extractor to docs\r\n\r\n* Add corresponding tests\r\n\r\n* Fix style\r\n\r\n* Fix docs\r\n\r\n* Apply suggestion from code review\r\n\r\n* Fix bad rebase\r\n\r\n* Fix some more bad rebase\r\n\r\n* Fix missing character\r\n\r\n* Improve docs and variable names\r\n\r\nCo-authored-by: Niels Rogge ", "code": "def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5):\n \n out_logits, raw_masks = outputs.logits, outputs.pred_masks\n preds = []\n", "url": 
"https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 16, "token_counts": 196, "n_ast_nodes": 51, "n_identifiers": 11, "random_cut": "def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5):\n \n out_logits, raw_masks = outputs.logits, outputs.pred_masks\n preds = []\n", "d_id": 6842, "documentation": { "docstring": "\n Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch.\n\n Parameters:\n outputs ([`DetrSegmentationOutput`]):\n Raw outputs of the model.\n target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):\n Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction.\n threshold (`float`, *optional*, defaults to 0.9):\n Threshold to use to filter out queries.\n mask_threshold (`float`, *optional*, defaults to 0.5):\n Threshold to use when turning the predicted masks into binary values.\n\n Returns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image\n in the batch as predicted by the model.\n ", "n_words": 101, "vocab_size": 73, "n_whitespaces": 256, "language": "en" } }, { "id": 196207, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/subsets.py", "file_name": "subsets.py", "fun_name": "iterate_graycode", "commit_message": "Updated import locations", "code": "def iterate_graycode(self, k):\n \n unranked_code = GrayCode.unrank(self.superset_size,\n (self.rank_gray + k) % self.cardinality)\n return Subset.subset_from_bitlist(self.superset,\n unranked_code)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 114, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 41, "n_ast_nodes": 64, "n_identifiers": 12, "random_cut": "def iterate_graycode(self, k):\n \n unranked_code = GrayCode.unrank(self.superset_size,\n ", "d_id": 47707, "documentation": { "docstring": "\n Helper function used for prev_gray and next_gray.\n It performs ``k`` step overs to get the respective Gray codes.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> a = Subset([1, 2, 3], [1, 2, 3, 4])\n >>> a.iterate_graycode(3).subset\n [1, 4]\n >>> a.iterate_graycode(-2).subset\n [1, 2, 4]\n\n See Also\n ========\n\n next_gray, prev_gray\n ", "n_words": 49, "vocab_size": 39, "n_whitespaces": 148, "language": "en" } }, { "id": 273879, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/rnn/gru_lstm_utils.py", "file_name": "gru_lstm_utils.py", "fun_name": "is_sequence_right_padded", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def is_sequence_right_padded(mask):\n \n max_seq_length = tf.shape(mask)[1]\n count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)\n right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length)\n return tf.reduce_all(tf.equal(mask, right_padded_mask))\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 64, "n_ast_nodes": 100, "n_identifiers": 15, 
"random_cut": "def is_sequence_right_padded(mask):\n \n max_seq_length = tf.shape(mask)[1]\n count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)\n right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length)\n ", "d_id": 81150, "documentation": { "docstring": "Check the mask tensor and see if it right padded.\n\n For cuDNN kernel, it uses the sequence length param to skip the tailing\n timestep. If the data is left padded, or not a strict right padding (has\n masked value in the middle of the sequence), then cuDNN kernel won't be work\n properly in those cases.\n\n Left padded data: [[False, False, True, True, True]].\n Right padded data: [[True, True, True, False, False]].\n Mixture of mask/unmasked data: [[True, False, True, False, False]].\n\n Note that for the mixed data example above, the actually data RNN should see\n are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not\n pollute the internal states.\n\n Args:\n mask: the Boolean tensor with shape [batch, timestep]\n\n Returns:\n boolean scalar tensor, whether the mask is strictly right padded.\n ", "n_words": 135, "vocab_size": 93, "n_whitespaces": 184, "language": "en" } }, { "id": 246321, "commit_id": "c3db7a0b59d48b8872bc24096f9a2467ef35f703", "repo": "synapse", "path": "tests/rest/client/test_third_party_rules.py", "file_name": "test_third_party_rules.py", "fun_name": "_send_event_over_federation", "commit_message": "Tests: replace mocked Authenticator with the real thing (#11913)\n\nIf we prepopulate the test homeserver with a key for a remote homeserver, we\r\ncan make federation requests to it without having to stub out the\r\nauthenticator. This has two advantages:\r\n\r\n * means that what we are testing is closer to reality (ie, we now have\r\n complete tests for the incoming-request-authorisation flow)\r\n\r\n * some tests require that other objects be signed by the remote server (eg,\r\n the event in `/send_join`), and doing that would require a whole separate\r\n set of mocking out. 
It's much simpler just to use real keys.", "code": "def _send_event_over_federation(self) -> None:\n \n body = {\n \"pdus\": [\n {\n \"sender\": self.user_id,\n \"type\": EventTypes.Message,\n \"state_key\": \"\",\n \"content\": {\"body\": \"hello world\", \"msgtype\": \"m.text\"},\n \"room_id\": self.room_id,\n \"depth\": 0,\n \"origin_server_ts\": self.clock.time_msec(),\n \"prev_events\": [],\n \"auth_events\": [],\n \"signatures\": {},\n \"unsigned\": {},\n }\n ],\n }\n\n channel = self.make_signed_federation_request(\n method=\"PUT\",\n path=\"/_matrix/federation/v1/send/1\",\n content=body,\n )\n\n self.assertEqual(channel.code, 200, channel.result)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 385, "n_words": 49, "vocab_size": 44, "complexity": 1, "nloc": 25, "token_counts": 120, "n_ast_nodes": 211, "n_identifiers": 17, "random_cut": "def _send_event_over_federation(self) -> None:\n \n body = {\n \"pdus\": [\n {\n \"sender\": self.user_id,\n \"type\": EventTypes.Message,\n \"state_key\": \"\",\n \"content\": {\"body\": \"hello world\", \"msgtype\": \"m.text\"},\n \"room_id\": self.room_id,\n \"depth\": 0,\n \"origin_server_ts\": self.clock.time_msec(),\n ", "d_id": 71154, "documentation": { "docstring": "Send a dummy event over federation and check that the request succeeds.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 100391, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "plugins/train/trainer/_base.py", "file_name": "_base.py", "fun_name": "_print_loss", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. 
Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _print_loss(self, loss):\n \n output = \", \".join([f\"Loss {side}: {side_loss:.5f}\"\n for side, side_loss in zip((\"A\", \"B\"), loss)])\n timestamp = time.strftime(\"%H:%M:%S\")\n output = f\"[{timestamp}] [#{self._model.iterations:05d}] {output}\"\n print(f\"\\r{output}\", end=\"\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 88, "n_words": 26, "vocab_size": 23, "complexity": 2, "nloc": 6, "token_counts": 55, "n_ast_nodes": 132, "n_identifiers": 15, "random_cut": "def _print_loss(self, loss):\n \n output = \", \".join([f\"Loss {side}: {side_loss:.5f}\"\n for side, side_loss in zip((\"A\", \"B\"), loss)])\n timestamp = time.strftime(\"%H:%M:%S\")\n output = f\"[{timestamp}] [#{self._model.iterations:05d}] {output}\"\n ", "d_id": 19876, "documentation": { "docstring": " Outputs the loss for the current iteration to the console.\n\n Parameters\n ----------\n loss: list\n The loss for each side. List should contain 2 ``floats`` side \"a\" in position 0 and\n side \"b\" in position `.\n ", "n_words": 35, "vocab_size": 28, "n_whitespaces": 87, "language": "en" } }, { "id": 221238, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/calendar.py", "file_name": "calendar.py", "fun_name": "itermonthdays2", "commit_message": "add python 3.10.4 for windows", "code": "def itermonthdays2(self, year, month):\n \n for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday):\n yield d, i % 7\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 3, "token_counts": 37, "n_ast_nodes": 57, "n_identifiers": 9, "random_cut": "def itermonthdays2(self, year, month):\n \n for i, d in enumerate(self.itermonthdays(year, mont", "d_id": 56285, "documentation": { "docstring": "\n Like itermonthdates(), but will yield (day number, weekday number)\n tuples. 
For days outside the specified month the day number is 0.\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 43, "language": "en" } }, { "id": 300633, "commit_id": "4885331509eeffe50f42d76b234996467b06170f", "repo": "core", "path": "tests/helpers/test_template.py", "file_name": "test_template.py", "fun_name": "test_distance_function_return_none_if_invalid_state", "commit_message": "Fail template functions when no default specified (#71687)", "code": "def test_distance_function_return_none_if_invalid_state(hass):\n \n hass.states.async_set(\"test.object_2\", \"happy\", {\"latitude\": 10})\n tpl = template.Template(\"{{ distance(states.test.object_2) | round }}\", hass)\n with pytest.raises(TemplateError):\n tpl.async_render()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 36, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 45, "n_ast_nodes": 83, "n_identifiers": 11, "random_cut": "def test_distance_function_return_none_if_invalid_state(hass):\n \n hass.states.async_set(\"test.object_2\", \"happy\", {\"latitude\": 10})\n tpl = template.Template(\"{{ distance(states.test.object_2) | round }}\", hass)\n with pytes", "d_id": 99493, "documentation": { "docstring": "Test distance function return None if invalid state.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 20890, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/typing_extensions.py", "file_name": "typing_extensions.py", "fun_name": "Concatenate", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def Concatenate(self, parameters):\n \n return _concatenate_getitem(self, parameters)\n# 3.7-8\nelif sys.version_info[:2] >= (3, 7):", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "elif sys.version_info[:2] >= (3, 7):sys", "n_ast_errors": 2, "ast_levels": 7, "n_whitespaces": 25, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 48, "n_identifiers": 7, "random_cut": "def Concatenate(self, parameters):\n \n return _con", "d_id": 3609, "documentation": { "docstring": "Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a\n higher order function which adds, removes or transforms parameters of a\n callable.\n\n For example::\n\n Callable[Concatenate[int, P], int]\n\n See PEP 612 for detailed information.\n ", "n_words": 33, "vocab_size": 32, "n_whitespaces": 78, "language": "en" } }, { "id": 119349, "commit_id": "8372b98c4856b6b2363b7bb28abdb4579440a656", "repo": "jax", "path": "tests/ann_test.py", "file_name": "ann_test.py", "fun_name": "compute_recall", "commit_message": "[JAX] Move ann.ann_recall back to tests.\n\nThe function is simple enough for users to implement their own on the host.\n\nPiperOrigin-RevId: 430696789", "code": "def 
compute_recall(result_neighbors, ground_truth_neighbors) -> float:\n \n assert len(\n result_neighbors.shape) == 2, \"shape = [num_queries, neighbors_per_query]\"\n assert len(ground_truth_neighbors.shape\n ) == 2, \"shape = [num_queries, ground_truth_neighbors_per_query]\"\n assert result_neighbors.shape[0] == ground_truth_neighbors.shape[0]\n gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors]\n hits = sum(\n len(list(x\n for x in nn_per_q\n if x.item() in gt_sets[q]))\n for q, nn_per_q in enumerate(result_neighbors))\n return hits / ground_truth_neighbors.size\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 116, "n_words": 55, "vocab_size": 37, "complexity": 5, "nloc": 25, "token_counts": 105, "n_ast_nodes": 164, "n_identifiers": 19, "random_cut": "def compute_recall(result_neighbors, ground_truth_neighbors) -> float:\n \n", "d_id": 26587, "documentation": { "docstring": "Computes the recall of an approximate nearest neighbor search.\n\n Args:\n result_neighbors: int32 numpy array of the shape [num_queries,\n neighbors_per_query] where the values are the indices of the dataset.\n ground_truth_neighbors: int32 numpy array of with shape [num_queries,\n ground_truth_neighbors_per_query] where the values are the indices of the\n dataset.\n\n Returns:\n The recall.\n ", "n_words": 49, "vocab_size": 28, "n_whitespaces": 76, "language": "en" } }, { "id": 163635, "commit_id": "a0b40c0f2ad73420a54e48ec4f564b9667e3f452", "repo": "pandas", "path": "pandas/core/arrays/datetimes.py", "file_name": "datetimes.py", "fun_name": "isocalendar", "commit_message": "EA interface: rename ExtensionArray._hasnans to ._hasna (#45519)", "code": "def isocalendar(self) -> DataFrame:\n \n from pandas import DataFrame\n\n values = self._local_timestamps()\n sarray = fields.build_isocalendar_sarray(values)\n iso_calendar_df = DataFrame(\n sarray, columns=[\"year\", \"week\", \"day\"], dtype=\"UInt32\"\n )\n if self._hasna:\n iso_calendar_df.iloc[self._isnan] = None\n return iso_calendar_df\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 108, "n_words": 30, "vocab_size": 26, "complexity": 2, "nloc": 44, "token_counts": 64, "n_ast_nodes": 109, "n_identifiers": 15, "random_cut": "def isocalendar(self) -> DataFrame:\n \n from pandas import DataFrame\n\n values = self._local_timestamps()\n sarray = fields.build_isocalendar_sarray(values)\n iso_calendar_df = DataFrame(\n sarray, columns=[\"year\", \"week\", \"day\"], dtype=\"UInt32\"\n )\n if self._hasna:\n iso_calendar_df.iloc[self._isnan] = None\n", "d_id": 39469, "documentation": { "docstring": "\n Returns a DataFrame with the year, week, and day calculated according to\n the ISO 8601 standard.\n\n .. 
versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n with columns year, week and day\n\n See Also\n --------\n Timestamp.isocalendar : Function return a 3-tuple containing ISO year,\n week number, and weekday for the given Timestamp object.\n datetime.date.isocalendar : Return a named tuple object with\n three components: year, week and weekday.\n\n Examples\n --------\n >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)\n >>> idx.isocalendar()\n year week day\n 2019-12-29 2019 52 7\n 2019-12-30 2020 1 1\n 2019-12-31 2020 1 2\n 2020-01-01 2020 1 3\n >>> idx.isocalendar().week\n 2019-12-29 52\n 2019-12-30 1\n 2019-12-31 1\n 2020-01-01 1\n Freq: D, Name: week, dtype: UInt32\n ", "n_words": 108, "vocab_size": 70, "n_whitespaces": 384, "language": "en" } }, { "id": 243180, "commit_id": "a37593f004247ebf69d5582524da6dc5143cb023", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "putpixel", "commit_message": "Allow RGB and RGBA values for PA image putpixel", "code": "def putpixel(self, xy, value):\n \n\n if self.readonly:\n self._copy()\n self.load()\n\n if self.pyaccess:\n return self.pyaccess.putpixel(xy, value)\n\n if (\n self.mode in (\"P\", \"PA\")\n and isinstance(value, (list, tuple))\n and len(value) in [3, 4]\n ):\n # RGB or RGBA value for a P or PA image\n if self.mode == \"PA\":\n alpha = value[3] if len(value) == 4 else 255\n value = value[:3]\n value = self.palette.getcolor(value, self)\n if self.mode == \"PA\":\n value = (value, alpha)\n return self.im.putpixel(xy, value)\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 264, "n_words": 71, "vocab_size": 49, "complexity": 9, "nloc": 18, "token_counts": 142, "n_ast_nodes": 225, "n_identifiers": 17, "random_cut": "def putpixel(self, xy, value):\n \n\n if self.readonly:\n self._copy()\n self.load()\n\n if self.pyaccess:\n return self.pyaccess.putpixel(xy, value)\n\n if (\n self.mode in (\"P\", \"PA\")\n and isinstance(value, (list, tuple))\n and len(value) in [3, 4]\n ):\n # RGB or RGBA value for a P or PA image\n if self.mode == \"PA\":\n alpha = value[3] if len(value) == 4 else 255\n valu", "d_id": 70002, "documentation": { "docstring": "\n Modifies the pixel at the given position. The color is given as\n a single numerical value for single-band images, and a tuple for\n multi-band images. In addition to this, RGB and RGBA tuples are\n accepted for P and PA images.\n\n Note that this method is relatively slow. For more extensive changes,\n use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`\n module instead.\n\n See:\n\n * :py:meth:`~PIL.Image.Image.paste`\n * :py:meth:`~PIL.Image.Image.putdata`\n * :py:mod:`~PIL.ImageDraw`\n\n :param xy: The pixel coordinate, given as (x, y). 
See\n :ref:`coordinate-system`.\n :param value: The pixel value.\n ", "n_words": 81, "vocab_size": 60, "n_whitespaces": 191, "language": "en" } }, { "id": 300441, "commit_id": "11cc1feb853bcfd9633ebfc44eae142c10a7f983", "repo": "core", "path": "tests/components/template/test_switch.py", "file_name": "test_switch.py", "fun_name": "test_available_template_with_entities", "commit_message": "Tweak template switch tests (#71738)", "code": "async def test_available_template_with_entities(hass):\n \n await setup.async_setup_component(\n hass,\n \"switch\",\n {\n \"switch\": {\n \"platform\": \"template\",\n \"switches\": {\n \"test_template_switch\": {\n **OPTIMISTIC_SWITCH_CONFIG,\n \"value_template\": \"{{ 1 == 1 }}\",\n \"availability_template\": \"{{ is_state('availability_state.state', 'on') }}\",\n }\n },\n }\n },\n )\n\n await hass.async_block_till_done()\n await hass.async_start()\n await hass.async_block_till_done()\n\n hass.states.async_set(\"availability_state.state\", STATE_ON)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"switch.test_template_switch\").state != STATE_UNAVAILABLE\n\n hass.states.async_set(\"availability_state.state\", STATE_OFF)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"switch.test_template_switch\").state == STATE_UNAVAILABLE\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 293, "n_words": 55, "vocab_size": 34, "complexity": 1, "nloc": 26, "token_counts": 123, "n_ast_nodes": 224, "n_identifiers": 14, "random_cut": "async def test_available_template_with_entities(hass):\n \n await setup.async_setup_component(\n hass,\n \"switch\",\n {\n \"switch\": {\n \"platform\": \"template\",\n \"switches\": {\n \"test_template_switch\": {\n **OPTIMISTIC_SWITCH_CONFIG,\n ", "d_id": 99301, "documentation": { "docstring": "Test availability templates with values from other entities.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 282896, "commit_id": "50cafd500ece43df98e3cf076d81084b2806ea03", "repo": "OpenBBTerminal", "path": "bots/etf/tops.py", "file_name": "tops.py", "fun_name": "etfs_disc_command", "commit_message": "Discord bot massive improvement (#1481)\n\n* allow logs feature flag\r\n\r\n* Adding log collection md\r\n\r\n* upload last log at startup\r\n\r\n* additions/refractor\r\n\r\n* refactor\r\n\r\n* lint/black ++\r\n\r\n* disc\r\n\r\n* TimeRotating Logger and upload to s3\r\n\r\n* corrected regex error\r\n\r\n* makeup for config\r\n\r\n* logging/disc/sia/etf/++\r\n\r\n* append .log before uploading\r\n\r\n* process to upload logs to s3\r\n\r\n* candle ta/etfmcds\r\n\r\n* fix\r\n\r\n* ta candles\r\n\r\n* implement presignedURL\r\n\r\n* fixed regex\r\n\r\n* ma's in 1 cmd, delete older files\r\n\r\n* refactor ta candle\r\n\r\n* updates\r\n\r\n* black\r\n\r\n* moon?\r\n\r\n* Logger uploader\r\n\r\n* rotate every hour\r\n\r\n* only archive if successful\r\n\r\n* chavis suggestions\r\n\r\n* windows\r\n\r\n* ta\r\n\r\n* commands_dict update\r\n\r\n* discord tacmds\r\n\r\n* log_collection error fix\r\n\r\n* fix\r\n\r\n* fix\r\n\r\n* pylint\r\n\r\n* bb fix\r\n\r\n* only log filesize\r\n\r\n* fixes\r\n\r\n* discord logs\r\n\r\n* Delete log_collection.md\r\n\r\n* fixes for other bots on images\r\n\r\n* bots image upload fix\r\n\r\n* updated helpers/load candle\r\n\r\n* more ta cc/housekeeping/refactors/slashcmds\r\n\r\n* update bots cmds_dict\r\n\r\n* adjustments to font size/fixes\r\n\r\n* test 
fixs/disc earnings\r\n\r\n* missed a spot\r\n\r\n* fixes had > revesred\r\n\r\n* reversed the >< again oops\r\n\r\n* remove logger branch code blocking tests\r\n\r\n* black fix\r\n\r\n* fix missing sources in docstr/daily candle dt tz\r\n\r\n* load_candle refactor with docstring\r\n\r\n* moved insiders to disc\r\n\r\n* Lucas logging changes\r\n\r\n* Fixing log_collection.md\r\n\r\n* testing scenario\r\n\r\n* more ta converted\r\n\r\n* more ta\r\n\r\n* Update config_terminal : remove print of verbosity\r\n\r\n* table cfg/fix matplt/ screener +\r\n\r\n* fix\r\n\r\n* what's sleep? 1 more line.. or 2. scr df2img\r\n\r\n* juan more. fix news 1m chart issue\r\n\r\n* ticker.upper() fixes\r\n\r\n* Update log collection\r\n\r\n* Updating log collection - change tmp folder\r\n\r\nCo-authored-by: LBolte29 \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: LBolte29 <97528701+LBolte29@users.noreply.github.com>\r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: didierlopes.eth ", "code": "def etfs_disc_command(sort=\"\"):\n \n\n # Debug\n if cfg.DEBUG:\n logger.debug(\"etfs\")\n\n df_etfs = wsj_model.etf_movers(sort, export=True)\n\n if df_etfs.empty:\n raise Exception(\"No available data found\")\n\n df_etfs.set_index(\" \", inplace=True)\n prfx = \"Top\"\n if sort == \"active\":\n prfx = \"Most\"\n title = f\"ETF Movers ({prfx} {sort.capitalize()})\"\n\n dindex = len(df_etfs.index)\n if dindex > 15:\n embeds: list = []\n # Output\n i, i2, end = 0, 0, 15\n df_pg, embeds_img, images_list = [], [], []\n while i < dindex:\n df_pg = df_etfs.iloc[i:end]\n df_pg.append(df_pg)\n fig = df2img.plot_dataframe(\n df_pg,\n fig_size=(1200, (40 + (40 * dindex))),\n col_width=[1, 9, 1.5, 1.5, 1.5, 1.5],\n tbl_header=cfg.PLT_TBL_HEADER,\n tbl_cells=cfg.PLT_TBL_CELLS,\n font=cfg.PLT_TBL_FONT,\n row_fill_color=cfg.PLT_TBL_ROW_COLORS,\n paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n )\n fig.update_traces(cells=(dict(align=[\"left\"])))\n imagefile = \"disc-etfs.png\"\n imagefile = helpers.save_image(imagefile, fig)\n\n if cfg.IMAGES_URL or cfg.IMGUR_CLIENT_ID != \"REPLACE_ME\":\n image_link = cfg.IMAGES_URL + imagefile\n images_list.append(imagefile)\n else:\n imagefile_save = cfg.IMG_DIR / imagefile\n uploaded_image = gst_imgur.upload_image(\n imagefile_save, title=\"something\"\n )\n image_link = uploaded_image.link\n os.remove(imagefile_save)\n\n embeds_img.append(\n f\"{image_link}\",\n )\n embeds.append(\n disnake.Embed(\n title=title,\n colour=cfg.COLOR,\n ),\n )\n i2 += 1\n i += 15\n end += 15\n\n # Author/Footer\n for i in range(0, i2):\n embeds[i].set_author(\n name=cfg.AUTHOR_NAME,\n url=cfg.AUTHOR_URL,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n embeds[i].set_footer(\n text=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n i = 0\n for i in range(0, i2):\n embeds[i].set_image(url=embeds_img[i])\n\n i += 1\n embeds[0].set_footer(text=f\"Page 1 of {len(embeds)}\")\n choices = [\n disnake.SelectOption(label=\"Home\", value=\"0\", emoji=\"🟢\"),\n ]\n\n output = {\n \"view\": Menu,\n \"title\": title,\n \"embed\": embeds,\n \"choices\": choices,\n \"embeds_img\": embeds_img,\n \"images_list\": images_list,\n }\n else:\n fig = df2img.plot_dataframe(\n df_etfs,\n fig_size=(1200, (40 + (40 * dindex))),\n col_width=[1, 9, 1.5, 1.5, 1.5, 1.5],\n tbl_header=cfg.PLT_TBL_HEADER,\n tbl_cells=cfg.PLT_TBL_CELLS,\n font=cfg.PLT_TBL_FONT,\n row_fill_color=cfg.PLT_TBL_ROW_COLORS,\n paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n )\n fig.update_traces(cells=(dict(align=[\"left\"])))\n imagefile = 
helpers.save_image(\"disc-etfs.png\", fig)\n\n output = {\n \"title\": title,\n \"imagefile\": imagefile,\n }\n\n return output\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1278, "n_words": 247, "vocab_size": 146, "complexity": 10, "nloc": 98, "token_counts": 599, "n_ast_nodes": 945, "n_identifiers": 84, "random_cut": "def etfs_disc_command(sort=\"\"):\n \n\n # Debug\n i", "d_id": 84345, "documentation": { "docstring": "Displays ETF's Top Gainers/Decliners, Most Active [Wall Street Journal]", "n_words": 9, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 153753, "commit_id": "49fc2cf3733f20ac6cf8a7c61e42ef7aa5cf4b03", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py", "file_name": "partition.py", "fun_name": "get", "commit_message": "FEAT-#4371: Add logging to Modin (#4372)\n\nCo-authored-by: Devin Petersohn \r\nCo-authored-by: Mahesh Vashishtha \r\nCo-authored-by: Anatoly Myachev \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Naren Krishna ", "code": "def get(self):\n \n logger = get_logger()\n logger.debug(f\"ENTER::Partition.get::{self._identity}\")\n if len(self.call_queue):\n self.drain_call_queue()\n result = ray.get(self.oid)\n logger.debug(f\"EXIT::Partition.get::{self._identity}\")\n return result\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 15, "vocab_size": 13, "complexity": 2, "nloc": 8, "token_counts": 50, "n_ast_nodes": 101, "n_identifiers": 12, "random_cut": "def get(self):\n \n ", "d_id": 35584, "documentation": { "docstring": "\n Get the object wrapped by this partition out of the Plasma store.\n\n Returns\n -------\n pandas.DataFrame\n The object from the Plasma store.\n ", "n_words": 21, "vocab_size": 16, "n_whitespaces": 68, "language": "en" } }, { "id": 47660, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/sensors/test_external_task_sensor.py", "file_name": "test_external_task_sensor.py", "fun_name": "dag_bag_ext", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def dag_bag_ext():\n \n clear_db_runs()\n\n dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)\n\n dag_0 = DAG(\"dag_0\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_0 = EmptyOperator(task_id=\"task_a_0\", dag=dag_0)\n task_b_0 = ExternalTaskMarker(\n task_id=\"task_b_0\", external_dag_id=\"dag_1\", external_task_id=\"task_a_1\", recursion_depth=3, dag=dag_0\n )\n task_a_0 >> task_b_0\n\n dag_1 = DAG(\"dag_1\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_1 = ExternalTaskSensor(\n task_id=\"task_a_1\", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1\n )\n task_b_1 = ExternalTaskMarker(\n task_id=\"task_b_1\", external_dag_id=\"dag_2\", external_task_id=\"task_a_2\", recursion_depth=2, dag=dag_1\n )\n task_a_1 >> task_b_1\n\n dag_2 = DAG(\"dag_2\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_2 = ExternalTaskSensor(\n task_id=\"task_a_2\", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2\n )\n task_b_2 = ExternalTaskMarker(\n task_id=\"task_b_2\", external_dag_id=\"dag_3\", external_task_id=\"task_a_3\", recursion_depth=1, dag=dag_2\n )\n task_a_2 
>> task_b_2\n\n dag_3 = DAG(\"dag_3\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_3 = ExternalTaskSensor(\n task_id=\"task_a_3\", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3\n )\n task_b_3 = EmptyOperator(task_id=\"task_b_3\", dag=dag_3)\n task_a_3 >> task_b_3\n\n for dag in [dag_0, dag_1, dag_2, dag_3]:\n dag_bag.bag_dag(dag=dag, root_dag=dag)\n\n yield dag_bag\n\n clear_db_runs()\n\n\n@pytest.fixture", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 243, "n_words": 111, "vocab_size": 69, "complexity": 2, "nloc": 35, "token_counts": 290, "n_ast_nodes": 460, "n_identifiers": 36, "random_cut": "def dag_bag_ext():\n \n clear_db_runs()\n\n dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)\n\n dag_0 = DAG(\"dag_0\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_0 = EmptyOperator(task_id=\"task_a_0\", dag=dag_0)\n task_b_0 = ExternalTaskMarker(\n task_id=\"task_b_0\", external_dag_id=\"dag_1\", external_task_id=\"task_a_1\", recursion_depth=3, dag=dag_0\n )\n task_a_0 >> task_b_0\n\n dag_1 = DAG(\"dag_1\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_1 = ExternalTaskSensor(\n task_id=\"task_a_1\", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1\n )\n task_b_1 = ExternalTaskMarker(\n task_id=\"task_b_1\", external_dag_id=\"dag_2\", external_task_id=\"task_a_2\", recursion_depth=2, dag=dag_1\n )\n task_a_1 >> task_b_1\n\n dag_2 = DAG(\"dag_2\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_2 = ExternalTaskSensor(\n task_id=\"task_a_2\", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2\n )\n task_b_2 = ExternalTaskMarker(\n task_id=\"task_b_2\", external_dag_id=\"dag_3\", external_task_id=\"task_a_3\", recursion_depth=1, dag=dag_2\n )\n task_a_2 >> task_b_2\n\n dag_3 = DAG(\"dag_3\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_3 = ExternalTaskSensor(\n task_id=\"task_a_3\", external_dag_id=dag_2.", "d_id": 9197, "documentation": { "docstring": "\n Create a DagBag with DAGs looking like this. 
The dotted lines represent external dependencies\n set up using ExternalTaskMarker and ExternalTaskSensor.\n\n dag_0: task_a_0 >> task_b_0\n |\n |\n dag_1: ---> task_a_1 >> task_b_1\n |\n |\n dag_2: ---> task_a_2 >> task_b_2\n |\n |\n dag_3: ---> task_a_3 >> task_b_3\n ", "n_words": 45, "vocab_size": 35, "n_whitespaces": 480, "language": "en" } }, { "id": 107160, "commit_id": "ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_constrainedlayout.py", "file_name": "test_constrainedlayout.py", "fun_name": "test_constrained_layout3", "commit_message": "ENH: implement and use base layout_engine for more flexible layout.", "code": "def test_constrained_layout3():\n \n\n fig, axs = plt.subplots(2, 2, layout=\"constrained\")\n for nn, ax in enumerate(axs.flat):\n pcm = example_pcolor(ax, fontsize=24)\n if nn == 3:\n pad = 0.08\n else:\n pad = 0.02 # default\n fig.colorbar(pcm, ax=ax, pad=pad)\n\n\n@image_comparison(['constrained_layout4.png'])", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@image_comparison(['constrained_layout4.png'])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 93, "n_words": 34, "vocab_size": 30, "complexity": 3, "nloc": 9, "token_counts": 74, "n_ast_nodes": 127, "n_identifiers": 16, "random_cut": "def test_constrained_layout3():\n \n\n fig, axs = plt.subplots(2, 2, layout=\"constrained\")\n for nn, ax in enumerate(axs.flat):\n pcm = example_pcolor(ax, fontsize=24)\n if nn == 3:\n pad = 0.08\n else:\n pad = 0.02 # default\n fig.colorbar(pcm, ax=ax, pad=pad)\n\n\n@image_comparison(['constraine", "d_id": 22615, "documentation": { "docstring": "Test constrained_layout for colorbars with subplots", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 167771, "commit_id": "f65417656ba8c59438d832b6e2a431f78d40c21c", "repo": "pandas", "path": "pandas/core/groupby/groupby.py", "file_name": "groupby.py", "fun_name": "indices", "commit_message": "TYP: more return annotations in core/ (#47618)\n\n* TYP: more return annotations in core/\r\n\r\n* from __future__ import annotations\r\n\r\n* more __future__", "code": "def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:\n \n return self.grouper.indices\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 5, "token_counts": 26, "n_ast_nodes": 41, "n_identifiers": 9, "random_cut": "def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:\n \n return self.grouper.indi", "d_id": 40114, "documentation": { "docstring": "\n Dict {group name -> group indices}.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 61950, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "get_hash", "commit_message": "upd; format", "code": "def get_hash(self, data, hasher=None):\n \n if hasher is None:\n hasher = self.hasher\n if hasher is None:\n hasher = hashlib.md5\n prefix = ''\n else:\n hasher = getattr(hashlib, hasher)\n prefix = '%s=' % self.hasher\n digest = hasher(data).digest()\n digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')\n return '%s%s' % (prefix, digest)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 146, "n_words": 42, "vocab_size": 25, "complexity": 3, "nloc": 12, "token_counts": 89, "n_ast_nodes": 151, "n_identifiers": 13, "random_cut": "def get_hash(self, data, hasher=None):\n \n if ", "d_id": 12773, "documentation": { "docstring": "\n Get the hash of some data, using a particular hash algorithm, if\n specified.\n\n :param data: The data to be hashed.\n :type data: bytes\n :param hasher: The name of a hash implementation, supported by hashlib,\n or ``None``. Examples of valid values are ``'sha1'``,\n ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and\n ``'sha512'``. If no hasher is specified, the ``hasher``\n attribute of the :class:`InstalledDistribution` instance\n is used. If the hasher is determined to be ``None``, MD5\n is used as the hashing algorithm.\n :returns: The hash of the data. If a hasher was explicitly specified,\n the returned hash will be prefixed with the specified hasher\n followed by '='.\n :rtype: str\n ", "n_words": 104, "vocab_size": 70, "n_whitespaces": 327, "language": "en" } }, { "id": 135565, "commit_id": "d329147ae28c57b290f6b932f9f3044523f67c4e", "repo": "ray", "path": "rllib/utils/tests/test_actor_manager.py", "file_name": "test_actor_manager.py", "fun_name": "test_async_call_same_actor_multiple_times", "commit_message": "[RLlib] Introduce FaultTolerantActorManager (#29703)\n\nSigned-off-by: Jun Gong ", "code": "def test_async_call_same_actor_multiple_times(self):\n \n actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n # 2 asynchronous call to actor 0.\n num_of_calls = manager.foreach_actor_async(\n lambda w: w.call(),\n healthy_only=False,\n remote_actor_indices=[0, 0],\n )\n self.assertEqual(num_of_calls, 2)\n\n # Now, let's actually fetch the results.\n results = manager.fetch_ready_async_reqs(timeout_seconds=None)\n # Returns 1 and 2, representing the first and second calls to actor 0.\n self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2])\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 173, "n_words": 63, "vocab_size": 51, "complexity": 3, "nloc": 11, "token_counts": 107, "n_ast_nodes": 168, "n_identifiers": 23, "random_cut": "def test_async_call_same_actor_multiple_times(self):\n \n actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n # 2 asynchronous call to actor 0.\n num_of_calls = manager.foreach_actor_async(\n lambda w: w.call(),\n h", "d_id": 30656, "documentation": { "docstring": "Test multiple asynchronous remote calls to the same actor.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 101313, "commit_id": "9e503bdaa2bfe2baaea50ad2e4bf742f309d9d10", "repo": "faceswap", "path": "scripts/fsmedia.py", "file_name": "fsmedia.py", "fun_name": "_load", "commit_message": "bugfix: debug landmarks", "code": "def _load(self):\n \n data = {}\n if not self._is_extract:\n if not self.have_alignments_file:\n return data\n data = super()._load()\n return data\n\n skip_existing = hasattr(self._args, 'skip_existing') and self._args.skip_existing\n skip_faces = hasattr(self._args, 'skip_faces') and self._args.skip_faces\n\n if not skip_existing and not skip_faces:\n logger.debug(\"No skipping selected. 
Returning empty dictionary\")\n return data\n\n if not self.have_alignments_file and (skip_existing or skip_faces):\n logger.warning(\"Skip Existing/Skip Faces selected, but no alignments file found!\")\n return data\n\n data = super()._load()\n\n if skip_faces:\n # Remove items from alignments that have no faces so they will\n # be re-detected\n del_keys = [key for key, val in data.items() if not val[\"faces\"]]\n logger.debug(\"Frames with no faces selected for redetection: %s\", len(del_keys))\n for key in del_keys:\n if key in data:\n logger.trace(\"Selected for redetection: '%s'\", key)\n del data[key]\n return data\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 389, "n_words": 119, "vocab_size": 73, "complexity": 15, "nloc": 24, "token_counts": 171, "n_ast_nodes": 290, "n_identifiers": 19, "random_cut": "def _load(self):\n \n data = {}\n if not self._is_extract:\n if not self.have_alignments_file:\n return data\n data = super()._load()\n return data\n\n skip_existing = hasattr(self._args, 'skip_existing') and self._args.skip_existing\n skip_faces = hasattr(self._args, 'skip_faces') and self._args.skip_faces\n\n if not skip_existing and not skip_faces:\n logge", "d_id": 20731, "documentation": { "docstring": " Override the parent :func:`~lib.align.Alignments._load` to handle skip existing\n frames and faces on extract.\n\n If skip existing has been selected, existing alignments are loaded and returned to the\n calling script.\n\n Returns\n -------\n dict\n Any alignments that have already been extracted if skip existing has been selected\n otherwise an empty dictionary\n ", "n_words": 49, "vocab_size": 37, "n_whitespaces": 121, "language": "en" } }, { "id": 285147, "commit_id": "bd12c203a0585dab6ca3ff81c3b4500e088b41d6", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/discovery/yahoofinance_model.py", "file_name": "yahoofinance_model.py", "fun_name": "get_gtech", "commit_message": "Fixed bad yfinance urls (#2282)", "code": "def get_gtech() -> pd.DataFrame:\n \n return get_df(\n \"https://finance.yahoo.com/screener/predefined/growth_technology_stocks\"\n )\n\n\n@log_start_end(log=logger)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@log_start_end(log=logger)", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 24, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 11, "token_counts": 14, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def get_gtech() -> pd.DataFrame:\n \n return get_df(\n \"https://finance.y", "d_id": 85189, "documentation": { "docstring": "Get technology stocks with revenue and earnings growth in excess of 25%. 
[Source: Yahoo Finance]\n\n Returns\n -------\n pd.DataFrame\n Growth technology stocks\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 40, "language": "en" } }, { "id": 35457, "commit_id": "29c10a41d04f855c433a6cde7797b325651417d2", "repo": "transformers", "path": "tests/encoder_decoder/test_modeling_encoder_decoder.py", "file_name": "test_modeling_encoder_decoder.py", "fun_name": "test_bert2gpt2_summarization", "commit_message": "[Test refactor 1/5] Per-folder tests reorganization (#15725)\n\n* Per-folder tests reorganization\r\n\r\nCo-authored-by: sgugger \r\nCo-authored-by: Stas Bekman ", "code": "def test_bert2gpt2_summarization(self):\n model = EncoderDecoderModel.from_pretrained(\"patrickvonplaten/bert2gpt2-cnn_dailymail-fp16\")\n\n model.to(torch_device)\n tokenizer_in = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n tokenizer_out = AutoTokenizer.from_pretrained(\"../gpt2\")\n\n ARTICLE_STUDENTS = \n\n EXPECTED_SUMMARY_STUDENTS = \n\n input_dict = tokenizer_in(ARTICLE_STUDENTS, return_tensors=\"pt\")\n output_ids = model.generate(input_dict[\"input_ids\"].to(torch_device))\n summary = tokenizer_out.batch_decode(output_ids, skip_special_tokens=True)\n\n self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS])\n\n\n@require_torch", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@require_torch", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 100, "n_words": 30, "vocab_size": 23, "complexity": 1, "nloc": 11, "token_counts": 89, "n_ast_nodes": 162, "n_identifiers": 21, "random_cut": "def test_bert2gpt2_summarization(self):\n model = EncoderDecoderModel.from_pretrained(\"patrickvonplaten/bert2gpt2-cnn_dailymail-fp16\")\n\n model.to(torch_device)\n tokenizer_in = AutoTokenizer.from_pretrained(\"bert-base-case", "d_id": 6455, "documentation": { "docstring": "(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which \"369 went to war for the Confederate States and seven for the Union Army,\" the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 \"colonies\" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for \"health and safety incidents\" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. 
Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. \"The media has labeled us as the 'nation's deadliest fraternity,' \" Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. \"As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world,\" Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.SAS Alpha Epsilon suspended the students, but university president says it's permanent.\\nThe fraternity has had to deal with a string of student deaths since 2010.\\nSAS has more than 200,000 members, many of whom are students.\\nA student died while being forced into excessive alcohol consumption.", "n_words": 403, "vocab_size": 251, "n_whitespaces": 402, "language": "en" } }, { "id": 30056, "commit_id": "3981ae09888569eafe9cbb3a0c659dd337028fa4", "repo": "saleor", "path": "saleor/permission/management.py", "file_name": "management.py", "fun_name": "_get_builtin_permissions", "commit_message": "Move create_permission post migrate signal", "code": "def _get_builtin_permissions(opts): # noqa: D205, D212\n \n perms = []\n for action in opts.default_permissions:\n perms.append(\n (\n get_permission_codename(action, opts),\n \"Can %s %s\" % (action, opts.verbose_name_raw),\n )\n )\n return perms\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 106, "n_words": 27, "vocab_size": 25, "complexity": 2, "nloc": 10, "token_counts": 43, "n_ast_nodes": 70, "n_identifiers": 8, "random_cut": "def _get_builtin_permissions(opts): # noqa: D205, D212\n \n perms = []\n for action in opts.default_permissions:\n perms.append(\n (\n get_permission_codename(action, opts),\n \"Can %s %s\" % (action, opts.verbose_name_raw),\n )\n )\n", "d_id": 5294, "documentation": { "docstring": "\n Return (codename, name) for all autogenerated permissions.\n By default, this is ('add', 'change', 'delete', 'view')\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 337787, "commit_id": "86ce737d7fc94f8000dbd5e13021d0411bb4204a", "repo": "accelerate", "path": "src/accelerate/accelerator.py", "file_name": "accelerator.py", "fun_name": "accumulate", "commit_message": "Introduce automatic gradient accumulation wrapper + fix a few test issues (#484)\n\n* Have accelerator handle gradient accumulation\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def accumulate(self, model):\n \n self._do_sync()\n if self.sync_gradients:\n context = contextlib.nullcontext\n else:\n context = self.no_sync\n\n with context(model):\n yield\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 84, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 8, "token_counts": 37, "n_ast_nodes": 67, "n_identifiers": 9, "random_cut": "def accumulate(self, model):\n \n self._do_sync()\n if self.sync_gradients:\n context = contextl", "d_id": 121117, "documentation": { "docstring": "\n A context manager that will lightly wrap around and perform 
gradient accumulation automatically\n\n Args:\n model (`torch.nn.Module`):\n PyTorch Module that was prepared with `Accelerator.prepare`\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 71, "language": "en" } }, { "id": 223525, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "get_attribute", "commit_message": "add python 3.10.4 for windows", "code": "def get_attribute(value):\n \n attribute = Attribute()\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n attribute.append(token)\n if value and value[0] in ATTRIBUTE_ENDS:\n raise errors.HeaderParseError(\n \"expected token but found '{}'\".format(value))\n token, value = get_attrtext(value)\n attribute.append(token)\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n attribute.append(token)\n return attribute, value\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 118, "n_words": 48, "vocab_size": 25, "complexity": 7, "nloc": 14, "token_counts": 99, "n_ast_nodes": 163, "n_identifiers": 13, "random_cut": "def get_attribute(value):\n \n attribute = Attribute()\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n attribute.append(token)\n if value and value[0] in ATTRIBUTE_ENDS:\n raise errors.H", "d_id": 56951, "documentation": { "docstring": " [CFWS] 1*attrtext [CFWS]\n\n This version of the BNF makes the CFWS explicit, and as usual we use a\n value terminal for the actual run of characters. The RFC equivalent of\n attrtext is the token characters, with the subtraction of '*', \"'\", and '%'.\n We include tab in the excluded set just as we do for token.\n\n ", "n_words": 56, "vocab_size": 43, "n_whitespaces": 73, "language": "en" } }, { "id": 154378, "commit_id": "5086a9ea37bc37e6e58da0ceaf5864b16cc8e0ed", "repo": "modin", "path": "modin/pandas/test/test_io.py", "file_name": "test_io.py", "fun_name": "eval_to_file", "commit_message": "TEST-#4879: Use pandas `ensure_clean()` in place of `io_tests_data` (#4881)\n\nSigned-off-by: Karthik Velayutham ", "code": "def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs):\n \n with ensure_clean_dir() as dirname:\n unique_filename_modin = get_unique_filename(\n extension=extension, data_dir=dirname\n )\n unique_filename_pandas = get_unique_filename(\n extension=extension, data_dir=dirname\n )\n\n # parameter `max_retries=0` is set for `to_csv` function on Ray engine,\n # in order to increase the stability of tests, we repeat the call of\n # the entire function manually\n last_exception = None\n for _ in range(3):\n try:\n getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs)\n except EXCEPTIONS as exc:\n last_exception = exc\n continue\n break\n else:\n raise last_exception\n\n getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs)\n\n assert assert_files_eq(unique_filename_modin, unique_filename_pandas)\n\n\n@pytest.fixture", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 282, "n_words": 82, "vocab_size": 63, "complexity": 3, "nloc": 20, "token_counts": 104, "n_ast_nodes": 176, "n_identifiers": 21, "random_cut": "def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs):\n \n with ensure_clean_dir() as dirname:\n unique_filename_modin = 
get_unique_filename(\n extension=extension, data_dir=dirname\n )\n unique_filename_pandas = get_unique_filename(\n extension=extension, data_dir=dirname\n )\n\n # parameter `max_retries=0` is set for `to_csv` function on Ray engine,\n # in order to increase the stability of tests, we repeat the call of\n # the entire function manually\n last_exception = None\n for _ in range(3):\n try:\n getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs)\n except EXCEPTIONS as exc:\n last_exception = exc\n continue\n break\n else:\n raise last_exception\n\n getattr(pandas_obj, fn)(unique_", "d_id": 35948, "documentation": { "docstring": "Helper function to test `to_` methods.\n\n Args:\n modin_obj: Modin DataFrame or Series to test `to_` method.\n pandas_obj: Pandas DataFrame or Series to test `to_` method.\n fn: name of the method, that should be tested.\n extension: Extension of the test file.\n ", "n_words": 40, "vocab_size": 27, "n_whitespaces": 74, "language": "en" } }, { "id": 203731, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/contenttypes/fields.py", "file_name": "fields.py", "fun_name": "_is_matching_generic_foreign_key", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _is_matching_generic_foreign_key(self, field):\n \n return (\n isinstance(field, GenericForeignKey)\n and field.ct_field == self.content_type_field_name\n and field.fk_field == self.object_id_field_name\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 70, "n_words": 16, "vocab_size": 14, "complexity": 3, "nloc": 6, "token_counts": 33, "n_ast_nodes": 52, "n_identifiers": 9, "random_cut": "def _is_matching_generic_foreign_key(self, field):\n \n return (\n isinstance(field, GenericForeignKey)\n and field.ct_field == self.content_type_field_name\n and field.fk_field == self.object_id_field_name\n )\n", "d_id": 50522, "documentation": { "docstring": "\n Return True if field is a GenericForeignKey whose content type and\n object id fields correspond to the equivalent attributes on this\n GenericRelation.\n ", "n_words": 22, "vocab_size": 22, "n_whitespaces": 51, "language": "en" } }, { "id": 101207, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "lib/align/alignments.py", "file_name": "alignments.py", "fun_name": "hashes_to_frame", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def hashes_to_frame(self):\n \n if not self._hashes_to_frame:\n logger.debug(\"Generating hashes to frame\")\n for frame_name, val in self._data.items():\n for idx, face in enumerate(val[\"faces\"]):\n self._hashes_to_frame.setdefault(face[\"hash\"], {})[frame_name] = idx\n return self._hashes_to_frame\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 102, "n_words": 25, "vocab_size": 23, "complexity": 4, "nloc": 7, "token_counts": 67, "n_ast_nodes": 112, "n_identifiers": 13, "random_cut": "def hashes_to_frame(self):\n \n if not self._hashes_to_frame:\n logger.debug(\"Generating hashes to frame\")\n for frame_name, val in self._data.items():\n for idx, face in 
enumerate(val[\"faces\"]):\n sel", "d_id": 20628, "documentation": { "docstring": " dict: The SHA1 hash of the face mapped to the frame(s) and face index within the frame\n that the hash corresponds to. The structure of the dictionary is:\n\n {**SHA1_hash** (`str`): {**filename** (`str`): **face_index** (`int`)}}.\n\n Notes\n -----\n This method is depractated and exists purely for updating legacy hash based alignments\n to new png header storage in :class:`lib.align.update_legacy_png_header`.\n\n The first time this property is referenced, the dictionary will be created and cached.\n Subsequent references will be made to this cached dictionary.\n ", "n_words": 79, "vocab_size": 58, "n_whitespaces": 143, "language": "en" } }, { "id": 320770, "commit_id": "a20bb67a878b2e68abf8268c1b0a27f018d01352", "repo": "qutebrowser", "path": "qutebrowser/completion/completiondelegate.py", "file_name": "completiondelegate.py", "fun_name": "_get_textdoc", "commit_message": "mypy: Upgrade to PyQt5-stubs 5.15.6.0\n\nFor some unknown reason, those new stubs cause a *lot* of things now to be\nchecked by mypy which formerly probably got skipped due to Any being implied\nsomewhere.\n\nThe stubs themselves mainly improved, with a couple of regressions too.\n\nIn total, there were some 337 (!) new mypy errors. This commit fixes almost all\nof them, and the next commit improves a fix to get things down to 0 errors\nagain.\n\nOverview of the changes:\n\n==== qutebrowser/app.py\n\n- Drop type ignore due to improved stubs.\n\n==== qutebrowser/browser/browsertab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n This is debatable: I suppose the abstract stuff shouldn't need to know\n anything about the concrete backends at all. But it seems like we cut some\n corners when initially implementing things, and put some code in browsertab.py\n just because the APIs of both backends happened to be compatible. Perhaps\n something to reconsider once we drop QtWebKit and hopefully implement a dummy\n backend.\n\n- Add an additional assertion in AbstractAction.run_string. This is already\n covered by the isinstance(member, self.action_base) above it, but that's too\n dynamic for mypy to understand.\n\n- Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x\n and y components), not a single int.\n\n- Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x\n and y components), not a single int.\n\n- Fix the argument types of AbstractScroller.to_perc, as it's possible to pass\n fractional percentages too.\n\n- Specify the type for AbstractHistoryPrivate._history. See above (_widget) re\n this being debatable.\n\n- Fix the return type of AbstractTabPrivate.event_target(), which can be None\n (see #3888).\n\n- Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS\n return value), not None.\n\n- Fix the argument type for AbstractTabPrivate.toggle_inspector: position can\n be None to use the last used position.\n\n- Declare the type of sub-objects of AbstractTab.\n\n- Fix the return value of AbstractTab.icon(), which is the QIcon, not None.\n\n==== qutebrowser/browser/commands.py\n\n- Make sure the active window is a MainWindow (with a .win_id attribute).\n\n==== qutebrowser/browser/downloadview.py\n\n- Add _model() which makes sure that self.model() is a DownloadModel, not None\n or any other model. This is needed because other methods access a variety of\n custom attributes on it, e.g. 
last_index().\n\n==== qutebrowser/browser/greasemonkey.py\n\n- Add an ignore for AbstractDownload.requested_url which we patch onto the\n downloads. Probably would be nicer to add it as a proper attribute which always\n gets set by the DownloadManager.\n\n==== qutebrowser/browser/hints.py\n\n- Remove type ignores for QUrl.toString().\n- Add a new type ignore for combining different URL flags (which works, but is\n not exactly type safe... still probably a regression in the stubs).\n- Make sure the things we get back from self._get_keyparser are what we actually\n expect. Probably should introduce a TypedDict (and/or overloads for\n _get_keyparser with typing.Literal) to teach mypy about the exact return value.\n See #7098.\n This is needed because we access Hint/NormalKeyParser-specific attributes such\n as .set_inhibited_timout() or .update_bindings().\n\n==== qutebrowser/browser/inspector.py\n\n- Similar changes than in browsertab.py to make some types where we share API\n (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next\n commit.\n\n==== qutebrowser/browser/network/pac.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/qtnetworkdownloads.py\n\n- Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an\n AbstractDownload), so that we can call ._uses_nam() on it.\n\n==== qutebrowser/browser/qutescheme.py\n\n- Remove now unneeded type ignore for QUrl flags.\n\n==== qutebrowser/browser/urlmarks.py\n\n- Specify the type of UrlMarkManager._lineparser, as those only get initialized\n in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist.\n\n==== qutebrowser/browser/webelem.py\n\n- New casts to turn single KeyboardModifier (enum) entries into\n KeyboardModifiers (flags). Might not be needed anymore with Qt 6.\n- With that, casting the final value is now unneeded.\n\n==== qutebrowser/browser/webengine/notification.py\n\n- Remove now unneeded type ignore for signal.\n- Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished()\n is a QProcess, not just any QObject.\n\n==== qutebrowser/browser/webengine/webenginedownloads.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webengine/webengineelem.py\n\n- Specify the type of WebEngineElement._tab.\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webengineinspector.py\n\n- See changes to inspector.py and next commit.\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/webengine/webenginequtescheme.py\n\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webenginesettings.py\n\n- Ignore access of .setter attribute which we patch onto QWebEngineProfile.\n Would be nice to have a subclass or wrapper-class instead.\n\n==== qutebrowser/browser/webengine/webenginetab.py\n\n- Specified the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Remove some now-unneeded type ignores for creating FindFlags.\n- Specify more concrete types for WebEngineTab members where we actually need to\n access WebEngine-specific attributes.\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webengine/webview.py\n\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. 
This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webkit/network/networkreply.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webkit/webkitinspector.py\n\n- See changes to inspector.py and next commit.\n\n==== qutebrowser/browser/webkit/webkittab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Add a type ignore for WebKitAction because our workaround needs to\n treat them as ints (which is allowed by PyQt, even if not type-safe).\n- Add new ignores for findText calls: The text is a QString and can be None; the\n flags are valid despite mypy thinking they aren't (stubs regression?).\n- Specify the type for WebKitHistoryPrivate._history, because we access\n WebKit-specific attributes. See above (_widget) re this being debatable.\n- Make mypy aware that .currentFrame() and .frameAt() can return None (stubs\n regression?).\n- Make sure the .page() and .page().networkAccessManager() are our subclasses\n rather than the more generic QtWebKit objects, as we use custom attributes.\n- Add new type ignores for signals (stubs regression!)\n\n==== qutebrowser/browser/webkit/webpage.py\n\n- Make sure the .networkAccessManager() is our subclass rather than the more\n generic QtWebKit object, as we use custom attributes.\n- Replace a cast by a type ignore. The cast didn't work anymore.\n\n==== qutebrowser/browser/webkit/webview.py\n\n- Make sure the .page() is our subclass rather than the more generic QtWebKit\n object, as we use custom attributes.\n\n==== qutebrowser/commands/userscripts.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/completion/completer.py\n\n- Add a new _completion() getter (which ensures it actually gets the completion\n view) rather than accessing the .parent() directly (which could be any QObject).\n\n==== qutebrowser/completion/completiondelegate.py\n\n- Make sure self.parent() is a CompletionView (no helper method as there is only\n one instance).\n- Remove a now-unneeded type ignore for adding QSizes.\n\n==== qutebrowser/completion/completionwidget.py\n\n- Add a ._model() getter which ensures that we get a CompletionModel (with\n custom attributes) rather than Qt's .model() which can be any QAbstractItemModel\n (or None).\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/completion/models/completionmodel.py\n\n- Remove now unneeded type ignores for signals.\n- Ignore a complaint about .set_pattern() not being defined. Completion\n categories don't share any common parent class, so it would be good to introduce\n a typing.Protocol for this. See #7098.\n\n==== qutebrowser/components/misccommands.py\n\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/components/readlinecommands.py\n\n- Make sure QApplication.instance() is a QApplication (and not just a\n QCoreApplication). This includes the former \"not None\" check.\n\n==== qutebrowser/components/scrollcommands.py\n\n- Add basic annotation for \"funcs\" dict. Could have a callable protocol to\n specify it needs a count kwarg, see #7098.\n\n==== qutebrowser/config/stylesheet.py\n\n- Correctly specify that stylesheet apply to QWidgets, not any QObject.\n- Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy\n about this with overloads and protocols (stylesheet for set_register being None\n => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not\n worth the troble. 
See #7098.\n\n==== qutebrowser/keyinput/keyutils.py\n\n- Remove some now-unneeded type ignores and add a cast for using a single enum\n value as flags. Might need to look at this again with Qt 6 support.\n\n==== qutebrowser/keyinput/modeman.py\n\n- Add a FIXME for using a TypedDict, see comments for hints.py above.\n\n==== qutebrowser/mainwindow/mainwindow.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n- Improve where we cast from WindowType to WindowFlags, no int needed\n- Use new .tab_bar() getter, see below.\n\n==== qutebrowser/mainwindow/prompt.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n\n==== qutebrowser/mainwindow/statusbar/bar.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/statusbar/command.py\n\n- Fix type for setText() override (from QLineEdit): text can be None\n (QString in C++).\n\n==== qutebrowser/mainwindow/statusbar/url.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/tabbedbrowser.py\n\n- Specify that TabDeque manages browser tabs, not any QWidgets. It accesses\n AbstractTab-specific attributes.\n- Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access\n .maybe_hide.\n- Fix the annotations for stored marks: Scroll positions are a QPoint, not int.\n- Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and\n .widget(), which ensures that the return values are valid AbstractTabs (or None\n for _tab_by_idx). This is needed because we access AbstractTab-specific\n attributes.\n- For some places, where the tab can be None, continue using .currentTab() but\n add asserts.\n- Remove some now-unneeded [unreachable] ignores, as mypy knows about the None\n possibility now.\n\n==== qutebrowser/mainwindow/tabwidget.py\n\n- Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and\n .widget() are of type TabBar and AbstractTab, respectively.\n- Add additional assertions where we expect ._tab_by_idx() to never be None.\n- Remove dead code in get_tab_fields for handling a None y scroll position. I\n was unable to find any place in the code where this could be set to None.\n- Remove some now-unneeded type ignores and casts, as mypy now knows that\n _type_by_idx() could be None.\n- Work around a strange instance where mypy complains about not being able to\n find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility,\n despite it clearly being shown as a bool *inside* that class without any\n annotation.\n- Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in\n fact a TabWidget.\n\n==== qutebrowser/misc/crashsignal.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/editor.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/ipc.py\n\n- Remove now unneeded type ignores for signals.\n- Add new type ignores for .error() which is both a signal and a getter\n (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was\n renamed to errorOccurred in 5.15.\n\n==== qutebrowser/misc/objects.py\n\n- Make sure mypy knows that objects.app is our custom Application (with custom\n attributes) rather than any QApplication.\n\n==== qutebrowser/utils/objreg.py\n\n- Ignore attr-defined for .win_id attributes. 
Maybe could add a typing.Protocol,\n but ideally, the whole objreg stuff should die one day anyways.\n\n==== tests/unit/completion/test_completer.py\n\n- Make CompletionWidgetStub inherit from CompletionView so that it passes the\n new isinstance() asserts in completer.py (see above).", "code": "def _get_textdoc(self, index):\n \n assert self._opt is not None\n # FIXME we probably should do eliding here. See\n # qcommonstyle.cpp:viewItemDrawText\n # https://github.com/qutebrowser/qutebrowser/issues/118\n text_option = QTextOption()\n if self._opt.features & QStyleOptionViewItem.WrapText:\n text_option.setWrapMode(QTextOption.WordWrap)\n else:\n text_option.setWrapMode(QTextOption.ManualWrap)\n text_option.setTextDirection(self._opt.direction)\n text_option.setAlignment(QStyle.visualAlignment(\n self._opt.direction, self._opt.displayAlignment))\n\n if self._doc is not None:\n self._doc.deleteLater()\n self._doc = QTextDocument(self)\n self._doc.setDefaultFont(self._opt.font)\n self._doc.setDefaultTextOption(text_option)\n self._doc.setDocumentMargin(2)\n\n if index.parent().isValid():\n view = self.parent()\n assert isinstance(view, completionwidget.CompletionView), view\n pattern = view.pattern\n columns_to_filter = index.model().columns_to_filter(index)\n if index.column() in columns_to_filter and pattern:\n if self._opt.state & QStyle.State_Selected:\n color = config.val.colors.completion.item.selected.match.fg\n else:\n color = config.val.colors.completion.match.fg\n _Highlighter(self._doc, pattern, color)\n self._doc.setPlainText(self._opt.text)\n else:\n self._doc.setHtml(\n '{}'.format(\n html.escape(config.val.fonts.completion.category),\n html.escape(self._opt.text)))\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 466, "n_words": 90, "vocab_size": 68, "complexity": 7, "nloc": 33, "token_counts": 292, "n_ast_nodes": 469, "n_identifiers": 55, "random_cut": "def _get_textdoc(self, index):\n \n assert self._opt is not None\n # FIXME we probably should do eliding here. 
See\n # qcommonstyle.cpp:viewItemDrawText\n # https://github.com/qutebrowser/qutebrowser/issues/118\n text_option = QTextOption()\n if self._opt.features & QStyleOptionViewItem.WrapText:\n text_option.setWrapMode(QTextOption.WordWrap)\n else:\n text_option.setWrapMode(QTextOption.ManualWrap)\n text_option.setTextDirection(self._opt.direction)\n text_option.setAlignment(QStyle.visualAlignment(\n self._opt.direction, self._opt.displayAlignment))\n\n if self._doc is not None:\n self._doc.deleteLater()\n self._doc = QTextDocument(self)\n self._doc.setDefaultFont(self._opt.font)\n self._doc.setDefaultTextOption(text_option)\n self._doc.setDocumentMargin(2)\n\n if index.parent().isValid():\n view = self.parent()\n assert isinstance(view, completionwidget.CompletionView), view\n pattern = view.pattern\n columns_to_filter = index.model().columns_to_filter(index)\n if index.column() in columns_to_filter and pattern:\n if self._opt.state & QStyle.State_Selected:\n color = config.val.colors.completion.item.selected.match.fg\n else:\n color = config.val.colors.completion.match.fg\n _Highlighter(self._doc, pattern, color)\n self._doc.setPlainText(self._opt.text)\n else:\n self._doc.setHtml(\n '{}'.", "d_id": 117338, "documentation": { "docstring": "Create the QTextDocument of an item.\n\n Args:\n index: The QModelIndex of the item to draw.\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 40, "language": "en" } }, { "id": 9296, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/face_detector/detect_face.py", "file_name": "detect_face.py", "fun_name": "feed", "commit_message": "initialize ostec", "code": "def feed(self, *args):\n \n assert len(args) != 0\n self.terminals = []\n for fed_layer in args:\n if isinstance(fed_layer, str):\n try:\n fed_layer = self.layers[fed_layer]\n except KeyError:\n raise KeyError('Unknown layer name fed: %s' % fed_layer)\n self.terminals.append(fed_layer)\n return self\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 159, "n_words": 34, "vocab_size": 32, "complexity": 4, "nloc": 11, "token_counts": 65, "n_ast_nodes": 107, "n_identifiers": 11, "random_cut": "def feed(self, *args):\n \n assert len(args) != 0\n self.terminals = []", "d_id": 1585, "documentation": { "docstring": "Set the input(s) for the next operation by replacing the terminal nodes.\n The arguments can be either layer names or the actual layers.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 37, "language": "en" } }, { "id": 271843, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_utils_v1.py", "file_name": "training_utils_v1.py", "fun_name": "unpack_iterator_input", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def unpack_iterator_input(iterator):\n \n try:\n next_element = iterator.get_next()\n except tf.errors.OutOfRangeError:\n raise RuntimeError(\n \"Your dataset iterator ran out of data; \"\n \"Make sure that your dataset can generate \"\n \"required number of samples.\"\n )\n\n if isinstance(next_element, (list, tuple)):\n if len(next_element) not in [2, 3]:\n raise ValueError(\n \"Please provide model inputs as a list or tuple of 2 or 3 \"\n \"elements: (input, target) or (input, target, sample_weights) \"\n \"Received %s\" % next_element\n )\n if len(next_element) == 2:\n x, y = next_element\n weights = 
None\n else:\n x, y, weights = next_element\n else:\n x = next_element\n y = None\n weights = None\n return x, y, weights\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 315, "n_words": 101, "vocab_size": 67, "complexity": 5, "nloc": 26, "token_counts": 105, "n_ast_nodes": 180, "n_identifiers": 16, "random_cut": "def unpack_iterator_input(iterator):\n \n try:\n next_element = iterator.get_next()\n except tf.errors.OutOfRangeError:\n raise RuntimeError(\n \"Your dataset iterator ran out of data; \"\n \"Make sure that your dataset can generate \"\n \"required number of samples.\"\n )\n\n if isinstance(next_element, (list, tuple)):\n if len(next_element) not in [2, 3]:\n raise ValueError(\n \"Please pr", "d_id": 80862, "documentation": { "docstring": "Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.\n\n Args:\n iterator: Instance of a dataset iterator.\n\n Returns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.\n ", "n_words": 33, "vocab_size": 25, "n_whitespaces": 52, "language": "en" } }, { "id": 120211, "commit_id": "3f9e45e0c5b035de27b14588cd3b4cfd5f3c1f04", "repo": "jax", "path": "tests/mesh_utils_test.py", "file_name": "mesh_utils_test.py", "fun_name": "mock_2x2x4_devices", "commit_message": "[mesh_utils] Support creating device meshes for hybrid networks\n\nAlso makes some NFCs to other mesh_utils code.\n\nPiperOrigin-RevId: 442581767", "code": "def mock_2x2x4_devices(one_device_per_chip):\n \n return mock_devices(2, 2, 4, 'TPU v4', one_device_per_chip)\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 11, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 31, "n_identifiers": 3, "random_cut": "def mock_2x2x4_devices(one_device_per_chip):\n \n return mock_devices(2, 2, 4, 'TPU v4', one_device_pe", "d_id": 26798, "documentation": { "docstring": "Hard-coded reproduction of jax.devices() output on 2x2x4.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 6855, "commit_id": "698a0e0f1ed95d20116dc51aa9c6a7ed48446deb", "repo": "ludwig", "path": "ludwig/export.py", "file_name": "export.py", "fun_name": "export_triton", "commit_message": "Adding new export for Triton (#2078)\n\n* Adding new export for triton. Fixes for load model for neuropod export, add output dict format\r\n\r\n* Adding test for triton. Fix to cast int to string for os.path.join. 
Added annotation for neurpod\r\n\r\n* Minor tweaks to config.pbtxt output\r\n\r\n* Remove logger that is not being used\r\n\r\n* Restrict torchmetrics<0.9 and whylogs<1.0 until compatibility fixed\r\n\r\n* Update export_triton to return model path, and added docstrings\r\n\r\n* Update api to return both model path and config path\r\n\r\nCo-authored-by: Travis Addair ", "code": "def export_triton(model_path, output_path=\"model_repository\", model_name=\"ludwig_model\", model_version=1, **kwargs):\n \n logger.info(f\"Model path: {model_path}\")\n logger.info(f\"Output path: {output_path}\")\n logger.info(f\"Model name: {model_name}\")\n logger.info(f\"Model version: {model_version}\")\n logger.info(\"\\n\")\n\n model = LudwigModel.load(model_path)\n os.makedirs(output_path, exist_ok=True)\n utils_export_triton(model, output_path, model_name, model_version)\n\n logger.info(f\"Saved to: {output_path}\")\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 61, "n_words": 31, "vocab_size": 27, "complexity": 1, "nloc": 10, "token_counts": 90, "n_ast_nodes": 170, "n_identifiers": 15, "random_cut": "def export_triton(model_path, output_path=\"model_repository\", model_name=\"ludwig_model\", model_version=1, **kwargs):\n \n logger.info(f\"Model path: {model_path}\")\n logger.info(f\"Output path: {output_path}\"", "d_id": 1079, "documentation": { "docstring": "Exports a model in torchscript format with config for Triton serving.\n\n # Inputs\n\n :param model_path: (str) filepath to pre-trained model.\n :param output_path: (str, default: `'model_repository'`) directory to store the\n triton models.\n :param model_name: (str, default: `'ludwig_model'`) save triton under this name.\n :param model_name: (int, default: `1`) save neuropod under this verison.\n\n # Return\n\n :returns: (`None`)\n ", "n_words": 55, "vocab_size": 42, "n_whitespaces": 87, "language": "en" } }, { "id": 85389, "commit_id": "6aaaf5089b2c39757883179df5a8512db3b0c716", "repo": "sentry", "path": "src/sentry/eventstore/models.py", "file_name": "models.py", "fun_name": "tags", "commit_message": "feat(perf_issues): Add `GroupEvent` and split some functionality in `Event` into a base class. (#38143)\n\nSince we can now have events with multiple groups, we can no longer rely on the `Event.group`\r\nproperty. This pr adds in a `GroupEvent` subclass that should be passed around wherever we expect an\r\nevent to have a single `Group` associated with it.\r\n\r\n`Event` has been split up into `BaseEvent` and `Event`. We will deprecate and remove uses of\r\n`group_id` and `group` in the `Event` class going forward. If we need an event with a `Group`, we\r\ncan use `build_group_events` to fetch all `GroupEvents` associated with the `Event`, or `for_group`\r\nif we just need a specific `Event`/`Group` pairing.\r\n\r\nGoing forward, the plan is to store all groups in the `groups` property. This means that error\r\nevents being sent via eventstream will have their group included in `groups` as well. We'll\r\nneed to update the errors processor in snuba to look there instead of `group_id`. 
This seems cleaner\r\nlong term, instead of having both `group_id` and `group_ids` passed through.\r\n\r\nTo figure out where we need to use `build_group_events` and `for_group` we can do a mix of searching\r\nthe codebase and commenting out the `group_id` and `group` properties and see how CI goes.", "code": "def tags(self) -> Sequence[Tuple[str, str]]:\n \n tags_key_column = self._get_column_name(Columns.TAGS_KEY)\n tags_value_column = self._get_column_name(Columns.TAGS_VALUE)\n\n if tags_key_column in self._snuba_data and tags_value_column in self._snuba_data:\n keys = self._snuba_data[tags_key_column]\n values = self._snuba_data[tags_value_column]\n if keys and values and len(keys) == len(values):\n return sorted(zip(keys, values))\n else:\n return []\n # Nodestore implementation\n try:\n rv = sorted(\n (t, v)\n for t, v in get_path(self.data, \"tags\", filter=True) or ()\n if t is not None and v is not None\n )\n return rv\n except ValueError:\n # at one point Sentry allowed invalid tag sets such as (foo, bar)\n # vs ((tag, foo), (tag, bar))\n return []\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 327, "n_words": 93, "vocab_size": 67, "complexity": 11, "nloc": 23, "token_counts": 145, "n_ast_nodes": 229, "n_identifiers": 24, "random_cut": "def tags(self) -> Sequence[Tuple[str, str]]:\n \n tags_key_column = self._get_column_name(Columns.TAGS_KEY)\n tags_value_column = self._get_column_name(Columns.TAGS_VALUE)\n\n if tags_key_column in self._snuba_data and tags_value_column in self._snuba_data:\n keys = self._snuba_data[tags_key_column]\n values = self._snuba_data[tags_value_column]\n if keys and values and len(keys) == len(values):\n return sorted(zip(keys, values))\n else:\n return []\n # Nodestore implementation\n try:\n rv = sorted(\n (t, v)\n for t, v in get_path(self.data, \"tags\", filter=True) or ()\n if t is not None and v is not None\n )\n return rv\n except ValueError:\n # at one point Sentry allowed inva", "d_id": 17983, "documentation": { "docstring": "\n Tags property uses tags from snuba if loaded otherwise falls back to\n nodestore.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 264988, "commit_id": "6280398bc17211bbc5b321039144c1eb0461f4a9", "repo": "netbox", "path": "netbox/dcim/tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_cable_validates_compatible_types", "commit_message": "Clean up tests", "code": "def test_cable_validates_compatible_types(self):\n \n # An interface cannot be connected to a power port\n cable = Cable(a_terminations=[self.interface1, self.interface2], b_terminations=[self.interface3])\n with self.assertRaises(ValidationError):\n cable.clean()\n\n # TODO: Remove this?\n # def test_cable_front_port_cannot_connect_to_corresponding_rear_port(self):\n # \n # cable = Cable(a_terminations=[self.front_port1], b_terminations=[self.rear_port1])\n # with self.assertRaises(ValidationError):\n # cable.clean()\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 116, "n_words": 38, "vocab_size": 26, "complexity": 1, "nloc": 4, "token_counts": 43, "n_ast_nodes": 81, "n_identifiers": 12, "random_cut": "def test_cable_validates_compatible_types(self):\n \n # An interface cannot be connected to a power port\n cable = Cable(a_terminations=[self.interface1, self.interface2], 
b_terminations=[self.interface3])\n with self.assertRaises(ValidationError):\n cable.clean()\n\n # TODO: Remove this?\n # def", "d_id": 77934, "documentation": { "docstring": "\n The clean method should have a check to ensure only compatible port types can be connected by a cable\n \n # A cable cannot connect a front port to its corresponding rear port\n # ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 63, "language": "en" } }, { "id": 275865, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/hdf5_format.py", "file_name": "hdf5_format.py", "fun_name": "save_model_to_hdf5", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):\n \n\n if h5py is None:\n raise ImportError(\n \"`save_model()` using h5 format requires h5py. Could not \"\n \"import h5py.\"\n )\n\n # TODO(psv) Add warning when we save models that contain non-serializable\n # entities like metrics added using `add_metric` and losses added using\n # `add_loss.`\n if len(model.weights) != len(model._undeduplicated_weights):\n logging.warning(\n \"Found duplicated `Variable`s in Model's `weights`. \"\n \"This is usually caused by `Variable`s being shared by \"\n \"Layers in the Model. These `Variable`s will be treated \"\n \"as separate `Variable`s when the Model is restored. To \"\n 'avoid this, please save with `save_format=\"tf\"`.'\n )\n\n if not isinstance(filepath, h5py.File):\n # If file exists and should not be overwritten.\n if not overwrite and os.path.isfile(filepath):\n proceed = ask_to_proceed_with_overwrite(filepath)\n if not proceed:\n return\n\n # Try creating dir if not exist\n dirpath = os.path.dirname(filepath)\n if not os.path.exists(dirpath):\n tf.io.gfile.makedirs(dirpath)\n\n f = h5py.File(filepath, mode=\"w\")\n opened_new_file = True\n else:\n f = filepath\n opened_new_file = False\n\n try:\n model_metadata = saving_utils.model_metadata(model, include_optimizer)\n for k, v in model_metadata.items():\n if isinstance(v, (dict, list, tuple)):\n f.attrs[k] = json.dumps(\n v, default=json_utils.get_json_type\n ).encode(\"utf8\")\n else:\n f.attrs[k] = v\n\n model_weights_group = f.create_group(\"model_weights\")\n save_weights_to_hdf5_group(model_weights_group, model)\n\n # TODO(b/128683857): Add integration tests between tf.keras and external\n # Keras, to avoid breaking TF.js users.\n if isinstance(model.optimizer, optimizer_experimental.Optimizer):\n logging.warning(\n \"HDF5 format does not save weights of\"\n \" `optimizer_experimental.Optimizer`, your optimizer will\"\n \" be recompiled at loading time.\"\n )\n elif (\n include_optimizer\n and model.optimizer\n and not isinstance(model.optimizer, optimizer_v1.TFOptimizer)\n ):\n save_optimizer_weights_to_hdf5_group(f, model.optimizer)\n\n f.flush()\n finally:\n if opened_new_file:\n f.close()\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 766, "n_words": 235, "vocab_size": 164, "complexity": 16, "nloc": 54, "token_counts": 290, "n_ast_nodes": 490, "n_identifiers": 55, "random_cut": "def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):\n \n\n if h5py is None:\n raise ImportError(\n \"`save_model()` using h5 format requires h5py. 
Could not \"\n \"import h5py.\"\n )\n\n # TODO(psv) Add warning when we save models that contain non-serializable\n # entities like metrics added using `add_metric` and losses added using\n # `add_loss.`\n if len(model.weights) != len(model._undeduplicated_weights):\n logging.warning(\n \"Found duplicated `Variable`s in Model's `weights`. \"\n \"This is usually caused by `Variable`s being shared by \"\n \"Layers in the Model. These `Variable`s will be treated \"\n \"as separate `Variable`s when the Model is restored. To \"\n 'avoid this, please save with `save_format=\"tf\"`.'\n )\n\n if not isinstance(filepath, h5py.File):\n # If file exists and should not be overwritten.\n if not overwrite and os.path.isfile(filepath):\n proceed = ask_to_proceed_with_overwrite(filepath)\n if not proceed:\n return\n\n # Try creating dir if not exist\n dirpath = os.path.dirname(filepath)\n if not os.path.exists(dirpath):\n tf.io.gfile.makedirs(dirpath)\n\n f = h5py.File(filepath, mode=\"w\")\n opened_new_file = True\n else:\n f = filepath\n opened_new_file = False\n\n try:\n model_metadata = saving_utils.model_metadata(model, include_optimiz", "d_id": 81490, "documentation": { "docstring": "Saves a model to a HDF5 file.\n\n The saved model contains:\n - the model's configuration (topology)\n - the model's weights\n - the model's optimizer's state (if any)\n\n Thus the saved model can be reinstantiated in\n the exact same state, without any of the code\n used for model definition or training.\n\n Args:\n model: Keras model instance to be saved.\n filepath: One of the following:\n - String, path where to save the model\n - `h5py.File` object where to save the model\n overwrite: Whether we should overwrite any existing\n model at the target location, or instead\n ask the user with a manual prompt.\n include_optimizer: If True, save optimizer's state together.\n\n Raises:\n ImportError: if h5py is not available.\n ", "n_words": 114, "vocab_size": 76, "n_whitespaces": 235, "language": "en" } }, { "id": 2278, "commit_id": "066545e8a88e842aa7d0a5d57bac88716001bced", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/node_manager/user_manager.py", "file_name": "user_manager.py", "fun_name": "set", "commit_message": "replaced all methods of usermanager class,\nworking login\nCo-Authored By: Ionesio", "code": "def set(self, **kwargs) -> None: # nosec\n \n attributes = {}\n user_id = kwargs[\"user_id\"]\n user = self.first(id_int=int(user_id))\n if not user:\n raise UserNotFoundError\n\n for k, v in kwargs.items():\n if k in user.__attr_searchable__:\n attributes[k] = v\n\n if kwargs.get(\"email\", None):\n user.email = kwargs[\"email\"]\n elif kwargs.get(\"role\", None):\n user.role = kwargs[\"role\"]\n elif kwargs.get(\"name\", None):\n user.name = kwargs[\"name\"]\n elif kwargs.get(\"budget\", None):\n user.budget = kwargs[\"budget\"]\n elif kwargs.get(\"website\", None):\n user.website = kwargs[\"website\"]\n elif kwargs.get(\"institution\", None):\n user.institution = kwargs[\"institution\"]\n else:\n raise Exception\n\n attributes[\"__blob__\"] = user.to_bytes()\n\n self.update_one({\"id_int\": int(user_id)}, {\"$set\": attributes})\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 299, "n_words": 79, "vocab_size": 55, "complexity": 10, "nloc": 41, "token_counts": 205, "n_ast_nodes": 351, "n_identifiers": 24, "random_cut": "def set(self, **kwargs) -> None: # nosec\n \n attributes = {}\n user_id = 
kwargs[\"u", "d_id": 270, "documentation": { "docstring": "Updates the information for the given user id.\n\n Args:\n user_id (str): unique id of the user in the database.\n email (str, optional): email of the user. Defaults to \"\".\n password (str, optional): password of the user. Defaults to \"\".\n role (int, optional): role of the user. Defaults to 0.\n name (str, optional): name of the user. Defaults to \"\".\n website (str, optional): website of the institution of the user. Defaults to \"\".\n institution (str, optional): name of the institution of the user. Defaults to \"\".\n budget (float, optional): privacy budget allocated to the user. Defaults to 0.0.\n\n Raises:\n UserNotFoundError: Raised when a user does not exits for the given user id.\n Exception: Raised when an invalid argument/property is passed.\n ", "n_words": 119, "vocab_size": 48, "n_whitespaces": 250, "language": "en" } }, { "id": 92964, "commit_id": "cd803d173c72b64d06c0687170bf9a945d0b503c", "repo": "sentry", "path": "tests/sentry/snuba/metrics/fields/test_base.py", "file_name": "test_base.py", "fun_name": "test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric", "commit_message": "fix(snuba): Add appropriate `UseCaseKey` for indexer [TET-146] (#36308)\n\n* fix(snuba): Add appropriate `UseCaseKey` for indexer\r\n\r\nUpdate indexer invocation call to have the appropriate\r\n`UseCaseKey` depending on use case.\r\n\r\nIn `src/sentry/sentry_metrics/indexer/base.py::StringIndexer`\r\nwhen using `resolve` and `reverse_resolve` callers should not\r\nrely on the default use_case_id.\r\n\r\nImportant changes:\r\n- Add required parameter `use_case_id: UseCaseKey` to `get_series` from `src/sentry/snuba/metrics/datasource.py#L612`;\r\n- Add required parameter to `get_metrics` in `src/sentry/snuba/metrics/datasource.py`\r\n- Add required parameter to `get_tags` in `src/sentry/snuba/metrics/datasource.py`\r\n- Add required parameter to `get_tag_values` in `src/sentry/snuba/metrics/datasource.py`", "code": "def test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric(self):\n \n use_case_id = UseCaseKey.RELEASE_HEALTH\n expected_derived_metrics_entities = {\n SessionMRI.ALL.value: \"metrics_counters\",\n SessionMRI.ALL_USER.value: \"metrics_sets\",\n SessionMRI.CRASHED.value: \"metrics_counters\",\n SessionMRI.CRASHED_USER.value: \"metrics_sets\",\n SessionMRI.ABNORMAL.value: \"metrics_counters\",\n SessionMRI.ABNORMAL_USER.value: \"metrics_sets\",\n SessionMRI.CRASH_FREE_RATE.value: \"metrics_counters\",\n SessionMRI.CRASH_FREE_USER_RATE.value: \"metrics_sets\",\n SessionMRI.ERRORED_PREAGGREGATED.value: \"metrics_counters\",\n SessionMRI.ERRORED_SET.value: \"metrics_sets\",\n SessionMRI.ERRORED_USER_ALL.value: \"metrics_sets\",\n SessionMRI.CRASHED_AND_ABNORMAL_USER.value: \"metrics_sets\",\n SessionMRI.ERRORED_USER.value: \"metrics_sets\",\n }\n for key, value in expected_derived_metrics_entities.items():\n assert (\n MOCKED_DERIVED_METRICS[key].get_entity(\n projects=[self.project], use_case_id=use_case_id\n )\n ) == value\n\n # Incorrectly setup SingularEntityDerivedMetric with metrics spanning multiple entities\n with pytest.raises(DerivedMetricParseException):\n self.crash_free_fake.get_entity(projects=[self.project], use_case_id=use_case_id)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 336, "n_words": 62, "vocab_size": 47, "complexity": 2, "nloc": 25, "token_counts": 180, 
"n_ast_nodes": 292, "n_identifiers": 31, "random_cut": "def test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric(self):\n \n use_case_id = UseCaseKey.RELEASE_HEALTH\n expected_derived_metrics_entities = {\n SessionMRI.ALL.value: \"metrics_counters\",\n SessionMRI.ALL_USER.value: \"metrics_sets\",\n SessionMRI.CRASHED.value: \"metrics_counters\",\n SessionMRI.CRASHED_USER.value: \"metrics_sets\",\n SessionMRI.ABNORMAL.value: \"metrics_counters\",\n SessionMRI.ABNORMAL_USER.value: \"metrics_sets\",\n SessionMRI.CRASH_FREE_RATE.value: \"metrics_counters\",\n SessionMRI.CRASH_FREE_USER_RATE.value: \"metrics_sets\",\n SessionMRI.ERRORED_PREAGGREGATED.value: \"metrics_counters\",\n SessionMRI.ERRORED_SET.value: \"metrics_sets\",\n SessionMRI.ERRORED_USER_ALL.value: \"metrics_sets\",\n SessionMRI.CRASHED_AND_ABNORMAL_USER.value: \"metrics_sets\",\n SessionMRI.ERRORED_USER.value: \"metrics_sets\",\n }\n for key, value in expected_derived_metrics_entities.items():\n assert (\n MOCKED_DERIVED_METRICS[key].get_entity(\n projects=[self.project], use_case_id=use_case_id\n )\n ) == value\n\n # Incorrectly setup SingularEntityDerivedMetric with metrics spanning multiple entities\n with pytest.raises(DerivedMetricParseException):\n self.crash_free_fake.get_entity(projects=[self.proje", "d_id": 18963, "documentation": { "docstring": "\n Tests that ensures that get_entity method works expected in the sense that:\n - Since it is the first function that is called by the query_builder, validation is\n applied there to ensure that if it is an instance of a SingleEntityDerivedMetric,\n then it is composed of only other SingleEntityDerivedMetric or\n RawMetric that belong to the same entity\n - Return the entity of that derived metric\n ", "n_words": 64, "vocab_size": 44, "n_whitespaces": 114, "language": "en" } }, { "id": 260356, "commit_id": "db6123fe40400828918037f3fae949bfcc4d9d05", "repo": "scikit-learn", "path": "sklearn/decomposition/_sparse_pca.py", "file_name": "_sparse_pca.py", "fun_name": "transform", "commit_message": "MAINT Use _validate_params in SparsePCA and MiniBatchSparsePCA (#23710)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: jeremiedbb ", "code": "def transform(self, X):\n \n check_is_fitted(self)\n\n X = self._validate_data(X, reset=False)\n X = X - self.mean_\n\n U = ridge_regression(\n self.components_.T, X.T, self.ridge_alpha, solver=\"cholesky\"\n )\n\n return U\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 83, "n_words": 23, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 55, "n_ast_nodes": 87, "n_identifiers": 13, "random_cut": "def transform(self, X):\n \n check_is_fitted(self)\n\n X = self._validate_data(X, reset=False)\n X = X - self.mean_\n\n U = ridge_regression(\n self.components_.T, X.T, self.ridge_alpha, solver=\"cholesky\"\n )\n\n return U\n", "d_id": 76202, "documentation": { "docstring": "Least Squares projection of the data onto the sparse components.\n\n To avoid instability issues in case the system is under-determined,\n regularization can be applied (Ridge regression) via the\n `ridge_alpha` parameter.\n\n Note that Sparse PCA components orthogonality is not enforced as in PCA\n hence one cannot use a simple linear projection.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test data to be transformed, must have the same number of\n features as the data used to train the 
model.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Transformed data.\n ", "n_words": 90, "vocab_size": 69, "n_whitespaces": 207, "language": "en" } }, { "id": 264756, "commit_id": "4bb9b6ee2639db683b70d6ddbee055497e0a3647", "repo": "netbox", "path": "netbox/utilities/utils.py", "file_name": "utils.py", "fun_name": "serialize_object", "commit_message": "Extend Cable model to support multiple A/B terminations", "code": "def serialize_object(obj, extra=None):\n \n json_str = serialize('json', [obj])\n print(json_str)\n data = json.loads(json_str)[0]['fields']\n\n # Exclude any MPTTModel fields\n if issubclass(obj.__class__, MPTTModel):\n for field in ['level', 'lft', 'rght', 'tree_id']:\n data.pop(field)\n\n # Include custom_field_data as \"custom_fields\"\n if hasattr(obj, 'custom_field_data'):\n data['custom_fields'] = data.pop('custom_field_data')\n\n # Include any tags. Check for tags cached on the instance; fall back to using the manager.\n if is_taggable(obj):\n tags = getattr(obj, '_tags', None) or obj.tags.all()\n data['tags'] = [tag.name for tag in tags]\n\n # Append any extra data\n if extra is not None:\n data.update(extra)\n\n # Copy keys to list to avoid 'dictionary changed size during iteration' exception\n for key in list(data):\n # Private fields shouldn't be logged in the object change\n if isinstance(key, str) and key.startswith('_'):\n data.pop(key)\n\n return data\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 233, "n_words": 117, "vocab_size": 86, "complexity": 11, "nloc": 18, "token_counts": 167, "n_ast_nodes": 285, "n_identifiers": 27, "random_cut": "def serialize_object(obj, extra=None):\n \n json_str = serialize('json', [obj])\n print(json_str)\n data = json.loads(json_str)[0]['fields']\n\n # Exclude any MPTTModel fields\n if issubclass(obj.__class__, MPTTModel):\n for field in ['level', 'lft', 'rght', 'tree_id']:\n data.pop", "d_id": 77797, "documentation": { "docstring": "\n Return a generic JSON representation of an object using Django's built-in serializer. (This is used for things like\n change logging, not the REST API.) Optionally include a dictionary to supplement the object data. A list of keys\n can be provided to exclude them from the returned dictionary. 
Private fields (prefaced with an underscore) are\n implicitly excluded.\n ", "n_words": 56, "vocab_size": 49, "n_whitespaces": 72, "language": "en" } }, { "id": 289378, "commit_id": "31a787558fd312331b55e5c2c4b33341fc3601fc", "repo": "core", "path": "tests/components/history/test_init.py", "file_name": "test_init.py", "fun_name": "test_statistics_during_period", "commit_message": "Ensure recorder test fixture is setup before hass fixture (#80528)\n\n* Ensure recorder test fixture is setup before hass fixture\r\n\r\n* Adjust more tests", "code": "async def test_statistics_during_period(recorder_mock, hass, hass_ws_client, caplog):\n \n now = dt_util.utcnow()\n await async_setup_component(hass, \"history\", {})\n client = await hass_ws_client()\n\n # Test the WS API works and issues a warning\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"history/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"end_time\": now.isoformat(),\n \"statistic_ids\": [\"sensor.test\"],\n \"period\": \"hour\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {}\n\n assert (\n \"WS API 'history/statistics_during_period' is deprecated and will be removed in \"\n \"Home Assistant Core 2022.12. Use 'recorder/statistics_during_period' instead\"\n ) in caplog.text\n\n # Test the WS API forwards to recorder\n with patch(\n \"homeassistant.components.history.recorder_ws.ws_handle_get_statistics_during_period\",\n wraps=ws_handle_get_statistics_during_period,\n ) as ws_mock:\n await client.send_json(\n {\n \"id\": 2,\n \"type\": \"history/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"end_time\": now.isoformat(),\n \"statistic_ids\": [\"sensor.test\"],\n \"period\": \"hour\",\n }\n )\n await client.receive_json()\n ws_mock.assert_awaited_once()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 405, "n_words": 112, "vocab_size": 76, "complexity": 1, "nloc": 37, "token_counts": 173, "n_ast_nodes": 319, "n_identifiers": 20, "random_cut": "async def test_statistics_during_period(recorder_mock, hass, hass_ws_client, caplog):\n \n now = dt_util.utcnow()\n await async_setup_component(hass, \"history\", {})\n client = await hass_ws_client()\n\n # Test the WS API works and issues a warning\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"history/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"end_time\": now.isoformat(),\n \"statistic_ids\": [\"sensor.test\"],\n \"period\": \"hour\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {}\n\n assert (\n \"WS API 'history/statistics_during_period' is deprecated and will be removed in \"\n \"Home Assistant Core 2022.12. 
Use 'recorder/statistics_during_period' instead\"\n ) in caplog.text\n\n # Test the WS API forwards to recorder\n with patch(\n \"homeassistant.components.history.recorder_ws.ws_handle_get_statistics_during_period\",\n wraps=ws_handle_get_statist", "d_id": 88520, "documentation": { "docstring": "Test history/statistics_during_period forwards to recorder.", "n_words": 5, "vocab_size": 5, "n_whitespaces": 4, "language": "en" } }, { "id": 75606, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/management/commands/update_index.py", "file_name": "update_index.py", "fun_name": "queryset_chunks", "commit_message": "Reformat with black", "code": "def queryset_chunks(self, qs, chunk_size=DEFAULT_CHUNK_SIZE):\n \n i = 0\n while True:\n items = list(qs[i * chunk_size :][:chunk_size])\n if not items:\n break\n yield items\n i += 1\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 104, "n_words": 24, "vocab_size": 21, "complexity": 3, "nloc": 8, "token_counts": 44, "n_ast_nodes": 73, "n_identifiers": 8, "random_cut": "def queryset_chunks(self, qs, chunk_size=DEFAULT_CHUNK_SIZE):\n \n i = 0", "d_id": 16427, "documentation": { "docstring": "\n Yield a queryset in chunks of at most ``chunk_size``. The chunk yielded\n will be a list, not a queryset. Iterating over the chunks is done in a\n transaction so that the order and count of items in the queryset\n remains stable.\n ", "n_words": 41, "vocab_size": 31, "n_whitespaces": 77, "language": "en" } }, { "id": 95431, "commit_id": "2a4da479b2d4a2faa901701f4c73ff823236e9e8", "repo": "sentry", "path": "src/sentry/search/events/builder.py", "file_name": "builder.py", "fun_name": "flattened_having", "commit_message": "fix(snql): Add aggregations to select in auto_aggregation (#31061)\n\n- This is to fix an issue for queries that have the uniq aggregation in\r\n the HAVING clause, and is not selected.\r\n - Previously we would not add the aggregation to the select clause in\r\n these cases\r\n - Now anything in the having clause will get added to the select\r\n clause as well if auto_aggregation is enabled\r\n - if its disabled we raise an invalid search query error\r\n- This also fixes a bug where this having validation wasn't working\r\n correctly for boolean conditions", "code": "def flattened_having(self) -> List[Condition]:\n \n flattened: List[Condition] = []\n boolean_conditions: List[BooleanCondition] = []\n\n for condition in self.having:\n if isinstance(condition, Condition):\n flattened.append(condition)\n elif isinstance(condition, BooleanCondition):\n boolean_conditions.append(condition)\n\n while len(boolean_conditions) > 0:\n boolean_condition = boolean_conditions.pop()\n for condition in boolean_condition.conditions:\n if isinstance(condition, Condition):\n flattened.append(condition)\n elif isinstance(condition, BooleanCondition):\n boolean_conditions.append(condition)\n\n return flattened\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 229, "n_words": 45, "vocab_size": 30, "complexity": 8, "nloc": 20, "token_counts": 116, "n_ast_nodes": 184, "n_identifiers": 15, "random_cut": "def flattened_having(self) -> List[Condition]:\n \n flattened: List[Condition] = []\n boolean_conditions: List[BooleanCondition] = []\n\n for condition in self.having:\n if isinstance(condition, Condition):\n 
flattened.append(condition)\n elif isinstance(condition, BooleanCondition):\n boolean_conditions.append(condition)\n\n while len(boolean_conditions) > 0:\n boolean_condition = boolean_conditions.pop()\n for condition in boolean_condition.conditions:\n if isinstance(condition, Condition):\n flattened.append(condition)\n ", "d_id": 19211, "documentation": { "docstring": "Return self.having as a flattened list ignoring boolean operators\n This is because self.having can have a mix of BooleanConditions and Conditions. And each BooleanCondition can in\n turn be a mix of either type.\n ", "n_words": 33, "vocab_size": 27, "n_whitespaces": 54, "language": "en" } }, { "id": 163634, "commit_id": "a0b40c0f2ad73420a54e48ec4f564b9667e3f452", "repo": "pandas", "path": "pandas/core/arrays/datetimelike.py", "file_name": "datetimelike.py", "fun_name": "_add_timedelta_arraylike", "commit_message": "EA interface: rename ExtensionArray._hasnans to ._hasna (#45519)", "code": "def _add_timedelta_arraylike(self, other):\n \n # overridden by PeriodArray\n\n if len(self) != len(other):\n raise ValueError(\"cannot add indices of unequal length\")\n\n if isinstance(other, np.ndarray):\n # ndarray[timedelta64]; wrap in TimedeltaIndex for op\n from pandas.core.arrays import TimedeltaArray\n\n other = TimedeltaArray._from_sequence(other)\n\n self_i8 = self.asi8\n other_i8 = other.asi8\n new_values = checked_add_with_arr(\n self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan\n )\n if self._hasna or other._hasna:\n mask = self._isnan | other._isnan\n np.putmask(new_values, mask, iNaT)\n\n return type(self)(new_values, dtype=self.dtype)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 211, "n_words": 64, "vocab_size": 57, "complexity": 5, "nloc": 15, "token_counts": 122, "n_ast_nodes": 191, "n_identifiers": 27, "random_cut": "def _add_timedelta_arraylike(self, other):\n \n # overridden by PeriodArray\n\n if len(self) != len(other):\n raise ValueError(\"cannot add indices of unequal length\")\n\n if isinstance(other, np.ndarray):\n ", "d_id": 39468, "documentation": { "docstring": "\n Add a delta of a TimedeltaIndex\n\n Returns\n -------\n Same type as self\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 48, "language": "en" } }, { "id": 271248, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/functional.py", "file_name": "functional.py", "fun_name": "_map_graph_network", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _map_graph_network(inputs, outputs):\n \n # \"depth\" is number of layers between output Node and the Node.\n # Nodes are ordered from inputs -> outputs.\n nodes_in_decreasing_depth, layer_indices = _build_map(outputs)\n network_nodes = {\n _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node))\n for node in nodes_in_decreasing_depth\n }\n\n nodes_depths = {} # dict {node: depth value}\n layers_depths = {} # dict {layer: depth value}\n\n for node in reversed(nodes_in_decreasing_depth):\n # If the depth is not set, the node has no outbound nodes (depth 0).\n depth = nodes_depths.setdefault(node, 0)\n\n # Update the depth of the corresponding layer\n previous_depth = layers_depths.get(node.layer, 0)\n # If we've seen this layer before at a higher depth,\n # we should use that depth instead of the node depth.\n # This is necessary for shared layers that have inputs at different\n 
# depth levels in the graph.\n depth = max(depth, previous_depth)\n layers_depths[node.layer] = depth\n nodes_depths[node] = depth\n\n # Update the depth of inbound nodes.\n # The \"depth\" of a node is the max of the depths\n # of all nodes it is connected to + 1.\n for node_dep in node.parent_nodes:\n previous_depth = nodes_depths.get(node_dep, 0)\n nodes_depths[node_dep] = max(depth + 1, previous_depth)\n\n # Handle inputs that are not connected to outputs.\n # We do not error out here because the inputs may be used to compute losses\n # and metrics.\n for input_t in inputs:\n input_layer = input_t._keras_history[0]\n if input_layer not in layers_depths:\n layers_depths[input_layer] = 0\n layer_indices[input_layer] = -1\n nodes_depths[input_layer._inbound_nodes[0]] = 0\n network_nodes.add(_make_node_key(input_layer.name, 0))\n\n # Build a dict {depth: list of nodes with this depth}\n nodes_by_depth = collections.defaultdict(list)\n for node, depth in nodes_depths.items():\n nodes_by_depth[depth].append(node)\n\n # Build a dict {depth: list of layers with this depth}\n layers_by_depth = collections.defaultdict(list)\n for layer, depth in layers_depths.items():\n layers_by_depth[depth].append(layer)\n\n # Get sorted list of layer depths.\n depth_keys = list(layers_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Set self.layers ordered by depth.\n layers = []\n for depth in depth_keys:\n layers_for_depth = layers_by_depth[depth]\n # Network.layers needs to have a deterministic order:\n # here we order them by traversal order.\n layers_for_depth.sort(key=lambda x: layer_indices[x])\n layers.extend(layers_for_depth)\n\n # Get sorted list of node depths.\n depth_keys = list(nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Check that all tensors required are computable.\n # computable_tensors: all tensors in the graph\n # that can be computed from the inputs provided.\n computable_tensors = set()\n for x in inputs:\n computable_tensors.add(id(x))\n\n layers_with_complete_input = [] # To provide a better error msg.\n for depth in depth_keys:\n for node in nodes_by_depth[depth]:\n layer = node.layer\n if layer and not node.is_input:\n for x in tf.nest.flatten(node.keras_inputs):\n if id(x) not in computable_tensors:\n raise ValueError(\n f\"Graph disconnected: cannot obtain value for tensor {x} \"\n f'at layer \"{layer.name}\". The following previous layers '\n f\"were accessed without issue: {layers_with_complete_input}\"\n )\n for x in tf.nest.flatten(node.outputs):\n computable_tensors.add(id(x))\n layers_with_complete_input.append(layer.name)\n\n # Ensure name unicity, which will be crucial for serialization\n # (since serialized nodes refer to layers by their name).\n all_names = [layer.name for layer in layers]\n for name in all_names:\n if all_names.count(name) != 1:\n raise ValueError(\n f'The name \"{name}\" is used {all_names.count(name)} '\n \"times in the model. 
All layer names should be unique.\"\n )\n return network_nodes, nodes_by_depth, layers, layers_by_depth\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1164, "n_words": 488, "vocab_size": 245, "complexity": 20, "nloc": 65, "token_counts": 470, "n_ast_nodes": 792, "n_identifiers": 55, "random_cut": "def _map_graph_network(inputs, outputs):\n \n # \"depth\" is number of layers between output Node and the Node.\n # Nodes are ordered from inputs -> outputs.\n nodes_in_decreasing_depth, layer_indices = _build_map(outputs)\n network_nodes = {\n _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node))\n for node in nodes_in_decreasing_depth\n }\n\n nodes_depths = {} # dict {node: depth value}\n layers_depths = {} # dict {layer: depth value}\n\n for node in reversed(nodes_in_decreasing_depth):\n # If the depth is not set, the node has no outbound nodes (depth 0).\n depth = nodes_depths.setdefault(node, 0)\n\n # Update the depth of the corresponding layer\n previous_depth = layers_depths.get(node.layer, 0)\n # If we've seen this layer before at a higher depth,\n # we should use that depth instead of the node depth.\n # This is necessary for shared layers that have inputs at different\n # depth levels in the graph.\n depth = max(depth, previous_depth)\n layers_depths[node.layer] = depth\n nodes_depths[node] = depth\n\n # Update the depth of inbound nodes.\n # The \"depth\" of a node is the max of the depths\n # of all nodes it is connected to + 1.\n for node_dep in node.parent_nodes:\n previous_depth = nodes_depths.get(node_dep, 0)\n nodes_depths[node_dep] = max(depth + 1, previous_depth)\n\n # Handle inputs that are not connected to outputs.\n # We do not error out here because the inputs may be used to compute losses\n # and metrics.\n for input_t in inputs:\n input_layer = input_t._keras_history[0]\n if input_layer not in layers_depths:\n layers_depths[input_layer] = 0\n layer_indices[input_layer] = -1\n nodes_depths[input_layer._inbound_nodes[0]] = 0\n network_nodes.add(_make_node_key(input_layer.name, 0))\n\n # Build a dict {depth: list of nodes with this depth}\n nodes_by_depth = collections.defaultdict(list)\n for node, depth in nodes_depths.items():\n nodes_by_depth[depth].append(node)\n\n # Build a dict {depth: list of layers with this depth}\n layers_by_depth = collections.defaultdict(list)\n for layer, depth in layers_depths.items():\n layers_by_depth[depth].append(layer)\n\n # Get sorted list of layer depths.\n depth_keys = list(layers_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Set self.layers ordered by depth.\n layers = []\n for depth in depth_keys:\n layers_for_depth = layers_by_depth[depth]\n # Network.layers needs to have a deterministic order:\n # here we order them by traversal order.\n layers_for_depth.sort(key=lambda x: layer_indices[x])\n layers.extend(layers_for_depth)\n\n # Get sorted list of node depths.\n depth_keys = list(nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Check that all tensors required are computable.\n # computable_tensors: all tensors in the graph\n # that can be computed from the inputs provided.\n computable_tensors = set()\n for x in inputs:\n computable_tensors.add(id(x))\n\n layers_with_complete_input = [] # To provide a better error msg.\n for depth in depth_keys:\n for node in nodes_by_depth[depth]:\n layer = node.layer\n if layer and not node.is_input:\n for x in tf.nest.flatten(node.keras_inputs):\n if id(x) 
not in computable_tensors:\n ", "d_id": 80737, "documentation": { "docstring": "Validates a network's topology and gather its layers and nodes.\n\n Args:\n inputs: List of input tensors.\n outputs: List of outputs tensors.\n\n Returns:\n A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.\n - nodes: list of Node instances.\n - nodes_by_depth: dict mapping ints (depth) to lists of node instances.\n - layers: list of Layer instances.\n - layers_by_depth: dict mapping ints (depth) to lists of layer instances.\n\n Raises:\n ValueError: In case the network is not valid (e.g. disconnected graph).\n ", "n_words": 74, "vocab_size": 53, "n_whitespaces": 126, "language": "en" } }, { "id": 127723, "commit_id": "206e847694cba414dc4664e4ae02b20e10e3f25d", "repo": "ray", "path": "python/ray/data/dataset.py", "file_name": "dataset.py", "fun_name": "default_batch_format", "commit_message": "[Datasets] Add `Dataset.default_batch_format` (#28434)\n\nParticipants in the PyTorch UX study couldn't understand how the \"native\" batch format works. This PR introduces a method Dataset.native_batch_format that tells users exactly what the native batch format is, so users don't have to guess.", "code": "def default_batch_format(self) -> Type:\n # noqa: E501\n import pandas as pd\n import pyarrow as pa\n\n schema = self.schema()\n assert isinstance(schema, (type, PandasBlockSchema, pa.Schema))\n\n if isinstance(schema, type):\n return list\n\n if isinstance(schema, (PandasBlockSchema, pa.Schema)):\n if schema.names == [VALUE_COL_NAME]:\n return np.ndarray\n return pd.DataFrame\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 138, "n_words": 40, "vocab_size": 32, "complexity": 4, "nloc": 72, "token_counts": 79, "n_ast_nodes": 124, "n_identifiers": 18, "random_cut": "def default_batch_format(self) -> Type:\n # noqa: E501\n import pandas as pd\n import pyarrow as pa\n\n schema = self.schema()\n assert isinstance(schema,", "d_id": 28513, "documentation": { "docstring": "Return this dataset's default batch format.\n\n The default batch format describes what batches of data look like. To learn more\n about batch formats, read\n :ref:`writing user-defined functions `.\n\n Example:\n\n If your dataset represents a list of Python objects, then the default batch\n format is ``list``.\n\n >>> ds = ray.data.range(100)\n >>> ds # doctest: +SKIP\n Dataset(num_blocks=20, num_rows=100, schema=)\n >>> ds.default_batch_format()\n \n >>> next(ds.iter_batches(batch_size=4))\n [0, 1, 2, 3]\n\n If your dataset contains a single ``TensorDtype`` or ``ArrowTensorType``\n column named ``__value__`` (as created by :func:`ray.data.from_numpy`), then\n the default batch format is ``np.ndarray``. 
For more information on tensor\n datasets, read the :ref:`tensor support guide `.\n\n >>> ds = ray.data.range_tensor(100)\n >>> ds # doctest: +SKIP\n Dataset(num_blocks=20, num_rows=100, schema={__value__: ArrowTensorType(shape=(1,), dtype=int64)})\n >>> ds.default_batch_format()\n \n >>> next(ds.iter_batches(batch_size=4))\n array([[0],\n [1],\n [2],\n [3]])\n\n If your dataset represents tabular data and doesn't only consist of a\n ``__value__`` tensor column (such as is created by\n :meth:`ray.data.from_numpy`), then the default batch format is\n ``pd.DataFrame``.\n\n >>> import pandas as pd\n >>> df = pd.DataFrame({\"foo\": [\"a\", \"b\"], \"bar\": [0, 1]})\n >>> ds = ray.data.from_pandas(df)\n >>> ds # doctest: +SKIP\n Dataset(num_blocks=1, num_rows=2, schema={foo: object, bar: int64})\n >>> ds.default_batch_format()\n \n >>> next(ds.iter_batches(batch_size=4))\n foo bar\n 0 a 0\n 1 b 1\n\n .. seealso::\n\n :meth:`~Dataset.map_batches`\n Call this function to transform batches of data.\n\n :meth:`~Dataset.iter_batches`\n Call this function to iterate over batches of data.\n\n ", "n_words": 219, "vocab_size": 130, "n_whitespaces": 768, "language": "en" } }, { "id": 259378, "commit_id": "f89a40bd92004368dee38ea76a1b9eaddaff4d7a", "repo": "scikit-learn", "path": "sklearn/tree/tests/test_tree.py", "file_name": "test_tree.py", "fun_name": "test_decision_tree_regressor_sample_weight_consistency", "commit_message": "MNT fix typo in tree test name (#22943)", "code": "def test_decision_tree_regressor_sample_weight_consistency(criterion):\n \n tree_params = dict(criterion=criterion)\n tree = DecisionTreeRegressor(**tree_params, random_state=42)\n for kind in [\"zeros\", \"ones\"]:\n check_sample_weights_invariance(\n \"DecisionTreeRegressor_\" + criterion, tree, kind=\"zeros\"\n )\n\n rng = np.random.RandomState(0)\n n_samples, n_features = 10, 5\n\n X = rng.rand(n_samples, n_features)\n y = np.mean(X, axis=1) + rng.rand(n_samples)\n # make it positive in order to work also for poisson criterion\n y += np.min(y) + 0.1\n\n # check that multiplying sample_weight by 2 is equivalent\n # to repeating corresponding samples twice\n X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)\n y2 = np.concatenate([y, y[: n_samples // 2]])\n sample_weight_1 = np.ones(len(y))\n sample_weight_1[: n_samples // 2] = 2\n\n tree1 = DecisionTreeRegressor(**tree_params).fit(\n X, y, sample_weight=sample_weight_1\n )\n\n tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None)\n\n assert tree1.tree_.node_count == tree2.tree_.node_count\n # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not\n # exactly the same, but on the training set, those differences do not\n # matter and thus predictions are the same.\n assert_allclose(tree1.predict(X), tree2.predict(X))\n\n\n# TODO: Remove in v1.2\n@pytest.mark.parametrize(\"Tree\", REG_TREES.values())\n@pytest.mark.parametrize(\n \"old_criterion, new_criterion\",\n [\n (\"mse\", \"squared_error\"),\n (\"mae\", \"absolute_error\"),\n ],\n)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"Tree\", REG_TREES.values())\n@pytest.mark.parametrize(\n \"old_criterion, new_criterion\",\n [\n (\"mse\", \"squared_error\"),\n (\"mae\", \"absolute_error\"),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 282, "n_words": 159, "vocab_size": 123, "complexity": 2, "nloc": 22, "token_counts": 212, "n_ast_nodes": 431, "n_identifiers": 40, "random_cut": "def 
test_decision_tree_regressor_sample_weight_consistency(criterion):\n \n tree_params = dict(criterion=criterion)\n tree = DecisionTreeRegressor(**tree_params, random_state=42)\n for kind in [\"zeros\", \"ones\"]:\n check_sample_weights_invariance(\n \"DecisionTreeRegressor_\" + criterion, tree, kind=\"zeros\"\n )\n\n rng = np.random.RandomState(0)\n n_samples, n_features = 10, 5\n\n X = rng.rand(n_samples, n_features)\n y = np.mean(X, axis=1) + rng.rand(n_samples)\n # make it positive in order to work also for poisson criterion\n y += np.min(y) + 0.1\n\n # check that multiplying sample_weight by 2 is equivalent\n # to repeating corresponding samples twice\n X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)\n y2 = np.concatenate([y, y[: n_samples // 2]])\n sample_weight_1 = np.ones(len(y))\n sample_weight_1[: n_samples // 2] = 2\n\n tree1 = DecisionTreeRegressor(**tree_params).fit(\n X, y, sample_weight=sample_weight_1\n )\n\n tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None)\n\n assert tree1.tree_.node_count == tree2.tree_.node_count\n # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not\n # exactly the same, but on the training set, those differences do not\n # matter and thus predictions are the same.\n assert_allclose(tree1.predict(X), tree2.predict(X))\n\n\n# TODO: Remove in v1.2\n@pytest.mark.parametrize(\"Tree\", REG_TREES.values())\n@pytest.mark.parametrize(\n \"old_criterion, new_criterion\",\n [\n (\"mse\", \"squared_error\"),\n ", "d_id": 75740, "documentation": { "docstring": "Test that the impact of sample_weight is consistent.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 207608, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_with_fk_to_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_with_fk_to_field(self):\n \n response = self.client.get(\n reverse(\"admin:auth_user_changelist\") + \"?q=joe&%s=id\" % TO_FIELD_VAR\n )\n self.assertContains(response, \"\\n1 user\\n\")\n self.assertContains(\n response,\n '' % TO_FIELD_VAR,\n html=True,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 10, "token_counts": 46, "n_ast_nodes": 83, "n_identifiers": 9, "random_cut": "def test_with_fk_to_field(self):\n \n response = self.client.get(\n reverse(\"admin:auth_user_changelist\") + \"?q=joe&%s=id\" % TO_FIELD_V", "d_id": 52020, "documentation": { "docstring": "\n The to_field GET parameter is preserved when a search is performed.\n Refs #10918.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 35, "language": "en" } }, { "id": 190450, "commit_id": "4fc3616712edb19179b17dd270ad6cf63abf99c2", "repo": "DeOldify", "path": "fastai/torch_core.py", "file_name": "torch_core.py", "fun_name": "remove_module_load", "commit_message": "Upgrading to support latest Pytorch version", "code": "def remove_module_load(state_dict):\n \n new_state_dict = OrderedDict()\n for k, v in state_dict.items(): new_state_dict[k[7:]] = v\n return new_state_dict\n", "url": "https://github.com/jantic/DeOldify.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 27, "n_words": 15, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 57, 
"n_identifiers": 7, "random_cut": "def remove_module_load(state_dict):\n \n new_state_dict = OrderedDict()\n fo", "d_id": 46351, "documentation": { "docstring": "create new OrderedDict that does not contain `module.`", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 33182, "commit_id": "de8548ebf3242305d0f9792dacb6f86b196a3a33", "repo": "transformers", "path": "src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py", "file_name": "modeling_tf_layoutlmv3.py", "fun_name": "serving", "commit_message": "[LayoutLMv3] Add TensorFlow implementation (#18678)\n\nCo-authored-by: Esben Toke Christensen \r\nCo-authored-by: Lasse Reedtz \r\nCo-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>\r\nCo-authored-by: Joao Gante ", "code": "def serving(self, inputs):\n \n output = self.call(inputs)\n\n return self.serving_output(output)\n\n\nLAYOUTLMV3_START_DOCSTRING = r\n\nLAYOUTLMV3_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.\",\n LAYOUTLMV3_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.\",\n LAYOUTLMV3_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 54, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 67, "n_identifiers": 9, "random_cut": "def serving(self, inputs):\n \n output = self.call(inputs)\n\n return self.serving_output(output)\n\n\nLAYOUTLMV3_START_DOCSTRING = r\n\nLAYOUTLMV3_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare LayoutLMv3 Model transformer outputting raw hidden-states w", "d_id": 6073, "documentation": { "docstring": "\n Method used for serving the model.\n\n Args:\n inputs (`Dict[str, tf.Tensor]`):\n The input of the saved model as a dictionary of tensors.\n \n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n \n\n Parameters:\n config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. 
See `pixel_values` for `patch_sequence_length`.\n\n Indices can be obtained using [`LayoutLMv3Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n\n bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*):\n Bounding boxes of each input sequence tokens. Selected in the range `[0,\n config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)\n format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,\n y1) represents the position of the lower right corner.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. See `pixel_values` for `patch_sequence_length`.\n\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,\n config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /\n config.patch_size) * (width / config.patch_size))`.\n\n attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. See `pixel_values` for `patch_sequence_length`.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. See `pixel_values` for `patch_sequence_length`.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]\n token. See `pixel_values` for `patch_sequence_length`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n", "n_words": 689, "vocab_size": 304, "n_whitespaces": 1372, "language": "en" } }, { "id": 308556, "commit_id": "9f0805f51293851096d7ece48f48a041e4a809e0", "repo": "core", "path": "homeassistant/components/sisyphus/media_player.py", "file_name": "media_player.py", "fun_name": "media_image_url", "commit_message": "Sisyphus: Fix bad super call (#63327)\n\nCo-authored-by: Franck Nijhof ", "code": "def media_image_url(self):\n \n\n if self._table.active_track:\n return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE)\n\n return super().media_image_url\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 40, "n_words": 8, "vocab_size": 7, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 57, "n_identifiers": 9, "random_cut": "def media_image_url(self):\n \n\n if self._table.active_track:\n return self._table.active_track.get_th", "d_id": 107305, "documentation": { "docstring": "Return the URL for a thumbnail image of the current track.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 129433, "commit_id": "75b3080834bceb184e9ba19e21511eb0ea19955b", "repo": "ray", "path": "python/ray/serve/tests/test_autoscaling_policy.py", "file_name": "test_autoscaling_policy.py", "fun_name": "test_fluctuating_ongoing_requests", "commit_message": "[Serve] Serve Autoscaling Release tests (#21208)", "code": "def test_fluctuating_ongoing_requests(delay_s):\n \n\n config = AutoscalingConfig(\n min_replicas=1,\n max_replicas=10,\n target_num_ongoing_requests_per_replica=50,\n upscale_delay_s=delay_s,\n downscale_delay_s=delay_s)\n\n policy = BasicAutoscalingPolicy(config)\n\n if delay_s > 0:\n wait_periods = int(delay_s / CONTROL_LOOP_PERIOD_S)\n assert wait_periods > 1\n\n underload_requests, overload_requests = [20, 20], [100]\n trials = 1000\n\n new_num_replicas = None\n for trial in range(trials):\n if trial % 2 == 0:\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests,\n curr_target_num_replicas=1)\n if delay_s > 0:\n assert new_num_replicas == 1, trial\n else:\n assert new_num_replicas == 2, trial\n else:\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=underload_requests,\n curr_target_num_replicas=2)\n if delay_s > 0:\n assert new_num_replicas == 2, trial\n else:\n assert new_num_replicas == 1, trial\n\n\n@pytest.mark.parametrize(\n \"ongoing_requests\",\n [[7, 1, 8, 4], [8, 1, 8, 4], [6, 1, 8, 4], [0, 1, 8, 4]])", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"ongoing_requests\",\n [[7, 1, 8, 4], [8, 1, 8, 4], [6, 1, 8, 4], [0, 1, 8, 4]])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 385, "n_words": 107, "vocab_size": 55, "complexity": 6, "nloc": 31, "token_counts": 155, "n_ast_nodes": 301, "n_identifiers": 26, "random_cut": "def test_fluctuating_ongoing_requests(delay_s):\n \n\n config = AutoscalingConfig(\n min_replicas=1,\n max_replicas=10,\n target_num_ongoing_requests_per_replica=50,\n upscale_delay_s=delay_s,\n 
downscale_delay_s=delay_s)\n\n policy = BasicAutoscalingPolicy(config)\n\n if delay_s > 0:\n wait_periods = int(delay_s / CONTROL_LOOP_PERIOD_S)\n assert wait_periods > 1\n\n underload_requests, overload_requests = [20, 20], [100]\n trials = 1000\n\n new_num_replicas = None\n for trial in range(trials):\n if trial % 2 == 0:\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=overload_requests,\n curr_target_num_replicas=1)\n if delay_s > 0:\n assert new_num_replicas == 1, trial\n else:\n assert new_num_replicas == 2, trial\n else:\n new_num_replicas = policy.get_decision_num_replicas(\n current_num_ongoing_requests=underload_requests,\n curr_target_num_replicas=2)\n if delay_s > 0:\n assert new_num_replicas == 2, trial\n else:\n ", "d_id": 28953, "documentation": { "docstring": "\n Simulates a workload that switches between too many and too few\n ongoing requests.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 23, "language": "en" } }, { "id": 52487, "commit_id": "7eef3bfde63d03acbd1fc9a15a5e56bef47c0ef7", "repo": "PaddleHub", "path": "modules/audio/svs/diffsinger/utils/audio.py", "file_name": "audio.py", "fun_name": "librosa_pad_lr", "commit_message": "Add Diffsinger Module (#2120)\n\n* add diffsinger\r\n\r\n* update README\r\n\r\n* update README", "code": "def librosa_pad_lr(x, fsize, fshift, pad_sides=1):\n \n assert pad_sides in (1, 2)\n # return int(fsize // 2)\n pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]\n if pad_sides == 1:\n return 0, pad\n else:\n return pad // 2, pad // 2 + pad % 2\n\n\n# Conversions", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 78, "n_words": 47, "vocab_size": 32, "complexity": 2, "nloc": 7, "token_counts": 46, "n_ast_nodes": 105, "n_identifiers": 7, "random_cut": "def librosa_pad_lr(x, fsize, fshift, pad_sides=1):", "d_id": 10583, "documentation": { "docstring": "compute right padding (final frame) or both sides padding (first and final frames)\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 16, "language": "en" } }, { "id": 122164, "commit_id": "0cc4066bb7bf758a5ba8c5def9c2c32a1c98fb89", "repo": "jax", "path": "jax/tools/colab_tpu.py", "file_name": "colab_tpu.py", "fun_name": "setup_tpu", "commit_message": "Pin default jax.tools.colab_tpu.setup_tpu driver version.\n\nPrior to this change, we were defaulting to the TPU nightly driver\nversion. 
We should instead pin to the version associated with the\ndefault jaxlib version that Colab uses.", "code": "def setup_tpu(tpu_driver_version='tpu_driver-0.2'):\n \n global TPU_DRIVER_MODE\n\n if not TPU_DRIVER_MODE:\n colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0]\n url = f'http://{colab_tpu_addr}:8475/requestversion/{tpu_driver_version}'\n requests.post(url)\n TPU_DRIVER_MODE = 1\n\n # The following is required to use TPU Driver as JAX's backend.\n config.FLAGS.jax_xla_backend = \"tpu_driver\"\n config.FLAGS.jax_backend_target = \"grpc://\" + os.environ['COLAB_TPU_ADDR']\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 55, "n_words": 37, "vocab_size": 32, "complexity": 2, "nloc": 9, "token_counts": 64, "n_ast_nodes": 125, "n_identifiers": 14, "random_cut": "def setup_tpu(tpu_driver_version='tpu_driver-0.2'):\n \n global TPU_DRIVER_MODE\n\n if not TPU_DRIVER_MODE:\n colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0]\n url = f", "d_id": 27112, "documentation": { "docstring": "Sets up Colab to run on TPU.\n\n Note: make sure the Colab Runtime is set to Accelerator: TPU.\n\n Args\n ----\n tpu_driver_version : (str) specify the version identifier for the tpu driver.\n Defaults to \"tpu_driver-0.2\", which can be used with jaxlib 0.3.20. Set to\n \"tpu_driver_nightly\" to use the nightly tpu driver build.\n ", "n_words": 51, "vocab_size": 41, "n_whitespaces": 62, "language": "en" } }, { "id": 247899, "commit_id": "f0b03186d96305fd44d74a89bf4230beec0c5c31", "repo": "synapse", "path": "tests/storage/databases/main/test_lock.py", "file_name": "test_lock.py", "fun_name": "test_timeout_lock", "commit_message": "Add type hints for `tests/unittest.py`. 
(#12347)\n\nIn particular, add type hints for get_success and friends, which are then helpful in a bunch of places.", "code": "def test_timeout_lock(self):\n \n\n lock = self.get_success(self.store.try_acquire_lock(\"name\", \"key\"))\n assert lock is not None\n\n self.get_success(lock.__aenter__())\n\n # We simulate the process getting stuck by cancelling the looping call\n # that keeps the lock active.\n lock._looping_call.stop()\n\n # Wait for the lock to timeout.\n self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000)\n\n lock2 = self.get_success(self.store.try_acquire_lock(\"name\", \"key\"))\n self.assertIsNotNone(lock2)\n\n self.assertFalse(self.get_success(lock.is_still_valid()))\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 133, "n_words": 49, "vocab_size": 38, "complexity": 1, "nloc": 9, "token_counts": 95, "n_ast_nodes": 166, "n_identifiers": 16, "random_cut": "def test_timeout_lock(self):\n \n\n lock = self.get_success(self.store.try_acquire_lock(\"name\", \"key\"))\n assert lock is not None\n\n self.get_success(lock.__aenter__())\n\n # We simulate the process getting stuck by cancelling the looping call\n # that keeps the lock active.\n ", "d_id": 71987, "documentation": { "docstring": "Test that we time out locks if they're not updated for ages", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 275725, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/preprocessing/image.py", "file_name": "image.py", "fun_name": "random_brightness", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def random_brightness(x, brightness_range, scale=True):\n \n if len(brightness_range) != 2:\n raise ValueError(\n \"`brightness_range should be tuple or list of two floats. \"\n \"Received: %s\" % (brightness_range,)\n )\n\n u = np.random.uniform(brightness_range[0], brightness_range[1])\n return apply_brightness_shift(x, u, scale)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 81, "n_words": 33, "vocab_size": 33, "complexity": 2, "nloc": 8, "token_counts": 58, "n_ast_nodes": 92, "n_identifiers": 11, "random_cut": "def random_brightness(x, brightness_range, scale=True):\n \n if len(brightness_range) != 2:\n raise ValueError(\n \"`brightness_range should be tuple or list of two floats. \"\n \"Received: %s\" % (brightness_range,)\n )\n\n u = np.random.uniform(brightness_range[0], bri", "d_id": 81454, "documentation": { "docstring": "Performs a random brightness shift.\n\n Deprecated: `tf.keras.preprocessing.image.random_brightness` does not operate\n on tensors and is not recommended for new code. Prefer\n `tf.keras.layers.RandomBrightness` which provides equivalent functionality as\n a preprocessing layer. For more information, see the tutorial for\n [augmenting images](\n https://www.tensorflow.org/tutorials/images/data_augmentation), as well as\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers).\n\n Args:\n x: Input tensor. Must be 3D.\n brightness_range: Tuple of floats; brightness range.\n scale: Whether to rescale the image such that minimum and maximum values\n are 0 and 255 respectively. 
Default: True.\n\n Returns:\n Numpy image tensor.\n\n Raises:\n ValueError if `brightness_range` isn't a tuple.\n ", "n_words": 90, "vocab_size": 77, "n_whitespaces": 172, "language": "en" } }, { "id": 309255, "commit_id": "dc58bc375ae203e3d394225f9c3a5a14d43cb2f3", "repo": "core", "path": "tests/util/test_async.py", "file_name": "test_async.py", "fun_name": "test_check_loop_sync", "commit_message": "Warn on`time.sleep` in event loop (#63766)\n\nCo-authored-by: Martin Hjelmare ", "code": "def test_check_loop_sync(caplog):\n \n hasync.check_loop()\n assert \"Detected blocking call inside the event loop\" not in caplog.text\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 23, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def test_check_loop_sync(caplog):\n \n hasync.check_loop()\n assert \"Detected block", "d_id": 107962, "documentation": { "docstring": "Test check_loop does nothing when called from thread.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 128240, "commit_id": "65d0c0aa48be8f9f7faae857d3ab71444997755a", "repo": "ray", "path": "python/ray/serve/_private/deployment_state.py", "file_name": "deployment_state.py", "fun_name": "update", "commit_message": "[Serve] add alpha gRPC support (#28175)", "code": "def update(self) -> bool:\n \n try:\n # Add or remove DeploymentReplica instances in self._replicas.\n # This should be the only place we adjust total number of replicas\n # we manage.\n\n running_replicas_changed = self._scale_deployment_replicas()\n\n # Check the state of existing replicas and transition if necessary.\n running_replicas_changed |= self._check_and_update_replicas()\n\n if running_replicas_changed:\n self._notify_running_replicas_changed()\n\n deleted = self._check_curr_status()\n except Exception:\n self._curr_status_info = DeploymentStatusInfo(\n name=self._name,\n status=DeploymentStatus.UNHEALTHY,\n message=\"Failed to update deployment:\" f\"\\n{traceback.format_exc()}\",\n )\n deleted = False\n\n return deleted\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 279, "n_words": 70, "vocab_size": 56, "complexity": 3, "nloc": 24, "token_counts": 72, "n_ast_nodes": 138, "n_identifiers": 20, "random_cut": "def update(self) -> bool:\n \n try:\n # Add or remove DeploymentReplica instances in self._replicas.\n # This should be the only place we adjust total number of replicas\n # we manage.\n\n running_replicas_changed = self._scale_deployment_replicas()\n\n # Check the state of existing replicas and transition if necessary.\n running_replicas_changed |= self._check_and_update_replicas()\n\n if running_replicas_changed:\n self._notify_running_replicas_changed()\n\n deleted = self._check_curr_status()\n except Exception:\n self._curr_status_info = DeploymentStatusInfo(\n name=self._name,\n status=DeploymentStatus.UNHEALTHY,\n ", "d_id": 28641, "documentation": { "docstring": "Attempts to reconcile this deployment to match its goal state.\n\n This is an asynchronous call; it's expected to be called repeatedly.\n\n Also updates the internal DeploymentStatusInfo based on the current\n state of the system.\n\n Returns true if this deployment was successfully deleted.\n ", "n_words": 42, "vocab_size": 36, "n_whitespaces": 77, "language": "en" } }, { "id": 
202382, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_https_malformed_host", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_https_malformed_host(self):\n \n req = self._get_request(method=\"POST\")\n req._is_secure_override = True\n req.META[\"HTTP_HOST\"] = \"@malformed\"\n req.META[\"HTTP_REFERER\"] = \"https://www.evil.org/somepage\"\n req.META[\"SERVER_PORT\"] = \"443\"\n mw = CsrfViewMiddleware(token_view)\n expected = (\n \"Referer checking failed - https://www.evil.org/somepage does not \"\n \"match any trusted origins.\"\n )\n with self.assertRaisesMessage(RejectRequest, expected):\n mw._check_referer(req)\n response = mw.process_view(req, token_view, (), {})\n self.assertEqual(response.status_code, 403)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 165, "n_words": 48, "vocab_size": 41, "complexity": 1, "nloc": 15, "token_counts": 99, "n_ast_nodes": 176, "n_identifiers": 18, "random_cut": "def test_https_malformed_host(self):\n \n req = self._get_request(method=\"POST\")\n req._is_secure_override = True\n req.META[\"HTTP_HOST\"] = \"@malformed\"\n req.META[\"HTTP_REFERER\"] = \"https://www.evil.org/somepage\"\n req.META[\"S", "d_id": 50095, "documentation": { "docstring": "\n CsrfViewMiddleware generates a 403 response if it receives an HTTPS\n request with a bad host.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 3894, "commit_id": "1e0ac30ebdcfce55a5644bcd486044da45c93dd6", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-orb/source_orb/source.py", "file_name": "source.py", "fun_name": "stream_slices", "commit_message": "🎉 New Source: Orb (#9985)\n\n* V1 of source_orb connector\r\n\r\n* add boostrap.md file\r\n\r\n* add clause on Pagination to bootstrap.md\r\n\r\n* add SUMMARY documentation\r\n\r\n* add lookback_window_days connector parameter\r\n\r\n* Add support for start_date parameter\r\n\r\n* Add ability to transform record in order to un-nest IDs\r\n\r\n* Add support for extracting event properties based on connector configuration", "code": "def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:\n \n # TODO: self.authenticator should optionally pull from self._session.auth\n customers_stream = Customers(authenticator=self._session.auth)\n for customer in customers_stream.read_records(sync_mode=SyncMode.full_refresh):\n yield {\"customer_id\": customer[\"id\"]}\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 63, "n_words": 24, "vocab_size": 24, "complexity": 2, "nloc": 11, "token_counts": 57, "n_ast_nodes": 93, "n_identifiers": 18, "random_cut": "def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:\n \n # TODO: self.authenticator should optionally pull from sel", "d_id": 594, "documentation": { "docstring": "\n This stream is sliced per `customer_id`. This has two implications:\n (1) State can be checkpointed after processing each slice\n (2) The other parameters (e.g. 
request_params, path) can be dependent on this slice.\n\n This allows us to pull data on a per customer_id basis, since that's what Orb exposes.\n ", "n_words": 48, "vocab_size": 42, "n_whitespaces": 84, "language": "en" } }, { "id": 278476, "commit_id": "80ee2fa4e1db2dda14370110830db82be3eb97b7", "repo": "keras", "path": "keras/utils/metrics_utils.py", "file_name": "metrics_utils.py", "fun_name": "ragged_assert_compatible_and_get_flat_values", "commit_message": "resolve line-too-long in utils", "code": "def ragged_assert_compatible_and_get_flat_values(values, mask=None):\n \n if isinstance(values, list):\n is_all_ragged = all(isinstance(rt, tf.RaggedTensor) for rt in values)\n is_any_ragged = any(isinstance(rt, tf.RaggedTensor) for rt in values)\n else:\n is_all_ragged = isinstance(values, tf.RaggedTensor)\n is_any_ragged = is_all_ragged\n if is_all_ragged and ((mask is None) or isinstance(mask, tf.RaggedTensor)):\n to_be_stripped = False\n if not isinstance(values, list):\n values = [values]\n to_be_stripped = True\n\n # NOTE: we leave the flat_values compatibility to\n # tf.TensorShape `assert_is_compatible_with` check if both dynamic\n # dimensions are equal and then use the flat_values.\n nested_row_split_list = [rt.nested_row_splits for rt in values]\n assertion_list = _assert_splits_match(nested_row_split_list)\n\n # if both are ragged sample_weights also should be ragged with same\n # dims.\n if isinstance(mask, tf.RaggedTensor):\n assertion_list_for_mask = _assert_splits_match(\n [nested_row_split_list[0], mask.nested_row_splits]\n )\n with tf.control_dependencies(assertion_list_for_mask):\n mask = tf.expand_dims(mask.flat_values, -1)\n\n # values has at least 1 element.\n flat_values = []\n for value in values:\n with tf.control_dependencies(assertion_list):\n flat_values.append(tf.expand_dims(value.flat_values, -1))\n\n values = flat_values[0] if to_be_stripped else flat_values\n\n elif is_any_ragged:\n raise TypeError(\n \"Some of the inputs are not tf.RaggedTensor. \"\n f\"Input received: {values}\"\n )\n # values are empty or value are not ragged and mask is ragged.\n elif isinstance(mask, tf.RaggedTensor):\n raise TypeError(\n \"Ragged mask is not allowed with non-ragged inputs. 
\"\n f\"Input received: {values}, mask received: {mask}\"\n )\n\n return values, mask\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 526, "n_words": 193, "vocab_size": 107, "complexity": 14, "nloc": 36, "token_counts": 244, "n_ast_nodes": 405, "n_identifiers": 24, "random_cut": "def ragged_assert_compatible_and_get_flat_values(values, mask=None):\n \n if isinstance(values, list):\n is_all_ragged = all(isinstance(rt, tf.RaggedTensor) for rt in values)\n is_any_ragged = any(isinstance(rt, tf.RaggedTensor) for rt in values)\n else:\n is_all_ragged = isinstance(values, tf.RaggedTensor)\n is_any_ragged = is_all_ragged\n if is_all_ragged and ((mask is None) or isinstance(mask, tf.RaggedTensor)):\n to_be_stri", "d_id": 82567, "documentation": { "docstring": "If ragged, it checks the compatibility and then returns the flat_values.\n\n Note: If two tensors are dense, it does not check their compatibility.\n Note: Although two ragged tensors with different ragged ranks could have\n identical overall rank and dimension sizes and hence be compatible,\n we do not support those cases.\n Args:\n values: A list of potentially ragged tensor of the same ragged_rank.\n mask: A potentially ragged tensor of the same ragged_rank as elements in\n Values.\n\n Returns:\n A tuple in which the first element is the list of tensors and the second\n is the mask tensor. ([Values], mask). Mask and the element in Values\n are equal to the flat_values of the input arguments (if they were\n ragged).\n ", "n_words": 116, "vocab_size": 77, "n_whitespaces": 205, "language": "en" } }, { "id": 12428, "commit_id": "7c4c39a9d82c58ef2493c21a288c755901a9594e", "repo": "jina", "path": "jina/orchestrate/deployments/__init__.py", "file_name": "__init__.py", "fun_name": "update_sandbox_args", "commit_message": "fix: do not deploy sandbox on init (#4844)", "code": "def update_sandbox_args(self):\n \n if self.is_sandbox:\n host, port = HubIO.deploy_public_sandbox(self.args)\n self._sandbox_deployed = True\n self.first_pod_args.host = host\n self.first_pod_args.port = port\n if self.head_args:\n self.pod_args['head'].host = host\n self.pod_args['head'].port = port\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 124, "n_words": 25, "vocab_size": 16, "complexity": 3, "nloc": 9, "token_counts": 67, "n_ast_nodes": 112, "n_identifiers": 12, "random_cut": "def update_sandbox_args(self):\n \n if self.is_sandbox:\n host, port = HubIO.deploy_public_sandbox(self.args)\n self._sandbox_deployed = True\n self.first_pod_args.host = host\n self.first_pod_args.port = port\n if self.head_args:\n self.pod_args['head'].host = h", "d_id": 2286, "documentation": { "docstring": "Update args of all its pods based on the host and port returned by Hubble", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 158146, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "download_all", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def download_all():\n \n for name in DATA_HUB:\n download(name)\n\nDATA_HUB['kaggle_house_train'] = (\n DATA_URL + 'kaggle_house_pred_train.csv',\n '585e9cc93e70b39160e7921475f9bcd7d31219ce')\n\nDATA_HUB['kaggle_house_test'] = (\n DATA_URL + 'kaggle_house_pred_test.csv',\n 'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 44, "n_words": 21, "vocab_size": 17, "complexity": 2, "nloc": 3, "token_counts": 14, "n_ast_nodes": 72, "n_identifiers": 5, "random_cut": "def download_all():\n \n for name in DATA_HUB:\n download(name)\n\nDATA_HUB['kaggle_house_train'] = (\n DATA_URL + 'kaggle_house_pred_train.csv',\n '585e9cc93e70b39160e7921475f9bcd7d31219ce')\n\nDATA_HUB['kaggle_house_test'] = (\n DATA_URL + 'kaggle_house", "d_id": 37328, "documentation": { "docstring": "Download all files in the DATA_HUB.\n\n Defined in 
:numref:`sec_kaggle_house`", "n_words": 9, "vocab_size": 8, "n_whitespaces": 11, "language": "en" } }, { "id": 218873, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/pytree.py", "file_name": "pytree.py", "fun_name": "generate_matches", "commit_message": "add python 3.10.4 for windows", "code": "def generate_matches(self, nodes):\n \n r = {}\n if nodes and self.match(nodes[0], r):\n yield 1, r\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 14, "vocab_size": 13, "complexity": 3, "nloc": 4, "token_counts": 31, "n_ast_nodes": 51, "n_identifiers": 5, "random_cut": "def generate_matches(self, nodes):\n \n r = {}\n if nodes and self.match(nodes[0], r):\n yield 1, r\n\n", "d_id": 55519, "documentation": { "docstring": "\n Generator yielding all matches for this pattern.\n\n Default implementation for non-wildcard patterns.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 34, "language": "en" } }, { "id": 73139, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/helpers/permission.py", "file_name": "permission.py", "fun_name": "user_can_delete_obj", "commit_message": "Reformat with black", "code": "def user_can_delete_obj(self, user, obj):\n \n perm_codename = self.get_perm_codename(\"delete\")\n return self.user_has_specific_permission(user, perm_codename)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def user_can_delete_obj(self, user, obj):\n \n perm_codenam", "d_id": 15953, "documentation": { "docstring": "\n Return a boolean to indicate whether `user` is permitted to 'delete'\n a specific `self.model` instance.\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 37, "language": "en" } }, { "id": 101409, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "tools/preview/preview.py", "file_name": "preview.py", "fun_name": "_busy_indicator_trace", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def _busy_indicator_trace(self, *args) -> None:\n \n logger.trace(\"Busy indicator trace: %s\", args) # type: ignore\n if self._busy_tkvar.get():\n self._start_busy_indicator()\n else:\n self._stop_busy_indicator()\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 69, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 13, "token_counts": 40, "n_ast_nodes": 72, "n_identifiers": 9, "random_cut": "def _busy_indicator_trace(self, *args) -> None:\n \n logger.trace(\"Busy indicator trace: %s\", args) # type: ignor", "d_id": 20823, "documentation": { "docstring": " Show or hide busy indicator based on whether the preview is updating.\n\n Parameters\n ----------\n args: unused\n Required for tkinter event, but unused\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 62, "language": "en" } }, { "id": 189448, "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", "repo": "manim", "path": 
"manim/mobject/svg/code_mobject.py", "file_name": "code_mobject.py", "fun_name": "gen_html_string", "commit_message": "Hide more private methods from the docs. (#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", "code": "def _gen_html_string(self):\n \n self.html_string = _hilite_me(\n self.code_string,\n self.language,\n self.style,\n self.insert_line_no,\n \"border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;\",\n self.file_path,\n self.line_no_from,\n )\n\n if self.generate_html_file:\n os.makedirs(\n os.path.join(\"assets\", \"codes\", \"generated_html_files\"),\n exist_ok=True,\n )\n with open(\n os.path.join(\n \"assets\",\n \"codes\",\n \"generated_html_files\",\n self.file_name + \".html\",\n ),\n \"w\",\n ) as file:\n file.write(self.html_string)\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 356, "n_words": 41, "vocab_size": 37, "complexity": 2, "nloc": 25, "token_counts": 103, "n_ast_nodes": 170, "n_identifiers": 20, "random_cut": "def _gen_html_string(self):\n \n self.html_string = _hilite_me(\n self.code_string,\n self.language,\n self.style,\n self.insert_line_no,\n \"border:solid gray;bor", "d_id": 46056, "documentation": { "docstring": "Function to generate html string with code highlighted and stores in variable html_string.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 22074, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/cookies.py", "file_name": "cookies.py", "fun_name": "__getstate__", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def __getstate__(self):\n \n state = self.__dict__.copy()\n # remove the unpickleable RLock object\n state.pop(\"_cookies_lock\")\n return state\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 49, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 23, "n_ast_nodes": 44, "n_identifiers": 6, "random_cut": "def __getstate__(self):\n \n state = self.__di", "d_id": 4155, "documentation": { "docstring": "Unlike a normal CookieJar, this class is pickleable.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 211247, "commit_id": "ff8a7b1d090a2f57048d3e87892706a8407dcfe6", "repo": "PaddleDetection", "path": "deploy/pipeline/pipeline.py", "file_name": "pipeline.py", "fun_name": "get_model_dir", "commit_message": "move initialize part into class (#6621)", "code": "def get_model_dir(cfg):\n \n for key in cfg.keys():\n if type(cfg[key]) == dict and \\\n (\"enable\" in cfg[key].keys() and cfg[key]['enable']\n or \"enable\" not in cfg[key].keys()):\n\n if \"model_dir\" in cfg[key].keys():\n model_dir = cfg[key][\"model_dir\"]\n downloaded_model_dir = auto_download_model(model_dir)\n if downloaded_model_dir:\n model_dir = downloaded_model_dir\n cfg[key][\"model_dir\"] = model_dir\n print(key, \" model dir: \", model_dir)\n elif key == \"VEHICLE_PLATE\":\n det_model_dir = cfg[key][\"det_model_dir\"]\n downloaded_det_model_dir = auto_download_model(det_model_dir)\n if downloaded_det_model_dir:\n det_model_dir = downloaded_det_model_dir\n cfg[key][\"det_model_dir\"] = det_model_dir\n print(\"det_model_dir model dir: \", det_model_dir)\n\n rec_model_dir = cfg[key][\"rec_model_dir\"]\n downloaded_rec_model_dir = auto_download_model(rec_model_dir)\n if downloaded_rec_model_dir:\n rec_model_dir = downloaded_rec_model_dir\n cfg[key][\"rec_model_dir\"] = rec_model_dir\n print(\"rec_model_dir model dir: \", rec_model_dir)\n\n elif key == \"MOT\": # for idbased and skeletonbased actions\n model_dir = cfg[key][\"model_dir\"]\n downloaded_model_dir = auto_download_model(model_dir)\n if downloaded_model_dir:\n model_dir = downloaded_model_dir\n cfg[key][\"model_dir\"] = model_dir\n print(\"mot_model_dir model_dir: \", model_dir)\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 554, "n_words": 116, "vocab_size": 56, "complexity": 13, "nloc": 32, "token_counts": 228, "n_ast_nodes": 387, "n_identifiers": 14, "random_cut": "def get_model_dir(cfg):\n \n for key in cfg.keys():\n if type(cfg[key]) == dict and \\\n (\"enable\" in cfg[key].keys() and cfg[key]['enable']\n or \"enable\" not in cfg[key].keys()):\n\n if \"model_dir\" in cfg[key].keys():\n model_dir = cfg[key][\"model_dir\"]\n downloaded_model_dir = auto_download_model(model_dir)\n if downloaded_model_dir:\n model_dir = downloaded_model_dir\n cfg[key][\"model_dir\"] = model_dir\n print(key, \" model dir: \", model_dir)\n elif key == \"VEHICLE_PLATE\":\n det_model_dir = cfg[key][\"det_model_dir\"]\n downloaded_det_model_dir = auto_download_model(det_model_dir)\n if downloaded_det_model_dir:\n det_model_dir = downloaded_det_model_dir\n cfg[key][\"det_model_dir\"] = det_model_dir\n print(\"det_model_dir model dir: \", det_model_dir)\n\n rec_model_dir = cfg[key][\"rec_model_dir\"]\n downloaded_rec_", "d_id": 53050, "documentation": { 
"docstring": " \n Auto download inference model if the model_path is a url link. \n Otherwise it will use the model_path directly.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 22669, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "linear-algebra-python/src/lib.py", "file_name": "lib.py", "fun_name": "component", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def component(self, x, y):\n \n if x >= 0 and x < self.__height and y >= 0 and y < self.__width:\n return self.__matrix[x][y]\n else:\n raise Exception(\"changeComponent: indices out of bounds\")\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 72, "n_words": 29, "vocab_size": 22, "complexity": 5, "nloc": 5, "token_counts": 48, "n_ast_nodes": 77, "n_identifiers": 8, "random_cut": "def component(self, x, y):\n \n if x >= 0 and x < self.__height and y >= 0 and y < s", "d_id": 4398, "documentation": { "docstring": "\n returns the specified (x,y) component\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, { "id": 109908, "commit_id": "df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/axisartist/axis_artist.py", "file_name": "axis_artist.py", "fun_name": "toggle", "commit_message": "Improve mpl_toolkit documentation", "code": "def toggle(self, all=None, ticks=None, ticklabels=None, label=None):\n \n if all:\n _ticks, _ticklabels, _label = True, True, True\n elif all is not None:\n _ticks, _ticklabels, _label = False, False, False\n else:\n _ticks, _ticklabels, _label = None, None, None\n\n if ticks is not None:\n _ticks = ticks\n if ticklabels is not None:\n _ticklabels = ticklabels\n if label is not None:\n _label = label\n\n if _ticks is not None:\n self.major_ticks.set_visible(_ticks)\n self.minor_ticks.set_visible(_ticks)\n if _ticklabels is not None:\n self.major_ticklabels.set_visible(_ticklabels)\n self.minor_ticklabels.set_visible(_ticklabels)\n if _label is not None:\n self.label.set_visible(_label)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 270, "n_words": 79, "vocab_size": 34, "complexity": 9, "nloc": 21, "token_counts": 151, "n_ast_nodes": 230, "n_identifiers": 14, "random_cut": "def toggle(self, all=None, ticks=None, ticklabels=None, label=None):\n \n if all:\n _ticks, _ticklabels, _label = True, True, True\n elif all is not None:\n _ticks, _ticklabels, _label = False, False, False\n else:\n _ticks, _ticklabels, _label = None, None, None\n\n if ticks is not Non", "d_id": 23815, "documentation": { "docstring": "\n Toggle visibility of ticks, ticklabels, and (axis) label.\n To turn all off, ::\n\n axis.toggle(all=False)\n\n To turn all off but ticks on ::\n\n axis.toggle(all=False, ticks=True)\n\n To turn all on but (axis) label off ::\n\n axis.toggle(all=True, label=False)\n\n ", "n_words": 35, "vocab_size": 23, "n_whitespaces": 98, "language": "en" } }, { "id": 257136, "commit_id": "4eec2dc45ee60e8b8780aa4f956aea8ad3624da3", "repo": "haystack", "path": "test/test_pipeline_yaml.py", "file_name": "test_pipeline_yaml.py", "fun_name": "mock_json_schema", "commit_message": "Change YAML version exception into a warning (#2385)\n\n* Change exception into warning, add strict_version param, and remove compatibility between schemas\r\n\r\n* Simplify 
update_json_schema\r\n\r\n* Rename unstable into master\r\n\r\n* Prevent validate_config from changing the config to validate\r\n\r\n* Fix version validation and add tests\r\n\r\n* Rename master into ignore\r\n\r\n* Complete parameter rename\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def mock_json_schema(request, monkeypatch, tmp_path):\n \n # Do not patch integration tests\n if \"integration\" in request.keywords:\n return\n\n # Mock the subclasses list to make it very small, containing only mock nodes\n monkeypatch.setattr(\n haystack.nodes._json_schema,\n \"find_subclasses_in_modules\",\n lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)],\n )\n # Point the JSON schema path to tmp_path\n monkeypatch.setattr(haystack.pipelines.config, \"JSON_SCHEMAS_PATH\", tmp_path)\n\n # Generate mock schema in tmp_path\n filename = f\"haystack-pipeline-master.schema.json\"\n test_schema = _json_schema.get_json_schema(filename=filename, version=\"ignore\")\n\n with open(tmp_path / filename, \"w\") as schema_file:\n json.dump(test_schema, schema_file, indent=4)\n\n\n#\n# Integration\n#\n\n\n@pytest.mark.integration\n@pytest.mark.elasticsearch", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.integration\n@pytest.mark.elasticsearch", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 148, "n_words": 82, "vocab_size": 68, "complexity": 2, "nloc": 13, "token_counts": 114, "n_ast_nodes": 206, "n_identifiers": 30, "random_cut": "def mock_json_schema(request, monkeypatch, tmp_path):\n \n # Do not patch integration tests\n if \"integration\" in request.keywords:\n return\n\n # Mock the subclasses list to make it very small, containing only mock nodes\n monkeypatch.setattr(\n haystack.nodes._json_schema,\n \"find_subclasses_in_modules\",\n lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)],\n )\n # Point the JSON schema path to tmp_path\n monkeypatch.setattr(haystack.pipelines.config, \"JSON_SCHEMAS_PATH\", tmp_path)\n\n # Generate mock schema in tmp_path\n filename = f\"haystack-pipeline-master.schema.json\"\n test_schema = _json_schema.get_json_schema(filename=filename, version=\"ignore\")\n\n with open(tm", "d_id": 75030, "documentation": { "docstring": "\n JSON schema with the master version and only mocked nodes.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 269606, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "_has_nchw_support", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _has_nchw_support():\n \n explicitly_on_cpu = _is_current_explicit_device(\"CPU\")\n gpus_available = bool(_get_available_gpus())\n return not explicitly_on_cpu and gpus_available\n\n\n# VARIABLE MANIPULATION\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 27, "n_words": 16, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 24, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def _has_nchw_support():\n \n explicitly_on_cpu = _is_current_explicit_device(\"CPU\")\n gpus_available = bool(_get_available_gpus())\n return not explicitly_on_cpu and gpus_available\n\n\n# VARIABLE MANIPULATI", "d_id": 80226, 
"documentation": { "docstring": "Check whether the current scope supports NCHW ops.\n\n TensorFlow does not support NCHW on CPU. Therefore we check if we are not\n explicitly put on\n CPU, and have GPUs available. In this case there will be soft-placing on the\n GPU device.\n\n Returns:\n bool: if the current scope device placement would support nchw\n ", "n_words": 52, "vocab_size": 41, "n_whitespaces": 77, "language": "en" } }, { "id": 157549, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/modules/image_degradation/utils_image.py", "file_name": "utils_image.py", "fun_name": "tensor2img", "commit_message": "release more models", "code": "def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):\n \n tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp\n tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]\n n_dim = tensor.dim()\n if n_dim == 4:\n n_img = len(tensor)\n img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 3:\n img_np = tensor.numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 2:\n img_np = tensor.numpy()\n else:\n raise TypeError(\n 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))\n if out_type == np.uint8:\n img_np = (img_np * 255.0).round()\n # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.\n return img_np.astype(out_type)\n\n\n\n\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 225, "n_words": 117, "vocab_size": 77, "complexity": 5, "nloc": 24, "token_counts": 228, "n_ast_nodes": 358, "n_identifiers": 27, "random_cut": "def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):\n \n ", "d_id": 36978, "documentation": { "docstring": "\n Converts a torch Tensor into an image Numpy array of BGR channel order\n Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order\n Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)\n \n# --------------------------------------------\n# Augmentation, flipe and/or rotate\n# --------------------------------------------\n# The following two are enough.\n# (1) augmet_img: numpy image of WxHxC or WxH\n# (2) augment_img_tensor4: tensor image 1xCxWxH\n# --------------------------------------------\n", "n_words": 62, "vocab_size": 46, "n_whitespaces": 68, "language": "en" } }, { "id": 181347, "commit_id": "51824608865b66ab04b018f55055124edbe603f3", "repo": "gradio", "path": "gradio/utils.py", "file_name": "utils.py", "fun_name": "get_local_ip_address", "commit_message": "Patching `test_get_ip` attempt 2 (#2810)\n\n* ip-patch-2\r\n\r\n* formatting\r\n\r\n* patch 2", "code": "def get_local_ip_address() -> str:\n \n try:\n ip_address = requests.get(\n \"https://checkip.amazonaws.com/\", timeout=3\n ).text.strip()\n except (requests.ConnectionError, requests.exceptions.ReadTimeout):\n ip_address = \"No internet connection\"\n return ip_address\n\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 65, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 9, "token_counts": 45, "n_ast_nodes": 78, "n_identifiers": 11, "random_cut": "def get_local_ip_address() -> str:\n \n try:\n ip_address = 
requests.get(\n \"https://checkip.amazonaws.com/\", timeout=3\n ).text.strip()\n except (requests.ConnectionError, requests.exceptions.ReadTimeout):\n ip_address = \"No internet connection\"\n return ip_address\n\n", "d_id": 43310, "documentation": { "docstring": "Gets the public IP address or returns the string \"No internet connection\" if unable to obtain it.", "n_words": 17, "vocab_size": 16, "n_whitespaces": 16, "language": "en" } }, { "id": 189674, "commit_id": "e040bcacd38378386749db18aeba575b93f4ebca", "repo": "manim", "path": "manim/mobject/geometry/arc.py", "file_name": "arc.py", "fun_name": "get_tip", "commit_message": "Improved structure of the :mod:`.mobject` module (#2476)\n\n* group graphing and update its references\r\n\r\n* group text and update its references\r\n\r\n* group opengl and update its references\r\n\r\n* group three_d and update its references\r\n\r\n* group geometry and update (most) references\r\n\r\n* move some chaning.py + updater files into animation\r\n\r\n* refactor arc.py\r\n\r\n* refactor line.py\r\n\r\n* refactor polygram.py\r\n\r\n* refactor tips.py\r\n\r\n* black + isort\r\n\r\n* import new files in __init__.py\r\n\r\n* refactor places where geometry was used\r\n\r\n* black + isort again\r\n\r\n* remove unused imports\r\n\r\n* update reference.rst\r\n\r\n* add descriptions to files\r\n\r\n* fix circular imports\r\n\r\n* forgot ArrowTip\r\n\r\n* fix tests\r\n\r\n* fix doctests\r\n\r\n* satisfy mypy?\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix ALL merge conflicts\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* one VMobject import slipped through\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* re-add imports to `manim/opengl/__init__.py`\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix reference manual\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* ignore unknown directive type\r\n\r\n* fix arrow tip imports in docstrings\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Benjamin Hackl ", "code": "def get_tip(self):\n \n tips = self.get_tips()\n if len(tips) == 0:\n raise Exception(\"tip not found\")\n else:\n return tips[0]\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 66, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 33, "n_ast_nodes": 59, "n_identifiers": 6, "random_cut": "def get_tip(self):\n \n tips = self.get_tips()\n if len(tips) == 0:\n raise Excep", "d_id": 46161, "documentation": { "docstring": "Returns the TipableVMobject instance's (first) tip,\n otherwise throws an exception.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 153839, "commit_id": "b22b93df20ad25ae7a11f0c89d32fb2f234d4641", "repo": "modin", "path": "modin/core/dataframe/pandas/partitioning/axis_partition.py", 
"file_name": "axis_partition.py", "fun_name": "shuffle", "commit_message": "FIX-#4464: Refactor Ray utils and quick fix groupby.count failing on virtual partitions (#4490)\n\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: jeffreykennethli ", "code": "def shuffle(self, func, lengths, **kwargs):\n \n num_splits = len(lengths)\n # We add these to kwargs and will pop them off before performing the operation.\n kwargs[\"manual_partition\"] = True\n kwargs[\"_lengths\"] = lengths\n args = [self.axis, func, num_splits, False]\n args.extend(self.list_of_blocks)\n return self._wrap_partitions(self.deploy_axis_func(*args, **kwargs))\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 95, "n_words": 39, "vocab_size": 35, "complexity": 1, "nloc": 7, "token_counts": 68, "n_ast_nodes": 109, "n_identifiers": 13, "random_cut": "def shuffle(self, func, lengths, **kwargs):\n \n num_splits = len(lengths)\n # We add these to kwargs and will pop them off before performing the operation.\n kwargs[\"manual_partition\"] = True\n kwargs[\"_lengths\"] = lengths\n args = [self.axis, func, num_splits, False]\n args.extend(self.list_of_blocks)\n return self._wrap_partitions(self.d", "d_id": 35652, "documentation": { "docstring": "\n Shuffle the order of the data in this axis partition based on the `lengths`.\n\n Parameters\n ----------\n func : callable\n The function to apply before splitting.\n lengths : list\n The list of partition lengths to split the result into.\n **kwargs : dict\n Additional keywords arguments to be passed in `func`.\n\n Returns\n -------\n list\n A list of `PandasDataframePartition` objects split by `lengths`.\n ", "n_words": 60, "vocab_size": 42, "n_whitespaces": 175, "language": "en" } }, { "id": 248677, "commit_id": "13e359aec8ae8be8dc56a036ae6d9f2bc1d07385", "repo": "synapse", "path": "tests/storage/databases/main/test_room.py", "file_name": "test_room.py", "fun_name": "test_background_add_room_type_column", "commit_message": "Implement MSC3827: Filtering of `/publicRooms` by room type (#13031)\n\nSigned-off-by: Šimon Brandner ", "code": "def test_background_add_room_type_column(self):\n \n\n # Create a room without a type\n room_id = self._generate_room()\n\n # Get event_id of the m.room.create event\n event_id = self.get_success(\n self.store.db_pool.simple_select_one_onecol(\n table=\"current_state_events\",\n keyvalues={\n \"room_id\": room_id,\n \"type\": \"m.room.create\",\n },\n retcol=\"event_id\",\n )\n )\n\n # Fake a room creation event with a room type\n event = {\n \"content\": {\n \"creator\": \"@user:server.org\",\n \"room_version\": \"9\",\n \"type\": RoomTypes.SPACE,\n },\n \"type\": \"m.room.create\",\n }\n self.get_success(\n self.store.db_pool.simple_update(\n table=\"event_json\",\n keyvalues={\"event_id\": event_id},\n updatevalues={\"json\": json.dumps(event)},\n desc=\"test\",\n )\n )\n\n # Insert and run the background update\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n {\n \"update_name\": _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN,\n \"progress_json\": \"{}\",\n },\n )\n )\n\n # ... 
and tell the DataStore that it hasn't finished all updates yet\n self.store.db_pool.updates._all_done = False\n\n # Now let's actually drive the updates to completion\n self.wait_for_background_updates()\n\n # Make sure the background update filled in the room type\n room_type_after = self.get_success(\n self.store.db_pool.simple_select_one_onecol(\n table=\"room_stats_state\",\n keyvalues={\"room_id\": room_id},\n retcol=\"room_type\",\n allow_none=True,\n )\n )\n self.assertEqual(room_type_after, RoomTypes.SPACE)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 757, "n_words": 136, "vocab_size": 88, "complexity": 1, "nloc": 48, "token_counts": 211, "n_ast_nodes": 368, "n_identifiers": 29, "random_cut": "def test_background_add_room_type_column(self):\n \n\n # Create a room without a type\n room_id = self._generate_room()\n\n # Get event_id of the m.room.create event\n event_id = self.get_success(\n self.store.db_pool.simple_select_one_onecol(\n table=\"current_state_events\",\n keyvalues={\n \"room_id\": room_id,\n \"type\": \"m.room.create\",\n ", "d_id": 72413, "documentation": { "docstring": "Test that the background update to populate the `room_type` column in\n `room_stats_state` works properly.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 256622, "commit_id": "4e940be85902dc93f3924662ba83111df72bb4d3", "repo": "haystack", "path": "rest_api/controller/feedback.py", "file_name": "feedback.py", "fun_name": "get_feedback", "commit_message": "Allow Linux CI to push changes to forks (#2182)\n\n* Add explicit reference to repo name to allow CI to push code back\r\n\r\n* Run test matrix only on tested code changes\r\n\r\n* Isolate the bot to check if it works\r\n\r\n* Clarify situation with a comment\r\n\r\n* Simplify autoformat.yml\r\n\r\n* Add code and docs check\r\n\r\n* Add git pull to make sure to fetch changes if they were created\r\n\r\n* Add cache to autoformat.yml too\r\n\r\n* Add information on forks in CONTRIBUTING.md\r\n\r\n* Add a not about code quality tools in CONTRIBUTING.md\r\n\r\n* Add image file types to the CI exclusion list\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def get_feedback():\n \n labels = DOCUMENT_STORE.get_all_labels()\n return labels\n\n\n@router.delete(\"/feedback\")", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@router.delete(\"/feedback\")", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 16, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 41, "n_identifiers": 6, "random_cut": "def get_feedback():\n \n labels = DOCUMENT_", "d_id": 74903, "documentation": { "docstring": "\n This endpoint allows the API user to retrieve all the feedback that has been submitted\n through the `POST /feedback` endpoint.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 30, "language": "en" } }, { "id": 20288, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/formatters/__init__.py", "file_name": "__init__.py", "fun_name": "get_formatter_for_filename", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of 
pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def get_formatter_for_filename(fn, **options):\n \n fn = basename(fn)\n for modname, name, _, filenames, _ in FORMATTERS.values():\n for filename in filenames:\n if _fn_matches(fn, filename):\n if name not in _formatter_cache:\n _load_formatters(modname)\n return _formatter_cache[name](**options)\n for cls in find_plugin_formatters():\n for filename in cls.filenames:\n if _fn_matches(fn, filename):\n return cls(**options)\n raise ClassNotFound(\"no formatter found for file name %r\" % fn)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 167, "n_words": 52, "vocab_size": 37, "complexity": 8, "nloc": 13, "token_counts": 99, "n_ast_nodes": 155, "n_identifiers": 17, "random_cut": "def get_formatter_for_filename(fn, **options):\n \n fn = basename(fn)\n for modname, name, _, filenames, _ in FORMATTERS.values():\n for filename in filenames:\n ", "d_id": 3311, "documentation": { "docstring": "Lookup and instantiate a formatter by filename pattern.\n\n Raises ClassNotFound if not found.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 137676, "commit_id": "e76ccee69aaa7583be1a9d81cf7b2aa72cf25647", "repo": "ray", "path": "python/ray/util/spark/utils.py", "file_name": "utils.py", "fun_name": "setup_sigterm_on_parent_death", "commit_message": "Ray on spark implementation (#28771)\n\nREP: ray-project/enhancements#14", "code": "def setup_sigterm_on_parent_death():\n \n try:\n import ctypes\n import signal\n\n libc = ctypes.CDLL(\"libc.so.6\")\n # Set the parent process death signal of the command process to SIGTERM.\n libc.prctl(1, signal.SIGTERM) # PR_SET_PDEATHSIG, see prctl.h\n except OSError as e:\n _logger.warning(f\"Setup libc.prctl PR_SET_PDEATHSIG failed, error {repr(e)}.\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 91, "n_words": 39, "vocab_size": 34, "complexity": 2, "nloc": 8, "token_counts": 41, "n_ast_nodes": 86, "n_identifiers": 12, "random_cut": "def setup_sigterm_on_parent_death():\n \n try:\n import ctypes\n import sig", "d_id": 31214, "documentation": { "docstring": "\n Uses prctl to automatically send SIGTERM to the child process when its parent is\n dead. 
The child process itself should handle SIGTERM properly.\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 33, "language": "en" } }, { "id": 144090, "commit_id": "fe167c94b10c832071544d82e83b51e534526c5b", "repo": "ray", "path": "python/ray/data/dataset.py", "file_name": "dataset.py", "fun_name": "force_reads", "commit_message": "Deflake occasional deadlock in test_dataset.py::test_basic_actors[True] (#21970)", "code": "def force_reads(self) -> \"Dataset[T]\":\n \n blocks = self.get_internal_block_refs()\n bar = ProgressBar(\"Force reads\", len(blocks))\n bar.block_until_complete(blocks)\n return self\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 10, "token_counts": 34, "n_ast_nodes": 62, "n_identifiers": 8, "random_cut": "def force_reads(self) -> \"Dataset[T]\":\n \n blocks = self.get_internal_block_refs()\n bar = ProgressBar(\"Force reads\", len(blocks))\n bar.block_until_complete(blocks)\n return self\n", "d_id": 33111, "documentation": { "docstring": "Force full evaluation of the blocks of this dataset.\n\n This can be used to read all blocks into memory. By default, Datasets\n doesn't read blocks from the datasource until the first transform.\n ", "n_words": 32, "vocab_size": 26, "n_whitespaces": 53, "language": "en" } }, { "id": 182658, "commit_id": "1a20b9de7d4cef7f93e4500757d3fb42e680f40c", "repo": "textual", "path": "src/textual/_compositor.py", "file_name": "_compositor.py", "fun_name": "__iter__", "commit_message": "docstring", "code": "def __iter__(self) -> Iterator[tuple[Widget, Region, Region, Size, Size]]:\n \n layers = sorted(self.map.items(), key=lambda item: item[1].order, reverse=True)\n intersection = Region.intersection\n for widget, (region, _order, clip, virtual_size, container_size) in layers:\n yield (\n widget,\n intersection(region, clip),\n region,\n virtual_size,\n container_size,\n )\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 161, "n_words": 36, "vocab_size": 32, "complexity": 2, "nloc": 17, "token_counts": 90, "n_ast_nodes": 126, "n_identifiers": 22, "random_cut": "def __iter__(self) -> Iterator[tuple[Widget, Region, Region, Size, Size]]:\n \n layers = sorted(self.map.items(), key=lambda item: item[1].order, reverse=True)\n intersection = Region.intersection\n for widget, (region, _order, clip, virtual_size, container_size) in layers:\n yield (\n widget,\n intersection(region, clip),\n region,\n virtual_size,\n container", "d_id": 43896, "documentation": { "docstring": "Iterate map with information regarding each widget and is position\n\n Yields:\n Iterator[tuple[Widget, Region, Region, Size, Size]]: Iterates a tuple of\n Widget, clip region, region, virtual size, and container size.\n ", "n_words": 29, "vocab_size": 26, "n_whitespaces": 69, "language": "en" } }, { "id": 160358, "commit_id": "569fc6a40ea53054409e00c7d1c0e7f5f53cb0ce", "repo": "numpy", "path": "numpy/lib/recfunctions.py", "file_name": "recfunctions.py", "fun_name": "get_names_flat", "commit_message": "Fix docstring and examples for rfn.get_names*", "code": "def get_names_flat(adtype):\n \n listnames = []\n names = adtype.names\n for name in names:\n listnames.append(name)\n current = adtype[name]\n if current.names is not None:\n listnames.extend(get_names_flat(current))\n return tuple(listnames)\n\n", "url": 
"https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 71, "n_words": 24, "vocab_size": 22, "complexity": 3, "nloc": 9, "token_counts": 54, "n_ast_nodes": 89, "n_identifiers": 9, "random_cut": "def get_names_flat(adtype):\n \n listnames = []\n names = adtype.names\n for name in names:\n listnames.append(name)\n current = adtype[name]\n if current.names is not None:\n listnames.extend(ge", "d_id": 38608, "documentation": { "docstring": "\n Returns the field names of the input datatype as a tuple. Input datatype\n has to have fields otherwise error is raised.\n Nested structure are flattened beforehand.\n\n Parameters\n ----------\n adtype : dtype\n Input datatype\n\n Examples\n --------\n >>> from numpy.lib import recfunctions as rfn\n >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None\n False\n >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)\n ('A', 'B')\n >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])\n >>> rfn.get_names_flat(adtype)\n ('a', 'b', 'ba', 'bb')\n ", "n_words": 72, "vocab_size": 59, "n_whitespaces": 131, "language": "en" } }, { "id": 164932, "commit_id": "03fef5f0e35200aa5828975b62782bcf11faa0d2", "repo": "pandas", "path": "pandas/tests/plotting/frame/test_frame.py", "file_name": "test_frame.py", "fun_name": "test_memory_leak", "commit_message": "TST: Clean tests/plotting (#45992)", "code": "def test_memory_leak(self):\n \n import gc\n import weakref\n\n results = {}\n for kind in plotting.PlotAccessor._all_kinds:\n\n args = {}\n if kind in [\"hexbin\", \"scatter\", \"pie\"]:\n df = DataFrame(\n {\n \"A\": np.random.uniform(size=20),\n \"B\": np.random.uniform(size=20),\n \"C\": np.arange(20) + np.random.uniform(size=20),\n }\n )\n args = {\"x\": \"A\", \"y\": \"B\"}\n elif kind == \"area\":\n df = tm.makeTimeDataFrame().abs()\n else:\n df = tm.makeTimeDataFrame()\n\n # Use a weakref so we can see if the object gets collected without\n # also preventing it from being collected\n results[kind] = weakref.proxy(df.plot(kind=kind, **args))\n\n # have matplotlib delete all the figures\n tm.close()\n # force a garbage collection\n gc.collect()\n msg = \"weakly-referenced object no longer exists\"\n for key in results:\n # check that every plot was collected\n with pytest.raises(ReferenceError, match=msg):\n # need to actually access something to get an error\n results[key].lines\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 512, "n_words": 124, "vocab_size": 92, "complexity": 5, "nloc": 26, "token_counts": 184, "n_ast_nodes": 323, "n_identifiers": 31, "random_cut": "def test_memory_leak(self):\n \n import gc\n import weakref\n\n results = {}\n for kind in plotting.PlotAccessor._all_kinds:\n\n args = {}\n if kind in [\"hexbin\", \"scatter\", \"pie\"]:\n df = DataFrame(\n {\n \"A\": np.random.uniform(size=20),\n \"B\": np.random.uniform(size=20),\n \"C\": np.arange", "d_id": 39623, "documentation": { "docstring": "Check that every plot type gets properly collected.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 320637, "commit_id": "60de9523ba42d35dc2bf8e0ed5c1521ffbc9b7f5", "repo": "qutebrowser", "path": "qutebrowser/utils/utils.py", "file_name": "utils.py", "fun_name": "disabled_excepthook", "commit_message": "Update code for latest mypy", "code": "def disabled_excepthook() -> Iterator[None]:\n \n 
old_excepthook = sys.excepthook\n sys.excepthook = sys.__excepthook__\n try:\n yield\n finally:\n # If the code we did run did change sys.excepthook, we leave it\n # unchanged. Otherwise, we reset it.\n if sys.excepthook is sys.__excepthook__:\n sys.excepthook = old_excepthook\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 93, "n_words": 39, "vocab_size": 29, "complexity": 3, "nloc": 9, "token_counts": 41, "n_ast_nodes": 73, "n_identifiers": 6, "random_cut": "def disabled_excepthook() -> Iterator[None]:\n \n old_excepthook = sys.excepthook\n sys.excepthook = sys.__excepthook__\n try:\n yield\n finally:\n # If the code we did run did change sys.excepthook, we leave it\n # unchanged. Otherwise, we reset it.\n if sys.excepthook is sys.__excepthook__:\n sys.excepthook = old_excepthook\n\n", "d_id": 117242, "documentation": { "docstring": "Run code with the exception hook temporarily disabled.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 154536, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": "asv_bench/benchmarks/utils/common.py", "file_name": "common.py", "fun_name": "trigger_import", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def trigger_import(*dfs):\n \n if ASV_USE_STORAGE_FORMAT != \"hdk\" or ASV_USE_IMPL == \"pandas\":\n return\n\n from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import (\n DbWorker,\n )\n\n for df in dfs:\n df.shape # to trigger real execution\n df._query_compiler._modin_frame._partitions[0][\n 0\n ].frame_id = DbWorker().import_arrow_table(\n df._query_compiler._modin_frame._partitions[0][0].get()\n ) # to trigger real execution\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 120, "n_words": 39, "vocab_size": 33, "complexity": 4, "nloc": 13, "token_counts": 86, "n_ast_nodes": 134, "n_identifiers": 21, "random_cut": "def trigger_import(*dfs):\n \n if ASV_USE_STORAGE_FORMAT != \"hdk\" or ASV_USE_IMPL == \"pandas\":\n return\n\n from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import (\n DbWorker,\n )\n\n for df in dfs:\n df.shape # to trigger real execution\n df._query_compiler._modin_frame._partitions[0][\n 0\n ].frame_id = DbWorker().i", "d_id": 36057, "documentation": { "docstring": "\n Trigger import execution for DataFrames obtained by HDK engine.\n\n Parameters\n ----------\n *dfs : iterable\n DataFrames to trigger import.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 41, "language": "en" } }, { "id": 272187, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/integration_test/forwardprop_test.py", "file_name": "forwardprop_test.py", "fun_name": "_jacfwd", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _jacfwd(f, primals):\n \n jac_flat = []\n flat_primals = tf.nest.flatten(primals)\n tangent_mask = [tf.zeros_like(primal) for primal in flat_primals]\n for primal_index, primal in enumerate(flat_primals):\n primal_vector = tf.reshape(primal, [-1])\n primal_vector_length = tf.size(primal_vector)\n jac_columns = []\n for element_index in tf.range(primal_vector_length):\n mask = tf.one_hot(element_index, 
primal_vector_length)\n tangent_mask[primal_index] = tf.reshape(mask, tf.shape(primal))\n jac_columns.append(\n tf.nest.map_structure(\n functools.partial(tf.reshape, shape=[-1]),\n _jvp(\n f,\n primals,\n tf.nest.pack_sequence_as(primals, tangent_mask),\n )[1],\n )\n )\n jac_flat.append(tf.stack(jac_columns, axis=1))\n tangent_mask[primal_index] = tf.zeros_like(primal)\n return tf.nest.pack_sequence_as(primals, jac_flat)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 323, "n_words": 63, "vocab_size": 46, "complexity": 4, "nloc": 24, "token_counts": 196, "n_ast_nodes": 299, "n_identifiers": 31, "random_cut": "def _jacfwd(f, primals):\n \n jac_flat = []\n flat_primals = tf.nest.flatten(primals)\n tangent_mask = [tf.zeros_like(primal) for primal in flat_primals]\n for primal_index, primal in enumerate(flat_primals):\n primal_vector = tf.reshape(primal, [-1])\n primal_vector_length = tf.size(primal_vector)\n jac_columns = []\n for element_index in tf.range(primal_vector_length):\n mask = tf.one_hot(element_index, primal", "d_id": 80974, "documentation": { "docstring": "Compute the jacobian of `f` at `primals` using forward-mode autodiff.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 40162, "commit_id": "c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c", "repo": "dash", "path": "dash/_callback_context.py", "file_name": "_callback_context.py", "fun_name": "record_timing", "commit_message": "f-strings everywhere! fffff", "code": "def record_timing(name, duration=None, description=None):\n \n timing_information = getattr(flask.g, \"timing_information\", {})\n\n if name in timing_information:\n raise KeyError(f'Duplicate resource name \"{name}\" found.')\n\n timing_information[name] = {\"dur\": round(duration * 1000), \"desc\": description}\n\n setattr(flask.g, \"timing_information\", timing_information)\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 30, "vocab_size": 27, "complexity": 2, "nloc": 6, "token_counts": 67, "n_ast_nodes": 114, "n_identifiers": 11, "random_cut": "def record_timing(name, duration=None, description=None):\n \n timing_information = getattr(flask.g, \"timing_information\", {})\n\n if name in timing_information:\n raise KeyError(f'Duplicate resource name \"{name}\" found.')\n\n timing_information[name] = {\"dur\": round(duration * 1000), \"desc\": description}\n\n setattr(flask.g, \"timing_information\", timing_information)\n", "d_id": 7329, "documentation": { "docstring": "Records timing information for a server resource.\n\n :param name: The name of the resource.\n :type name: string\n\n :param duration: The time in seconds to report. 
Internally, this\n is rounded to the nearest millisecond.\n :type duration: float or None\n\n :param description: A description of the resource.\n :type description: string or None\n ", "n_words": 50, "vocab_size": 33, "n_whitespaces": 110, "language": "en" } }, { "id": 203493, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/templatetags/admin_list.py", "file_name": "admin_list.py", "fun_name": "admin_actions", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def admin_actions(context):\n \n context[\"action_index\"] = context.get(\"action_index\", -1) + 1\n return context\n\n\n@register.tag(name=\"admin_actions\")", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@register.tag(name=\"admin_actions\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 19, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 61, "n_identifiers": 6, "random_cut": "def admin_actions(context):\n \n context[\"action_index\"] = context.get(\"action_index\", -1) + 1\n return context\n\n\n@register.tag(name=\"admin_ac", "d_id": 50409, "documentation": { "docstring": "\n Track the number of times the action field has been rendered on the page,\n so we know which value to use.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 31, "language": "en" } }, { "id": 294272, "commit_id": "dbef90654f3693401a2df88fa00afbbffbdffcd2", "repo": "core", "path": "tests/components/hue/test_light_v2.py", "file_name": "test_light_v2.py", "fun_name": "test_lights", "commit_message": "Add effects feature to Hue lights (#68567)", "code": "async def test_lights(hass, mock_bridge_v2, v2_resources_test_data):\n \n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n\n await setup_platform(hass, mock_bridge_v2, \"light\")\n # there shouldn't have been any requests at this point\n assert len(mock_bridge_v2.mock_requests) == 0\n # 6 entities should be created from test data (grouped_lights are disabled by default)\n assert len(hass.states.async_all()) == 6\n\n # test light which supports color and color temperature\n light_1 = hass.states.get(\"light.hue_light_with_color_and_color_temperature_1\")\n assert light_1 is not None\n assert (\n light_1.attributes[\"friendly_name\"]\n == \"Hue light with color and color temperature 1\"\n )\n assert light_1.state == \"on\"\n assert light_1.attributes[\"brightness\"] == int(46.85 / 100 * 255)\n assert light_1.attributes[\"mode\"] == \"normal\"\n assert light_1.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert set(light_1.attributes[\"supported_color_modes\"]) == {\n COLOR_MODE_COLOR_TEMP,\n COLOR_MODE_XY,\n }\n assert light_1.attributes[\"xy_color\"] == (0.5614, 0.4058)\n assert light_1.attributes[\"min_mireds\"] == 153\n assert light_1.attributes[\"max_mireds\"] == 500\n assert light_1.attributes[\"dynamics\"] == \"dynamic_palette\"\n assert light_1.attributes[\"effect_list\"] == [\"None\", \"candle\", \"fire\"]\n assert light_1.attributes[\"effect\"] == \"None\"\n\n # test light which supports color temperature only\n light_2 = hass.states.get(\"light.hue_light_with_color_temperature_only\")\n assert light_2 is not None\n assert (\n light_2.attributes[\"friendly_name\"] == \"Hue light with color temperature only\"\n )\n assert light_2.state == \"off\"\n assert light_2.attributes[\"mode\"] == \"normal\"\n assert light_2.attributes[\"supported_color_modes\"] == [COLOR_MODE_COLOR_TEMP]\n assert 
light_2.attributes[\"min_mireds\"] == 153\n assert light_2.attributes[\"max_mireds\"] == 454\n assert light_2.attributes[\"dynamics\"] == \"none\"\n assert light_2.attributes[\"effect_list\"] == [\"None\", \"candle\", \"sunrise\"]\n\n # test light which supports color only\n light_3 = hass.states.get(\"light.hue_light_with_color_only\")\n assert light_3 is not None\n assert light_3.attributes[\"friendly_name\"] == \"Hue light with color only\"\n assert light_3.state == \"on\"\n assert light_3.attributes[\"brightness\"] == 128\n assert light_3.attributes[\"mode\"] == \"normal\"\n assert light_3.attributes[\"supported_color_modes\"] == [COLOR_MODE_XY]\n assert light_3.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert light_3.attributes[\"dynamics\"] == \"dynamic_palette\"\n\n # test light which supports on/off only\n light_4 = hass.states.get(\"light.hue_on_off_light\")\n assert light_4 is not None\n assert light_4.attributes[\"friendly_name\"] == \"Hue on/off light\"\n assert light_4.state == \"off\"\n assert light_4.attributes[\"mode\"] == \"normal\"\n assert light_4.attributes[\"supported_color_modes\"] == []\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 458, "n_words": 264, "vocab_size": 124, "complexity": 1, "nloc": 52, "token_counts": 423, "n_ast_nodes": 729, "n_identifiers": 22, "random_cut": "async def test_lights(hass, mock_bridge_v2, v2_resources_test_data):\n \n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n\n await setup_platform(hass, mock_bridge_v2, \"light\")\n # there shouldn't have been any requests at this point\n assert len(mock_bridge_v2.mock_requests) == 0\n # 6 entities should be created from test data (grouped_lights are disabled by default)\n assert len(hass.states.async_all()) == 6\n\n # test light which supports color and color temperature\n light_1 = hass.states.get(\"light.hue_light_with_color_and_color_temperature_1\")\n assert light_1 is not None\n assert (\n light_1.attributes[\"friendly_name\"]\n == \"Hue light with color and color temperature 1\"\n )\n assert light_1.state == \"on\"\n assert light_1.attributes[\"brightness\"] == int(46.85 / 100 * 255)\n assert light_1.attributes[\"mode\"] == \"normal\"\n assert light_1.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert set(light_1.attributes[\"supported_color_modes\"]) == {\n COLOR_MODE_COLOR_TEMP,\n COLOR_MODE_XY,\n }\n assert light_1.attributes[\"xy_color\"] == (0.5614, 0.4058)\n assert light_1.attributes[\"min_mireds\"] == 153\n assert light_1.attributes[\"max_mireds\"] == 500\n assert light_1.attributes[\"dynamics\"] == \"dynamic_palette\"\n assert light_1.attributes[\"effect_list\"] == [\"None\", \"candle\", \"fire\"]\n assert light_1.attributes[\"effect\"] == \"None\"\n\n # test light which supports color temperature only\n light_2 = hass.states.get(\"light.hue_light_with_color_temperature_only\")\n assert light_2 is not None\n assert (\n light_2.attributes[\"friendly_name\"] == \"Hue light with color temperature only\"\n )\n assert light_2.state == \"off\"\n assert light_2.attributes[\"mode\"] == \"normal\"\n assert light_2.attributes[\"supported_color_modes\"] == [COLOR_MODE_COLOR_TEMP]\n assert light_2.attributes[\"min_mireds\"] == 153\n assert light_2.attributes[\"max_mireds\"] == 454\n assert light_2.attributes[\"dynamics\"] == \"none\"\n assert light_2.attributes[\"effect_list\"] == [\"None\", \"candle\", \"sunrise\"]\n\n # test light which supports color only\n 
light_3 = hass.states.get(\"light.hue_light_with_color_only\")\n assert light_3 is not None\n assert light_3.attributes[\"friendly_name\"] == \"Hue light with color only\"\n assert light_3.state == \"on\"\n assert light_3.attributes[\"brightness\"] == 128\n assert light_3.attributes[\"mode\"] == \"normal\"\n assert light_3.attributes[\"supported_color_modes\"] == [COLOR_MOD", "d_id": 93309, "documentation": { "docstring": "Test if all v2 lights get created with correct features.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 195246, "commit_id": "b1acb681207559da56a787ba96e16f0e23697d92", "repo": "ParlAI", "path": "projects/bb3/holistic_bias/scripts/eval_175b_model.py", "file_name": "eval_175b_model.py", "fun_name": "setup_data", "commit_message": "Patch 8322 (#4709)\n\n* add dafetymix teacher\r\n\r\n* safety_mix teacher\r\n\r\n* safety_mix teacher pos and neg teachers\r\n\r\n* add tests for teacher\r\n\r\n* add license info\r\n\r\n* improvement\r\n\r\n* add task list\r\n\r\n* add task list and lint\r\n\r\n* add init.py\r\n\r\n* adding some patch to director\r\n\r\n* seeker changes\r\n\r\n* th\r\n\r\n* 3\r\n\r\n* jing\r\n\r\n* changes\r\n\r\n* z and r\r\n\r\n* remove .opts\r\n\r\n* fix docs\r\n\r\n* add contrractions\r\n\r\n* lint\r\n\r\nCo-authored-by: Dexter Ju \r\nCo-authored-by: Jing Xu ", "code": "def setup_data(self, path):\n \n for message, new_episode in super().setup_data(path):\n assert (\n message['text'] == '__SILENCE__'\n ), 'The expected original context string is not found!'\n message['text'] = 'Person 1:'\n yield message, new_episode\n\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 102, "n_words": 29, "vocab_size": 26, "complexity": 2, "nloc": 7, "token_counts": 43, "n_ast_nodes": 78, "n_identifiers": 6, "random_cut": "def setup_data(self, path):\n \n for message, new_episode in super().setup_data(path):\n assert (\n message['text'] == '__SILENCE__'\n ), 'The expected original context string is not found!'\n message['text'] = 'Person", "d_id": 47233, "documentation": { "docstring": "\n Modify each output message to add in an OPT-compatible context string.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 101477, "commit_id": "13cfb3f39e72e9ca181f173b7b3db2a048db0d08", "repo": "faceswap", "path": "scripts/extract.py", "file_name": "extract.py", "fun_name": "_get_input_locations", "commit_message": "extract: Add batch processing mode", "code": "def _get_input_locations(self) -> List[str]:\n \n if not self._args.batch_mode or os.path.isfile(self._args.input_dir):\n return [self._args.input_dir] # Not batch mode or a single file\n\n retval = [os.path.join(self._args.input_dir, fname)\n for fname in os.listdir(self._args.input_dir)\n if os.path.isdir(os.path.join(self._args.input_dir, fname))\n or os.path.splitext(fname)[-1].lower() in _video_extensions]\n logger.debug(\"Input locations: %s\", retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 138, "n_words": 40, "vocab_size": 34, "complexity": 6, "nloc": 17, "token_counts": 122, "n_ast_nodes": 192, "n_identifiers": 20, "random_cut": "def _get_input_locations(self) -> List[str]:\n \n if not self._args.batch_mode or os.path.isfile(self._args.input_dir):\n return [self._args.input_dir] # Not batch mode or a single file\n\n 
retval = [os.path.join(self._args.input_dir, fname)\n for fname in os.listdir(self._args.input_dir)\n if os.path.isdir(os.path.join(self._args.input_dir, fname))\n or os.p", "d_id": 20890, "documentation": { "docstring": " Obtain the full path to input locations. Will be a list of locations if batch mode is\n selected, or a containing a single location if batch mode is not selected.\n\n Returns\n -------\n list:\n The list of input location paths\n ", "n_words": 39, "vocab_size": 29, "n_whitespaces": 86, "language": "en" } }, { "id": 65129, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/party.py", "file_name": "party.py", "fun_name": "get_party_gle_currency", "commit_message": "style: format code with black", "code": "def get_party_gle_currency(party_type, party, company):\n\tdef generator():\n\t\texisting_gle_currency = frappe.db.sql(\n\t\t\t,\n\t\t\t{\"company\": company, \"party_type\": party_type, \"party\": party},\n\t\t)\n\n\t\treturn existing_gle_currency[0][0] if existing_gle_currency else None\n\n\treturn frappe.local_cache(\n\t\t\"party_gle_currency\", (party_type, party, company), generator, regenerate_if_none=True\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 22, "n_words": 32, "vocab_size": 27, "complexity": 1, "nloc": 5, "token_counts": 32, "n_ast_nodes": 109, "n_identifiers": 11, "random_cut": "def get_party_gle_currency(party_type, party, company):\n\tdef generator():\n\t\texisting_gle_currency = frappe.db.sql(\n\t\t\t,\n\t\t\t{\"company\": company, \"party_type\": party_type, \"party\": party},\n\t\t)\n\n\t\treturn existing_gle_currency[0][0] if existing_gle_currency else None\n\n\treturn frappe.local_cache(\n\t\t\"party_gle_currency\", (party_type, pa", "d_id": 13801, "documentation": { "docstring": "select account_currency from `tabGL Entry`\n\t\t\twhere docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s\n\t\t\tlimit 1", "n_words": 15, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 258644, "commit_id": "a793c1f0ad7dd63b2a896d2e84087089a11e7fca", "repo": "scikit-learn", "path": "sklearn/datasets/_base.py", "file_name": "_base.py", "fun_name": "load_breast_cancer", "commit_message": "DOC Ensures that sklearn.datasets._base.load_breast_cancer passes numpydoc validation (#22346)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Arturo Amor <86408019+ArturoAmorQ@users.noreply.github.com>", "code": "def load_breast_cancer(*, return_X_y=False, as_frame=False):\n \n data_file_name = \"breast_cancer.csv\"\n data, target, target_names, fdescr = load_csv_data(\n data_file_name=data_file_name, descr_file_name=\"breast_cancer.rst\"\n )\n\n feature_names = np.array(\n [\n \"mean radius\",\n \"mean texture\",\n \"mean perimeter\",\n \"mean area\",\n \"mean smoothness\",\n \"mean compactness\",\n \"mean concavity\",\n \"mean concave points\",\n \"mean symmetry\",\n \"mean fractal dimension\",\n \"radius error\",\n \"texture error\",\n \"perimeter error\",\n \"area error\",\n \"smoothness error\",\n \"compactness error\",\n \"concavity error\",\n \"concave points error\",\n \"symmetry error\",\n \"fractal dimension error\",\n \"worst radius\",\n \"worst texture\",\n \"worst perimeter\",\n \"worst area\",\n \"worst smoothness\",\n \"worst compactness\",\n \"worst concavity\",\n \"worst concave points\",\n \"worst symmetry\",\n \"worst fractal dimension\",\n ]\n 
)\n\n frame = None\n target_columns = [\n \"target\",\n ]\n if as_frame:\n frame, data, target = _convert_data_dataframe(\n \"load_breast_cancer\", data, target, feature_names, target_columns\n )\n\n if return_X_y:\n return data, target\n\n return Bunch(\n data=data,\n target=target,\n frame=frame,\n target_names=target_names,\n DESCR=fdescr,\n feature_names=feature_names,\n filename=data_file_name,\n data_module=DATA_MODULE,\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 610, "n_words": 125, "vocab_size": 68, "complexity": 3, "nloc": 59, "token_counts": 177, "n_ast_nodes": 297, "n_identifiers": 21, "random_cut": "def load_breast_cancer(*, return_X_y=False, as_frame=False):\n \n data_file_name = \"breast_cancer.csv\"\n data, target, target_names, fdescr = load_csv_data(\n data_file_name=data_file_name, descr_file_name=\"breast_cancer.rst\"\n )\n\n feature_names = np.array(\n [\n \"mean radius\",\n \"mean texture\",\n \"mean perimeter\",\n \"mean area\",\n \"mean smoothness\",\n \"mean compactness\",\n \"mean concavity\",\n \"mean concave points\",\n \"mean symmetry\",\n \"mean fractal dimension\",\n \"radius error\",\n \"texture error\",\n \"perimeter error\",\n \"area error\",\n \"smoothness error\",\n \"compactness error\",\n \"concavity error\",\n \"concave points error\",\n \"symmetry error\",\n \"fractal dimension error\",\n \"worst radius\",\n \"worst texture\",\n \"worst perimeter\",\n \"worst area\",\n ", "d_id": 75348, "documentation": { "docstring": "Load and return the breast cancer wisconsin dataset (classification).\n\n The breast cancer dataset is a classic and very easy binary classification\n dataset.\n\n ================= ==============\n Classes 2\n Samples per class 212(M),357(B)\n Samples total 569\n Dimensionality 30\n Features real, positive\n ================= ==============\n\n The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is\n downloaded from:\n https://goo.gl/U2Uwz2\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object.\n See below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.18\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric). The target is\n a pandas DataFrame or Series depending on the number of target columns.\n If `return_X_y` is True, then (`data`, `target`) will be pandas\n DataFrames or Series as described below.\n\n .. versionadded:: 0.23\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : {ndarray, dataframe} of shape (569, 30)\n The data matrix. If `as_frame=True`, `data` will be a pandas\n DataFrame.\n target : {ndarray, Series} of shape (569,)\n The classification target. If `as_frame=True`, `target` will be\n a pandas Series.\n feature_names : list\n The names of the dataset columns.\n target_names : list\n The names of target classes.\n frame : DataFrame of shape (569, 31)\n Only present when `as_frame=True`. DataFrame with `data` and\n `target`.\n\n .. versionadded:: 0.23\n DESCR : str\n The full description of the dataset.\n filename : str\n The path to the location of the data.\n\n .. versionadded:: 0.20\n\n (data, target) : tuple if ``return_X_y`` is True\n A tuple of two ndarrays by default. 
The first contains a 2D ndarray of\n shape (569, 30) with each row representing one sample and each column\n representing the features. The second ndarray of shape (569,) contains\n the target samples. If `as_frame=True`, both arrays are pandas objects,\n i.e. `X` a dataframe and `y` a series.\n\n .. versionadded:: 0.18\n\n Examples\n --------\n Let's say you are interested in the samples 10, 50, and 85, and want to\n know their class name.\n\n >>> from sklearn.datasets import load_breast_cancer\n >>> data = load_breast_cancer()\n >>> data.target[[10, 50, 85]]\n array([0, 1, 0])\n >>> list(data.target_names)\n ['malignant', 'benign']\n ", "n_words": 356, "vocab_size": 205, "n_whitespaces": 823, "language": "en" } }, { "id": 20279, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/filters/__init__.py", "file_name": "__init__.py", "fun_name": "get_filter_by_name", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def get_filter_by_name(filtername, **options):\n \n cls = find_filter_class(filtername)\n if cls:\n return cls(**options)\n else:\n raise ClassNotFound('filter %r not found' % filtername)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 44, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 33, "n_ast_nodes": 59, "n_identifiers": 6, "random_cut": "def get_filter_by_name(filtername, **options):\n \n cls = find_filter_class(filtername)\n if cls:\n return cls(**options)\n else:\n raise ClassNotFound('filter %r not found' % filte", "d_id": 3307, "documentation": { "docstring": "Return an instantiated filter.\n\n Options are passed to the filter initializer if wanted.\n Raise a ClassNotFound if not found.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 28, "language": "en" } }, { "id": 50301, "commit_id": "ffcde21305c61d950a9f93e57e6180c9a9665b87", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/resnet.py", "file_name": "resnet.py", "fun_name": "wide_resnet50_2", "commit_message": "add disco_diffusion_ernievil_base", "code": "def wide_resnet50_2(pretrained=False, **kwargs):\n \n kwargs['width'] = 64 * 2\n return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs)\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 5, "random_cut": "def wide_resnet50_2(pretrained=False, **kwargs):\n \n kwargs['width'] = 64 * 2\n return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs)\n\n", "d_id": 10081, "documentation": { "docstring": "Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" `_.\n\n Args:\n 
pretrained (bool): If True, returns a model pre-trained on ImageNet\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.vision.models import wide_resnet50_2\n\n # build model\n model = wide_resnet50_2()\n\n # build model and load imagenet pretrained weight\n # model = wide_resnet50_2(pretrained=True)\n\n x = paddle.rand([1, 3, 224, 224])\n out = model(x)\n\n print(out.shape)\n ", "n_words": 57, "vocab_size": 43, "n_whitespaces": 182, "language": "en" } }, { "id": 246216, "commit_id": "901b264c0c88f39cbfb8b2229e0dc57968882658", "repo": "synapse", "path": "tests/rest/admin/test_username_available.py", "file_name": "test_username_available.py", "fun_name": "test_username_available", "commit_message": "Add type hints to `tests/rest/admin` (#11851)", "code": "def test_username_available(self) -> None:\n \n\n url = \"%s?username=%s\" % (self.url, \"allowed\")\n channel = self.make_request(\"GET\", url, access_token=self.admin_user_tok)\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertTrue(channel.json_body[\"available\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 64, "n_ast_nodes": 106, "n_identifiers": 14, "random_cut": "def test_username_available(self) -> None:\n \n\n url = \"%s?username=%s\" % (self.url, \"allowed\")\n channel = self.make_request(\"GET\", url, access_token=self.admin_user_tok)\n\n self.asser", "d_id": 71110, "documentation": { "docstring": "\n The endpoint should return a HTTPStatus.OK response if the username does not exist\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 314745, "commit_id": "a8349a4866d22cddbca9ac9367d4affae39a8325", "repo": "core", "path": "tests/helpers/test_entityfilter.py", "file_name": "test_entityfilter.py", "fun_name": "test_with_include_glob_filtering_case4a_include_strong", "commit_message": "Adjust entity filters to make includes stronger than excludes (#74080)\n\n* Adjust entity filters to make includes stronger than excludes\r\n\r\nFixes #59080\r\n\r\n* adjust test for stronger entity glob includes\r\n\r\n* sync with docs", "code": "def test_with_include_glob_filtering_case4a_include_strong():\n \n incl_dom = {}\n incl_glob = {\"*working\"}\n incl_ent = {\"binary_sensor.specificly_included\"}\n excl_dom = {}\n excl_glob = {\"*broken\", \"*notworking\", \"binary_sensor.*\"}\n excl_ent = {\"light.ignoreme\"}\n testfilter = generate_filter(\n incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob\n )\n\n assert testfilter(\"sensor.working\") is True\n assert testfilter(\"sensor.notworking\") is True # include is stronger\n assert testfilter(\"sensor.broken\") is False\n assert testfilter(\"light.test\") is False\n assert testfilter(\"light.notworking\") is True # include is stronger\n assert testfilter(\"light.ignoreme\") is False\n assert testfilter(\"binary_sensor.not_working\") is True # include is stronger\n assert testfilter(\"binary_sensor.another\") is False\n assert testfilter(\"binary_sensor.specificly_included\") is True\n assert testfilter(\"sun.sun\") is False\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 151, "n_words": 84, "vocab_size": 41, "complexity": 1, "nloc": 20, "token_counts": 123, "n_ast_nodes": 227, "n_identifiers": 9, "random_cut": 
"def test_with_include_glob_filtering_case4a_include_strong():\n \n incl_dom = {}\n incl_glob = {\"*working\"}\n incl_ent = {\"binary_sensor.specificly_included\"}\n excl_dom = {}\n excl_glob = {\"*broken\", \"*notworking\", \"binary_sensor.*\"}\n excl_ent = {\"light.ignoreme\"}\n testfilter = generate_filter(\n incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob\n )\n\n assert testfilter(\"sensor.working\") is True\n assert testfilter(\"sensor.notworking\") is True # include is stronger\n assert testfilter(\"sensor.broken\") is False\n assert testfilter(\"light.test\") is False\n assert testfilter(\"light.notworking\") is True # include is stronger\n assert testfilter(\"light.ignoreme\") is False\n assert testfilter(\"binary_sensor.not_working\") is True # include is stronger\n assert t", "d_id": 113349, "documentation": { "docstring": "Test case 4 - include and exclude specified, both have globs, and a specifically included entity.", "n_words": 16, "vocab_size": 15, "n_whitespaces": 15, "language": "en" } }, { "id": 66418, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/manufacturing/doctype/production_plan/production_plan.py", "file_name": "production_plan.py", "fun_name": "get_sales_orders", "commit_message": "style: format code with black", "code": "def get_sales_orders(self):\n\tso_filter = item_filter = \"\"\n\tbom_item = \"bom.item = so_item.item_code\"\n\n\tdate_field_mapper = {\n\t\t\"from_date\": (\">=\", \"so.transaction_date\"),\n\t\t\"to_date\": (\"<=\", \"so.transaction_date\"),\n\t\t\"from_delivery_date\": (\">=\", \"so_item.delivery_date\"),\n\t\t\"to_delivery_date\": (\"<=\", \"so_item.delivery_date\"),\n\t}\n\n\tfor field, value in date_field_mapper.items():\n\t\tif self.get(field):\n\t\t\tso_filter += f\" and {value[1]} {value[0]} %({field})s\"\n\n\tfor field in [\"customer\", \"project\", \"sales_order_status\"]:\n\t\tif self.get(field):\n\t\t\tso_field = \"status\" if field == \"sales_order_status\" else field\n\t\t\tso_filter += f\" and so.{so_field} = %({field})s\"\n\n\tif self.item_code and frappe.db.exists(\"Item\", self.item_code):\n\t\tbom_item = self.get_bom_item() or bom_item\n\t\titem_filter += \" and so_item.item_code = %(item_code)s\"\n\n\topen_so = frappe.db.sql(\n\t\tf,\n\t\tself.as_dict(),\n\t\tas_dict=1,\n\t)\n\n\treturn open_so\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 67, "n_words": 93, "vocab_size": 59, "complexity": 9, "nloc": 38, "token_counts": 158, "n_ast_nodes": 329, "n_identifiers": 20, "random_cut": "def get_sales_orders(self):\n\tso_filter = item_filter = \"\"\n\tbom_item = \"bom.item = so_item.item_code\"\n\n\tdate_field_mapper = {\n\t\t\"from_date\": (\">=\", \"so.transaction_date\"),\n\t\t\"to_date\": (\"<=\", \"so.transaction_date\"),\n\t\t\"from_delivery_date\": (\">=\", \"so_item.delivery_date\"),\n\t\t\"to_delivery_date\": (\"<=\", \"so_item.delivery_date\"),\n\t}\n\n\tfor ", "d_id": 14183, "documentation": { "docstring": "\n\t\tselect distinct so.name, so.transaction_date, so.customer, so.base_grand_total\n\t\tfrom `tabSales Order` so, `tabSales Order Item` so_item\n\t\twhere so_item.parent = so.name\n\t\t\tand so.docstatus = 1 and so.status not in (\"Stopped\", \"Closed\")\n\t\t\tand so.company = %(company)s\n\t\t\tand so_item.qty > so_item.work_order_qty {so_filter} {item_filter}\n\t\t\tand (exists (select name from `tabBOM` bom where 
{bom_item}\n\t\t\t\t\tand bom.is_active = 1)\n\t\t\t\tor exists (select name from `tabPacked Item` pi\n\t\t\t\t\twhere pi.parent = so.name and pi.parent_item = so_item.item_code\n\t\t\t\t\t\tand exists (select name from `tabBOM` bom where bom.item=pi.item_code\n\t\t\t\t\t\t\tand bom.is_active = 1)))\n\t\t", "n_words": 80, "vocab_size": 49, "n_whitespaces": 68, "language": "en" } }, { "id": 74366, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_page_model.py", "file_name": "test_page_model.py", "fun_name": "test_copy_published_emits_signal", "commit_message": "Reformat with black", "code": "def test_copy_published_emits_signal(self):\n \n christmas_page = EventPage.objects.get(url_path=\"/home/events/christmas/\")\n\n signal_fired = False\n signal_page = None\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 39, "n_words": 11, "vocab_size": 9, "complexity": 1, "nloc": 11, "token_counts": 65, "n_ast_nodes": 44, "n_identifiers": 9, "random_cut": "def test_copy_published_emits_signal(self):\n \n christmas_page = EventPage.objects.get(url_path=\"/h", "d_id": 16251, "documentation": { "docstring": "Test that copying of a published page emits a page_published signal.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 38418, "commit_id": "38043d8453b82a9c712f8d5c98323150fbee7503", "repo": "transformers", "path": "utils/tests_fetcher.py", "file_name": "tests_fetcher.py", "fun_name": "get_all_tests", "commit_message": "Update self-push workflow (#17177)\n\n* update push ci\r\n\r\n* install git-python\r\n\r\n* update comment\r\n\r\n* update deepspeed jobs\r\n\r\n* fix report\r\n\r\n* skip 2 more tests that require fairscale\r\n\r\n* Fix changes in test_fetcher.py (to deal with `setup.py` is changed)\r\n\r\n* set RUN_PT_TF_CROSS_TESTS=1 and final clean-up\r\n\r\n* remove SIGOPT_API_TOKEN\r\n\r\n* remove echo \"$matrix_folders\"\r\n\r\nCo-authored-by: ydshieh ", "code": "def get_all_tests():\n \n test_root_dir = os.path.join(PATH_TO_TRANFORMERS, \"tests\")\n\n # test folders/files directly under `tests` folder\n tests = os.listdir(test_root_dir)\n tests = sorted(\n list(filter(lambda x: os.path.isdir(x) or x.startswith(\"tests/test_\"), [f\"tests/{x}\" for x in tests]))\n )\n\n # model specific test folders\n model_tests_folders = os.listdir(os.path.join(test_root_dir, \"models\"))\n model_test_folders = sorted(list(filter(os.path.isdir, [f\"tests/models/{x}\" for x in model_tests_folders])))\n\n tests.remove(\"tests/models\")\n tests = model_test_folders + tests\n\n return tests\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 98, "n_words": 55, "vocab_size": 40, "complexity": 4, "nloc": 11, "token_counts": 118, "n_ast_nodes": 205, "n_identifiers": 17, "random_cut": "def get_all_tests():\n \n test_root_dir = os.path.join(PATH_TO_TRANFORMERS, \"tests\")\n\n # test folders/files directly under `tests` folder\n tests = os.listdir(test_root_dir)\n tests = sorted(\n list(filter(lambda x: os.path.isdir(x) or x.startswith(\"tests/test_\"), [f\"tests/{x}\" for x in tests]))\n )\n\n # model specific test folders\n model_tests_folders = os.listdir(os.path.join(test_root_dir, \"models\"))\n model_test_folders = sorted(list(filter(os.path.isdir, [f\"tests/models/{x}\" for x in model_tests", "d_id": 6971, 
"documentation": { "docstring": "\n Return a list of paths to all test folders and files under `tests`. All paths are rooted at `tests`.\n\n - folders under `tests`: `tokenization`, `pipelines`, etc. The folder `models` is excluded.\n - folders under `tests/models`: `bert`, `gpt2`, etc.\n - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.\n ", "n_words": 46, "vocab_size": 32, "n_whitespaces": 62, "language": "en" } }, { "id": 133739, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/impala/tests/test_vtrace.py", "file_name": "test_vtrace.py", "fun_name": "test_vtrace", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_vtrace(self):\n \n seq_len = 5\n batch_size = 10\n\n # Create log_rhos such that rho will span from near-zero to above the\n # clipping thresholds. In particular, calculate log_rhos in\n # [-2.5, 2.5),\n # so that rho is in approx [0.08, 12.2).\n space_w_time = Box(-1.0, 1.0, (seq_len, batch_size), np.float32)\n space_only_batch = Box(-1.0, 1.0, (batch_size,), np.float32)\n log_rhos = space_w_time.sample() / (batch_size * seq_len)\n log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5).\n values = {\n \"log_rhos\": log_rhos,\n # T, B where B_i: [0.9 / (i+1)] * T\n \"discounts\": np.array(\n [[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)]\n ),\n \"rewards\": space_w_time.sample(),\n \"values\": space_w_time.sample() / batch_size,\n \"bootstrap_value\": space_only_batch.sample() + 1.0,\n \"clip_rho_threshold\": 3.7,\n \"clip_pg_rho_threshold\": 2.2,\n }\n\n for fw, sess in framework_iterator(frameworks=(\"torch\", \"tf\"), session=True):\n vtrace = vtrace_tf if fw != \"torch\" else vtrace_torch\n output = vtrace.from_importance_weights(**values)\n if sess:\n output = sess.run(output)\n\n ground_truth_v = _ground_truth_calculation(vtrace, **values)\n check(output, ground_truth_v)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 433, "n_words": 150, "vocab_size": 109, "complexity": 6, "nloc": 25, "token_counts": 230, "n_ast_nodes": 344, "n_identifiers": 30, "random_cut": "def test_vtrace(self):\n \n seq_len = 5\n batch_size = 10\n\n # Create log_rhos such that rho will span from near-zero to above the\n # clipping thresholds. 
In particular, calculate log_rhos in\n # [-2.5, 2.5),\n # so that rho is in approx [0.08, 12.2).\n space_w_time ", "d_id": 30090, "documentation": { "docstring": "Tests V-trace against ground truth data calculated in python.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 209854, "commit_id": "a2b7a28faff1db058dd22ce097a268e0ad5d1d33", "repo": "scapy", "path": "scapy/base_classes.py", "file_name": "base_classes.py", "fun_name": "pdfdump", "commit_message": "[Hinty] Core typing: windows (#3684)\n\n* Core typing: windows\r\n\r\nCo-authored-by: Pierre ", "code": "def pdfdump(self, filename=None, **kargs):\n # type: (Optional[str], **Any) -> None\n \n from scapy.config import conf\n from scapy.utils import get_temp_file, ContextManagerSubprocess\n canvas = self.canvas_dump(**kargs)\n if filename is None:\n fname = get_temp_file(autoext=kargs.get(\"suffix\", \".pdf\"))\n canvas.writePDFfile(fname)\n if WINDOWS and not conf.prog.pdfreader:\n os.startfile(fname)\n else:\n with ContextManagerSubprocess(conf.prog.pdfreader):\n subprocess.Popen([conf.prog.pdfreader, fname])\n else:\n canvas.writePDFfile(filename)\n print()\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 205, "n_words": 45, "vocab_size": 40, "complexity": 4, "nloc": 15, "token_counts": 115, "n_ast_nodes": 193, "n_identifiers": 24, "random_cut": "def pdfdump(self, filename=None, **kargs):\n # type: (Optional[str], **Any) -> None\n \n from scapy.config import conf\n from scapy.utils import get_temp_file, ContextManagerSubprocess\n canvas = self.canvas_dump(**kargs)\n if filename is None:\n fname = get_temp_file(autoext=kargs.get(\"suffix\", \".pdf\"))\n canvas.writePDFfile(fname)\n if WINDOWS and not conf.prog.pdfreader:\n os.startfile(fname)\n else:\n with ContextManagerSubprocess(conf.prog.pdfreader):\n subprocess.Popen([conf.prog.pdfreader, fname])\n else:\n canvas.writePDFfile(filename)\n print", "d_id": 52821, "documentation": { "docstring": "\n pdfdump(filename=None, layer_shift=0, rebuild=1)\n\n Creates a PDF file describing a packet. 
If filename is not provided a\n temporary file is created and xpdf is called.\n\n :param filename: the file's filename\n ", "n_words": 29, "vocab_size": 23, "n_whitespaces": 65, "language": "en" } }, { "id": 322143, "commit_id": "621357338437ee420eabbbf5ab19065bc85e73a5", "repo": "PaddleNLP", "path": "examples/dependency_parsing/ddparser/utils.py", "file_name": "utils.py", "fun_name": "index_sample", "commit_message": "Update neural search readme and Add Paddle Serving Support (#1558)\n\n* add recall inference similarity\r\n\r\n* update examples\r\n\r\n* updatea readme\r\n\r\n* update dir name\r\n\r\n* update neural search readme\r\n\r\n* update milvus readme\r\n\r\n* update domain adaptive pretraining readme\r\n\r\n* fix the mistakes\r\n\r\n* update readme\r\n\r\n* add recall Paddle Serving Support\r\n\r\n* update readme\r\n\r\n* update readme and format the code\r\n\r\n* reformat the files\r\n\r\n* move the files\r\n\r\n* reformat the code\r\n\r\n* remove redundant code\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: tianxin ", "code": "def index_sample(x, index):\n \n x_s = x.shape\n dim = len(index.shape) - 1\n assert x_s[:dim] == index.shape[:dim]\n\n if len(x_s) == 3 and dim == 1:\n r_x = paddle.reshape(x, shape=[-1, x_s[1], x_s[-1]])\n else:\n r_x = paddle.reshape(x, shape=[-1, x_s[-1]])\n\n index = paddle.reshape(index, shape=[len(r_x), -1, 1])\n # Generate arange index, shape like index\n arr_index = paddle.arange(start=0, end=len(index), dtype=index.dtype)\n arr_index = paddle.unsqueeze(arr_index, axis=[1, 2])\n arr_index = paddle.expand(arr_index, index.shape)\n # Genrate new index\n new_index = paddle.concat((arr_index, index), -1)\n new_index = paddle.reshape(new_index, (-1, 2))\n # Get output\n out = paddle.gather_nd(r_x, new_index)\n if len(x_s) == 3 and dim == 2:\n out = paddle.reshape(out, shape=[x_s[0], x_s[1], -1])\n else:\n out = paddle.reshape(out, shape=[x_s[0], -1])\n return out\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 190, "n_words": 105, "vocab_size": 64, "complexity": 5, "nloc": 20, "token_counts": 272, "n_ast_nodes": 410, "n_identifiers": 22, "random_cut": "def index_sample(x, index):\n \n x_s = x.shape\n dim = len(index.shape) - 1\n assert x_s[:dim] == index.shape[:dim]\n\n if len(x_s) == 3 and dim == 1:\n r_x = paddle.reshape(x, shape=[-1, x_s[1], x_s[-1]])\n else:\n r_x = paddle.reshape(x, shape=[-1, x_s[-1]])\n\n index = paddle.reshape(index, shape=[len(r_x), -1, 1])\n # Generate arange index, shape like index\n ", "d_id": 118072, "documentation": { "docstring": "\n Select input value according to index\n \n Arags:\n input: input matrix\n index: index matrix\n Returns:\n output\n >>> input\n [\n [1, 2, 3],\n [4, 5, 6]\n ]\n >>> index\n [\n [1, 2],\n [0, 1]\n ]\n >>> index_sample(input, index)\n [\n [2, 3],\n [4, 5]\n ]\n ", "n_words": 42, "vocab_size": 28, "n_whitespaces": 149, "language": "en" } }, { "id": 197094, "commit_id": "f8674bfe4988332e7ce60ceb36b365ce9aff662a", "repo": "sympy", "path": "sympy/diffgeom/diffgeom.py", "file_name": "diffgeom.py", "fun_name": "__new__", "commit_message": "Update the sympy.diffgeom mutability deprecations", "code": "def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):\n if not isinstance(name, Str):\n name = Str(name)\n\n # canonicallize the symbols\n if symbols is None:\n names = kwargs.get('names', None)\n if names is None:\n symbols = Tuple(\n *[Symbol('%s_%s' % (name.name, i), 
real=True)\n for i in range(patch.dim)]\n )\n else:\n sympy_deprecation_warning(\n f,\n deprecated_since_version=\"1.7\",\n active_deprecations_target=\"deprecated-diffgeom-mutable\",\n )\n symbols = Tuple(\n *[Symbol(n, real=True) for n in names]\n )\n else:\n syms = []\n for s in symbols:\n if isinstance(s, Symbol):\n syms.append(Symbol(s.name, **s._assumptions.generator))\n elif isinstance(s, str):\n sympy_deprecation_warning(\n f,\n\n deprecated_since_version=\"1.7\",\n active_deprecations_target=\"deprecated-diffgeom-mutable\",\n )\n syms.append(Symbol(s, real=True))\n symbols = Tuple(*syms)\n\n # canonicallize the relations\n rel_temp = {}\n for k,v in relations.items():\n s1, s2 = k\n if not isinstance(s1, Str):\n s1 = Str(s1)\n if not isinstance(s2, Str):\n s2 = Str(s2)\n key = Tuple(s1, s2)\n\n # Old version used Lambda as a value.\n if isinstance(v, Lambda):\n v = (tuple(v.signature), tuple(v.expr))\n else:\n v = (tuple(v[0]), tuple(v[1]))\n rel_temp[key] = v\n relations = Dict(rel_temp)\n\n # construct the object\n obj = super().__new__(cls, name, patch, symbols, relations)\n\n # Add deprecated attributes\n obj.transforms = _deprecated_dict(\n , {})\n obj._names = [str(n) for n in symbols]\n obj.patch.coord_systems.append(obj) # deprecated\n obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated\n obj._dummy = Dummy()\n\n return obj\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 923, "n_words": 188, "vocab_size": 109, "complexity": 15, "nloc": 73, "token_counts": 399, "n_ast_nodes": 681, "n_identifiers": 50, "random_cut": "def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):\n if not isinstance(name, Str):\n name = Str(name)\n\n # canonicallize the symbols\n if symbols is None:\n names = kwargs.get('names', None)\n if names is None:\n symbols = Tuple(\n *[Symbol('%s_%s' % (name.name, i), real=True)\n for i in range(patch.dim)]\n )\n else:\n sympy_deprecation_warning(\n f,\n deprecated_since_version=\"1.7\",\n active_deprecations_target=\"deprecated-diffgeom-mutable\",\n )\n symbols = Tuple(\n *[Symbol(n, real=True) for n in names]\n ", "d_id": 48334, "documentation": { "docstring": "\nThe 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That\nis, replace\n\n CoordSystem(..., names={names})\n\nwith\n\n CoordSystem(..., symbols=[{', '.join([\"Symbol(\" + repr(n) + \", real=True)\" for n in names])}])\n \n\nPassing a string as the coordinate symbol name to CoordSystem is deprecated.\nPass a Symbol with the appropriate name and assumptions instead.\n\nThat is, replace {s} with Symbol({s!r}, real=True).\n \n CoordSystem.transforms is deprecated. The CoordSystem class is now\n immutable. 
Use the 'relations' keyword argument to the\n CoordSystems() constructor to specify relations.\n ", "n_words": 78, "vocab_size": 52, "n_whitespaces": 167, "language": "en" } }, { "id": 320610, "commit_id": "57155e329ada002245ab3fac45d906f6707c14cf", "repo": "qutebrowser", "path": "qutebrowser/completion/models/miscmodels.py", "file_name": "miscmodels.py", "fun_name": "tab_focus", "commit_message": "Fixes qutebrowser/qutebrowser#6967 by adding win id param in _tabs & using it in delete_tabs\n\nAs delete_tab was assuming that completion column contains window ID, it was showing\nexception in case of tab-focus, as it doesn't have the window ID in completion column.\nSo instead a new parameter named current_win_id is used in _tabs which is also passed\nin all uses of the function.", "code": "def tab_focus(*, info):\n \n model = _tabs(win_id_filter=lambda win_id: win_id == info.win_id,\n add_win_id=False, current_win_id=info.win_id)\n\n special = [\n (\"last\", \"Focus the last-focused tab\"),\n (\"stack-next\", \"Go forward through a stack of focused tabs\"),\n (\"stack-prev\", \"Go backward through a stack of focused tabs\"),\n ]\n model.add_category(listcategory.ListCategory(\"Special\", special))\n\n return model\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 99, "n_words": 43, "vocab_size": 34, "complexity": 1, "nloc": 10, "token_counts": 70, "n_ast_nodes": 118, "n_identifiers": 12, "random_cut": "def tab_focus(*, info):\n \n m", "d_id": 117222, "documentation": { "docstring": "A model to complete on open tabs in the current window.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 68836, "commit_id": "74a782d81d8f8c4a4d9214a9c06377e5e6e464dd", "repo": "erpnext", "path": "erpnext/accounts/report/sales_payment_summary/sales_payment_summary.py", "file_name": "sales_payment_summary.py", "fun_name": "get_mode_of_payment_details", "commit_message": "refactor: DB independent quoting and truthy/falsy values (#31358)\n\n* refactor: DB independent quoting and truthy/falsy values\r\n\r\n* style: reformat to black spec\r\n\r\n* fix: ifnull -> coalesce\r\n\r\n* fix: coalesce -> Coalesce\r\n\r\n* fix: revert pypika comparison\r\n\r\n* refactor: convert queries to QB\r\n\r\n* fix: incorrect value types for query\r\n\r\n`=` query makes no sense with list of values\r\n\r\n* fix: remove warehouse docstatus condition\r\n\r\n* fix: keep using base rate as rate\r\n\r\nCo-authored-by: Ankush Menat ", "code": "def get_mode_of_payment_details(filters):\n\tmode_of_payment_details = {}\n\tinvoice_list = get_invoices(filters)\n\tinvoice_list_names = \",\".join(\"'\" + invoice[\"name\"] + \"'\" for invoice in invoice_list)\n\tif invoice_list:\n\t\tinv_mop_detail = frappe.db.sql(\n\t\t\t.format(\n\t\t\t\tinvoice_list_names=invoice_list_names\n\t\t\t),\n\t\t\tas_dict=1,\n\t\t)\n\n\t\tinv_change_amount = frappe.db.sql(\n\t\t\t.format(\n\t\t\t\tinvoice_list_names=invoice_list_names\n\t\t\t),\n\t\t\tas_dict=1,\n\t\t)\n\n\t\tfor d in inv_change_amount:\n\t\t\tfor det in inv_mop_detail:\n\t\t\t\tif (\n\t\t\t\t\tdet[\"owner\"] == d[\"owner\"]\n\t\t\t\t\tand det[\"posting_date\"] == d[\"posting_date\"]\n\t\t\t\t\tand det[\"mode_of_payment\"] == d[\"mode_of_payment\"]\n\t\t\t\t):\n\t\t\t\t\tpaid_amount = det[\"paid_amount\"] - d[\"change_amount\"]\n\t\t\t\t\tdet[\"paid_amount\"] = paid_amount\n\n\t\tfor d in inv_mop_detail:\n\t\t\tmode_of_payment_details.setdefault(d[\"owner\"] + 
cstr(d[\"posting_date\"]), []).append(\n\t\t\t\t(d.mode_of_payment, d.paid_amount)\n\t\t\t)\n\n\treturn mode_of_payment_details\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 49, "n_words": 80, "vocab_size": 50, "complexity": 9, "nloc": 71, "token_counts": 181, "n_ast_nodes": 304, "n_identifiers": 22, "random_cut": "def get_mode_of_payment_details(filters):\n\tmode_of_payment_details = {}\n\tinvoice_list = get_invoices(filters)\n\tinvoice_list_names = \",\".join(\"'\" + invoice[\"name\"] + \"'\" for invoice in invoice_list)\n\tif invoice_list:\n\t\tinv_mop_detail = frappe.db.sq", "d_id": 14907, "documentation": { "docstring": "\n\t\t\tselect t.owner,\n\t\t\t t.posting_date,\n\t\t\t\t t.mode_of_payment,\n\t\t\t\t sum(t.paid_amount) as paid_amount\n\t\t\tfrom (\n\t\t\t\tselect a.owner, a.posting_date,\n\t\t\t\tifnull(b.mode_of_payment, '') as mode_of_payment, sum(b.base_amount) as paid_amount\n\t\t\t\tfrom `tabSales Invoice` a, `tabSales Invoice Payment` b\n\t\t\t\twhere a.name = b.parent\n\t\t\t\tand a.docstatus = 1\n\t\t\t\tand a.name in ({invoice_list_names})\n\t\t\t\tgroup by a.owner, a.posting_date, mode_of_payment\n\t\t\t\tunion\n\t\t\t\tselect a.owner,a.posting_date,\n\t\t\t\tifnull(b.mode_of_payment, '') as mode_of_payment, sum(c.allocated_amount) as paid_amount\n\t\t\t\tfrom `tabSales Invoice` a, `tabPayment Entry` b,`tabPayment Entry Reference` c\n\t\t\t\twhere a.name = c.reference_name\n\t\t\t\tand b.name = c.parent\n\t\t\t\tand b.docstatus = 1\n\t\t\t\tand a.name in ({invoice_list_names})\n\t\t\t\tgroup by a.owner, a.posting_date, mode_of_payment\n\t\t\t\tunion\n\t\t\t\tselect a.owner, a.posting_date,\n\t\t\t\tifnull(a.voucher_type,'') as mode_of_payment, sum(b.credit)\n\t\t\t\tfrom `tabJournal Entry` a, `tabJournal Entry Account` b\n\t\t\t\twhere a.name = b.parent\n\t\t\t\tand a.docstatus = 1\n\t\t\t\tand b.reference_type = 'Sales Invoice'\n\t\t\t\tand b.reference_name in ({invoice_list_names})\n\t\t\t\tgroup by a.owner, a.posting_date, mode_of_payment\n\t\t\t) t\n\t\t\tgroup by t.owner, t.posting_date, t.mode_of_payment\n\t\t\tselect a.owner, a.posting_date,\n\t\t\tifnull(b.mode_of_payment, '') as mode_of_payment, sum(a.base_change_amount) as change_amount\n\t\t\tfrom `tabSales Invoice` a, `tabSales Invoice Payment` b\n\t\t\twhere a.name = b.parent\n\t\t\tand a.name in ({invoice_list_names})\n\t\t\tand b.type = 'Cash'\n\t\t\tand a.base_change_amount > 0\n\t\t\tgroup by a.owner, a.posting_date, mode_of_payment", "n_words": 169, "vocab_size": 64, "n_whitespaces": 142, "language": "en" } }, { "id": 265164, "commit_id": "379880cd8431da6cc39753a8b3a7c8bfcd8f9cc1", "repo": "netbox", "path": "netbox/extras/models/configcontexts.py", "file_name": "configcontexts.py", "fun_name": "get_config_context", "commit_message": "Closes #9582: Enable assigning config contexts based on device location", "code": "def get_config_context(self):\n \n data = {}\n\n if not hasattr(self, 'config_context_data'):\n # The annotation is not available, so we fall back to manually querying for the config context objects\n config_context_data = ConfigContext.objects.get_for_object(self, aggregate_data=True)\n else:\n # The attribute may exist, but the annotated value could be None if there is no config context data\n config_context_data = self.config_context_data or []\n\n for context in config_context_data:\n data = deepmerge(data, context)\n\n # If the object has local config context data defined, merge it 
last\n if self.local_context_data:\n data = deepmerge(data, self.local_context_data)\n\n return data\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 207, "n_words": 85, "vocab_size": 59, "complexity": 5, "nloc": 11, "token_counts": 73, "n_ast_nodes": 122, "n_identifiers": 12, "random_cut": "def get_config_context(self):\n \n data = {}\n\n if not hasattr(self, 'config_context_data'):\n # The annotation is not available, so we fall back to manually querying for the config context objects\n config_context_data = ConfigContext.objects.get_for_object(self, aggregate_data=True)\n else:\n # The attribute may exist, but the annotated value could be None if there is no config context data\n config_context_data = self.config_context_data or []\n\n for context in config_context_data:\n data = deepmerge(data,", "d_id": 78013, "documentation": { "docstring": "\n Compile all config data, overwriting lower-weight values with higher-weight values where a collision occurs.\n Return the rendered configuration context for a device or VM.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 46, "language": "en" } }, { "id": 249408, "commit_id": "37f329c9adf6ed02df15661850f999edd9e5fd93", "repo": "synapse", "path": "tests/rest/admin/test_server_notice.py", "file_name": "test_server_notice.py", "fun_name": "test_displayname_is_set_avatar_is_none", "commit_message": "Fix that sending server notices fail if avatar is `None` (#13566)\n\nIndroduced in #11846.", "code": "def test_displayname_is_set_avatar_is_none(self) -> None:\n \n channel = self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content={\n \"user_id\": self.other_user,\n \"content\": {\"msgtype\": \"m.text\", \"body\": \"test msg\"},\n },\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n\n # user has one invite\n self._check_invite_and_join_status(self.other_user, 1, 0)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 159, "n_words": 32, "vocab_size": 32, "complexity": 1, "nloc": 17, "token_counts": 78, "n_ast_nodes": 129, "n_identifiers": 14, "random_cut": "def test_displayname_is_set_avatar_is_none(self) -> None:\n \n channel = self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content={\n \"user_id\": self.other_user,\n \"content\": {\"msgtype\": \"m.text\", \"body\": \"test msg\"},\n },\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n\n # user has one inv", "d_id": 72899, "documentation": { "docstring": "\n Tests that sending a server notices is successfully,\n if a display_name is set, avatar_url is `None` and\n \"check avatar size and mime type\" is set.\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 54, "language": "en" } }, { "id": 132250, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/schedulers/hyperband.py", "file_name": "hyperband.py", "fun_name": "cur_iter_done", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def cur_iter_done(self) -> bool:\n \n return all(\n self._get_result_time(result) >= self._cumul_r\n for result in self._live_trials.values()\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, 
"n_whitespaces": 57, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 8, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 9, "random_cut": "def cur_iter_done(self) -> bool:\n \n return all(\n self._get_result_time(result) >", "d_id": 29712, "documentation": { "docstring": "Checks if all iterations have completed.\n\n TODO(rliaw): also check that `t.iterations == self._r`", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 65946, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/education/report/student_monthly_attendance_sheet/student_monthly_attendance_sheet.py", "file_name": "student_monthly_attendance_sheet.py", "fun_name": "get_attendance_years", "commit_message": "style: format code with black", "code": "def get_attendance_years():\n\tyear_list = frappe.db.sql_list(\n\t\t\n\t)\n\tif not year_list:\n\t\tyear_list = [getdate().year]\n\treturn \"\\n\".join(str(year) for year in year_list)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 12, "n_words": 18, "vocab_size": 16, "complexity": 3, "nloc": 7, "token_counts": 41, "n_ast_nodes": 72, "n_identifiers": 9, "random_cut": "def get_attendance_years():\n\tyear_list = frappe.db.sql_list(\n\t\t\n\t)\n\tif not year_list:\n\t\tyear_list ", "d_id": 14065, "documentation": { "docstring": "select distinct YEAR(date) from `tabStudent Attendance` ORDER BY YEAR(date) DESC", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 316444, "commit_id": "7cd68381f1d4f58930ffd631dfbfc7159d459832", "repo": "core", "path": "tests/test_config_entries.py", "file_name": "test_config_entries.py", "fun_name": "test_discovery_notification", "commit_message": "Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)", "code": "async def test_discovery_notification(hass):\n \n mock_integration(hass, MockModule(\"test\"))\n mock_entity_platform(hass, \"config_flow.test\", None)\n\n with patch.dict(config_entries.HANDLERS):\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 28, "token_counts": 223, "n_ast_nodes": 61, "n_identifiers": 9, "random_cut": "async def test_discovery_notification(hass):\n \n mock_integration(hass, MockModule(\"test\"))\n mock_entity_platform(hass, \"config_flow.test\", None)\n\n with patch.dict(config_entries.", "d_id": 115022, "documentation": { "docstring": "Test that we create/dismiss a notification when source is discovery.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 63674, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/resolvelib/providers.py", "file_name": "providers.py", "fun_name": "get_preference", "commit_message": "upd; format", "code": "def get_preference(self, identifier, resolutions, candidates, information):\n \n raise NotImplementedError\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 24, "n_identifiers": 7, "random_cut": "def get_preference(self, identifier, resolutions, candidates, information):\n \n raise 
NotImplementedError\n", "d_id": 13468, "documentation": { "docstring": "Produce a sort key for given requirement based on preference.\n\n The preference is defined as \"I think this requirement should be\n resolved first\". The lower the return value is, the more preferred\n this group of arguments is.\n\n :param identifier: An identifier as returned by ``identify()``. This\n identifies the dependency matches of which should be returned.\n :param resolutions: Mapping of candidates currently pinned by the\n resolver. Each key is an identifier, and the value a candidate.\n The candidate may conflict with requirements from ``information``.\n :param candidates: Mapping of each dependency's possible candidates.\n Each value is an iterator of candidates.\n :param information: Mapping of requirement information of each package.\n Each value is an iterator of *requirement information*.\n\n A *requirement information* instance is a named tuple with two members:\n\n * ``requirement`` specifies a requirement contributing to the current\n list of candidates.\n * ``parent`` specifies the candidate that provides (dependend on) the\n requirement, or ``None`` to indicate a root requirement.\n\n The preference could depend on a various of issues, including (not\n necessarily in this order):\n\n * Is this package pinned in the current resolution result?\n * How relaxed is the requirement? Stricter ones should probably be\n worked on first? (I don't know, actually.)\n * How many possibilities are there to satisfy this requirement? Those\n with few left should likely be worked on first, I guess?\n * Are there any known conflicts for this requirement? We should\n probably work on those with the most known conflicts.\n\n A sortable value should be returned (this will be used as the ``key``\n parameter of the built-in sorting function). The smaller the value is,\n the more preferred this requirement is (i.e. 
the sorting function\n is called with ``reverse=False``).\n ", "n_words": 279, "vocab_size": 160, "n_whitespaces": 526, "language": "en" } }, { "id": 19946, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/req/req_install.py", "file_name": "req_install.py", "fun_name": "prepare_metadata", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def prepare_metadata(self) -> None:\n \n assert self.source_dir\n details = self.name or f\"from {self.link}\"\n\n if self.use_pep517:\n assert self.pep517_backend is not None\n if (\n self.editable\n and self.permit_editable_wheels\n and self.supports_pyproject_editable()\n ):\n self.metadata_directory = generate_editable_metadata(\n build_env=self.build_env,\n backend=self.pep517_backend,\n details=details,\n )\n else:\n self.metadata_directory = generate_metadata(\n build_env=self.build_env,\n backend=self.pep517_backend,\n details=details,\n )\n else:\n self.metadata_directory = generate_metadata_legacy(\n build_env=self.build_env,\n setup_py_path=self.setup_py_path,\n source_dir=self.unpacked_source_directory,\n isolated=self.isolated,\n details=details,\n )\n\n # Act on the newly generated metadata, based on the name and version.\n if not self.name:\n self._set_requirement()\n else:\n self.warn_on_mismatching_name()\n\n self.assert_source_matches_version()\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 517, "n_words": 72, "vocab_size": 50, "complexity": 7, "nloc": 39, "token_counts": 157, "n_ast_nodes": 252, "n_identifiers": 23, "random_cut": "def prepare_metadata(self) -> None:\n \n assert self.source_dir\n details = self.name or f\"from {self.link}\"\n\n if self.use_pep517:\n assert self.pep517_backend is not None\n if (\n self.editable\n and self.permit_editable_wheels\n and self.supports_pyproject_editable()\n ):\n self.metadata_directory = generate_editable_metadata(\n build_env=self.build_env,\n backend=self.pep517_backend,\n details=details,\n )\n else:\n self.metadata_directory = generate_metadata(\n build_env=self.build_env,\n backend=self.pep517_backend,\n details=details,\n )\n else:\n self.metadata_directory = generate_metadata_legacy(\n build_env=self.build_env,\n setup_py_path=self.setup_py_path,\n source", "d_id": 3155, "documentation": { "docstring": "Ensure that project metadata is available.\n\n Under PEP 517 and PEP 660, call the backend hook to prepare the metadata.\n Under legacy processing, call setup.py egg-info.\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 47, "language": "en" } }, { "id": 75204, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_blocks.py", "file_name": "test_blocks.py", "fun_name": "get_image_filename", "commit_message": "Reformat with black", "code": "def get_image_filename(self, image, filterspec):\n \n name, ext 
= os.path.splitext(os.path.basename(image.file.name))\n return \"{}images/{}.{}{}\".format(settings.MEDIA_URL, name, filterspec, ext)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 34, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 48, "n_ast_nodes": 75, "n_identifiers": 14, "random_cut": "def get_image_filename(self, image, filterspec):\n \n name, ext = os.path.splitext(os.path.basename(i", "d_id": 16381, "documentation": { "docstring": "\n Get the generated filename for a resized image\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 219648, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "min", "commit_message": "add python 3.10.4 for windows", "code": "def min(self, other, context=None):\n \n other = _convert_other(other, raiseit=True)\n\n if context is None:\n context = getcontext()\n\n if self._is_special or other._is_special:\n # If one operand is a quiet NaN and the other is number, then the\n # number is always returned\n sn = self._isnan()\n on = other._isnan()\n if sn or on:\n if on == 1 and sn == 0:\n return self._fix(context)\n if sn == 1 and on == 0:\n return other._fix(context)\n return self._check_nans(other, context)\n\n c = self._cmp(other)\n if c == 0:\n c = self.compare_total(other)\n\n if c == -1:\n ans = self\n else:\n ans = other\n\n return ans._fix(context)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 340, "n_words": 95, "vocab_size": 51, "complexity": 12, "nloc": 21, "token_counts": 143, "n_ast_nodes": 232, "n_identifiers": 17, "random_cut": "def min(self, other, context=None):\n \n other = _convert_other(other, raiseit=True)\n\n if context is None:\n context = getcontext()\n\n if self._is_special or other._is_special:\n # If one operand is a quiet NaN and the other is number, then the\n # number is always returned\n sn = self._isnan()\n on = other._isnan()\n if sn or on:\n if on == 1 and sn == 0:\n return self._fix(context)\n if sn == 1 and on == 0:\n return other._fix(context)\n return self._check_nans", "d_id": 55678, "documentation": { "docstring": "Returns the smaller value.\n\n Like min(self, other) except if one is not a number, returns\n NaN (and signals if one is sNaN). 
Also rounds.\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 46, "language": "en" } }, { "id": 207160, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_filters/tests.py", "file_name": "tests.py", "fun_name": "test_simplelistfilter_with_none_returning_lookups", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_simplelistfilter_with_none_returning_lookups(self):\n \n modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0]\n self.assertEqual(len(filterspec), 0)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 70, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 64, "n_ast_nodes": 105, "n_identifiers": 17, "random_cut": "def test_simplelistfilter_with_none_returning_lookups(self):\n \n modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)\n request = self.request_f", "d_id": 51886, "documentation": { "docstring": "\n A SimpleListFilter lookups method can return None but disables the\n filter completely.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 6991, "commit_id": "3030fc2f7d414d54a9aaa0f7b47ccf8d4f54b12c", "repo": "ludwig", "path": "ludwig/data/preprocessing.py", "file_name": "preprocessing.py", "fun_name": "precompute_fill_value", "commit_message": "Fixes NaN handling in boolean dtypes (#2058)\n\n* reorganizes cast_columns and handle_missing_values\r\n\r\n* fix failing tests with logs\r\n\r\n* remove logs\r\n\r\n* re-added deflaked test\r\n\r\n* cleanup\r\n\r\n* refactor to avoid calling handle missing values twice\r\n\r\n* refactored build preprocessing and metadata to separate fns\r\n\r\n* improve style with metadata\r\n\r\n* preserves outputs as booleans for binary output feature\r\n\r\n* remove extraneous casting\r\n\r\n* fixes related to manual boolean casting\r\n\r\n* leaving a note comment in read_xsv for prosperity\r\n\r\n* updates wording\r\n\r\n* cast changed from np fixed length str (modin) to object\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* unit tests\r\n\r\n* revert back to str type again\r\n\r\n* add backwards compatible behavior in torchscript\r\n\r\n* add comment in precompute_fill_value to remind devs of NaNs\r\n\r\n* revert changes to test_class_imbalance_feature::test_imbalance_ray\r\n\r\n* cleanup", "code": "def precompute_fill_value(dataset_cols, feature, preprocessing_parameters, backend):\n \n missing_value_strategy = preprocessing_parameters[\"missing_value_strategy\"]\n if missing_value_strategy == FILL_WITH_CONST:\n return preprocessing_parameters[\"fill_value\"]\n elif missing_value_strategy == FILL_WITH_MODE:\n return dataset_cols[feature[COLUMN]].value_counts().index[0]\n elif missing_value_strategy == FILL_WITH_MEAN:\n if feature[TYPE] != NUMBER:\n raise ValueError(\n f\"Filling missing values with mean is supported \"\n f\"only for number types, not for type {feature[TYPE]}.\",\n )\n return backend.df_engine.compute(dataset_cols[feature[COLUMN]].mean())\n elif missing_value_strategy == FILL_WITH_FALSE:\n distinct_values = backend.df_engine.compute(\n dataset_cols[feature[COLUMN]].drop_duplicates().dropna()\n ).values.tolist()\n if len(distinct_values) > 2:\n raise ValueError(\n f\"Missing value 
strategy `fill_with_false` \"\n f\"for column {feature[COLUMN]} expects 2 distinct values, \"\n f\"found: {len(distinct_values)} (ex: {distinct_values[:10]})\"\n )\n\n # Determine the False label.\n # Distinct values are sorted in reverse to mirror the selection of the default fallback_true_label (in\n # binary_feature.get_feature_meta) for binary columns with unconventional boolean values, \"human\"/\"bot\".\n for v in sorted(distinct_values, reverse=True):\n fallback_true_label = preprocessing_parameters.get(\"fallback_true_label\", \"true\")\n if strings_utils.str2bool(v, fallback_true_label) is False:\n return v\n raise ValueError(\n f\"Unable to determine False value for column {feature[COLUMN]} with distinct values: {distinct_values}.\"\n )\n # Otherwise, we cannot precompute the fill value for this dataset\n return None\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 450, "n_words": 157, "vocab_size": 105, "complexity": 9, "nloc": 31, "token_counts": 188, "n_ast_nodes": 351, "n_identifiers": 32, "random_cut": "def precompute_fill_value(dataset_cols, feature, preprocessing_parameters, backend):\n \n missing_value_strategy = preprocessing_parameters[\"missing_value_strategy\"]\n if missing_value_strategy == FILL_WITH_CONST:\n return preprocessing_parameters[\"fill_value\"]\n elif missing_value_strategy == FILL_WITH_MODE:\n return dataset_cols[feature[COLUMN]].value_counts().index[0]\n elif missing_value_strategy == FILL_WITH_MEAN:\n if feature[TYPE] != NUMBER:\n raise ValueError(\n f\"Filling missing values with mean is supported \"\n f\"only for number types, not for type {feature[TYPE]}.\",\n )\n return backend.df_engine.compute(dataset_cols[feature[COLUMN]].mean())\n elif missing_value_strategy == FILL_WITH_FALSE:\n distinct_values = backend.df_engine.compute(\n dataset_cols[feature[COLUMN]].drop_duplicates().dropna()\n ).values.tolist()\n if len(distinct_values) > 2:\n raise ValueError(\n f\"Missing value strategy `fill_with_false` \"\n f\"for column {feature[COLUMN]} expects 2 distinct values, \"\n f\"found: {len(distinct_values)} (ex: {distinct_values[:10]})\"\n )\n\n # Determine the False label.\n # Distinct values are sorted in reverse to mirror the selection of the default fallback_true_label (in\n # binary_feature.get_feature_meta) for binary columns with unconventional boolean values, \"human\"/\"bot\".\n for v in sorted(distinct_values, reverse=True):\n fallback_true_label = preprocessing_parameters.get(\"fallback_true_label\", \"true\")\n if strings_utils.str2bool(v, fallback_true_label) is False:\n return v\n raise ValueError(\n f\"Unable to determine False value for column {feature[COLUMN]} with distinct values: {distin", "d_id": 1099, "documentation": { "docstring": "Precomputes the fill value for a feature.\n\n NOTE: this is called before NaNs are removed from the dataset. 
Modifications here must handle NaNs gracefully.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 30, "language": "en" } }, { "id": 33645, "commit_id": "59407bbeb31fff8340938768051c9daabd38d7a7", "repo": "transformers", "path": "src/transformers/models/deformable_detr/modeling_deformable_detr.py", "file_name": "modeling_deformable_detr.py", "fun_name": "loss_labels", "commit_message": "Add Deformable DETR (#17281)\n\n* First draft\r\n\r\n* More improvements\r\n\r\n* Improve model, add custom CUDA code\r\n\r\n* Import torch before\r\n\r\n* Add script that imports custom layer\r\n\r\n* Add everything in new ops directory\r\n\r\n* Import custom layer in modeling file\r\n\r\n* Fix ARCHIVE_MAP typo\r\n\r\n* Creating the custom kernel on the fly.\r\n\r\n* Import custom layer in modeling file\r\n\r\n* More improvements\r\n\r\n* Fix CUDA loading\r\n\r\n* More improvements\r\n\r\n* Improve conversion script\r\n\r\n* Improve conversion script\r\n\r\n* Make it work until encoder_outputs\r\n\r\n* Make forward pass work\r\n\r\n* More improvements\r\n\r\n* Make logits match original implementation\r\n\r\n* Make implementation also support single_scale model\r\n\r\n* Add support for single_scale and dilation checkpoint\r\n\r\n* Add support for with_box_refine model\r\n\r\n* Support also two stage model\r\n\r\n* Improve tests\r\n\r\n* Fix more tests\r\n\r\n* Make more tests pass\r\n\r\n* Upload all models to the hub\r\n\r\n* Clean up some code\r\n\r\n* Improve decoder outputs\r\n\r\n* Rename intermediate hidden states and reference points\r\n\r\n* Improve model outputs\r\n\r\n* Move tests to dedicated folder\r\n\r\n* Improve model outputs\r\n\r\n* Fix retain_grad test\r\n\r\n* Improve docs\r\n\r\n* Clean up and make test_initialization pass\r\n\r\n* Improve variable names\r\n\r\n* Add copied from statements\r\n\r\n* Improve docs\r\n\r\n* Fix style\r\n\r\n* Improve docs\r\n\r\n* Improve docs, move tests to model folder\r\n\r\n* Fix rebase\r\n\r\n* Remove DetrForSegmentation from auto mapping\r\n\r\n* Apply suggestions from code review\r\n\r\n* Improve variable names and docstrings\r\n\r\n* Apply some more suggestions from code review\r\n\r\n* Apply suggestion from code review\r\n\r\n* better docs and variables names\r\n\r\n* hint to num_queries and two_stage confusion\r\n\r\n* remove asserts and code refactor\r\n\r\n* add exception if two_stage is True and with_box_refine is False\r\n\r\n* use f-strings\r\n\r\n* Improve docs and variable names\r\n\r\n* Fix code quality\r\n\r\n* Fix rebase\r\n\r\n* Add require_torch_gpu decorator\r\n\r\n* Add pip install ninja to CI jobs\r\n\r\n* Apply suggestion of @sgugger\r\n\r\n* Remove DeformableDetrForObjectDetection from auto mapping\r\n\r\n* Remove DeformableDetrModel from auto mapping\r\n\r\n* Add model to toctree\r\n\r\n* Add model back to mappings, skip model in pipeline tests\r\n\r\n* Apply @sgugger's suggestion\r\n\r\n* Fix imports in the init\r\n\r\n* Fix copies\r\n\r\n* Add CPU implementation\r\n\r\n* Comment out GPU function\r\n\r\n* Undo previous change\r\n\r\n* Apply more suggestions\r\n\r\n* Remove require_torch_gpu annotator\r\n\r\n* Fix quality\r\n\r\n* Add logger.info\r\n\r\n* Fix logger\r\n\r\n* Fix variable names\r\n\r\n* Fix initializaztion\r\n\r\n* Add missing initialization\r\n\r\n* Update checkpoint name\r\n\r\n* Add model to doc tests\r\n\r\n* Add CPU/GPU equivalence test\r\n\r\n* Add Deformable DETR to pipeline tests\r\n\r\n* Skip model for object detection pipeline\r\n\r\nCo-authored-by: Nicolas Patry \r\nCo-authored-by: Nouamane Tazi 
\r\nCo-authored-by: Sylvain Gugger ", "code": "def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n \n if \"logits\" not in outputs:\n raise ValueError(\"No logits were found in the outputs\")\n\n source_logits = outputs[\"logits\"]\n\n idx = self._get_source_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"class_labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device\n )\n target_classes[idx] = target_classes_o\n\n target_classes_onehot = torch.zeros(\n [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1],\n dtype=source_logits.dtype,\n layout=source_logits.layout,\n device=source_logits.device,\n )\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n\n target_classes_onehot = target_classes_onehot[:, :, :-1]\n loss_ce = (\n sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2)\n * source_logits.shape[1]\n )\n losses = {\"loss_ce\": loss_ce}\n\n return losses\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 284, "n_words": 84, "vocab_size": 68, "complexity": 3, "nloc": 24, "token_counts": 226, "n_ast_nodes": 337, "n_identifiers": 36, "random_cut": "def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n \n if \"logits\" not in outputs:\n raise ValueError(\"No logits were found in the outputs\")\n\n source_logits = outputs[\"logits\"]\n\n idx = self._get_source_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"class_labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n source_logits.shape[:2], self.num_classes, dtype=torch.int64, device", "d_id": 6129, "documentation": { "docstring": "Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 30, "language": "en" } }, { "id": 260920, "commit_id": "c18460f78441f11b3e6c15c12238695fcfe3c872", "repo": "scikit-learn", "path": "sklearn/ensemble/tests/test_stacking.py", "file_name": "test_stacking.py", "fun_name": "test_stacking_classifier_multilabel_predict_proba", "commit_message": "EHN Add multilabel classification support for `StackingClassifier` (#24146)\n\n* Add stacking multilabel functionality\n\n* Add underscore to a class attr\n\n* Remove model from base estimator in test_stacking\n\n* Remove scale in train/test split in test_stacking_classifier_multilabel\n\n* Add stack_method as a test parameter, change RandomForestClassifier to KNeighborsClassifier in test\n\n* Update Changelog\n\n* fix doc typos\n\n* predict_proba output will be concatenate this list in an array of shape n_samples, n_outputs * n_classes - 1. 
Update test.\n\n* Update sklearn/ensemble/_stacking.py\n\nCo-authored-by: Guillaume Lemaitre \n\n* Update doc/whats_new/v1.0.rst\n\nCo-authored-by: Guillaume Lemaitre \n\n* update whats_new\n\n* add passthrough test\n\n* update whats_new with current PR\n\n* Apply suggestions from code review\n\nCo-authored-by: Julien Jerphanion \n\n* update tests\n\n* Apply suggestion to update comments on `concatenate`\n\nCo-authored-by: Julien Jerphanion \n\n* parametrized the two tests into one\n\n* parametrized the two tests into one\n\n* strip the mysterious trailing _r\n\n* fix multilabel list scenario\n\n* add Guillaume's recommendations\n\n* add test for\n\n* some fix\n\n* split tests\n\n* fix flake8\n\n* add suggestions\n\n* Trigger CI\n\n* remove multiclass-multioutput from comments and docstrings\n\nCo-authored-by: Nicolas \nCo-authored-by: Nestor Navarro \nCo-authored-by: Nestor Navarro \nCo-authored-by: Guillaume Lemaitre \nCo-authored-by: Julien Jerphanion ", "code": "def test_stacking_classifier_multilabel_predict_proba(estimator):\n \n X_train, X_test, y_train, y_test = train_test_split(\n X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42\n )\n n_outputs = 3\n\n estimators = [(\"est\", estimator)]\n stacker = StackingClassifier(\n estimators=estimators,\n final_estimator=KNeighborsClassifier(),\n stack_method=\"predict_proba\",\n ).fit(X_train, y_train)\n\n X_trans = stacker.transform(X_test)\n assert X_trans.shape == (X_test.shape[0], n_outputs)\n # we should not have any collinear classes and thus nothing should sum to 1\n assert not any(np.isclose(X_trans.sum(axis=1), 1.0))\n\n y_pred = stacker.predict(X_test)\n assert y_pred.shape == y_test.shape\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 129, "n_words": 62, "vocab_size": 52, "complexity": 1, "nloc": 16, "token_counts": 127, "n_ast_nodes": 193, "n_identifiers": 29, "random_cut": "def test_stacking_classifier_multilabel_predict_proba(estimator):\n \n X_train, X_test, y_train, y_test = train_test_split(\n X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42\n )\n n_outputs = 3\n\n estimators = [(\"est\", estimator)]\n stacker = StackingClassifier(\n estimators=estimators,\n final_estimator=KNeighborsClassifier(),\n stack_method=\"predict_proba\",\n ).fit(X_train, y_train)\n\n X_trans = stacker.transform(X_test)\n assert X_trans.shape == (X_test.shape[0], n_outputs)\n # we should not have any collinear classes and thus nothing should sum to 1\n assert not any(np.isclose(X_trans.sum(axis=1), 1.0))\n\n y_pred = stac", "d_id": 76567, "documentation": { "docstring": "Check the behaviour for the multilabel classification case and the\n `predict_proba` stacking method.\n\n Estimators are not consistent with the output arrays and we need to ensure that\n we handle all cases.\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 43, "language": "en" } }, { "id": 62033, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py", "file_name": "locators.py", "fun_name": "get_page", "commit_message": "upd; format", "code": "def get_page(self, url):\n \n # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api\n scheme, netloc, path, _, _, _ = urlparse(url)\n if scheme == 'file' and os.path.isdir(url2pathname(path)):\n url = urljoin(ensure_slash(url), 'index.html')\n\n if url in 
self._page_cache:\n result = self._page_cache[url]\n logger.debug('Returning %s from cache: %s', url, result)\n else:\n host = netloc.split(':', 1)[0]\n result = None\n if host in self._bad_hosts:\n logger.debug('Skipping %s due to bad host %s', url, host)\n else:\n req = Request(url, headers={'Accept-encoding': 'identity'})\n try:\n logger.debug('Fetching %s', url)\n resp = self.opener.open(req, timeout=self.timeout)\n logger.debug('Fetched %s', url)\n headers = resp.info()\n content_type = headers.get('Content-Type', '')\n if HTML_CONTENT_TYPE.match(content_type):\n final_url = resp.geturl()\n data = resp.read()\n encoding = headers.get('Content-Encoding')\n if encoding:\n decoder = self.decoders[encoding] # fail if not found\n data = decoder(data)\n encoding = 'utf-8'\n m = CHARSET.search(content_type)\n if m:\n encoding = m.group(1)\n try:\n data = data.decode(encoding)\n except UnicodeError: # pragma: no cover\n data = data.decode('latin-1') # fallback\n result = Page(data, final_url)\n self._page_cache[final_url] = result\n except HTTPError as e:\n if e.code != 404:\n logger.exception('Fetch failed: %s: %s', url, e)\n except URLError as e: # pragma: no cover\n logger.exception('Fetch failed: %s: %s', url, e)\n with self._lock:\n self._bad_hosts.add(host)\n except Exception as e: # pragma: no cover\n logger.exception('Fetch failed: %s: %s', url, e)\n finally:\n self._page_cache[url] = result # even if None (failure)\n return result\n\n _distname_re = re.compile(']*>([^<]+)<')\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 1086, "n_words": 199, "vocab_size": 111, "complexity": 14, "nloc": 49, "token_counts": 362, "n_ast_nodes": 624, "n_identifiers": 57, "random_cut": "def get_page(self, url):\n \n # http:", "d_id": 12842, "documentation": { "docstring": "\n Get the HTML for an URL, possibly from an in-memory cache.\n\n XXX TODO Note: this cache is never actually cleared. 
It's assumed that\n the data won't get stale over the lifetime of a locator instance (not\n necessarily true for the default_locator).\n ", "n_words": 41, "vocab_size": 36, "n_whitespaces": 77, "language": "en" } }, { "id": 20567, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/core.py", "file_name": "core.py", "fun_name": "enable_all_warnings", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def enable_all_warnings() -> None:\n \n __diag__.enable_all_warnings()\n\n\n# hide abstract class\ndel __config_flags\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 15, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 12, "n_ast_nodes": 28, "n_identifiers": 3, "random_cut": "def enable_all_warnings() -> None:\n \n __diag__.enable_all_warnings()\n\n\n# hide abstract class\ndel ", "d_id": 3429, "documentation": { "docstring": "\n Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 264300, "commit_id": "54834c47f8870e7faabcd847c3270da0bd3d2884", "repo": "netbox", "path": "netbox/netbox/views/generic/bulk_views.py", "file_name": "bulk_views.py", "fun_name": "export_yaml", "commit_message": "Refactor generic views; add plugins dev documentation", "code": "def export_yaml(self):\n \n yaml_data = [obj.to_yaml() for obj in self.queryset]\n\n return '---\\n'.join(yaml_data)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "def export_yaml(self):\n \n yaml_da", "d_id": 77672, "documentation": { "docstring": "\n Export the queryset of objects as concatenated YAML documents.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 206700, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/http.py", "file_name": "http.py", "fun_name": "url_has_allowed_host_and_scheme", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):\n \n if url is not None:\n url = url.strip()\n if not url:\n return False\n if allowed_hosts is None:\n allowed_hosts = set()\n elif isinstance(allowed_hosts, str):\n allowed_hosts = {allowed_hosts}\n # Chrome treats \\ completely as / in paths but it could be part of some\n # basic auth credentials so we need to check both URLs.\n return _url_has_allowed_host_and_scheme(\n url, allowed_hosts, require_https=require_https\n ) and _url_has_allowed_host_and_scheme(\n url.replace(\"\\\\\", \"/\"), allowed_hosts, 
require_https=require_https\n )\n\n\n# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 150, "n_words": 79, "vocab_size": 60, "complexity": 6, "nloc": 14, "token_counts": 83, "n_ast_nodes": 136, "n_identifiers": 10, "random_cut": "def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):\n \n if url is not None:\n url = url.strip()\n if not url:\n return False\n if allowed_hosts is None:\n allowed_hosts = set()\n elif isinstance(allowed_hosts, str):\n allowed_hosts = {allowed_hosts}\n # Chrome treats \\ completely as / in paths but it could be part of some\n # basic auth credentials so we need to check both URLs.\n return _url_has_allowed_host_and_scheme(\n url, allowed_hosts, require_https=require_https\n ) and _url_has_allowed_host_and_sche", "d_id": 51643, "documentation": { "docstring": "\n Return ``True`` if the url uses an allowed host and a safe scheme.\n\n Always return ``False`` on an empty url.\n\n If ``require_https`` is ``True``, only 'https' will be considered a valid\n scheme, as opposed to 'http' and 'https' with the default, ``False``.\n\n Note: \"True\" doesn't entail that a URL is \"safe\". It may still be e.g.\n quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri()\n on the path component of untrusted URLs.\n ", "n_words": 70, "vocab_size": 59, "n_whitespaces": 95, "language": "en" } }, { "id": 266523, "commit_id": "fee90b15a25b588bfb8a9ff047e851d43e78511f", "repo": "ansible", "path": "lib/ansible/module_utils/service.py", "file_name": "service.py", "fun_name": "get_ps", "commit_message": "Misc typo fixes in module_utils (#76564)", "code": "def get_ps(module, pattern):\n \n found = False\n if platform.system() == 'SunOS':\n flags = '-ef'\n else:\n flags = 'auxww'\n psbin = module.get_bin_path('ps', True)\n\n (rc, psout, pserr) = module.run_command([psbin, flags])\n if rc == 0:\n for line in psout.splitlines():\n if pattern in line:\n # FIXME: should add logic to prevent matching 'self', though that should be extremely rare\n found = True\n break\n return found\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 161, "n_words": 60, "vocab_size": 47, "complexity": 5, "nloc": 14, "token_counts": 81, "n_ast_nodes": 138, "n_identifiers": 15, "random_cut": "def get_ps(module, pattern):\n \n found = False\n if platform.system() == 'SunOS':\n flags = '-ef'\n else:\n flags = 'auxww'\n psbin = module.get_bin_path('ps', True)\n\n (rc, psout, pserr) = module.run_comm", "d_id": 78455, "documentation": { "docstring": "\n Last resort to find a service by trying to match pattern to programs in memory\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 22, "language": "en" } }, { "id": 211300, "commit_id": "34d7832946145006083b602d5d090f7f104e661e", "repo": "PaddleDetection", "path": "ppdet/data/transform/operators.py", "file_name": "operators.py", "fun_name": "apply", "commit_message": "[dev] add ppyoloe_plus configs and alter NormalizeImage (#6675)\n\n* [dev] add ppyoloe_plus configs and alter NormalizeImage\r\n\r\n* alter other NormalizeImage\r\n\r\n* alter cpp NormalizeImage", "code": "def apply(self, sample, context=None):\n \n im = sample['image']\n im = im.astype(np.float32, copy=False)\n if self.is_scale:\n scale = 1.0 / 255.0\n im *= scale\n\n if self.norm_type == 'mean_std':\n 
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n im -= mean\n im /= std\n sample['image'] = im\n return sample\n\n\n@register_op", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "@register_op", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 161, "n_words": 47, "vocab_size": 30, "complexity": 3, "nloc": 13, "token_counts": 114, "n_ast_nodes": 176, "n_identifiers": 17, "random_cut": "def apply(self, sample, context=None):\n \n ", "d_id": 53060, "documentation": { "docstring": "Normalize the image.\n Operators:\n 1.(optional) Scale the pixel to [0,1]\n 2.(optional) Each pixel minus mean and is divided by std\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 56, "language": "en" } }, { "id": 205412, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/base.py", "file_name": "base.py", "fun_name": "_check_m2m_through_same_relationship", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _check_m2m_through_same_relationship(cls):\n \n\n errors = []\n seen_intermediary_signatures = []\n\n fields = cls._meta.local_many_to_many\n\n # Skip when the target model wasn't found.\n fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))\n\n # Skip when the relationship model wasn't found.\n fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))\n\n for f in fields:\n signature = (\n f.remote_field.model,\n cls,\n f.remote_field.through,\n f.remote_field.through_fields,\n )\n if signature in seen_intermediary_signatures:\n errors.append(\n checks.Error(\n \"The model has two identical many-to-many relations \"\n \"through the intermediate model '%s'.\"\n % f.remote_field.through._meta.label,\n obj=cls,\n id=\"models.E003\",\n )\n )\n else:\n seen_intermediary_signatures.append(signature)\n return errors\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 460, "n_words": 88, "vocab_size": 53, "complexity": 7, "nloc": 26, "token_counts": 136, "n_ast_nodes": 215, "n_identifiers": 21, "random_cut": "def _check_m2m_through_same_relationship(cls):\n \n\n errors = []\n seen_intermediary_signatures = []\n\n fields = cls._meta.local_many_to_many\n\n # Skip when the target model wasn't found.\n fields = (f for f in fields if isinstance(f.remote_field.model, Mod", "d_id": 51119, "documentation": { "docstring": "Check if no relationship model is used by more than one m2m field.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 101350, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "plugins/extract/pipeline.py", "file_name": "pipeline.py", "fun_name": "image", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def image(self) -> \"np.ndarray\":\n \n assert self._image is not None\n return self._image\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 32, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 34, "n_identifiers": 3, "random_cut": "def image(self) -> \"np.ndarray\":\n \n assert self.", 
"d_id": 20765, "documentation": { "docstring": " :class:`numpy.ndarray`: The source frame for this object. ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 8, "language": "en" } }, { "id": 85880, "commit_id": "afbf9a3334ce9cad1a62fced372d7fcee40a3133", "repo": "sentry", "path": "tests/sentry/notifications/test_notifications.py", "file_name": "test_notifications.py", "fun_name": "test_sends_deployment_notification", "commit_message": "chore(notification): Pass User ID into notification analytics (#38924)\n\nWe pass in the actor_id to notification analytics events but we should\r\nalso include a user_id if the recipient is a user", "code": "def test_sends_deployment_notification(self, record_analytics):\n \n\n release = self.create_release()\n version_parsed = self.version_parsed = parse_release(release.version)[\"description\"]\n url = f\"/api/0/organizations/{self.organization.slug}/releases/{release.version}/deploys/\"\n with self.tasks():\n response = self.client.post(\n url, format=\"json\", data={\"environment\": self.environment.name}\n )\n assert response.status_code == 201, response.content\n\n msg = mail.outbox[0]\n # check the txt version\n assert f\"Version {version_parsed} was deployed to {self.environment.name} on\" in msg.body\n # check the html version\n assert (\n f\"Version {version_parsed} was deployed to {self.environment.name}\\n \\n\"\n in msg.alternatives[0][0]\n )\n\n attachment, text = get_attachment()\n\n assert (\n text\n == f\"Release {version_parsed} was deployed to {self.environment.name} for this project\"\n )\n assert (\n attachment[\"actions\"][0][\"url\"]\n == f\"http://testserver/organizations/{self.organization.slug}/releases/{release.version}/?project={self.project.id}&unselectedSeries=Healthy/\"\n )\n assert (\n attachment[\"footer\"]\n == f\"{self.project.slug} | \"\n )\n assert analytics_called_with_args(\n record_analytics,\n \"integrations.email.notification_sent\",\n user_id=self.user.id,\n actor_id=self.user.actor_id,\n organization_id=self.organization.id,\n )\n assert analytics_called_with_args(\n record_analytics,\n \"integrations.slack.notification_sent\",\n user_id=self.user.id,\n actor_id=self.user.actor_id,\n organization_id=self.organization.id,\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 512, "n_words": 113, "vocab_size": 67, "complexity": 1, "nloc": 42, "token_counts": 211, "n_ast_nodes": 433, "n_identifiers": 36, "random_cut": "def test_sends_deployment_notification(self, record_analytics):\n \n\n release = self.create_release()\n version_parsed = self.version_parsed = parse_release(release.version)[\"description\"]\n url", "d_id": 18055, "documentation": { "docstring": "\n Test that an email AND Slack notification are sent with\n the expected values when a release is deployed.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 40, "language": "en" } }, { "id": 65049, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/sales_invoice/sales_invoice.py", "file_name": "sales_invoice.py", "fun_name": "get_all_mode_of_payments", "commit_message": "style: format code with black", "code": "def get_all_mode_of_payments(doc):\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"company\": doc.company},\n\t\tas_dict=1,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 3, "n_words": 9, 
"vocab_size": 9, "complexity": 1, "nloc": 9, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 7, "random_cut": "def get_all_mode_of_payments(doc):\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"compa", "d_id": 13781, "documentation": { "docstring": "\n\t\tselect mpa.default_account, mpa.parent, mp.type as type\n\t\tfrom `tabMode of Payment Account` mpa,`tabMode of Payment` mp\n\t\twhere mpa.parent = mp.name and mpa.company = %(company)s and mp.enabled = 1", "n_words": 27, "vocab_size": 23, "n_whitespaces": 24, "language": "en" } }, { "id": 109744, "commit_id": "4896ec1a2cfb8c454e385632d8df213c915ced52", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/axes3d.py", "file_name": "axes3d.py", "fun_name": "_on_move", "commit_message": "Add pan and zoom toolbar handling to 3D Axes (Replaces PR#22614) (#23449)\n\n* ENH: Add pan and zoom toolbar handling to 3D Axes\r\n\r\n1) This moves the pan logic that was already in the mouse move handler\r\ninto the \"drag_pan\" method to make it available from the toolbar.\r\n\r\n2) This expands upon the panning logic to enable a zoom-to-box feature.\r\nThe zoom-to-box is done relative to the Axes, so it shrinks/expands\r\nthe box as a fraction of each delta, from lower-left Axes to lower-left\r\nzoom-box. Thus, it tries to handle non-centered zooms, which adds more\r\ncases to handle versus the current right-click zoom only scaling from\r\nthe center of the projection.\r\n\r\n* Rewrite zooming with bounding box\r\n\r\n* Rewrite 3d panning to work with a roll angle\r\n\r\n* Whats new for zoom and pan buttons\r\n\r\n* Make pan button configurable\r\n\r\n* Do not jump when zooming and mouse goes over other subplot\r\n\r\n* Rework zooming for 3d plots\r\n\r\n* Handle x/y lock when zooming and panning\r\n\r\n* Update tests\r\n\r\n* Docstrings\r\n\r\n* Dont assume a scale_z\r\n\r\n* Limit zoom box\r\n\r\n* Test zoom pan key modifiers\r\n\r\n* Save some calculation by saving view axes\r\n\r\n* Deprecation warnings for Axes3D.eye, .vvec\r\n\r\n* Remove Axes3D._prepare_view_from_bbox for now\r\n\r\n* Comments and docstrings\r\n\r\n* Switch from uvn to uvw\r\n\r\n* Save aspect to axes\r\n\r\n* Constrain zooming with mouse when one of the equal aspect ratios is set\r\n\r\n* Cleanup\r\n\r\n* Cleanup\r\n\r\n* Consolidate finding equal aspect axis indices\r\n\r\n* linting\r\n\r\n* More intuitive scaling\r\n\r\n* Box zoom keeps existing aspect ratios\r\n\r\n* Linting\r\n\r\n* Code review comments\r\n\r\n* Revert parameters for view_transformation\r\n\r\n* Fix new 3d pan/zoom view going on view stack twice\r\n\r\n* Better clipping\r\n\r\n* Test 3d toolbar navigation\r\n\r\n* Privatize helper functions\r\n\r\n* Deprecations\r\n\r\n* Code review changes\r\n\r\n* Deprecation note\r\n\r\n* Undeprecate proj3d.view_transformation\r\n\r\n* Undeprecate proj3d.view_transformation\r\n\r\n* Update doc/api/next_api_changes/deprecations/23449-SS.rst\r\n\r\n\r\nCo-authored-by: Greg Lucas \r\nCo-authored-by: Scott Shambaugh \r\nCo-authored-by: Oscar Gustafsson ", "code": "def _on_move(self, event):\n \n\n if not self.button_pressed:\n return\n\n if self.get_navigate_mode() is not None:\n # we don't want to rotate if we are zooming/panning\n # from the toolbar\n return\n\n if self.M is None:\n return\n\n x, y = event.xdata, event.ydata\n # In case the mouse is out of bounds.\n if x is None or event.inaxes != self:\n return\n\n dx, dy = x - self._sx, y - self._sy\n w = self._pseudo_w\n h = self._pseudo_h\n\n # Rotation\n if self.button_pressed in self._rotate_btn:\n # rotate viewing 
point\n # get the x and y pixel coords\n if dx == 0 and dy == 0:\n return\n\n roll = np.deg2rad(self.roll)\n delev = -(dy/h)*180*np.cos(roll) + (dx/w)*180*np.sin(roll)\n dazim = -(dy/h)*180*np.sin(roll) - (dx/w)*180*np.cos(roll)\n self.elev = self.elev + delev\n self.azim = self.azim + dazim\n self.stale = True\n\n elif self.button_pressed in self._pan_btn:\n # Start the pan event with pixel coordinates\n px, py = self.transData.transform([self._sx, self._sy])\n self.start_pan(px, py, 2)\n # pan view (takes pixel coordinate input)\n self.drag_pan(2, None, event.x, event.y)\n self.end_pan()\n\n # Zoom\n elif self.button_pressed in self._zoom_btn:\n # zoom view (dragging down zooms in)\n scale = h/(h - dy)\n self._scale_axis_limits(scale, scale, scale)\n\n # Store the event coordinates for the next time through.\n self._sx, self._sy = x, y\n # Always request a draw update at the end of interaction\n self.figure.canvas.draw_idle()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 615, "n_words": 203, "vocab_size": 125, "complexity": 11, "nloc": 32, "token_counts": 306, "n_ast_nodes": 490, "n_identifiers": 44, "random_cut": "def _on_move(self, event):\n \n\n if not self.button_pressed:\n return\n\n if self.get_navigate_mode() is not None:\n # we don't want to rotate if we are zooming/panning\n # from the toolbar\n return\n\n if self.M is None:\n return\n\n x, y = event.xdata, event.ydata\n # In case the mouse is out of bounds.\n if x is None or event.inaxes != self:\n return\n\n dx, dy = x - self._sx, y - self._sy\n w = self._pseudo_w\n h = self._pseudo_h\n\n # Rotation\n if self.button_pressed in self._rotate_btn:\n # rotate viewing point\n # get the x and y pixel coords\n if dx == 0 and dy == 0:\n return\n\n roll = np.deg2rad(self.roll)\n delev = -(dy/h)*180*np.cos(roll) + (dx/w)*180*np.sin(roll)\n dazim = -(d", "d_id": 23728, "documentation": { "docstring": "\n Mouse moving.\n\n By default, button-1 rotates, button-2 pans, and button-3 zooms;\n these buttons can be modified via `mouse_init`.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 47, "language": "en" } }, { "id": 215075, "commit_id": "fbcc707e76f11770712e6828155258ac61e00ff8", "repo": "salt", "path": "salt/modules/aixpkg.py", "file_name": "aixpkg.py", "fun_name": "install", "commit_message": "work in progress while resolve issue of python3_32 usage by dnf and yum", "code": "def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):\n \n targets = salt.utils.args.split_input(pkgs) if pkgs else [name]\n if not targets:\n return {}\n\n if pkgs:\n log.debug(\"Removing these fileset(s)/rpm package(s) %s: %s\", name, targets)\n\n # Get a list of the currently installed pkgs.\n old = list_pkgs()\n\n # Install the fileset (normally ends with bff or rte) or rpm package(s)\n errors = []\n for target in targets:\n filename = os.path.basename(target)\n if filename.endswith(\".bff\") or filename.endswith(\".rte\"):\n if _is_installed(target):\n continue\n\n cmd = \"/usr/sbin/installp -acYXg\"\n if test:\n cmd += \"p\"\n cmd += \" -d \"\n dirpath = os.path.dirname(target)\n cmd += dirpath + \" \" + filename\n out = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n else:\n if _is_installed_rpm(filename.split(\".aix\")[0]):\n continue\n\n # assume use dnf or yum\n cmdflags = \" install --allowerasing \"\n if pathlib.Path(\"/opt/freeware/bin/dnf\").is_file():\n cmdexe = \"/opt/freeware/bin/dnf\"\n if test:\n 
cmdflags += \" --assumeno\"\n else:\n cmdflags += \" --assumeyes\"\n if refresh:\n cmdflags += \" --refresh\"\n\n elif pathlib.Path(\"/opt/freeware/bin/yum\").is_file():\n cmdexe = \"/opt/freeware/bin/yum\"\n if test:\n cmdflags += \" --assumeno\"\n else:\n cmdflags += \" --assumeyes\"\n if refresh:\n cmdflags += \" --refresh\"\n\n elif pathlib.Path(\"/usr/bin/yum\").is_file():\n cmdexe = \"/usr/bin/yum\"\n if test:\n cmdflags += \" --assumeno\"\n else:\n cmdflags += \" --assumeyes\"\n else:\n cmdexe = \"/usr/bin/rpm\"\n cmdflags = \" -Uivh \"\n if test:\n cmdflags += \" --test\"\n\n cmd = [cmdexe, cmdflags, target]\n out = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n\n if 0 != out[\"retcode\"]:\n errors.append(out[\"stderr\"])\n\n # Get a list of the packages after the uninstall\n __context__.pop(\"pkg.list_pkgs\", None)\n new = list_pkgs()\n ret = salt.utils.data.compare_dicts(old, new)\n\n if errors:\n raise CommandExecutionError(\n \"Problems encountered installing filesets(s)/package(s)\",\n info={\"changes\": ret, \"errors\": errors},\n )\n\n # No error occurred\n if test:\n return \"Test succeeded.\"\n\n return ret\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 998, "n_words": 248, "vocab_size": 129, "complexity": 22, "nloc": 66, "token_counts": 371, "n_ast_nodes": 663, "n_identifiers": 46, "random_cut": "def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):\n \n targets = salt.utils.args.split_input(pkgs) if pkgs else [name]\n if not targets:\n return {}\n\n if pkgs:\n log.debug(\"Removing these fileset(s)/rpm package(s) %s: %s\", name, targets)\n\n # Get a list of the currently installed pkgs.\n old = list_pkgs()\n\n # Install the fileset (normally ends with bff or rte) or rpm package(s)\n errors = []\n for target in targets:\n filename = os.path.basename(target)\n if filename.endswith(\".bff\") or filename.endswith(\".rte\"):\n if _is_installed(target):\n continue\n\n cmd = \"/usr/sbin/installp -acYXg\"\n if test:\n cmd += \"p\"\n cmd += \" -d \"\n dirpath = os.path.dirname(target)\n cmd += dirpath + \" \" + filename\n out = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n else:\n if _is_installed_rpm(filename.split(\".aix\")[0]):\n continue\n\n # assume use dnf or yum\n cmdflags = \" install --allowerasing \"\n if pathlib.Path(\"/opt/freeware/bin/dnf\").is_file():\n cmdexe = \"/opt/freeware/bin/dnf\"\n if test:\n cmdflags += \" --assumeno\"\n else:\n cmdflags += \" --assumeyes\"\n ", "d_id": 53793, "documentation": { "docstring": "\n Install the named fileset(s)/rpm package(s).\n\n .. versionadded:: 3005\n\n preference to install rpm packages are to use in the following order:\n /opt/freeware/bin/dnf\n /opt/freeware/bin/yum\n /usr/bin/yum\n /usr/bin/rpm\n\n Note: use of rpm to install implies that rpm's dependencies must have been previously installed.\n dnf and yum automatically install rpm's dependencies as part of the install process\n\n name\n The name of the fileset or rpm package to be installed.\n\n refresh\n Whether or not to update the yum database before executing.\n\n\n Multiple Package Installation Options:\n\n pkgs\n A list of filesets and/or rpm packages to install.\n Must be passed as a python list. 
The ``name`` parameter will be\n ignored if this option is passed.\n\n version\n Install a specific version of a fileset/rpm package.\n (Unused at present).\n\n test\n Verify that command functions correctly:\n\n Returns a dict containing the new fileset(s)/rpm package(s) names and versions:\n\n {'': {'old': '',\n 'new': ''}}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm\n salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True\n salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff\n salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte\n salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base\n salt '*' pkg.install pkgs='[\"foo\", \"bar\"]'\n ", "n_words": 172, "vocab_size": 115, "n_whitespaces": 405, "language": "en" } }, { "id": 337705, "commit_id": "873dcc63a461558152eec20af991482204e8248f", "repo": "accelerate", "path": "src/accelerate/utils/deepspeed.py", "file_name": "deepspeed.py", "fun_name": "is_false", "commit_message": "Migrate HFDeepSpeedConfig from trfrs to accelerate (#432)\n\n* Migrate HFDeepSpeedConfig from trfrs to accelerate\r\n\r\n* update state.py to resolve comments\r\n\r\n1. Adds static method to have a simple API for integrating deepspeed config in transformers trainer.\r\n\r\n* reverting changes and addressing comments\r\n\r\n* Marking DepSpeed and FSDP as experimental in accelerate", "code": "def is_false(self, ds_key_long):\n \n value = self.get_value(ds_key_long)\n return False if value is None else not bool(value)\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 36, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 3, "token_counts": 28, "n_ast_nodes": 46, "n_identifiers": 6, "random_cut": "def is_false(self, ds_key_long):\n \n value = self.get_value(ds_key_long)\n return False if value is None else not bool(value)\n", "d_id": 121108, "documentation": { "docstring": "\n Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very\n specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set).\n ", "n_words": 38, "vocab_size": 30, "n_whitespaces": 60, "language": "en" } }, { "id": 6647, "commit_id": "a95f611d582a724740af772ead1fa439b3713124", "repo": "ludwig", "path": "scripts/extract_schema.py", "file_name": "extract_schema.py", "fun_name": "extract_pytorch_structures", "commit_message": "fix: Naming scheme cleanup that includes: renaming `ludwig.marshmallow` module to `ludwig.validation` to avoid implicit import errors, and moving `ludwig.utils.schema` into this new module. 
(#1936)\n\n* Rename marshmallow/ folder to marshmallow_schema_utils/, marshmallow_schema_utils.py to utils.py (under folder), update all refs.\r\n\r\n* Rename marshmallow/ folder to marshmallow_schema_utils/, marshmallow_schema_utils.py to utils.py (under folder), update all refs.\r\n\r\n* update extract_schema\r\n\r\n* update generated files.\r\n\r\n* update manifest\r\n\r\n* rename using validation/schema_utils naming\r\n\r\n* update generated files\r\n\r\n* new naming scheme\r\n\r\n* fix imports.\r\n\r\n* rerun extract_schema", "code": "def extract_pytorch_structures():\n \n for opt in lmo.optimizer_registry:\n # Get the torch class:\n optimizer_class = lmo.optimizer_registry[opt][0]\n\n # Parse and clean the class structure:\n path = get_fully_qualified_class_name(optimizer_class)\n opt_struct = get_pytkdocs_structure_for_path(path, \"google\")[\"objects\"][0]\n prune_pytorch_structures(opt_struct)\n\n # Write it to a file:\n parent_dir = str(Path(__file__).parent.parent)\n filename = os.path.join(parent_dir, \"ludwig/validation/generated/torch/\", optimizer_class.__name__) + \".json\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, \"w\") as outfile:\n json.dump(\n opt_struct,\n outfile,\n indent=4,\n sort_keys=True,\n separators=(\",\", \": \"),\n )\n outfile.write(\"\\n\")\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 253, "n_words": 62, "vocab_size": 55, "complexity": 2, "nloc": 18, "token_counts": 136, "n_ast_nodes": 229, "n_identifiers": 30, "random_cut": "def extract_pytorch_structures():\n \n for opt in lmo.optimizer_registry:\n # Get the torch class:\n optimizer_class = lmo.optimizer_registry[opt][0]\n\n # Parse and clean the class structure:\n path = get_fully_qualified_class_name(optimizer_class)\n opt_struct = get_pytkdocs_structure_for_path(path, \"google\")[\"objects\"][0]\n prune_pytorch_structures(opt_struct)\n\n # Write it to a file:\n ", "d_id": 1042, "documentation": { "docstring": "Extracts and saves the parsed structure of all pytorch classes referenced in\n `ludwig.modules.optimization_modules.optimizer_registry` as JSON files under\n `ludwig/validation/generated/torch/`.", "n_words": 18, "vocab_size": 18, "n_whitespaces": 23, "language": "en" } }, { "id": 116777, "commit_id": "4e12722621c12ca2b2b075421f30e5ae8a58ebe8", "repo": "mindsdb", "path": "tests/unit/test_ml_handlers.py", "file_name": "test_ml_handlers.py", "fun_name": "test_hf_classification_bin", "commit_message": "huggingface handler in new ml handler api\n- permanent is property of handler", "code": "def test_hf_classification_bin(self, mock_handler):\n\n\n # create predictor\n create_sql = \n\n model_name = 'spam_classifier'\n\n predict_sql = \n self.hf_test_run(mock_handler, model_name, create_sql, predict_sql)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 53, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 17, "token_counts": 28, "n_ast_nodes": 49, "n_identifiers": 7, "random_cut": "def test_hf_classification_bin(self, mock_handler):\n\n\n # create predictor\n create_sql = \n\n model_name = 'spam_classifier'\n\n predict_sql = \n self.hf_test_run(mock_handler, model_", "d_id": 25830, "documentation": { "docstring": "\n CREATE PREDICTOR huggingface.spam_classifier\n predict PRED\n USING\n task='text-classification',\n model_name= 
\"mrm8488/bert-tiny-finetuned-sms-spam-detection\",\n input_column = 'text_spammy',\n labels=['ham','spam']\n \n SELECT h.*\n FROM pg.df as t \n JOIN huggingface.spam_classifier as h\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 166, "language": "en" } }, { "id": 109439, "commit_id": "c73f4c455514cf5422d27bf38c93250de8316b21", "repo": "matplotlib", "path": "lib/matplotlib/_constrained_layout.py", "file_name": "_constrained_layout.py", "fun_name": "match_submerged_margins", "commit_message": "Merge SubplotBase into AxesBase.", "code": "def match_submerged_margins(layoutgrids, fig):\n \n\n for sfig in fig.subfigs:\n match_submerged_margins(layoutgrids, sfig)\n\n axs = [a for a in fig.get_axes()\n if a.get_subplotspec() is not None and a.get_in_layout()]\n\n for ax1 in axs:\n ss1 = ax1.get_subplotspec()\n if ss1.get_gridspec() not in layoutgrids:\n axs.remove(ax1)\n continue\n lg1 = layoutgrids[ss1.get_gridspec()]\n\n # interior columns:\n if len(ss1.colspan) > 1:\n maxsubl = np.max(\n lg1.margin_vals['left'][ss1.colspan[1:]] +\n lg1.margin_vals['leftcb'][ss1.colspan[1:]]\n )\n maxsubr = np.max(\n lg1.margin_vals['right'][ss1.colspan[:-1]] +\n lg1.margin_vals['rightcb'][ss1.colspan[:-1]]\n )\n for ax2 in axs:\n ss2 = ax2.get_subplotspec()\n lg2 = layoutgrids[ss2.get_gridspec()]\n if lg2 is not None and len(ss2.colspan) > 1:\n maxsubl2 = np.max(\n lg2.margin_vals['left'][ss2.colspan[1:]] +\n lg2.margin_vals['leftcb'][ss2.colspan[1:]])\n if maxsubl2 > maxsubl:\n maxsubl = maxsubl2\n maxsubr2 = np.max(\n lg2.margin_vals['right'][ss2.colspan[:-1]] +\n lg2.margin_vals['rightcb'][ss2.colspan[:-1]])\n if maxsubr2 > maxsubr:\n maxsubr = maxsubr2\n for i in ss1.colspan[1:]:\n lg1.edit_margin_min('left', maxsubl, cell=i)\n for i in ss1.colspan[:-1]:\n lg1.edit_margin_min('right', maxsubr, cell=i)\n\n # interior rows:\n if len(ss1.rowspan) > 1:\n maxsubt = np.max(\n lg1.margin_vals['top'][ss1.rowspan[1:]] +\n lg1.margin_vals['topcb'][ss1.rowspan[1:]]\n )\n maxsubb = np.max(\n lg1.margin_vals['bottom'][ss1.rowspan[:-1]] +\n lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]]\n )\n\n for ax2 in axs:\n ss2 = ax2.get_subplotspec()\n lg2 = layoutgrids[ss2.get_gridspec()]\n if lg2 is not None:\n if len(ss2.rowspan) > 1:\n maxsubt = np.max([np.max(\n lg2.margin_vals['top'][ss2.rowspan[1:]] +\n lg2.margin_vals['topcb'][ss2.rowspan[1:]]\n ), maxsubt])\n maxsubb = np.max([np.max(\n lg2.margin_vals['bottom'][ss2.rowspan[:-1]] +\n lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]]\n ), maxsubb])\n for i in ss1.rowspan[1:]:\n lg1.edit_margin_min('top', maxsubt, cell=i)\n for i in ss1.rowspan[:-1]:\n lg1.edit_margin_min('bottom', maxsubb, cell=i)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 26, "n_whitespaces": 1147, "n_words": 190, "vocab_size": 91, "complexity": 21, "nloc": 64, "token_counts": 623, "n_ast_nodes": 986, "n_identifiers": 33, "random_cut": "def match_submerged_margins(layoutgrids, fig):\n \n\n for sfig in fig.subfigs:\n match_submerged_margins(layoutgrids, sfig)\n\n axs = [a for a in fig.get_axes()\n if a.get_subplotspec() is not None and a.get_in_layout()]\n\n for ax1 in axs:\n ss1 = ax1.get_subplotspec()\n if ss1.get_gridspec() not in layoutgrids:\n axs.remove(ax1)\n continue\n lg1 = layoutgrids[ss1.get_gridspec()]\n\n # interior columns:\n if len(ss1.colspan) > 1:\n maxsubl = np.max(\n lg1.margin_vals['left'][ss1.colspan[1:]] +\n lg1.margin_vals['leftcb'][ss1.colspan[1:]]\n )\n maxsubr = np.max(\n 
lg1.margin_vals['right'][ss1.colspan[:-1]] +\n lg1.margin_vals['rightcb'][ss1.colspan[:-1]]\n )\n for ax2 in axs:\n ss2 = ax2.get_subplotspec()\n lg2 = layoutgrids[ss2.get_gridspec()]\n if lg2 is not None and len(ss2.colspan) > 1:\n maxsubl2 = np.max(\n lg2.margin_vals['left'][ss2.colspan[1:]] +\n lg2.margin_vals['leftcb'][ss2.colspan[1:]])\n if maxsubl2 > maxsubl:\n ", "d_id": 23586, "documentation": { "docstring": "\n Make the margins that are submerged inside an Axes the same size.\n\n This allows axes that span two columns (or rows) that are offset\n from one another to have the same size.\n\n This gives the proper layout for something like::\n fig = plt.figure(constrained_layout=True)\n axs = fig.subplot_mosaic(\"AAAB\\nCCDD\")\n\n Without this routine, the axes D will be wider than C, because the\n margin width between the two columns in C has no width by default,\n whereas the margins between the two columns of D are set by the\n width of the margin between A and B. However, obviously the user would\n like C and D to be the same size, so we need to add constraints to these\n \"submerged\" margins.\n\n This routine makes all the interior margins the same, and the spacing\n between the three columns in A and the two column in C are all set to the\n margins between the two columns of D.\n\n See test_constrained_layout::test_constrained_layout12 for an example.\n ", "n_words": 158, "vocab_size": 87, "n_whitespaces": 218, "language": "en" } }, { "id": 245473, "commit_id": "035b915983ace07533f1a718a983315d126f3a40", "repo": "mmdetection", "path": "mmdet/version.py", "file_name": "version.py", "fun_name": "parse_version_info", "commit_message": "[Enhance] Update mmdet, mmcv, and mmdet version in MMDetection (#8417)\n\n* Update dev-3.x circleci (#8396)\r\n\r\n* update circleci\r\n\r\n* update test config\r\n\r\n* tmp delete github action\r\n\r\n* update\r\n\r\n* tmp reduce the coverage requirement\r\n\r\n* update branch\r\n\r\n* update branch\r\n\r\n* [Fix] Fix metafile 3.x (#8404)\r\n\r\n* update reppoints readme and metafile\r\n\r\n* update openimages metafile\r\n\r\n* update faster rcnn readme and metafile\r\n\r\n* update convnext readme and metafile\r\n\r\n* update guided_anchoring metafile\r\n\r\n* update groie metafile and readme\r\n\r\n* update reppoints readme and metafile\r\n\r\n* update metafile\r\n\r\n* update metafile\r\n\r\n* release ld and mask_rcnn models\r\n\r\n* update metafile\r\n\r\n* update regnet metafile\r\n\r\n* fix markdown format\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* Update README.md\r\n\r\n* update md format\r\n\r\n* release lad\r\n\r\n* rename\r\n\r\n* rename\r\n\r\n* update solov2 metafile\r\n\r\n* update cascase rcnn metafile\r\n\r\n* [Doc]: fix markdown version (#8408)\r\n\r\n* [Enhance] Update mmdet, mmcv, and mmdet version in MMDetection\r\n\r\n* Fix anchor_free load_from_state_dict\r\n\r\nCo-authored-by: RangiLyu \r\nCo-authored-by: Cedric Luo \r\nCo-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>", "code": "def parse_version_info(version_str):\n \n version_info = []\n for x in version_str.split('.'):\n if x.isdigit():\n version_info.append(int(x))\n elif x.find('rc') != -1:\n patch_version = x.split('rc')\n version_info.append(int(patch_version[0]))\n version_info.append(f'rc{patch_version[1]}')\n return tuple(version_info)\n\n\nversion_info = parse_version_info(__version__)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 95, "n_words": 26, "vocab_size": 23, "complexity": 4, "nloc": 10, "token_counts": 79, "n_ast_nodes": 156, "n_identifiers": 12, "random_cut": "def parse_version_info(version_str):\n \n version_info = []\n for x in version_str.split('.'):\n if x.isdigit():\n version_info.append(int(x))\n elif x.find('rc') != -1:\n patch_version = x.split('rc')\n version_info.append(int(patch_version[0]))\n version_info.append(f'rc{patch_version[1]}')\n return tuple(version_info)\n\n\nversion_", "d_id": 70805, "documentation": { "docstring": "Parse a version string into a tuple.\n\n Args:\n version_str (str): The version string.\n Returns:\n tuple[int | str]: The version info, e.g., \"1.3.0\" is parsed into\n (1, 3, 0), and \"2.0.0rc1\" is parsed into (2, 0, 0, 'rc1').\n ", "n_words": 37, "vocab_size": 28, "n_whitespaces": 71, "language": "en" } }, { "id": 97275, "commit_id": "8429cf33623b759a3ff7bddcf13d251b0dab9b8e", "repo": "sentry", "path": "src/sentry/utils/pytest/relay.py", "file_name": "relay.py", "fun_name": "adjust_settings_for_relay_tests", "commit_message": "feat: Improve relay debug in CI (#32625)", "code": "def adjust_settings_for_relay_tests(settings):\n \n settings.ALLOWED_HOSTS = [\n \"localhost\",\n \"testserver\",\n \"host.docker.internal\",\n \"0.0.0.0\",\n \"127.0.0.1\",\n ]\n settings.KAFKA_CLUSTERS = {\n \"default\": {\n \"common\": {\"bootstrap.servers\": \"127.0.0.1:9092\"},\n \"producers\": {\n \"compression.type\": \"lz4\",\n \"message.max.bytes\": 50000000, # 50MB, default is 1MB\n },\n }\n }\n settings.SENTRY_RELAY_WHITELIST_PK = [\"SMSesqan65THCV6M4qs4kBzPai60LzuDn-xNsvYpuP8\"]\n settings.SENTRY_USE_RELAY = True\n\n\n@pytest.fixture", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 173, "n_words": 40, "vocab_size": 34, "complexity": 1, "nloc": 19, "token_counts": 65, "n_ast_nodes": 132, "n_identifiers": 8, "random_cut": "def adjust_settings_for_relay_tests(settings):\n \n settings.ALLOWED_HOSTS = [\n \"localhost\",\n \"testserver\",\n \"host.docker", "d_id": 19398, "documentation": { "docstring": "\n Adjusts the application settings to accept calls from a Relay instance running inside a\n docker container.\n\n :param settings: the app settings\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 34, "language": "en" } }, { "id": 121234, "commit_id": "c0ec3b33e687ce37b431906109d4a2bc4655285f", "repo": "jax", "path": "jax/_src/api.py", "file_name": "api.py", "fun_name": "clear_backends", "commit_message": "Introduce jax.experimental.clear_backends to delete all JAX runtime backends.\n\nIn cases like unit tests, users may want to clean up all the backends along with the resources used in the end of the test, and reinitialize them in the next test.\n\nPiperOrigin-RevId: 462239974", "code": "def clear_backends():\n \n\n if xc._version < 79:\n raise RuntimeError(\"clear_backends is not supported in the jaxlib used.\"\n \"Please update your jaxlib package.\")\n\n xb._clear_backends()\n jax.lib.xla_bridge._backends = {}\n dispatch.xla_callable.cache_clear() # type: ignore\n dispatch.xla_primitive_callable.cache_clear()\n _cpp_jit_cache.clear()\n jax_jit.CompiledFunctionCache.clear_all()\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 31, "vocab_size": 30, "complexity": 2, "nloc": 10, "token_counts": 59, "n_ast_nodes": 
108, "n_identifiers": 19, "random_cut": "def clear_backends():\n \n\n if xc._version < 79:\n rais", "d_id": 27047, "documentation": { "docstring": "\n Clear all backend clients so that new backend clients can be created later.\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 16, "language": "en" } }, { "id": 265205, "commit_id": "fcd1daaf798d62023f999c3e09e035f7b3f47c8f", "repo": "netbox", "path": "netbox/dcim/models/racks.py", "file_name": "racks.py", "fun_name": "get_power_utilization", "commit_message": "Update power utilization calculations for new cabling model", "code": "def get_power_utilization(self):\n \n powerfeeds = PowerFeed.objects.filter(rack=self)\n available_power_total = sum(pf.available_power for pf in powerfeeds)\n print(f'available_power_total: {available_power_total}')\n if not available_power_total:\n return 0\n\n powerports = []\n for powerfeed in powerfeeds:\n powerports.extend([\n peer for peer in powerfeed.link_peers if isinstance(peer, PowerPort)\n ])\n\n allocated_draw = 0\n for powerport in powerports:\n allocated_draw += powerport.get_power_draw()['allocated']\n print(f'allocated_draw: {allocated_draw}')\n\n return int(allocated_draw / available_power_total * 100)\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 190, "n_words": 54, "vocab_size": 39, "complexity": 7, "nloc": 16, "token_counts": 103, "n_ast_nodes": 175, "n_identifiers": 23, "random_cut": "def get_power_utilization(self):\n \n powerfeeds = PowerFeed.objects.filter(rack=self)\n available_power_total = sum(pf.available_power for pf in powerfeeds)\n print(f'available_power_total: {available_power_total}')\n if not available_power_total:\n return 0\n\n pow", "d_id": 78027, "documentation": { "docstring": "\n Determine the utilization rate of power in the rack and return it as a percentage.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 30, "language": "en" } }, { "id": 81631, "commit_id": "67190100500819eb1237c4572accafa72816ae54", "repo": "awx", "path": "awx/main/dispatch/pool.py", "file_name": "pool.py", "fun_name": "cleanup", "commit_message": "Add back in cleanup call", "code": "def cleanup(self):\n \n orphaned = []\n for w in self.workers[::]:\n if not w.alive:\n # the worker process has exited\n # 1. take the task it was running and enqueue the error\n # callbacks\n # 2. 
take any pending tasks delivered to its queue and\n # send them to another worker\n logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))\n if w.current_task:\n if w.current_task != 'QUIT':\n try:\n for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):\n reaper.reap_job(j, 'failed')\n except Exception:\n logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))\n orphaned.extend(w.orphaned_tasks)\n self.workers.remove(w)\n elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down:\n # the process has an empty queue (it's idle) and we have\n # more processes in the pool than we need (> min)\n # send this process a message so it will exit gracefully\n # at the next opportunity\n logger.info(f'scaling down worker pid:{w.pid} prior total:{len(self.workers)}')\n w.quit()\n self.workers.remove(w)\n if w.alive:\n # if we discover a task manager invocation that's been running\n # too long, reap it (because otherwise it'll just hold the postgres\n # advisory lock forever); the goal of this code is to discover\n # deadlocks or other serious issues in the task manager that cause\n # the task manager to never do more work\n current_task = w.current_task\n if current_task and isinstance(current_task, dict):\n endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']\n current_task_name = current_task.get('task', '')\n if any(current_task_name.endswith(e) for e in endings):\n if 'started' not in current_task:\n w.managed_tasks[current_task['uuid']]['started'] = time.time()\n age = time.time() - current_task['started']\n w.managed_tasks[current_task['uuid']]['age'] = age\n if age > self.task_manager_timeout:\n logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')\n os.kill(w.pid, signal.SIGTERM)\n\n for m in orphaned:\n # if all the workers are dead, spawn at least one\n if not len(self.workers):\n self.up()\n idx = random.choice(range(len(self.workers)))\n self.write(idx, m)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 1121, "n_words": 270, "vocab_size": 171, "complexity": 18, "nloc": 36, "token_counts": 330, "n_ast_nodes": 602, "n_identifiers": 52, "random_cut": "def cleanup(self):\n \n orphaned = []\n for w in self.workers[::]:\n if not w.alive:\n # the worker process has exited\n # 1. take the task it was running and enqueue the error\n # callbacks\n # 2. take any pending tasks delivered to its queue and\n # send them to another worker\n logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))\n if w.current_task:\n if w.current_task != 'QUIT':\n try:\n for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):\n reaper.reap_job(j, 'failed')\n except Exception:\n logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))\n orphaned.extend(w.orphaned_tasks)\n self.workers.remove(w)\n elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down:\n # the process has an empty queue (it's idle) and we have\n # more processes in the pool than we need (> min)\n # send this process a message so it will exit gracefully\n # at the next opportunity\n logger", "d_id": 17238, "documentation": { "docstring": "\n Perform some internal account and cleanup. This is run on\n every cluster node heartbeat:\n\n 1. Discover worker processes that exited, and recover messages they\n were handling.\n 2. 
Clean up unnecessary, idle workers.\n\n IMPORTANT: this function is one of the few places in the dispatcher\n (aside from setting lookups) where we talk to the database. As such,\n if there's an outage, this method _can_ throw various\n django.db.utils.Error exceptions. Act accordingly.\n ", "n_words": 69, "vocab_size": 64, "n_whitespaces": 149, "language": "en" } }, { "id": 202366, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_token_node_empty_csrf_cookie", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_token_node_empty_csrf_cookie(self):\n \n req = self._get_request(cookie=\"\")\n mw = CsrfViewMiddleware(token_view)\n mw.process_view(req, token_view, (), {})\n resp = token_view(req)\n\n token = get_token(req)\n self.assertIsNotNone(token)\n csrf_secret = _unmask_cipher_token(token)\n self._check_token_present(resp, csrf_secret)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 9, "token_counts": 68, "n_ast_nodes": 113, "n_identifiers": 16, "random_cut": "def test_token_node_empty_csrf_cookie(self):\n \n req = self._get_request(cookie=\"\")\n mw = CsrfViewMiddleware(token_view)\n mw.process_view(req, token_view, (), {})\n resp = token_view(req)\n\n token = get_token(req)\n self.assertIsNotNone(token)\n csrf_secret = _unmask_cipher_token(token)\n ", "d_id": 50084, "documentation": { "docstring": "\n A new token is sent if the csrf_cookie is the empty string.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 27, "language": "en" } }, { "id": 106865, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/__init__.py", "file_name": "__init__.py", "fun_name": "save", "commit_message": "apply black py to all python files", "code": "def save(self, envs):\n \n assert isinstance(envs, list), \"envs should be a list\"\n if len(envs) > 0:\n for env in envs:\n assert isstr(env), \"env should be a string\"\n\n return self._send(\n {\n \"data\": envs,\n },\n \"save\",\n )\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 143, "n_words": 34, "vocab_size": 30, "complexity": 3, "nloc": 11, "token_counts": 52, "n_ast_nodes": 86, "n_identifiers": 9, "random_cut": "def save(self, envs):\n \n assert isinstance(envs, list), \"envs should be a list\"\n if le", "d_id": 22483, "documentation": { "docstring": "\n This function allows the user to save envs that are alive on the\n Tornado server. 
The envs can be specified as a list of env ids.\n ", "n_words": 26, "vocab_size": 24, "n_whitespaces": 48, "language": "en" } }, { "id": 207885, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_has_related_field_in_list_display_o2o", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_has_related_field_in_list_display_o2o(self):\n \n media = Media.objects.create(name=\"Foo\")\n Vodcast.objects.create(media=media)\n response = self.client.get(reverse(\"admin:admin_views_vodcast_changelist\"), {})\n\n response.context[\"cl\"].list_display = [\"media\"]\n self.assertIs(response.context[\"cl\"].has_related_field_in_list_display(), True)\n\n response.context[\"cl\"].list_display = [\"media_id\"]\n self.assertIs(response.context[\"cl\"].has_related_field_in_list_display(), False)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 20, "vocab_size": 15, "complexity": 1, "nloc": 8, "token_counts": 102, "n_ast_nodes": 177, "n_identifiers": 16, "random_cut": "def test_has_related_field_in_list_display_o2o(self):\n \n media = Media.objects.create(name=\"Foo\")\n Vodcast.objects.create(media=media)\n response = self.client.get(reverse(\"admin:admin_views_vodcast_changelist\"), {})\n\n response.context[\"cl\"].list_display = [\"media\"]\n sel", "d_id": 52148, "documentation": { "docstring": "Joins shouldn't be performed for _id fields in list display.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 269598, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "binary_crossentropy", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def binary_crossentropy(target, output, from_logits=False):\n \n target = tf.convert_to_tensor(target)\n output = tf.convert_to_tensor(output)\n\n # Use logits whenever they are available. `softmax` and `sigmoid`\n # activations cache logits on the `output` Tensor.\n if hasattr(output, \"_keras_logits\"):\n output = output._keras_logits # pylint: disable=protected-access\n if from_logits:\n warnings.warn(\n '\"`binary_crossentropy` received `from_logits=True`, but the `output`'\n \" argument was produced by a sigmoid or softmax activation and thus \"\n 'does not represent logits. 
Was this intended?\"',\n stacklevel=2,\n )\n from_logits = True\n\n if from_logits:\n return tf.nn.sigmoid_cross_entropy_with_logits(\n labels=target, logits=output\n )\n\n if (\n not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable))\n and output.op.type == \"Sigmoid\"\n ) and not hasattr(output, \"_keras_history\"):\n # When sigmoid activation function is used for output operation, we\n # use logits from the sigmoid function directly to compute loss in order\n # to prevent collapsing zero when training.\n assert len(output.op.inputs) == 1\n output = output.op.inputs[0]\n return tf.nn.sigmoid_cross_entropy_with_logits(\n labels=target, logits=output\n )\n\n epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)\n output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)\n\n # Compute cross entropy from probabilities.\n bce = target * tf.math.log(output + epsilon())\n bce += (1 - target) * tf.math.log(1 - output + epsilon())\n return -bce\n\n\n@keras_export(\"keras.backend.binary_focal_crossentropy\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.binary_focal_crossentropy\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 421, "n_words": 176, "vocab_size": 121, "complexity": 7, "nloc": 31, "token_counts": 222, "n_ast_nodes": 387, "n_identifiers": 37, "random_cut": "def binary_crossentropy(target, output, from_logits=False):\n \n target = tf.convert_to_tensor(target)\n output = tf.convert_to_tensor(output)\n\n # Use logits whenever they are available. `softmax` and `sigmoid`\n # activations cache logits on the `output` Tensor.\n ", "d_id": 80219, "documentation": { "docstring": "Binary crossentropy between an output tensor and a target tensor.\n\n Args:\n target: A tensor with the same shape as `output`.\n output: A tensor.\n from_logits: Whether `output` is expected to be a logits tensor.\n By default, we consider that `output`\n encodes a probability distribution.\n\n Returns:\n A tensor.\n ", "n_words": 46, "vocab_size": 37, "n_whitespaces": 105, "language": "en" } }, { "id": 247858, "commit_id": "9b43df1f7b2977431563b3cda8fed1ed879651ba", "repo": "synapse", "path": "tests/handlers/test_federation_event.py", "file_name": "test_federation_event.py", "fun_name": "test_process_pulled_event_with_missing_state", "commit_message": "Optimise `_get_state_after_missing_prev_event`: use `/state` (#12040)\n\nIf we're missing most of the events in the room state, then we may as well call the /state endpoint, instead of individually requesting each and every event.", "code": "def test_process_pulled_event_with_missing_state(self) -> None:\n \n return self._test_process_pulled_event_with_missing_state(False)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 12, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 3, "random_cut": "def test_process_pulled_event_with_missing_state(self) -> None:\n \n return self._test_process_pulled_event_with_missing_state(False)\n", "d_id": 71960, "documentation": { "docstring": "Ensure that we correctly handle pulled events with lots of missing state\n\n In this test, we pretend we are processing a \"pulled\" event (eg, via backfill\n or 
get_missing_events). The pulled event has a prev_event we haven't previously\n seen, so the server requests the state at that prev_event. There is a lot\n of state we don't have, so we expect the server to make a /state request.\n\n We check that the pulled event is correctly persisted, and that the state is\n as we expect.\n ", "n_words": 83, "vocab_size": 54, "n_whitespaces": 132, "language": "en" } }, { "id": 99586, "commit_id": "1730c481f1a8a71446326fa1ff72e10663016385", "repo": "sentry", "path": "tests/sentry/integrations/slack/notifications/test_resolved_in_release.py", "file_name": "test_resolved_in_release.py", "fun_name": "test_resolved_in_release", "commit_message": "fix(notifications): Use `metrics_key` (#34572)", "code": "def test_resolved_in_release(self, mock_func):\n \n notification = ResolvedInReleaseActivityNotification(\n Activity(\n project=self.project,\n group=self.group,\n user=self.user,\n type=ActivityType.SET_RESOLVED_IN_RELEASE,\n data={\"version\": \"meow\"},\n )\n )\n with self.tasks():\n notification.send()\n\n attachment, text = get_attachment()\n release_name = notification.activity.data[\"version\"]\n assert text == f\"Issue marked as resolved in {release_name} by {self.name}\"\n assert (\n attachment[\"footer\"]\n == f\"{self.project.slug} | \"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 238, "n_words": 45, "vocab_size": 38, "complexity": 1, "nloc": 19, "token_counts": 92, "n_ast_nodes": 174, "n_identifiers": 22, "random_cut": "def test_resolved_in_release(self, mock_func):\n \n notification = ResolvedInReleaseActivityNotification(\n Activity(\n project=self.project,\n group=self.group,\n user=self.user,\n type=ActivityType.SET_RESOLVED_IN_RELEASE,\n data={\"version\": \"meow\"},\n )\n )\n with self.tasks():\n notification.send()\n\n attachment, text = get_attachment()\n release_name = notification.activity.data[\"version\"]\n assert text == f\"Issue marked as resolved in {release_name} by {self.name}\"\n assert (\n attachment[\"footer\"]\n == f\"{self.project.slug} | \r\nCo-authored-by: Aki Nitta \r\nCo-authored-by: Carlos Mocholí ", "code": "def test_error_raised_with_float_limited_eval_batches():\n \n model = BoringModel()\n dl_size = len(model.val_dataloader())\n limit_val_batches = 1 / (dl_size + 2)\n trainer = Trainer(limit_val_batches=limit_val_batches)\n trainer._data_connector.attach_data(model)\n with pytest.raises(\n MisconfigurationException,\n match=fr\"{limit_val_batches} \\* {dl_size} < 1. 
Please increase the `limit_val_batches`\",\n ):\n trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model)\n\n\n@pytest.mark.parametrize(\n \"val_dl\",\n [\n DataLoader(dataset=RandomDataset(32, 64), shuffle=True),\n CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)),\n CombinedLoader(\n [DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)]\n ),\n CombinedLoader(\n {\n \"dl1\": DataLoader(dataset=RandomDataset(32, 64)),\n \"dl2\": DataLoader(dataset=RandomDataset(32, 64), shuffle=True),\n }\n ),\n ],\n)", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"val_dl\",\n [\n DataLoader(dataset=RandomDataset(32, 64), shuffle=True),\n CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)),\n CombinedLoader(\n [DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)]\n ),\n CombinedLoader(\n {\n \"dl1\": DataLoader(dataset=RandomDataset(32, 64)),\n \"dl2\": DataLoader(dataset=RandomDataset(32, 64), shuffle=True),\n }\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 220, "n_words": 63, "vocab_size": 50, "complexity": 1, "nloc": 11, "token_counts": 71, "n_ast_nodes": 303, "n_identifiers": 25, "random_cut": "def test_error_raised_with_float_limited_eval_batches():\n \n model = BoringModel()\n dl_size = len(model.val_dataloader())\n limit_val_batches = 1 / (dl_size + 2)\n trainer = Trainer(limit_val_batches=limit_val_batches)\n trainer._data_connector.attach_data(model)\n with pytest.raises(\n MisconfigurationException,\n match=fr\"{limit_val_batches} \\* {dl_size} < 1. Please increase the `limit_val_batches`\",\n ):\n trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model)\n\n\n@py", "d_id": 69640, "documentation": { "docstring": "Test that an error is raised if there are not enough batches when passed with float value of\n limit_eval_batches.", "n_words": 19, "vocab_size": 19, "n_whitespaces": 21, "language": "en" } }, { "id": 121997, "commit_id": "980aa318fbe1e3653906465788e919027cf4d680", "repo": "jax", "path": "jax/_src/dispatch.py", "file_name": "dispatch.py", "fun_name": "not_none_device_or_backend_on_jit", "commit_message": "Minimally support `device` argument on `jit` in the `jax.Array` path\n\nThis means that only a single device is allowed to flow through this path. This is a compromise i.e. 
it will support the existing codepaths but won't support sharded arrays to go through this path and encourage users to use other well supported techniques like using device_put explicitly instead of relying on `jit` to do that for you.\n\nPiperOrigin-RevId: 473373822", "code": "def not_none_device_or_backend_on_jit(backend, device, num_ins):\n \n # TODO(yashkatariya): Remove this entire function when backend and device are\n # removed as arguments on jit.\n\n from jax.experimental import sharding\n\n if device is not None and backend is not None:\n raise ValueError(\"can't specify both a device and a backend for jit, \"\n \"got device={} and backend={}\".format(device, backend))\n\n if backend is not None:\n da = [xb.get_backend(backend).get_default_device_assignment(1)[0]]\n else:\n assert device is not None\n da = [device]\n\n assert len(da) == 1\n # Set committed to True for this path because it simulates a device_put on\n # behalf of a user.\n committed = True\n # in_shardings will be marked as replicated regardless of whatever the input\n # had. Given that only a single device is allowed above, this is correct.\n in_shardings = [sharding.OpShardingSharding.get_replicated(da)] * num_ins\n return committed, da, in_shardings\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 177, "n_words": 130, "vocab_size": 85, "complexity": 4, "nloc": 14, "token_counts": 106, "n_ast_nodes": 174, "n_identifiers": 18, "random_cut": "def not_none_device_or_backend_on_jit(backend, device, num_ins):\n \n # TODO(yashkatariya): Remove this entire function when backend and device are\n # removed as arguments on jit.\n\n from jax.experimental import sharding\n\n if device is not None and backend is not None:\n raise ValueError(\"can't specify both a device and a backend for jit, \"\n \"got device={} and backend={}\".format(device, backend))\n\n if backend ", "d_id": 27096, "documentation": { "docstring": "This is to support the backend and device argument on jit. 
It's a feature\n that's deprecated but needs to be supported for feature parity and so that we\n can delete the non-Array paths when Array is switched on.\n ", "n_words": 38, "vocab_size": 33, "n_whitespaces": 41, "language": "en" } }, { "id": 194435, "commit_id": "1830123ba3edf7290b7c6cb1c6f406ccf1d0e5d4", "repo": "kivy", "path": "kivy/input/motionevent.py", "file_name": "motionevent.py", "fun_name": "is_mouse_scrolling", "commit_message": "Feature: EventManagerBase (#7658)\n\n* Added EventManagerBase class and event_managers attribute to WindowBase class.\r\n* Added on_motion event to Widget class.\r\n* Updated post_dispatch_input in EventLoopBase to skip non-touch events.\r\n* Using type ids in MouseMotionEventProvider.\r\n* Added on_motion method to Widget subclasses.\r\n* Updated Widget.on_motion method to dispatch to filtered widgets if 'pos' is not in me.profile.\r\n* Changed motion_filter property in Widget to store key to list values.\r\n* Updated Widget.on_motion to not dispatch event to children if widget is disabled.\r\n* Widget: Using flags to control dispatching in on_motion method.\r\n* Widget: Don't dispatch on_motion to children if only self is registered.\r\n* Widget: Removed collision on disabled check from on_motion method.\r\n* Widget: Added docstrings for motion_filter and related methods.\r\n* EventManager: Moved motion event flags to eventmanager/__init__.py module.\r\n* ScreenManager: Overrode the on_motion method.\r\n* WindowBase: Using attributes event_managers and event_managers_dict.\r\n* WindowBase: Added doc for register_event_manager and unregister_event_manager methods.\r\n* Widget: Improved default dispatch to stop after the last registered widgets.\r\n* EventManagerBase: Added initial docs class and module.\r\n* Widget: Added experimental warnings to motion_filter property and to on_motion and (un)register_for_motion_event methods.\r\n* WindowBase: Added docs for event_managers and event_managers_dict attributes.\r\n* MotionEvent: Added type_id and flags to push_attrs list.\r\n* EventManagerBase: Added versionadded tag on all flags.\r\n* EventManagerBase: Use dispatch modes instead of flags.", "code": "def is_mouse_scrolling(self, *args):\n \n return 'button' in self.profile and 'scroll' in self.button\n", "url": "https://github.com/kivy/kivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 2, "token_counts": 21, "n_ast_nodes": 39, "n_identifiers": 5, "random_cut": "def is_mouse_scrolling(self, *args):\n \n return 'button' in self.profile and 'scroll' in self.button\n", "d_id": 46969, "documentation": { "docstring": "Returns True if the touch event is a mousewheel scrolling\n\n .. versionadded:: 1.6.0\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 27, "language": "en" } }, { "id": 212886, "commit_id": "935e430420f5ac18df67233040ba86359d98a579", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "easy_print", "commit_message": "Addition of blocking parameter to debug printing. 
IF True, then execution of your code is stopped until the \"Quit\" button / \"X\" is clicked on the Debug Window.", "code": "def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None), font=None, no_titlebar=False,\n no_button=False, grab_anywhere=False, keep_on_top=None, do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None, colors=None, c=None,\n erase_all=False, resizable=True, blocking=None):\n \n if _DebugWin.debug_window is None:\n _DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location, font=font, no_titlebar=no_titlebar,\n no_button=no_button, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top,\n do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout, resizable=resizable, blocking=blocking)\n txt_color, bg_color = _parse_colors_parm(c or colors)\n _DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color, background_color=background_color or bg_color,\n erase_all=erase_all, font=font, blocking=blocking)\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 223, "n_words": 60, "vocab_size": 51, "complexity": 1, "nloc": 3, "token_counts": 94, "n_ast_nodes": 279, "n_identifiers": 27, "random_cut": "def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None), font=None, no_titlebar=False,\n no_button=False, grab_anywhere=False, keep_on_top=None, do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None, colors=None, c=None,\n erase_all=False, resizable=True, blocking=None):\n \n if _DebugWin.debug_window is None:\n _DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location, font=font, no_titlebar=no_titlebar,\n no_button=no_button, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top,\n do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout, resizable=resizable, blocking=blocking)\n txt_color, bg_color = _parse_colors_parm(c or colors)\n _DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color, background_color=background_color or bg_color,\n erase_all=erase_all, font=font, blocking=blocking)\n\n", "d_id": 53492, "documentation": { "docstring": "\n Works like a \"print\" statement but with windowing options. Routes output to the \"Debug Window\"\n\n In addition to the normal text and background colors, you can use a \"colors\" tuple/string\n The \"colors\" or \"c\" parameter defines both the text and background in a single parm.\n It can be a tuple or a single single. Both text and background colors need to be specified\n colors -(str, str) or str. A combined text/background color definition in a single parameter\n c - (str, str) - Colors tuple has format (foreground, backgrouned)\n c - str - can also be a string of the format \"foreground on background\" (\"white on red\")\n\n :param *args: stuff to output\n :type *args: (Any)\n :param size: (w,h) w=characters-wide, h=rows-high\n :type size: (int, int)\n :param end: end character\n :type end: (str)\n :param sep: separator character\n :type sep: (str)\n :param location: Location of upper left corner of the window\n :type location: (int, int)\n :param relative_location: (x,y) location relative to the default location of the window, in pixels. Normally the window centers. 
This location is relative to the location the window would be created. Note they can be negative.\n :type relative_location: (int, int)\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike\n :type font: (str or (str, int[, str]) or None)\n :param no_titlebar: If True no titlebar will be shown\n :type no_titlebar: (bool)\n :param no_button: don't show button\n :type no_button: (bool)\n :param grab_anywhere: If True: can grab anywhere to move the window (Default = False)\n :type grab_anywhere: (bool)\n :param background_color: color of background\n :type background_color: (str)\n :param text_color: color of the text\n :type text_color: (str)\n :param keep_on_top: If True the window will remain above all current windows\n :type keep_on_top: (bool)\n :param location: Location of upper left corner of the window\n :type location: (int, int)\n :param do_not_reroute_stdout: do not reroute stdout and stderr. If False, both stdout and stderr will reroute to here\n :type do_not_reroute_stdout: (bool)\n :param echo_stdout: If True stdout is sent to both the console and the debug window\n :type echo_stdout: (bool)\n :param colors: Either a tuple or a string that has both the text and background colors\n :type colors: (str) or (str, str)\n :param c: Either a tuple or a string that has both the text and background colors\n :type c: (str) or (str, str)\n :param resizable: if True, the user can resize the debug window. Default is True\n :type resizable: (bool)\n :param erase_all: If True when erase the output before printing\n :type erase_all: (bool)\n :param blocking: if True, makes the window block instead of returning immediately. The \"Quit\" button changers to \"More\"\n :type blocking: (bool | None)\n :return:\n :rtype:\n ", "n_words": 444, "vocab_size": 200, "n_whitespaces": 1135, "language": "en" } }, { "id": 10901, "commit_id": "13edc16d806fb5d77a6849551178ccc75937f25f", "repo": "jina", "path": "jina/orchestrate/pods/__init__.py", "file_name": "__init__.py", "fun_name": "wait_start_success", "commit_message": "refactor: rename pod to deployment (#4230)\n\n* refactor: rename pod to deployment\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: undo daemon mistake\r\n\r\n* refactor: leftover cleanup\r\n\r\n* fix: more test fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more tests\r\n\r\n* fix: fix more tests\r\n\r\n* refactor: fix more tests\r\n\r\n* refactor: more tests fixes\r\n\r\n* refactor: rename pea to pod\r\n\r\n* refactor: adjust docs\r\n\r\n* refactor: complete pea renaming\r\n\r\n* refactor: more fixes\r\n\r\n* fix: pea_type in k8s yamls\r\n\r\n* fix: adjust pod args name\r\n\r\n* refactor: rename peapods parser folder\r\n\r\n* fix: da init\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def wait_start_success(self):\n \n _timeout = self.args.timeout_ready\n if _timeout <= 0:\n _timeout = None\n else:\n _timeout /= 1e3\n if self._wait_for_ready_or_shutdown(_timeout):\n self._check_failed_to_start()\n self.logger.debug(__ready_msg__)\n else:\n self._fail_start_timeout(_timeout)\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 119, "n_words": 22, "vocab_size": 16, "complexity": 3, "nloc": 11, "token_counts": 56, "n_ast_nodes": 95, "n_identifiers": 11, "random_cut": "def wait_start_success(self):\n \n _timeout = 
self.args.timeout_ready\n if _timeout <= 0:\n _timeout = None\n else:\n _timeout /= ", "d_id": 1978, "documentation": { "docstring": "Block until all pods starts successfully.\n\n If not success, it will raise an error hoping the outer function to catch it\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 35, "language": "en" } }, { "id": 196372, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/decompositions.py", "file_name": "decompositions.py", "fun_name": "_rank_decomposition", "commit_message": "Moved imports to higher level", "code": "def _rank_decomposition(M, iszerofunc=_iszero, simplify=False):\n r\n\n F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc,\n pivots=True)\n rank = len(pivot_cols)\n\n C = M.extract(range(M.rows), pivot_cols)\n F = F[:rank, :]\n\n return C, F\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 25, "vocab_size": 21, "complexity": 1, "nloc": 91, "token_counts": 69, "n_ast_nodes": 105, "n_identifiers": 15, "random_cut": "def _rank_decomposition(M, iszerofunc=_iszero, simplify=False):\n r\n\n F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc,\n pivots=True)\n rank = len(pivot_cols)\n\n C = M.extract(range(M.rows), pivot_cols)\n F = F[:rank, :]\n\n return C,", "d_id": 47872, "documentation": { "docstring": "Returns a pair of matrices (`C`, `F`) with matching rank\n such that `A = C F`.\n\n Parameters\n ==========\n\n iszerofunc : Function, optional\n A function used for detecting whether an element can\n act as a pivot. ``lambda x: x.is_zero`` is used by default.\n\n simplify : Bool or Function, optional\n A function used to simplify elements when looking for a\n pivot. By default SymPy's ``simplify`` is used.\n\n Returns\n =======\n\n (C, F) : Matrices\n `C` and `F` are full-rank matrices with rank as same as `A`,\n whose product gives `A`.\n\n See Notes for additional mathematical details.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> A = Matrix([\n ... [1, 3, 1, 4],\n ... [2, 7, 3, 9],\n ... [1, 5, 3, 1],\n ... [1, 2, 0, 8]\n ... ])\n >>> C, F = A.rank_decomposition()\n >>> C\n Matrix([\n [1, 3, 4],\n [2, 7, 9],\n [1, 5, 1],\n [1, 2, 8]])\n >>> F\n Matrix([\n [1, 0, -2, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 1]])\n >>> C * F == A\n True\n\n Notes\n =====\n\n Obtaining `F`, an RREF of `A`, is equivalent to creating a\n product\n\n .. math::\n E_n E_{n-1} ... E_1 A = F\n\n where `E_n, E_{n-1}, \\dots, E_1` are the elimination matrices or\n permutation matrices equivalent to each row-reduction step.\n\n The inverse of the same product of elimination matrices gives\n `C`:\n\n .. math::\n C = \\left(E_n E_{n-1} \\dots E_1\\right)^{-1}\n\n It is not necessary, however, to actually compute the inverse:\n the columns of `C` are those from the original matrix with the\n same column indices as the indices of the pivot columns of `F`.\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Rank_factorization\n\n .. [2] Piziak, R.; Odell, P. L. (1 June 1999).\n \"Full Rank Factorization of Matrices\".\n Mathematics Magazine. 72 (3): 193. 
doi:10.2307/2690882\n\n See Also\n ========\n\n sympy.matrices.matrices.MatrixReductions.rref\n ", "n_words": 291, "vocab_size": 172, "n_whitespaces": 543, "language": "en" } }, { "id": 259172, "commit_id": "6d36596c4d724cb1354db9eb824bc84b8e2ce512", "repo": "scikit-learn", "path": "sklearn/preprocessing/_data.py", "file_name": "_data.py", "fun_name": "normalize", "commit_message": "fix docstrings on preprocessing._data.normalize (#22795)\n\nCo-authored-by: ducanne <71016393+ducannelync@users.noreply.github.com>", "code": "def normalize(X, norm=\"l2\", *, axis=1, copy=True, return_norm=False):\n \n if norm not in (\"l1\", \"l2\", \"max\"):\n raise ValueError(\"'%s' is not a supported norm\" % norm)\n\n if axis == 0:\n sparse_format = \"csc\"\n elif axis == 1:\n sparse_format = \"csr\"\n else:\n raise ValueError(\"'%d' is not a supported axis\" % axis)\n\n X = check_array(\n X,\n accept_sparse=sparse_format,\n copy=copy,\n estimator=\"the normalize function\",\n dtype=FLOAT_DTYPES,\n )\n if axis == 0:\n X = X.T\n\n if sparse.issparse(X):\n if return_norm and norm in (\"l1\", \"l2\"):\n raise NotImplementedError(\n \"return_norm=True is not implemented \"\n \"for sparse matrices with norm 'l1' \"\n \"or norm 'l2'\"\n )\n if norm == \"l1\":\n inplace_csr_row_normalize_l1(X)\n elif norm == \"l2\":\n inplace_csr_row_normalize_l2(X)\n elif norm == \"max\":\n mins, maxes = min_max_axis(X, 1)\n norms = np.maximum(abs(mins), maxes)\n norms_elementwise = norms.repeat(np.diff(X.indptr))\n mask = norms_elementwise != 0\n X.data[mask] /= norms_elementwise[mask]\n else:\n if norm == \"l1\":\n norms = np.abs(X).sum(axis=1)\n elif norm == \"l2\":\n norms = row_norms(X)\n elif norm == \"max\":\n norms = np.max(abs(X), axis=1)\n norms = _handle_zeros_in_scale(norms, copy=False)\n X /= norms[:, np.newaxis]\n\n if axis == 0:\n X = X.T\n\n if return_norm:\n return X, norms\n else:\n return X\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 542, "n_words": 172, "vocab_size": 91, "complexity": 16, "nloc": 50, "token_counts": 300, "n_ast_nodes": 501, "n_identifiers": 37, "random_cut": "def normalize(X, norm=\"l2\", *, axis=1, copy=True, return_norm=False):\n \n if norm not in (\"l1\", \"l2\", \"max\"):\n raise ValueError(\"'%s' is not a supported norm\" % norm)\n\n if axis == 0:\n sparse_format = \"csc\"\n elif axis == 1:\n sparse_format = \"csr\"\n else:\n raise ValueError(\"'%d' is not a supported axis\" % axis)\n\n X = check_array(\n X,\n accept_sparse=sparse_format,\n copy=copy,\n estimator=\"the normalize function\",\n dtype=FLOAT_DTYPES,\n )\n if axis == 0:\n X = X.T\n\n if sparse.issparse(X):\n if return_norm and norm in (\"l1\", \"l2\"):\n raise NotImplementedError(\n \"return_norm=True is not implemented \"\n \"for sparse matrices with norm 'l1' \"\n \"or norm 'l2'\"\n )\n if norm == \"l1\":\n inplace_csr_row_normalize_l1(X)\n elif norm == \"l2\":\n inplace_csr_row_normalize_l2(X)\n elif norm == \"max\":\n mins, maxes = min_max_axis(X, 1)\n norms = np.maximum(abs(mins), maxes)\n norms_elementwise = norms.repeat(np.diff(X.indptr))\n mask = norms_elementwise != 0\n X.data[mask] /= norms_elementwise[mask]\n else:\n if norm == \"l1\":\n norms = np.abs(X).sum(axis=1)\n elif norm == \"l2\":\n norms = row_norms(X)\n elif norm == \"max\":\n norms = np.max(abs(X), axis=1)\n norms = _handle_zeros_in_scale(no", "d_id": 75623, "documentation": { "docstring": "Scale input vectors individually to unit norm 
(vector length).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to normalize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n\n norm : {'l1', 'l2', 'max'}, default='l2'\n The norm to use to normalize each non zero sample (or each non-zero\n feature if axis is 0).\n\n axis : {0, 1}, default=1\n axis used to normalize the data along. If 1, independently normalize\n each sample, otherwise (if 0) normalize each feature.\n\n copy : bool, default=True\n Set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n return_norm : bool, default=False\n Whether to return the computed norms.\n\n Returns\n -------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Normalized input X.\n\n norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, )\n An array of norms along given axis for X.\n When X is sparse, a NotImplementedError will be raised\n for norm 'l1' or 'l2'.\n\n See Also\n --------\n Normalizer : Performs normalization using the Transformer API\n (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).\n\n Notes\n -----\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n `.\n ", "n_words": 220, "vocab_size": 142, "n_whitespaces": 395, "language": "en" } }, { "id": 19712, "commit_id": "9a3b3ce70621af6f9adaa9eeac9cf83fa149319c", "repo": "pipenv", "path": "pipenv/installers.py", "file_name": "installers.py", "fun_name": "find_version_to_install", "commit_message": "Issue 4993 Add standard pre commit hooks and apply linting. (#4994)\n\n* Add .pre-commit-config.yaml to the project and exclude tests (for now). 
This does not include the MyPy linting that pip does but does include everything else.", "code": "def find_version_to_install(self, name):\n \n version = Version.parse(name)\n if version.patch is not None:\n return name\n try:\n best_match = max(\n (\n inst_version\n for inst_version in self.iter_installable_versions()\n if inst_version.matches_minor(version)\n ),\n key=operator.attrgetter(\"cmpkey\"),\n )\n except ValueError:\n raise ValueError(\n f\"no installable version found for {name!r}\",\n )\n return best_match\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 255, "n_words": 41, "vocab_size": 33, "complexity": 5, "nloc": 18, "token_counts": 73, "n_ast_nodes": 123, "n_identifiers": 16, "random_cut": "def find_version_to_install(self, name):\n \n version = Version.parse(name)\n if version.patch is not None:\n return name\n try:\n best_match = max(\n (\n inst_version\n for inst_version in self.iter_installable_versions()\n if inst_version.matches_minor(version)\n ),\n key=operator.attrgetter(\"cmpkey\"),\n )\n except ValueError:\n ", "d_id": 3075, "documentation": { "docstring": "Find a version in the installer from the version supplied.\n\n A ValueError is raised if a matching version cannot be found.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 35, "language": "en" } }, { "id": 266161, "commit_id": "540bba4544d9f31c126571cc1a45a6783b3b6a89", "repo": "netbox", "path": "netbox/utilities/utils.py", "file_name": "utils.py", "fun_name": "copy_safe_request", "commit_message": "Closes #10920: Include request cookies when queuing a custom script", "code": "def copy_safe_request(request):\n \n meta = {\n k: request.META[k]\n for k in HTTP_REQUEST_META_SAFE_COPY\n if k in request.META and isinstance(request.META[k], str)\n }\n return NetBoxFakeRequest({\n 'META': meta,\n 'COOKIES': request.COOKIES,\n 'POST': request.POST,\n 'GET': request.GET,\n 'FILES': request.FILES,\n 'user': request.user,\n 'path': request.path,\n 'id': getattr(request, 'id', None), # UUID assigned by middleware\n })\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 138, "n_words": 45, "vocab_size": 43, "complexity": 4, "nloc": 16, "token_counts": 97, "n_ast_nodes": 158, "n_identifiers": 16, "random_cut": "def copy_safe_request(request):\n \n meta = {\n k: request.META[k]\n for k in HTTP_REQUEST_META_SAFE_COPY\n if k in request.META and", "d_id": 78322, "documentation": { "docstring": "\n Copy selected attributes from a request object into a new fake request object. This is needed in places where\n thread safe pickling of the useful request data is needed.\n ", "n_words": 29, "vocab_size": 25, "n_whitespaces": 39, "language": "en" } }, { "id": 195091, "commit_id": "2ef5586ed0d644abe18cd3ff45ef9fa01981e87c", "repo": "ParlAI", "path": "projects/director/director_agent.py", "file_name": "director_agent.py", "fun_name": "batchify", "commit_message": "Added director agent and safety experiment commands. 
(#4602)\n\n* Added director agent and safety.\r\n\r\n* ran autoformat.sh", "code": "def batchify(self, obs_batch, sort=False):\n \n batch = super().batchify(obs_batch, sort=sort)\n\n if batch.valid_indices is None:\n return batch\n\n batch.classifier_label = torch.tensor(\n [\n [obs_batch[i].get('classifier_label_idx', -1)]\n for i in batch.valid_indices\n ]\n )\n batch.is_ltr = torch.tensor(\n [[obs_batch[i].get('is_ltr', False)] for i in batch.valid_indices]\n )\n return batch\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 168, "n_words": 38, "vocab_size": 27, "complexity": 4, "nloc": 14, "token_counts": 98, "n_ast_nodes": 153, "n_identifiers": 13, "random_cut": "def batchify(self, obs_batch, sort=False):\n \n batch = super().batchify(obs_batch, sort=sort)\n\n if batc", "d_id": 47190, "documentation": { "docstring": "\n This method calls the parent class's batchify method and then add\n classifier_label and is_ltr property to the the batch.\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 41, "language": "en" } }, { "id": 262467, "commit_id": "8be21ec38734e780e787d07d7e979392d7d63f24", "repo": "TTS", "path": "TTS/tts/layers/tacotron/capacitron_layers.py", "file_name": "capacitron_layers.py", "fun_name": "calculate_post_conv_height", "commit_message": "Capacitron (#977)\n\n* new CI config\r\n\r\n* initial Capacitron implementation\r\n\r\n* delete old unused file\r\n\r\n* fix empty formatting changes\r\n\r\n* update losses and training script\r\n\r\n* fix previous commit\r\n\r\n* fix commit\r\n\r\n* Add Capacitron test and first round of test fixes\r\n\r\n* revert formatter change\r\n\r\n* add changes to the synthesizer\r\n\r\n* add stepwise gradual lr scheduler and changes to the recipe\r\n\r\n* add inference script for dev use\r\n\r\n* feat: add posterior inference arguments to synth methods\r\n- added reference wav and text args for posterior inference\r\n- some formatting\r\n\r\n* fix: add espeak flag to base_tts and dataset APIs\r\n- use_espeak_phonemes flag was not implemented in those APIs\r\n- espeak is now able to be utilised for phoneme generation\r\n- necessary phonemizer for the Capacitron model\r\n\r\n* chore: update training script and style\r\n- training script includes the espeak flag and other hyperparams\r\n- made style\r\n\r\n* chore: fix linting\r\n\r\n* feat: add Tacotron 2 support\r\n\r\n* leftover from dev\r\n\r\n* chore:rename parser args\r\n\r\n* feat: extract optimizers\r\n- created a separate optimizer class to merge the two optimizers\r\n\r\n* chore: revert arbitrary trainer changes\r\n\r\n* fmt: revert formatting bug\r\n\r\n* formatting again\r\n\r\n* formatting fixed\r\n\r\n* fix: log func\r\n\r\n* fix: update optimizer\r\n- Implemented load_state_dict for continuing training\r\n\r\n* fix: clean optimizer init for standard models\r\n\r\n* improvement: purge espeak flags and add training scripts\r\n\r\n* Delete capacitronT2.py\r\n\r\ndelete old training script, new one is pushed\r\n\r\n* feat: capacitron trainer methods\r\n- extracted capacitron specific training operations from the trainer into custom\r\nmethods in taco1 and taco2 models\r\n\r\n* chore: renaming and merging capacitron and gst style args\r\n\r\n* fix: bug fixes from the previous commit\r\n\r\n* fix: implement state_dict method on CapacitronOptimizer\r\n\r\n* fix: call method\r\n\r\n* fix: inference naming\r\n\r\n* Delete train_capacitron.py\r\n\r\n* fix: synthesize\r\n\r\n* feat: 
update tests\r\n\r\n* chore: fix style\r\n\r\n* Delete capacitron_inference.py\r\n\r\n* fix: fix train tts t2 capacitron tests\r\n\r\n* fix: double forward in T2 train step\r\n\r\n* fix: double forward in T1 train step\r\n\r\n* fix: run make style\r\n\r\n* fix: remove unused import\r\n\r\n* fix: test for T1 capacitron\r\n\r\n* fix: make lint\r\n\r\n* feat: add blizzard2013 recipes\r\n\r\n* make style\r\n\r\n* fix: update recipes\r\n\r\n* chore: make style\r\n\r\n* Plot test sentences in Tacotron\r\n\r\n* chore: make style and fix import\r\n\r\n* fix: call forward first before problematic floordiv op\r\n\r\n* fix: update recipes\r\n\r\n* feat: add min_audio_len to recipes\r\n\r\n* aux_input[\"style_mel\"]\r\n\r\n* chore: make style\r\n\r\n* Make capacitron T2 recipe more stable\r\n\r\n* Remove T1 capacitron Ljspeech\r\n\r\n* feat: implement new grad clipping routine and update configs\r\n\r\n* make style\r\n\r\n* Add pretrained checkpoints\r\n\r\n* Add default vocoder\r\n\r\n* Change trainer package\r\n\r\n* Fix grad clip issue for tacotron\r\n\r\n* Fix scheduler issue with tacotron\r\n\r\nCo-authored-by: Eren Gölge \r\nCo-authored-by: WeberJulian \r\nCo-authored-by: Eren Gölge ", "code": "def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs):\n \n for _ in range(n_convs):\n height = (height - kernel_size + 2 * pad) // stride + 1\n return height\n\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 57, "n_words": 25, "vocab_size": 23, "complexity": 2, "nloc": 4, "token_counts": 36, "n_ast_nodes": 59, "n_identifiers": 8, "random_cut": "def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs):\n ", "d_id": 77229, "documentation": { "docstring": "Height of spec after n convolutions with fixed kernel/stride/pad.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 290529, "commit_id": "ee910bd0e41391e00ccd521fe7d605e494d33046", "repo": "core", "path": "tests/components/stream/test_hls.py", "file_name": "test_hls.py", "fun_name": "test_hls_playlist_view", "commit_message": "Refactor camera stream settings (#81663)", "code": "async def test_hls_playlist_view(hass, setup_component, hls_stream, stream_worker_sync):\n \n stream = create_stream(hass, STREAM_SOURCE, {}, dynamic_stream_settings())\n stream_worker_sync.pause()\n hls = stream.add_provider(HLS_PROVIDER)\n for i in range(2):\n segment = Segment(sequence=i, duration=SEGMENT_DURATION)\n hls.put(segment)\n await hass.async_block_till_done()\n\n hls_client = await hls_stream(stream)\n\n resp = await hls_client.get(\"/playlist.m3u8\")\n assert resp.status == HTTPStatus.OK\n assert await resp.text() == make_playlist(\n sequence=0, segments=[make_segment(0), make_segment(1)]\n )\n\n segment = Segment(sequence=2, duration=SEGMENT_DURATION)\n hls.put(segment)\n await hass.async_block_till_done()\n resp = await hls_client.get(\"/playlist.m3u8\")\n assert resp.status == HTTPStatus.OK\n assert await resp.text() == make_playlist(\n sequence=0, segments=[make_segment(0), make_segment(1), make_segment(2)]\n )\n\n stream_worker_sync.resume()\n await stream.stop()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 164, "n_words": 76, "vocab_size": 44, "complexity": 2, "nloc": 24, "token_counts": 209, "n_ast_nodes": 337, "n_identifiers": 34, "random_cut": "async def test_hls_playlist_view(hass, 
setup_component, hls_stream, stream_worker_sync):\n \n stream = create_stream(hass, STREAM_SOURCE, {}, dynamic_stream_settings())\n stream_worker_sync.pause()\n hls = stream.add_provider(HLS_PROVIDER)\n for i in range(2):\n segment = Segment(sequence=i, duration=SEGMENT_DURATION)\n hls.put(segment)\n await hass.async_block_till_done()\n\n hls_client = await hls_stream(stream)\n\n resp = await hls_client.get(\"/playlist.m3u8\")\n assert resp.status == HTTPStatus.OK\n assert await resp.text() == make_playlist(\n sequence=0, segments=[make_segment(0), make_segment(1)]\n )\n\n segment = Segment(sequence=2, duration=SEGMENT_DURATI", "d_id": 89645, "documentation": { "docstring": "Test rendering the hls playlist with 1 and 2 output segments.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 141415, "commit_id": "80ae651f259e1ea13c21b285d6bfcc7fd834ef9c", "repo": "ray", "path": "python/ray/train/_internal/backend_executor.py", "file_name": "backend_executor.py", "fun_name": "_create_local_rank_map", "commit_message": "[Train] Clean up `ray.train` package (#25566)", "code": "def _create_local_rank_map(self) -> Dict:\n \n rank_mapping = {}\n ip_dict = defaultdict(int)\n for world_rank in range(len(self.worker_group)):\n worker = self.worker_group.workers[world_rank]\n node_ip = worker.metadata.node_ip\n rank_mapping[world_rank] = ip_dict[node_ip]\n ip_dict[node_ip] += 1\n return rank_mapping\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 107, "n_words": 28, "vocab_size": 22, "complexity": 2, "nloc": 30, "token_counts": 65, "n_ast_nodes": 104, "n_identifiers": 15, "random_cut": "def _create_local_rank_map(self) -> Dict:\n \n rank_mapping = {}\n ip_dict = defaultdict(int)\n for world_rank in range(len(self.worker_group)):\n worker = self.worker_group.workers[world_rank]\n node_ip = worker.metadata.node_ip\n rank_mapping[world_rank] = ip_dict[node_ip]\n ", "d_id": 32355, "documentation": { "docstring": "Create mapping from worker world_rank to local_rank.\n\n Example:\n Worker 0: 0.0.0.0\n Worker 1: 0.0.0.0\n Worker 2: 0.0.0.1\n Worker 3: 0.0.0.0\n Worker 4: 0.0.0.1\n\n Workers 0, 1, 3 are on 0.0.0.0.\n Workers 2, 4 are on 0.0.0.1.\n\n Expected Output:\n {\n 0 -> 0,\n 1 -> 1,\n 2 -> 0,\n 3 -> 2,\n 4 -> 1\n }\n ", "n_words": 55, "vocab_size": 34, "n_whitespaces": 254, "language": "en" } }, { "id": 101605, "commit_id": "98d01760e469fd2108eed8d0b0a1ba6297c3177c", "repo": "faceswap", "path": "tools/sort/sort.py", "file_name": "sort.py", "fun_name": "_output_groups", "commit_message": "Overhaul sort:\n - Standardize image data reading and writing\n - Optimize loading (just one pass required)\n - Make all sort groups binnable (to greater or lesser results)\n - Add sort by pitch\n - Deprecate multiple options\n - linting, docs + locales", "code": "def _output_groups(self) -> None:\n \n is_rename = self._args.sort_method != \"none\"\n\n logger.info(\"Creating %s group folders in '%s'.\",\n len(self._sorter.binned), self._args.output_dir)\n bin_names = [f\"_{b}\" for b in self._sorter.bin_names]\n if is_rename:\n bin_names = [f\"{name}_by_{self._args.sort_method}\" for name in bin_names]\n for name in bin_names:\n folder = os.path.join(self._args.output_dir, name)\n if os.path.exists(folder):\n rmtree(folder)\n os.makedirs(folder)\n\n description = f\"{'Copying' if self._args.keep_original else 'Moving'} into groups\"\n description += \" and renaming\" if is_rename else 
\"\"\n\n pbar = tqdm(range(len(self._sorter.sorted_filelist)),\n desc=description,\n file=sys.stdout,\n leave=False)\n idx = 0\n for bin_id, bin_ in enumerate(self._sorter.binned):\n pbar.set_description(f\"{description}: Bin {bin_id + 1} of {len(self._sorter.binned)}\")\n output_path = os.path.join(self._args.output_dir, bin_names[bin_id])\n if not bin_:\n logger.debug(\"Removing empty bin: %s\", output_path)\n os.rmdir(output_path)\n for source in bin_:\n basename = os.path.basename(source)\n dst_name = f\"{idx:06d}_{basename}\" if is_rename else basename\n dest = os.path.join(output_path, dst_name)\n self._sort_file(source, dest)\n idx += 1\n pbar.update(1)\n\n # Output methods", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 503, "n_words": 124, "vocab_size": 88, "complexity": 11, "nloc": 37, "token_counts": 260, "n_ast_nodes": 486, "n_identifiers": 46, "random_cut": "def _output_groups(self) -> None:\n \n is_rename = self._args.sort_method != \"none\"\n\n logger.info(\"Creating %s group folders in '%s'.\",\n len(self._sorter.binned), self._args.output_dir)\n bin_names = [f\"_{b}\" for b in self._sorter.bin_names]\n if is_rename:\n ", "d_id": 21013, "documentation": { "docstring": " Move the files to folders.\n\n Obtains the bins and original filenames from :attr:`_sorter` and outputs into appropriate\n bins in the output location\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 44, "language": "en" } }, { "id": 178485, "commit_id": "e399c9cade448a8dd0018dc5484613782fcabf63", "repo": "Nuitka", "path": "nuitka/utils/SharedLibraries.py", "file_name": "SharedLibraries.py", "fun_name": "_setSharedLibraryRPATHElf", "commit_message": "macOS: Make sure to check exit code and output problematic command", "code": "def _setSharedLibraryRPATHElf(filename, rpath):\n # TODO: Might write something that makes a shell script replacement\n # in case no rpath is present, or use patchelf, for now our use\n # case seems to use rpaths for executables.\n\n # patchelf --set-rpath \"$ORIGIN/path/to/library\" \n with withEnvironmentVarOverriden(\"LANG\", \"C\"):\n executeToolChecked(\n logger=postprocessing_logger,\n command=(\"patchelf\", \"--set-rpath\", rpath, filename),\n stderr_filter=_filterPatchelfErrorOutput,\n absence_message=,\n )\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 125, "n_words": 53, "vocab_size": 46, "complexity": 1, "nloc": 10, "token_counts": 42, "n_ast_nodes": 75, "n_identifiers": 11, "random_cut": "def _setSharedLibraryRPATHElf(filename, rpath):\n # TODO: Might write something that makes a shell script replacement\n # in case no rpath is present, or use patchelf, for now our use\n # case seems to use rpaths for executables.\n\n # patchelf --set-rpath \"$ORIGIN/path/to/library\" \n with withEnvironmentVarOverriden(\"LANG\", \"C\"):\n execut", "d_id": 42710, "documentation": { "docstring": "\\\nError, needs 'patchelf' on your system, due to 'RPATH' settings that need to be\nset.", "n_words": 16, "vocab_size": 15, "n_whitespaces": 13, "language": "en" } }, { "id": 154137, "commit_id": "adb16a17f721048005520388080627975c6852d8", "repo": "modin", "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_get_columns", "commit_message": "FEAT-#4725: Make index and columns lazy in Modin DataFrame (#4726)\n\nCo-authored-by: Mahesh Vashishtha 
\r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Vasily Litvinov ", "code": "def _get_columns(self):\n \n if self._columns_cache is None:\n self._columns_cache, column_widths = self._compute_axis_labels_and_lengths(\n 1\n )\n if self._column_widths_cache is None:\n self._column_widths_cache = column_widths\n return self._columns_cache\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 105, "n_words": 21, "vocab_size": 14, "complexity": 3, "nloc": 8, "token_counts": 41, "n_ast_nodes": 68, "n_identifiers": 6, "random_cut": "def _get_columns(self):\n ", "d_id": 35802, "documentation": { "docstring": "\n Get the columns from the cache object.\n\n Returns\n -------\n pandas.Index\n An index object containing the column labels.\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 64, "language": "en" } }, { "id": 305142, "commit_id": "23090cb8a268b3f268aefa8477f30af88bf46051", "repo": "core", "path": "homeassistant/components/intesishome/climate.py", "file_name": "climate.py", "fun_name": "async_update", "commit_message": "Improve entity type hints [i] (#77529)", "code": "async def async_update(self) -> None:\n \n # Update values from controller's device dictionary\n self._connected = self._controller.is_connected\n self._current_temp = self._controller.get_temperature(self._device_id)\n self._fan_speed = self._controller.get_fan_speed(self._device_id)\n self._power = self._controller.is_on(self._device_id)\n self._min_temp = self._controller.get_min_setpoint(self._device_id)\n self._max_temp = self._controller.get_max_setpoint(self._device_id)\n self._rssi = self._controller.get_rssi(self._device_id)\n self._run_hours = self._controller.get_run_hours(self._device_id)\n self._target_temp = self._controller.get_setpoint(self._device_id)\n self._outdoor_temp = self._controller.get_outdoor_temperature(self._device_id)\n\n # Operation mode\n mode = self._controller.get_mode(self._device_id)\n self._hvac_mode = MAP_IH_TO_HVAC_MODE.get(mode)\n\n # Preset mode\n preset = self._controller.get_preset_mode(self._device_id)\n self._preset = MAP_IH_TO_PRESET_MODE.get(preset)\n\n # Swing mode\n # Climate module only supports one swing setting.\n self._vvane = self._controller.get_vertical_swing(self._device_id)\n self._hvane = self._controller.get_horizontal_swing(self._device_id)\n\n # Power usage\n self._power_consumption_heat = self._controller.get_heat_power_consumption(\n self._device_id\n )\n self._power_consumption_cool = self._controller.get_cool_power_consumption(\n self._device_id\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 301, "n_words": 90, "vocab_size": 63, "complexity": 1, "nloc": 24, "token_counts": 243, "n_ast_nodes": 393, "n_identifiers": 41, "random_cut": "async def async_update(self) -> None:\n \n # Update values from controller's device dictionary\n self._connected = self._", "d_id": 103934, "documentation": { "docstring": "Copy values from controller dictionary to climate device.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 111524, "commit_id": "1f23c615d7a7326ca5a38a7d768b8b70caaa0e17", "repo": "spaCy", "path": "spacy/tests/pipeline/test_entity_linker.py", "file_name": "test_entity_linker.py", "fun_name": "test_kb_valid_entities", "commit_message": "Refactor KB for easier customization (#11268)\n\n* Add implementation of batching + 
backwards compatibility fixes. Tests indicate issue with batch disambiguation for custom singular entity lookups.\r\n\r\n* Fix tests. Add distinction w.r.t. batch size.\r\n\r\n* Remove redundant and add new comments.\r\n\r\n* Adjust comments. Fix variable naming in EL prediction.\r\n\r\n* Fix mypy errors.\r\n\r\n* Remove KB entity type config option. Change return types of candidate retrieval functions to Iterable from Iterator. Fix various other issues.\r\n\r\n* Update spacy/pipeline/entity_linker.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/pipeline/entity_linker.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/kb_base.pyx\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/kb_base.pyx\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/pipeline/entity_linker.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Add error messages to NotImplementedErrors. Remove redundant comment.\r\n\r\n* Fix imports.\r\n\r\n* Remove redundant comments.\r\n\r\n* Rename KnowledgeBase to InMemoryLookupKB and BaseKnowledgeBase to KnowledgeBase.\r\n\r\n* Fix tests.\r\n\r\n* Update spacy/errors.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Move KB into subdirectory.\r\n\r\n* Adjust imports after KB move to dedicated subdirectory.\r\n\r\n* Fix config imports.\r\n\r\n* Move Candidate + retrieval functions to separate module. Fix other, small issues.\r\n\r\n* Fix docstrings and error message w.r.t. class names. Fix typing for candidate retrieval functions.\r\n\r\n* Update spacy/kb/kb_in_memory.pyx\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Update spacy/ml/models/entity_linker.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Fix typing.\r\n\r\n* Change typing of mentions to be Span instead of Union[Span, str].\r\n\r\n* Update docs.\r\n\r\n* Update EntityLinker and _architecture docs.\r\n\r\n* Update website/docs/api/entitylinker.md\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Adjust message for E1046.\r\n\r\n* Re-add section for Candidate in kb.md, add reference to dedicated page.\r\n\r\n* Update docs and docstrings.\r\n\r\n* Re-add section + reference for KnowledgeBase.get_alias_candidates() in docs.\r\n\r\n* Update spacy/kb/candidate.pyx\r\n\r\n* Update spacy/kb/kb_in_memory.pyx\r\n\r\n* Update spacy/pipeline/legacy/entity_linker.py\r\n\r\n* Remove canididate.md. 
Remove mistakenly added config snippet in entity_linker.py.\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\nCo-authored-by: Sofie Van Landeghem ", "code": "def test_kb_valid_entities(nlp):\n \n mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)\n\n # adding entities\n mykb.add_entity(entity=\"Q1\", freq=19, entity_vector=[8, 4, 3])\n mykb.add_entity(entity=\"Q2\", freq=5, entity_vector=[2, 1, 0])\n mykb.add_entity(entity=\"Q3\", freq=25, entity_vector=[-1, -6, 5])\n\n # adding aliases\n mykb.add_alias(alias=\"douglas\", entities=[\"Q2\", \"Q3\"], probabilities=[0.8, 0.2])\n mykb.add_alias(alias=\"adam\", entities=[\"Q2\"], probabilities=[0.9])\n\n # test the size of the corresponding KB\n assert mykb.get_size_entities() == 3\n assert mykb.get_size_aliases() == 2\n\n # test retrieval of the entity vectors\n assert mykb.get_vector(\"Q1\") == [8, 4, 3]\n assert mykb.get_vector(\"Q2\") == [2, 1, 0]\n assert mykb.get_vector(\"Q3\") == [-1, -6, 5]\n\n # test retrieval of prior probabilities\n assert_almost_equal(mykb.get_prior_prob(entity=\"Q2\", alias=\"douglas\"), 0.8)\n assert_almost_equal(mykb.get_prior_prob(entity=\"Q3\", alias=\"douglas\"), 0.2)\n assert_almost_equal(mykb.get_prior_prob(entity=\"Q342\", alias=\"douglas\"), 0.0)\n assert_almost_equal(mykb.get_prior_prob(entity=\"Q3\", alias=\"douglassssss\"), 0.0)\n\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 157, "n_words": 94, "vocab_size": 67, "complexity": 1, "nloc": 16, "token_counts": 275, "n_ast_nodes": 423, "n_identifiers": 19, "random_cut": "def test_kb_valid_entities(nlp):\n \n mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)\n\n # adding entities\n mykb.add_entity(entity=\"Q1\", freq=19, entity_vector=[8, 4, 3])\n mykb.add_entity(entity=\"Q2\", freq=5, entity_vector=[2, 1, 0])\n mykb.add_entity(entity=\"Q3\", freq=25, entity_vector=[-1, -6, 5])\n\n # adding aliases\n mykb.add_alias(alias=\"douglas\", entities=[\"Q2\", \"Q3\"], probabilities=[0.8, 0.2])\n mykb.add_alias(a", "d_id": 24423, "documentation": { "docstring": "Test the valid construction of a KB with 3 entities and two aliases", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 160327, "commit_id": "f9355942f6ef7c5d27691c4571096234efb67a2b", "repo": "numpy", "path": "numpy/lib/twodim_base.py", "file_name": "twodim_base.py", "fun_name": "eye", "commit_message": "BUG: lib: Allow type uint64 for eye() arguments.\n\nCloses gh-9982.\n\n(Plus a few small PEP 8 fixes.)", "code": "def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):\n \n if like is not None:\n return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)\n if M is None:\n M = N\n m = zeros((N, M), dtype=dtype, order=order)\n if k >= M:\n return m\n # Ensure M and k are integers, so we don't get any surprise casting\n # results in the expressions `M-k` and `M+1` used below. 
This avoids\n # a problem with inputs with type (for example) np.uint64.\n M = operator.index(M)\n k = operator.index(k)\n if k >= 0:\n i = k\n else:\n i = (-k) * M\n m[:M-k].flat[i::M+1] = 1\n return m\n\n\n_eye_with_like = array_function_dispatch(\n _eye_dispatcher\n)(eye)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 183, "n_words": 104, "vocab_size": 73, "complexity": 5, "nloc": 16, "token_counts": 146, "n_ast_nodes": 239, "n_identifiers": 17, "random_cut": "def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):\n \n if like is not None:\n return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)\n if M is None:\n M = N\n m = zeros((N, M), dtype=dtype, order=order)\n if k >= M:\n return m\n # Ensure M and k are integers, so we don't get any surprise casting\n # result", "d_id": 38596, "documentation": { "docstring": "\n Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n M : int, optional\n Number of columns in the output. If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n order : {'C', 'F'}, optional\n Whether the output should be stored in row-major (C-style) or\n column-major (Fortran-style) order in memory.\n\n .. versionadded:: 1.14.0\n ${ARRAY_FUNCTION_LIKE}\n\n .. versionadded:: 1.20.0\n\n Returns\n -------\n I : ndarray of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n\n See Also\n --------\n identity : (almost) equivalent function\n diag : diagonal 2-D array from a 1-D array specified by the user.\n\n Examples\n --------\n >>> np.eye(2, dtype=int)\n array([[1, 0],\n [0, 1]])\n >>> np.eye(3, k=1)\n array([[0., 1., 0.],\n [0., 0., 1.],\n [0., 0., 0.]])\n\n ", "n_words": 176, "vocab_size": 120, "n_whitespaces": 350, "language": "en" } }, { "id": 29606, "commit_id": "eac1ae9cf107b8b0189b8b21ff6668c4131c6a00", "repo": "saleor", "path": "saleor/plugins/base_plugin.py", "file_name": "base_plugin.py", "fun_name": "_clean_configuration_value", "commit_message": "Fix plugin configuration (#11278)\n\n* Fix updating plugin configuration\r\n\r\n* Fix failing tax migration", "code": "def _clean_configuration_value(cls, item_type, new_value):\n \n if (\n item_type == ConfigurationTypeField.BOOLEAN\n and new_value\n and not isinstance(new_value, bool)\n ):\n new_value = new_value.lower() == \"true\"\n if item_type == ConfigurationTypeField.OUTPUT:\n # OUTPUT field is read only. 
No need to update it\n return\n return new_value\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 140, "n_words": 39, "vocab_size": 31, "complexity": 5, "nloc": 10, "token_counts": 48, "n_ast_nodes": 80, "n_identifiers": 10, "random_cut": "def _clean_configuration_value(cls, item_type, new_value):\n \n ", "d_id": 5241, "documentation": { "docstring": "Clean the value that is saved in plugin configuration.\n\n Change the string provided as boolean into the bool value.\n Return None for Output type, as it's read only field.\n ", "n_words": 29, "vocab_size": 26, "n_whitespaces": 50, "language": "en" } }, { "id": 60232, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/coord_map.py", "file_name": "coord_map.py", "fun_name": "coord_map_from_to", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def coord_map_from_to(top_from, top_to):\n \n # We need to find a common ancestor of top_from and top_to.\n # We'll assume that all ancestors are equivalent here (otherwise the graph\n # is an inconsistent state (which we could improve this to check for)).\n # For now use a brute-force algorithm.\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 62, "n_words": 47, "vocab_size": 42, "complexity": 8, "nloc": 28, "token_counts": 177, "n_ast_nodes": 19, "n_identifiers": 3, "random_cut": "def coord_map_from_to(top_from, top_to):\n \n # We need to find a common ancestor of top_from and top_to.\n # We'll assume that all ancestors are equivalent here (otherwise the graph\n # is an inconsistent state (which we could improve this to check for)", "d_id": 12025, "documentation": { "docstring": "\n Determine the coordinate mapping betweeen a top (from) and a top (to).\n Walk the graph to find a common ancestor while composing the coord maps for\n from and to until they meet. 
As a last step the from map is inverted.\n ", "n_words": 41, "vocab_size": 31, "n_whitespaces": 54, "language": "en" } }, { "id": 65678, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/stock_controller.py", "file_name": "stock_controller.py", "fun_name": "get_conditions_to_validate_future_sle", "commit_message": "style: format code with black", "code": "def get_conditions_to_validate_future_sle(sl_entries):\n\twarehouse_items_map = {}\n\tfor entry in sl_entries:\n\t\tif entry.warehouse not in warehouse_items_map:\n\t\t\twarehouse_items_map[entry.warehouse] = set()\n\n\t\twarehouse_items_map[entry.warehouse].add(entry.item_code)\n\n\tor_conditions = []\n\tfor warehouse, items in warehouse_items_map.items():\n\t\tor_conditions.append(\n\t\t\tf\n\t\t)\n\n\treturn or_conditions\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 19, "n_words": 31, "vocab_size": 25, "complexity": 4, "nloc": 13, "token_counts": 69, "n_ast_nodes": 155, "n_identifiers": 16, "random_cut": "def get_conditions_to_validate_future_sle(sl_entries):\n\twarehouse_items_map = {}\n\tfor entry in sl_entries:\n\t\tif entry.warehouse not in warehouse_ite", "d_id": 13985, "documentation": { "docstring": "warehouse = {frappe.db.escape(warehouse)}\n\t\t\t\tand item_code in ({', '.join(frappe.db.escape(item) for item in items)})", "n_words": 12, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 262317, "commit_id": "1425a023fe4bc6bda8578295aeeeb02af78cc082", "repo": "TTS", "path": "TTS/tts/datasets/__init__.py", "file_name": "__init__.py", "fun_name": "split_dataset", "commit_message": "Make style and lint", "code": "def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01):\n \n speakers = [item[\"speaker_name\"] for item in items]\n is_multi_speaker = len(set(speakers)) > 1\n if eval_split_size > 1:\n eval_split_size = int(eval_split_size)\n else:\n if eval_split_max_size:\n eval_split_size = min(eval_split_max_size, int(len(items) * eval_split_size))\n else:\n eval_split_size = int(len(items) * eval_split_size)\n\n assert (\n eval_split_size > 0\n ), \" [!] You do not have enough samples for the evaluation set. 
You can work around this setting the 'eval_split_size' parameter to a minimum of {}\".format(\n 1 / len(items)\n )\n np.random.seed(0)\n np.random.shuffle(items)\n if is_multi_speaker:\n items_eval = []\n speakers = [item[\"speaker_name\"] for item in items]\n speaker_counter = Counter(speakers)\n while len(items_eval) < eval_split_size:\n item_idx = np.random.randint(0, len(items))\n speaker_to_be_removed = items[item_idx][\"speaker_name\"]\n if speaker_counter[speaker_to_be_removed] > 1:\n items_eval.append(items[item_idx])\n speaker_counter[speaker_to_be_removed] -= 1\n del items[item_idx]\n return items_eval, items\n return items[:eval_split_size], items[eval_split_size:]\n\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 324, "n_words": 118, "vocab_size": 82, "complexity": 8, "nloc": 30, "token_counts": 219, "n_ast_nodes": 347, "n_identifiers": 23, "random_cut": "def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01):\n \n speakers = [item[\"speaker_name\"] for item in items]\n is_multi_speaker = len(set(speakers)) > 1\n if eval_split_size > 1:\n eval_split_size = int(eval_split_size)\n else:\n if eval_split_max_size:\n eval_split_size = min(eval_split_max_size, int(len(items) * eval_split_size))\n else:\n eval_split_size = int(len(items) * eval_split_size)\n\n assert (\n eval_split_size > 0\n ), \" [!] You do not have enough samples for the evaluation set. You can work around this setting the 'eval_split_size' parameter to a minimum of {}\".format(\n 1 / len(items)\n )\n np.random.seed(", "d_id": 77180, "documentation": { "docstring": "Split a dataset into train and eval. Consider speaker distribution in multi-speaker training.\n\n Args:\n <<<<<<< HEAD\n items (List[List]):\n A list of samples. Each sample is a list of `[audio_path, text, speaker_id]`.\n\n eval_split_max_size (int):\n Number maximum of samples to be used for evaluation in proportion split. Defaults to None (Disabled).\n\n eval_split_size (float):\n If between 0.0 and 1.0 represents the proportion of the dataset to include in the evaluation set.\n If > 1, represents the absolute number of evaluation samples. Defaults to 0.01 (1%).\n =======\n items (List[List]): A list of samples. Each sample is a list of `[text, audio_path, speaker_id]`.\n >>>>>>> Fix docstring\n ", "n_words": 101, "vocab_size": 65, "n_whitespaces": 224, "language": "en" } }, { "id": 247891, "commit_id": "f0b03186d96305fd44d74a89bf4230beec0c5c31", "repo": "synapse", "path": "tests/rest/admin/test_media.py", "file_name": "test_media.py", "fun_name": "test_quarantine_media", "commit_message": "Add type hints for `tests/unittest.py`. 
(#12347)\n\nIn particular, add type hints for get_success and friends, which are then helpful in a bunch of places.", "code": "def test_quarantine_media(self) -> None:\n \n\n media_info = self.get_success(self.store.get_local_media(self.media_id))\n assert media_info is not None\n self.assertFalse(media_info[\"quarantined_by\"])\n\n # quarantining\n channel = self.make_request(\n \"POST\",\n self.url % (\"quarantine\", self.server_name, self.media_id),\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertFalse(channel.json_body)\n\n media_info = self.get_success(self.store.get_local_media(self.media_id))\n assert media_info is not None\n self.assertTrue(media_info[\"quarantined_by\"])\n\n # remove from quarantine\n channel = self.make_request(\n \"POST\",\n self.url % (\"unquarantine\", self.server_name, self.media_id),\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertFalse(channel.json_body)\n\n media_info = self.get_success(self.store.get_local_media(self.media_id))\n assert media_info is not None\n self.assertFalse(media_info[\"quarantined_by\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 273, "n_words": 67, "vocab_size": 33, "complexity": 1, "nloc": 27, "token_counts": 215, "n_ast_nodes": 340, "n_identifiers": 21, "random_cut": "def test_quarantine_media(self) -> None:\n \n\n media_info = self.get_success(self.store.get_local_media(self.media_id))\n assert media_info is not None\n self.assertFalse(media_info[\"quarantined_by\"])\n\n # quarantining\n channel = self.make_request(\n \"POST\",\n self.url % (\"quarantine\", self.server_name, self.media_id),\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertFalse(channel.json_body)\n\n media_info = self.get_success(self.store.get_local_media(self.media_id))\n assert media_info is not None\n self.assertTrue(media_info[\"quarantined_by\"])\n\n # remove from quarantine\n channel = self.make_request(\n \"POST\",\n self.url % (\"unquarantine\", self.server_name, self.media_id),\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertFalse(channel.json_body)\n\n media_info = self.get_success(self.store.get_local_media(self.media_id))\n assert media_info is not None\n self.assertFalse(media_info[\"quarantined_by\"])\n", "d_id": 71979, "documentation": { "docstring": "\n Tests that quarantining and remove from quarantine a media is successfully\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 176254, "commit_id": "290ebce534b84f9db20ec58b98cbb170e65a0ba1", "repo": "networkx", "path": "networkx/algorithms/community/modularity_max.py", "file_name": "modularity_max.py", "fun_name": "naive_greedy_modularity_communities", "commit_message": "Add weights to karate club graph (#5285)\n\nAdd weights to the karate_club_graph.\r\nModifies `non_randomness` and `naive_greedy_modularity_communities` to\r\naccept a `weight` parameter and modifies tests that use the kcg accordingly\r\n\r\nCo-authored-by: Kevin Berry \r\nCo-authored-by: Dan Schult ", "code": "def naive_greedy_modularity_communities(G, resolution=1, weight=None):\n r\n # First create one community for each node\n communities = list(frozenset([u]) for u in 
G.nodes())\n # Track merges\n merges = []\n # Greedily merge communities until no improvement is possible\n old_modularity = None\n new_modularity = modularity(G, communities, resolution=resolution, weight=weight)\n while old_modularity is None or new_modularity > old_modularity:\n # Save modularity for comparison\n old_modularity = new_modularity\n # Find best pair to merge\n trial_communities = list(communities)\n to_merge = None\n for i, u in enumerate(communities):\n for j, v in enumerate(communities):\n # Skip i==j and empty communities\n if j <= i or len(u) == 0 or len(v) == 0:\n continue\n # Merge communities u and v\n trial_communities[j] = u | v\n trial_communities[i] = frozenset([])\n trial_modularity = modularity(\n G, trial_communities, resolution=resolution, weight=weight\n )\n if trial_modularity >= new_modularity:\n # Check if strictly better or tie\n if trial_modularity > new_modularity:\n # Found new best, save modularity and group indexes\n new_modularity = trial_modularity\n to_merge = (i, j, new_modularity - old_modularity)\n elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]):\n # Break ties by choosing pair with lowest min id\n new_modularity = trial_modularity\n to_merge = (i, j, new_modularity - old_modularity)\n # Un-merge\n trial_communities[i] = u\n trial_communities[j] = v\n if to_merge is not None:\n # If the best merge improves modularity, use it\n merges.append(to_merge)\n i, j, dq = to_merge\n u, v = communities[i], communities[j]\n communities[j] = u | v\n communities[i] = frozenset([])\n # Remove empty communities and sort\n return sorted((c for c in communities if len(c) > 0), key=len, reverse=True)\n\n\n# old name\n_naive_greedy_modularity_communities = naive_greedy_modularity_communities\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 804, "n_words": 250, "vocab_size": 136, "complexity": 16, "nloc": 80, "token_counts": 301, "n_ast_nodes": 472, "n_identifiers": 29, "random_cut": "def naive_greedy_modularity_communities(G, resolution=1, weight=None):\n r\n # First create one community for each node\n communities = list(frozenset([u]) for u in G.nodes())\n # Track merges\n merges = []\n # Greedily merge communities until no improvement is possible\n old_modularity = None\n new_modularity = modularity(G, communities, resolution=resolution, weight=weight)\n while old_modularity is None or new_modularity > old_modularity:\n # Save modularity for comparison\n old_modularity = new_modularity\n # Find best pair to merge\n trial_communities = list(communities)\n to_merge = None\n for i, u in enumerate(communities):\n for j, v in enumerate(communities):\n # Skip i==j and empty communities\n if j <= i or len(u) == 0 or len(v) == 0:\n continue\n # Merge communities u and v\n trial_communities[j] = u | v\n trial_communities[i] = frozenset([])\n trial_modularity = modularity(\n G, trial_communities, resolution=resolution, weight=weight\n )\n if trial_modularity >= new_modularity:\n # Check if strictly better or tie\n if trial_modularity > new_modularity:\n # Found new best, save modularity and group indexes\n new_modularity = trial_modularity\n to_merge = (i, j, new_modularity - old_modularity)\n elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]):\n ", "d_id": 41794, "documentation": { "docstring": "Find communities in G using greedy modularity maximization.\n\n This implementation is O(n^4), much slower than alternatives, but it is\n provided as an 
easy-to-understand reference implementation.\n\n Greedy modularity maximization begins with each node in its own community\n and joins the pair of communities that most increases modularity until no\n such pair exists.\n\n This function maximizes the generalized modularity, where `resolution`\n is the resolution parameter, often expressed as $\\gamma$.\n See :func:`~networkx.algorithms.community.quality.modularity`.\n\n Parameters\n ----------\n G : NetworkX graph\n\n resolution : float (default=1)\n If resolution is less than 1, modularity favors larger communities.\n Greater than 1 favors smaller communities.\n\n weight : string or None, optional (default=None)\n The name of an edge attribute that holds the numerical value used\n as a weight. If None, then each edge has weight 1.\n The degree is the sum of the edge weights adjacent to the node.\n\n Returns\n -------\n list\n A list of sets of nodes, one for each community.\n Sorted by length with largest communities first.\n\n Examples\n --------\n >>> from networkx.algorithms.community import \\\n ... naive_greedy_modularity_communities\n >>> G = nx.karate_club_graph()\n >>> c = naive_greedy_modularity_communities(G)\n >>> sorted(c[0])\n [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]\n\n See Also\n --------\n greedy_modularity_communities\n modularity\n ", "n_words": 199, "vocab_size": 146, "n_whitespaces": 336, "language": "en" } }, { "id": 42747, "commit_id": "e58985598f202395098e15b686aec33645a906ff", "repo": "airflow", "path": "airflow/providers/microsoft/psrp/hooks/psrp.py", "file_name": "psrp.py", "fun_name": "invoke", "commit_message": "Ensure @contextmanager decorates generator func (#23103)", "code": "def invoke(self) -> Generator[PowerShell, None, None]:\n \n logger = copy(self.log)\n logger.setLevel(self._logging_level)\n local_context = self._conn is None\n if local_context:\n self.__enter__()\n try:\n assert self._conn is not None\n ps = PowerShell(self._conn)\n yield ps\n ps.begin_invoke()\n\n streams = [\n ps.output,\n ps.streams.debug,\n ps.streams.error,\n ps.streams.information,\n ps.streams.progress,\n ps.streams.verbose,\n ps.streams.warning,\n ]\n offsets = [0 for _ in streams]\n\n # We're using polling to make sure output and streams are\n # handled while the process is running.\n while ps.state == PSInvocationState.RUNNING:\n ps.poll_invoke(timeout=self._operation_timeout)\n\n for i, stream in enumerate(streams):\n offset = offsets[i]\n while len(stream) > offset:\n record = stream[offset]\n\n # Records received on the output stream during job\n # status polling are handled via an optional callback,\n # while the other streams are simply logged.\n if stream is ps.output:\n if self._on_output_callback is not None:\n self._on_output_callback(record)\n else:\n self._log_record(logger.log, record)\n offset += 1\n offsets[i] = offset\n\n # For good measure, we'll make sure the process has\n # stopped running in any case.\n ps.end_invoke()\n\n self.log.info(\"Invocation state: %s\", str(PSInvocationState(ps.state)))\n if ps.streams.error:\n raise AirflowException(\"Process had one or more errors\")\n finally:\n if local_context:\n self.__exit__(None, None, None)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 871, "n_words": 167, "vocab_size": 116, "complexity": 11, "nloc": 45, "token_counts": 264, "n_ast_nodes": 420, "n_identifiers": 43, "random_cut": "def invoke(self) -> Generator[PowerShell, None, 
None]:\n \n logger = copy(self.log)\n logger.setLevel(self._logging_level)\n local_context = self._conn is None\n if local_context:\n self.__enter__()\n try:\n assert self._conn is not None\n ps = PowerShell(self._conn)\n yield ps\n ps.begin_invoke()\n\n streams = [\n ps.output,\n ps.streams.debug,\n ps.streams.error,\n ps.streams.information,\n ps.streams.progress,\n ps.streams.verbose,\n ps.streams.warning,\n ]\n offsets = [0 for _ in streams]\n\n # We're using polling to make sure output and streams are\n # handled while the process is running.\n while ps.state == PSInvocationState.RUNNING:\n ps.poll_invoke(timeout=self._operation_timeout)\n\n ", "d_id": 7717, "documentation": { "docstring": "\n Context manager that yields a PowerShell object to which commands can be\n added. Upon exit, the commands will be invoked.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 42, "language": "en" } }, { "id": 320536, "commit_id": "97d6503fefc5737028637c39a2c1f33dd1e12904", "repo": "paperless-ngx", "path": "src/documents/tests/test_task_signals.py", "file_name": "test_task_signals.py", "fun_name": "util_call_before_task_publish_handler", "commit_message": "Switches task serialization over to pickle format", "code": "def util_call_before_task_publish_handler(self, headers_to_use, body_to_use):\n \n self.assertEqual(PaperlessTask.objects.all().count(), 0)\n\n before_task_publish_handler(headers=headers_to_use, body=body_to_use)\n\n self.assertEqual(PaperlessTask.objects.all().count(), 1)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 38, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 56, "n_ast_nodes": 90, "n_identifiers": 12, "random_cut": "def util_call_before_task_publish_handler(self, headers_to_use, body_to_use):\n \n self.assertEqual(PaperlessTask.objects.all().count(), 0)\n\n before_task_publish_handler(headers=headers_to_use, body=body_to_use)\n\n ", "d_id": 117210, "documentation": { "docstring": "\n Simple utility to call the pre-run handle and ensure it created a single task\n instance\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 71963, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_edit_handlers.py", "file_name": "test_edit_handlers.py", "fun_name": "test_form", "commit_message": "Reformat with black", "code": "def test_form(self):\n \n form = self.EventPageForm(instance=self.event_page)\n\n self.assertIn(\"comments\", form.formsets)\n\n comments_formset = form.formsets[\"comments\"]\n self.assertEqual(len(comments_formset.forms), 1)\n self.assertEqual(comments_formset.forms[0].user, self.commenting_user)\n\n replies_formset = comments_formset.forms[0].formsets[\"replies\"]\n self.assertEqual(len(replies_formset.forms), 2)\n self.assertEqual(replies_formset.forms[0].user, self.commenting_user)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 84, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 109, "n_ast_nodes": 174, "n_identifiers": 15, "random_cut": "def test_form(self):\n \n form = self.EventPageForm(instance=self.event_page)\n\n self.assertIn(\"comments\", form.formsets)\n\n comments_formset = form.formsets[\"comments\"]\n self.assertEqual(len(comments_formset.forms), 1)\n self.asse", "d_id": 15807, 
"documentation": { "docstring": "\n Check that the form has the comments/replies formsets, and that the\n user has been set on each CommentForm/CommentReplyForm subclass\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 41, "language": "en" } }, { "id": 42569, "commit_id": "3ca43e26efd7d5aa37b3cd79446258d8bfa79561", "repo": "nltk", "path": "nltk/corpus/reader/wordnet.py", "file_name": "wordnet.py", "fun_name": "_doc", "commit_message": "Fix wordnet's all_synsets() function (#3078)\n\n* Fix all_synsets() function\r\n\r\n* Add simple regression tests for #3077\r\n\r\n* Add suggestions by @tomaarsen\r\n\r\nCo-authored-by: Tom Aarsen ", "code": "def _doc(self, doc_type, default, lang=\"eng\"):\n \n corpus = self._wordnet_corpus_reader\n if lang not in corpus.langs():\n return None\n elif lang == \"eng\":\n return default\n else:\n corpus._load_lang_data(lang)\n of = corpus.ss2of(self)\n i = corpus.lg_attrs.index(doc_type)\n if of in corpus._lang_data[lang][i]:\n return corpus._lang_data[lang][i][of]\n else:\n return None\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 180, "n_words": 38, "vocab_size": 27, "complexity": 4, "nloc": 14, "token_counts": 94, "n_ast_nodes": 151, "n_identifiers": 15, "random_cut": "def _doc(self, doc_type, default, lang=\"eng\"):\n \n corpus = self._wordnet_corpus_reader\n if lang not in corpus.langs():\n return None\n elif lang == \"eng\":\n return default\n else:\n corpus._load_lang_data(lang)\n of = corpus.ss2of(self)\n i = corpus.lg_attrs.index(doc_type)\n if of in corpus._lang_data[lang][i]:\n return corpus._lang_data[lang][i][", "d_id": 7629, "documentation": { "docstring": "Helper method for Synset.definition and Synset.examples", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 163184, "commit_id": "521259299f7829da667ba39302ec77acedde9e5e", "repo": "pandas", "path": "pandas/core/arrays/categorical.py", "file_name": "categorical.py", "fun_name": "map", "commit_message": "DOC: Improve doc summaries in series.rst (#45237)", "code": "def map(self, mapper):\n \n new_categories = self.categories.map(mapper)\n try:\n return self.from_codes(\n self._codes.copy(), categories=new_categories, ordered=self.ordered\n )\n except ValueError:\n # NA values are represented in self._codes with -1\n # np.take causes NA values to take final element in new_categories\n if np.any(self._codes == -1):\n new_categories = new_categories.insert(len(new_categories), np.nan)\n return np.take(new_categories, self._codes)\n\n __eq__ = _cat_compare_op(operator.eq)\n __ne__ = _cat_compare_op(operator.ne)\n __lt__ = _cat_compare_op(operator.lt)\n __gt__ = _cat_compare_op(operator.gt)\n __le__ = _cat_compare_op(operator.le)\n __ge__ = _cat_compare_op(operator.ge)\n\n # -------------------------------------------------------------\n # Validators; ideally these can be de-duplicated\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 221, "n_words": 73, "vocab_size": 57, "complexity": 3, "nloc": 10, "token_counts": 85, "n_ast_nodes": 216, "n_identifiers": 30, "random_cut": "def map(self, mapper):\n \n new_categories = self.categories.map(mapper)\n try:\n return self.from_codes(\n self._codes.copy(), categories=new_categories, ordered=self.ordered\n )\n except ValueE", "d_id": 39392, "documentation": { "docstring": "\n Map categories using an input mapping or function.\n\n 
Maps the categories to new categories. If the mapping correspondence is\n one-to-one the result is a :class:`~pandas.Categorical` which has the\n same order property as the original, otherwise a :class:`~pandas.Index`\n is returned. NaN values are unaffected.\n\n If a `dict` or :class:`~pandas.Series` is used any unmapped category is\n mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`\n will be returned.\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n\n Returns\n -------\n pandas.Categorical or pandas.Index\n Mapped categorical.\n\n See Also\n --------\n CategoricalIndex.map : Apply a mapping correspondence on a\n :class:`~pandas.CategoricalIndex`.\n Index.map : Apply a mapping correspondence on an\n :class:`~pandas.Index`.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n Series.apply : Apply more complex functions on a\n :class:`~pandas.Series`.\n\n Examples\n --------\n >>> cat = pd.Categorical(['a', 'b', 'c'])\n >>> cat\n ['a', 'b', 'c']\n Categories (3, object): ['a', 'b', 'c']\n >>> cat.map(lambda x: x.upper())\n ['A', 'B', 'C']\n Categories (3, object): ['A', 'B', 'C']\n >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})\n ['first', 'second', 'third']\n Categories (3, object): ['first', 'second', 'third']\n\n If the mapping is one-to-one the ordering of the categories is\n preserved:\n\n >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)\n >>> cat\n ['a', 'b', 'c']\n Categories (3, object): ['a' < 'b' < 'c']\n >>> cat.map({'a': 3, 'b': 2, 'c': 1})\n [3, 2, 1]\n Categories (3, int64): [3 < 2 < 1]\n\n If the mapping is not one-to-one an :class:`~pandas.Index` is returned:\n\n >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})\n Index(['first', 'second', 'first'], dtype='object')\n\n If a `dict` is used, all unmapped categories are mapped to `NaN` and\n the result is an :class:`~pandas.Index`:\n\n >>> cat.map({'a': 'first', 'b': 'second'})\n Index(['first', 'second', nan], dtype='object')\n ", "n_words": 269, "vocab_size": 134, "n_whitespaces": 679, "language": "en" } }, { "id": 155204, "commit_id": "193505fdf0c984743397ba3df56262f30aee13a8", "repo": "modin", "path": "modin/experimental/core/execution/unidist/implementations/pandas_on_unidist/io/io.py", "file_name": "io.py", "fun_name": "to_pickle_distributed", "commit_message": "FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)\n\nSigned-off-by: Igoshev, Iaroslav ", "code": "def to_pickle_distributed(cls, qc, **kwargs):\n \n if not (\n isinstance(kwargs[\"filepath_or_buffer\"], str)\n and \"*\" in kwargs[\"filepath_or_buffer\"]\n ) or not isinstance(qc, PandasQueryCompiler):\n warnings.warn(\"Defaulting to Modin core implementation\")\n return PandasOnUnidistIO.to_pickle(qc, **kwargs)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 91, "n_words": 26, "vocab_size": 25, "complexity": 4, "nloc": 12, "token_counts": 93, "n_ast_nodes": 95, "n_identifiers": 11, "random_cut": "def to_pickle_distributed(cls, qc, **kwargs):\n \n if not (\n isinstance(kwargs[\"filepath_or_buffer\"], str)\n and \"*\" in kwargs[\"filepath_or_buffer\"]\n ) or not isinstance(qc, PandasQueryCompiler):\n warnings.warn(\"Defaulting to Modin core implementation\")\n return PandasO", "d_id": 36295, "documentation": { "docstring": "\n When `*` in the filename all partitions are written to their own separate file.\n\n The filenames is 
determined as follows:\n - if `*` in the filename then it will be replaced by the increasing sequence 0, 1, 2, …\n - if `*` is not the filename, then will be used default implementation.\n\n Examples #1: 4 partitions and input filename=\"partition*.pkl.gz\", then filenames will be:\n `partition0.pkl.gz`, `partition1.pkl.gz`, `partition2.pkl.gz`, `partition3.pkl.gz`.\n\n Parameters\n ----------\n qc : BaseQueryCompiler\n The query compiler of the Modin dataframe that we want\n to run ``to_pickle_distributed`` on.\n **kwargs : dict\n Parameters for ``pandas.to_pickle(**kwargs)``.\n ", "n_words": 92, "vocab_size": 70, "n_whitespaces": 203, "language": "en" } }, { "id": 205304, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/loader.py", "file_name": "loader.py", "fun_name": "project_state", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def project_state(self, nodes=None, at_end=True):\n \n return self.graph.make_state(\n nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 53, "n_identifiers": 8, "random_cut": "def project_state(self, nodes=None, at_end=True):\n \n return self.graph.make_state(\n nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps\n )\n", "d_id": 51080, "documentation": { "docstring": "\n Return a ProjectState object representing the most recent state\n that the loaded migrations represent.\n\n See graph.make_state() for the meaning of \"nodes\" and \"at_end\".\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 52, "language": "en" } }, { "id": 162744, "commit_id": "9120cdffe618c6c2ff16fe6a311b6a1367efdbc8", "repo": "AutoEq", "path": "research/neo_peq/legacy_frequency_response.py", "file_name": "legacy_frequency_response.py", "fun_name": "center", "commit_message": "Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. 
Bug fixes.", "code": "def center(self, frequency=1000):\n \n equal_energy_fr = self.__class__(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy())\n equal_energy_fr.interpolate()\n interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1)\n if type(frequency) in [list, np.ndarray] and len(frequency) > 1:\n # Use the average of the gain values between the given frequencies as the difference to be subtracted\n diff = np.mean(equal_energy_fr.raw[np.logical_and(\n equal_energy_fr.frequency >= frequency[0],\n equal_energy_fr.frequency <= frequency[1]\n )])\n else:\n if type(frequency) in [list, np.ndarray]:\n # List or array with only one element\n frequency = frequency[0]\n # Use the gain value at the given frequency as the difference to be subtracted\n diff = interpolator(np.log10(frequency))\n\n self.raw -= diff\n if len(self.smoothed):\n self.smoothed -= diff\n if len(self.error):\n self.error += diff\n if len(self.error_smoothed):\n self.error_smoothed += diff\n\n # Everything but raw, smoothed, errors and target is affected by centering, reset them\n self.reset(raw=False, smoothed=False, error=False, error_smoothed=False, target=False)\n\n return -diff\n", "url": "https://github.com/jaakkopasanen/AutoEq.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 375, "n_words": 125, "vocab_size": 87, "complexity": 7, "nloc": 22, "token_counts": 225, "n_ast_nodes": 353, "n_identifiers": 26, "random_cut": "def center(self, frequency=1000):\n \n equal_energy_fr = self.__class__(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy())\n equal_energy_fr.interpolate()\n interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1)\n if type(frequency) in [list, np.ndarray] and len(frequency) > 1:\n # Use the average of the gain values between the given frequencies as the difference to be subtracted\n diff = np.mean(equal_energy_fr.raw[np.logical_and(\n equal_energy_fr.frequency >= frequency[0],\n equal_energy_fr.frequency <= frequency[1]\n )])\n else:\n if type(frequency) in [list, np.ndarray]:\n # List or array with only one element\n frequency = frequency[0]\n # Use the gain value a", "d_id": 39282, "documentation": { "docstring": "Removed bias from frequency response.\n\n Args:\n frequency: Frequency which is set to 0 dB. 
If this is a list with two values then an average between the two\n frequencies is set to 0 dB.\n\n Returns:\n Gain shifted\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 102, "language": "en" } }, { "id": 206570, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/cache.py", "file_name": "cache.py", "fun_name": "_i18n_cache_key_suffix", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _i18n_cache_key_suffix(request, cache_key):\n \n if settings.USE_I18N:\n # first check if LocaleMiddleware or another middleware added\n # LANGUAGE_CODE to request, then fall back to the active language\n # which in turn can also fall back to settings.LANGUAGE_CODE\n cache_key += \".%s\" % getattr(request, \"LANGUAGE_CODE\", get_language())\n if settings.USE_TZ:\n cache_key += \".%s\" % get_current_timezone_name()\n return cache_key\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 98, "n_words": 51, "vocab_size": 38, "complexity": 3, "nloc": 6, "token_counts": 41, "n_ast_nodes": 76, "n_identifiers": 9, "random_cut": "def _i18n_cache_key_suffix(request, cache_key):\n \n if settings.USE_I18N:\n # first check if LocaleMiddleware or another middleware added\n # LANGUAGE_CODE to request, then fall back to the active lan", "d_id": 51569, "documentation": { "docstring": "If necessary, add the current locale or time zone to the cache key.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 260158, "commit_id": "8515b486810e844bc7f5f1a4fb2227405d46871e", "repo": "scikit-learn", "path": "sklearn/datasets/tests/test_arff_parser.py", "file_name": "test_arff_parser.py", "fun_name": "test_pandas_arff_parser_strip_double_quotes", "commit_message": "FIX make pandas and liac arff parser quoting behaviour closer (#23497)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. 
Fan \r\nCo-authored-by: Loïc Estève ", "code": "def test_pandas_arff_parser_strip_double_quotes(parser_func):\n \n pd = pytest.importorskip(\"pandas\")\n\n arff_file = BytesIO(\n textwrap.dedent(\n ", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "arff_file = BytesIO(\n textwrap.dedent(\n \"\"\"", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 37, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 54, "token_counts": 186, "n_ast_nodes": 39, "n_identifiers": 9, "random_cut": "def test_pandas_arff_parser_strip_double_quotes(parser_func):\n \n pd =", "d_id": 76098, "documentation": { "docstring": "Check that we properly strip double quotes from the data.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 20440, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/lexer.py", "file_name": "lexer.py", "fun_name": "get_tokens_unprocessed", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def get_tokens_unprocessed(self, text=None, context=None):\n \n tokendefs = self._tokens\n if not context:\n ctx = LexerContext(text, 0)\n statetokens = tokendefs['root']\n else:\n ctx = context\n statetokens = tokendefs[ctx.stack[-1]]\n text = ctx.text\n while 1:\n for rexmatch, action, new_state in statetokens:\n m = rexmatch(text, ctx.pos, ctx.end)\n if m:\n if action is not None:\n if type(action) is _TokenType:\n yield ctx.pos, action, m.group()\n ctx.pos = m.end()\n else:\n yield from action(self, m, ctx)\n if not new_state:\n # altered the state stack?\n statetokens = tokendefs[ctx.stack[-1]]\n # CAUTION: callback must set ctx.pos!\n if new_state is not None:\n # state transition\n if isinstance(new_state, tuple):\n for state in new_state:\n if state == '#pop':\n if len(ctx.stack) > 1:\n ctx.stack.pop()\n elif state == '#push':\n ctx.stack.append(ctx.stack[-1])\n else:\n ctx.stack.append(state)\n elif isinstance(new_state, int):\n # see RegexLexer for why this check is made\n if abs(new_state) >= len(ctx.stack):\n del ctx.state[1:]\n else:\n del ctx.stack[new_state:]\n elif new_state == '#push':\n ctx.stack.append(ctx.stack[-1])\n else:\n assert False, \"wrong state def: %r\" % new_state\n statetokens = tokendefs[ctx.stack[-1]]\n break\n else:\n try:\n if ctx.pos >= ctx.end:\n break\n if text[ctx.pos] == '\\n':\n # at EOL, reset state to \"root\"\n ctx.stack = ['root']\n statetokens = tokendefs['root']\n yield ctx.pos, Text, '\\n'\n ctx.pos += 1\n continue\n yield ctx.pos, Error, text[ctx.pos]\n ctx.pos += 1\n except IndexError:\n break\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 1512, "n_words": 193, "vocab_size": 108, "complexity": 20, "nloc": 56, "token_counts": 373, "n_ast_nodes": 609, "n_identifiers": 30, "random_cut": "def get_tokens_unprocessed(self, text=None, 
context=None):\n \n tokendefs = self._tokens\n if not context:\n ctx = LexerContext(text, 0)\n statetokens = tokendefs['root']\n else:\n ctx = context\n statetokens = tokendefs[ctx.stack[-1]]\n text = ctx.text\n while 1:\n for rexmatch, action, new_state in statetokens:\n m = rexmatch(text, ctx.pos, ctx.end)\n if m:\n if action is not None:\n if type(action) is _TokenType:\n yield ctx.pos, action, m.group()\n ctx.pos = m.end()\n else:\n yield from action(self, m, ctx)\n if not new_state:\n # altered the state stack?\n statetokens = tokendefs[ctx.stack[-1]]\n # CAUTION: callback must set ctx.pos!\n if new_state is not None:\n # state transition\n if isinstance(new_state, tuple):\n for state in new_state:\n if state == '#pop':\n if len(ctx.stack) > 1:\n ctx.stack.pop()\n elif state ", "d_id": 3369, "documentation": { "docstring": "\n Split ``text`` into (tokentype, text) pairs.\n If ``context`` is given, use this lexer context instead.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 135458, "commit_id": "30058267363b8de16b809c987bb1f7d7befad24d", "repo": "ray", "path": "rllib/core/rl_module/torch/tests/test_torch_marl_module.py", "file_name": "test_torch_marl_module.py", "fun_name": "get_policy_data_from_agent_data", "commit_message": "[RLlib] MARLModule, RLModule PR 4/N (N=4) (#29449)\n\nSigned-off-by: Kourosh Hakhamaneshi ", "code": "def get_policy_data_from_agent_data(agent_data, policy_map_fn):\n \n policy_data = {}\n for agent_id, data in agent_data.items():\n policy_id = policy_map_fn(agent_id)\n policy_data.setdefault(policy_id, {})\n policy_data[policy_id].setdefault(\"agent_id\", [])\n\n if data[\"obs\"].ndim == 1:\n policy_data[policy_id][\"agent_id\"].append(agent_id)\n else:\n policy_data[policy_id][\"agent_id\"] += [agent_id] * len(data[\"obs\"])\n\n for k, v in data.items():\n policy_data[policy_id].setdefault(k, [])\n if v.ndim == 1:\n v = v[None]\n policy_data[policy_id][k].append(v)\n\n for policy_id in policy_data:\n policy_data[policy_id] = {\n k: np.concatenate(v) if k != \"agent_id\" else v\n for k, v in policy_data[policy_id].items()\n }\n\n return policy_data\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 230, "n_words": 67, "vocab_size": 47, "complexity": 8, "nloc": 21, "token_counts": 182, "n_ast_nodes": 291, "n_identifiers": 16, "random_cut": "def get_policy_data_from_agent_data(agent_data, policy_map_fn):\n \n policy_data = {}\n for agent_id, data in agent_data.items():\n policy_id = policy_map_fn(agent_id)\n policy_data.setdefault(policy_id, {})\n policy_data[policy_id].setdefault(\"agent_id\", [])\n\n if data[\"obs\"].ndim == 1:\n policy_data[policy_id][\"agent_id\"].append(agent_id)\n else:\n policy_data[policy_id][\"agent_id\"] += [agent_id] * len(data[\"obs\"])\n\n for k, v in data.items():\n policy_data[policy_id].setdefault(k, [])\n if v.ndim == 1:\n v = v[None]\n policy_data[policy_id][k].append(v)\n\n for policy_id in policy_data:\n policy_data[policy_id] = {\n k: np.concatenate(v) if k != \"agent_id\" else v\n for k, v in policy_data[policy_id].items()\n }\n\n return policy_data\n\n", "d_id": 30625, "documentation": { "docstring": "Utility function to get policy data from agent data and policy map function.\n\n It also keeps track of agent_id for each row so that we can retreive the agent\n level information after the forward pass.\n\n Returns:\n dict of module_id to module data\n ", "n_words": 42, "vocab_size": 
35, "n_whitespaces": 61, "language": "en" } }, { "id": 183911, "commit_id": "c3dcc529b3aa0b168728b3315cfe973218d09685", "repo": "textual", "path": "src/textual/widgets/_data_table.py", "file_name": "_data_table.py", "fun_name": "_update_dimensions", "commit_message": "docstring name change", "code": "def _update_dimensions(self) -> None:\n \n total_width = sum(column.width for column in self.columns)\n self.virtual_size = Size(\n total_width,\n len(self._y_offsets) + (self.header_height if self.show_header else 0),\n )\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 73, "n_words": 23, "vocab_size": 22, "complexity": 3, "nloc": 7, "token_counts": 50, "n_ast_nodes": 78, "n_identifiers": 13, "random_cut": "def _update_dimensions(self) -> None:\n \n total_width = sum(column.width for column in self.columns)\n s", "d_id": 44390, "documentation": { "docstring": "Called to recalculate the virtual (scrollable) size.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 204168, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/messages/storage/base.py", "file_name": "base.py", "fun_name": "_store", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _store(self, messages, response, *args, **kwargs):\n \n raise NotImplementedError(\n \"subclasses of BaseStorage must provide a _store() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 21, "n_ast_nodes": 35, "n_identifiers": 7, "random_cut": "def _store(self, messages, response, *args, **kwargs):\n \n raise NotImplementedError(\n \"subclasses of BaseStorage mu", "d_id": 50666, "documentation": { "docstring": "\n Store a list of messages and return a list of any messages which could\n not be stored.\n\n One type of object must be able to be stored, ``Message``.\n\n **This method must be implemented by a subclass.**\n ", "n_words": 36, "vocab_size": 26, "n_whitespaces": 72, "language": "en" } }, { "id": 249309, "commit_id": "2281427175e4c93a30c39607fb4ac23c2a1f399f", "repo": "synapse", "path": "tests/rest/admin/test_event_reports.py", "file_name": "test_event_reports.py", "fun_name": "test_from_is_negative", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13488)\n\n* Use literals in place of `HTTPStatus` constants in tests\r\n\r\n* newsfile\r\n\r\n* code style\r\n\r\n* code style", "code": "def test_from_is_negative(self) -> None:\n \n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=-5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 86, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 11, "token_counts": 60, "n_ast_nodes": 97, "n_identifiers": 13, "random_cut": "def test_from_is_negative(self) -> None:\n \n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=-5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.jso", "d_id": 72812, "documentation": { "docstring": "\n Testing that a 
negative from parameter returns a 400\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 186112, "commit_id": "e8c87ced33ccac893121e3cc0fb1097b0d8da035", "repo": "textual", "path": "tests/test_binding_inheritance.py", "file_name": "test_binding_inheritance.py", "fun_name": "test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen", "commit_message": "Add test for focused widget, no inherit, empty BINDINGS\n\nTesting the overlap between #1343 and #1351.", "code": "async def test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen() -> None:\n ", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 8, "n_words": 5, "vocab_size": 5, "complexity": 2, "nloc": 5, "token_counts": 53, "n_ast_nodes": 16, "n_identifiers": 1, "random_cut": "async def test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen() -> None:\n ", "d_id": 45340, "documentation": { "docstring": "A focused child widget, that doesn't inherit bindings and sets BINDINGS empty, with movement bindings in the screen, should trigger screen actions.", "n_words": 22, "vocab_size": 21, "n_whitespaces": 21, "language": "en" } }, { "id": 218798, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/pgen2/parse.py", "file_name": "parse.py", "fun_name": "addtoken", "commit_message": "add python 3.10.4 for windows", "code": "def addtoken(self, type, value, context):\n \n # Map from token to label\n ilabel = self.classify(type, value, context)\n # Loop until the token is shifted; may raise exceptions\n while True:\n dfa, state, node = self.stack[-1]\n states, first = dfa\n arcs = states[state]\n # Look for a state with this label\n for i, newstate in arcs:\n t, v = self.grammar.labels[i]\n if ilabel == i:\n # Look it up in the list of labels\n assert t < 256\n # Shift a token; we're done with it\n self.shift(type, value, newstate, context)\n # Pop while we are in an accept-only state\n state = newstate\n while states[state] == [(0, state)]:\n self.pop()\n if not self.stack:\n # Done parsing!\n return True\n dfa, state, node = self.stack[-1]\n states, first = dfa\n # Done with this token\n return False\n elif t >= 256:\n # See if it's a symbol and if we're in its first set\n itsdfa = self.grammar.dfas[t]\n itsstates, itsfirst = itsdfa\n if ilabel in itsfirst:\n # Push a symbol\n self.push(t, self.grammar.dfas[t], newstate, context)\n break # To continue the outer while loop\n else:\n if (0, state) in arcs:\n # An accepting state, pop it and try something else\n self.pop()\n if not self.stack:\n # Done parsing, but another token is input\n raise ParseError(\"too much input\",\n type, value, context)\n else:\n # No success finding a transition\n raise ParseError(\"bad input\", type, value, context)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1039, "n_words": 220, "vocab_size": 123, "complexity": 10, "nloc": 33, "token_counts": 232, "n_ast_nodes": 365, "n_identifiers": 28, "random_cut": "def addtoken(self, type, value, context):\n \n # Map from token to label\n ilabel = self.classify(type, value, context)\n # Loop until the token is shifted; may raise exceptions\n while True:\n dfa, state, node = self.stack[-1]\n states, first = dfa\n arcs = states[state]\n # Look for a state with this 
label\n for i, newstate in arcs:\n t, v = self.grammar.labels[i]\n if ilabel == i:\n # Look it up in the list of labels\n assert t < 256\n # Shift a token; we're done with it\n self.shift(type, value, newstate, context)\n # Pop while we are in an accept-only state\n state = newstate\n while states[state] == [(0, state)]:\n self.pop()\n if not self.stack:\n # Done parsing!\n return True\n dfa, state, node = self.stack[-1]\n states, first = dfa\n # Done with this token\n return False\n elif t >= 256:\n # See if it's a symbol and if we're in its first set\n itsdfa = self.grammar.dfas[t]\n itsstates, itsfirst = itsdfa\n if ilabel in itsfirst:\n # Push a symbol\n self.push(t, self.grammar.dfas[t], newstate, context)\n break # To continue the outer while loop\n else:\n if (0, state) in arcs:\n # An accepting state, pop it and try something else\n self.pop()\n if not self.stack:\n # Done parsing, but another token is input\n raise ParseError(\"too much input\",\n type, value, context)\n else:\n # No success finding a transition\n raise ParseError(\"bad input\", type, value, context)\n", "d_id": 55486, "documentation": { "docstring": "Add a token; return True iff this is the end of the program.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 43771, "commit_id": "f2039b4c9e15b514661d4facbd710791fe0a2ef4", "repo": "airflow", "path": "airflow/settings.py", "file_name": "settings.py", "fun_name": "import_local_settings", "commit_message": "Speed up creation of DagRun for large DAGs (5k+ tasks) by 25-130% (#20722)\n\n* Speed up creation of DagRun for large DAGs (5k+ tasks) by 15-40%\r\n\r\nThis uses the \"bulk\" operation API of SQLAlchemy to get a big speed\r\nup. Due to the `task_instance_mutation_hook` we still need to keep\r\nactual TaskInstance objects around.\r\n\r\nFor postgresql we have enabled to \"batch operation helpers\"[1] which\r\nmakes it even faster. 
The default page sizes are chosen somewhat\r\nrandomly based on the SQLA docs.\r\n\r\nTo make these options configurable I have added (and used here and in\r\nKubeConfig) a new `getjson` option to AirflowConfigParser class.\r\n\r\nPostgresql is over 77% faster with bulk_save_objects:\r\n\r\nBefore:\r\n\r\n```\r\nnumber_of_tis=1 mean=0.004397215199423954 per=0.004397215199423954 times=[0.009390181003254838, 0.002814065999700688, 0.00284132499655243, 0.0036120269942330196, 0.0033284770033787936]\r\nnumber_of_tis=10 mean=0.008078816600027494 per=0.0008078816600027494 times=[0.011014281000825576, 0.008476420000079088, 0.00741832799394615, 0.006857775995740667, 0.006627278009545989]\r\nnumber_of_tis=50 mean=0.01927847799670417 per=0.00038556955993408336 times=[0.02556803499464877, 0.01935569499619305, 0.01662322599440813, 0.01840184700267855, 0.01644358699559234]\r\nnumber_of_tis=100 mean=0.03301511880126782 per=0.00033015118801267817 times=[0.04117956099798903, 0.030890661000739783, 0.03007458901265636, 0.03125198099587578, 0.03167880199907813]\r\nnumber_of_tis=500 mean=0.15320950179593637 per=0.0003064190035918727 times=[0.20054609200451523, 0.14052859699586406, 0.14509809199080337, 0.1365471329918364, 0.1433275949966628]\r\nnumber_of_tis=1000 mean=0.2929377429973101 per=0.0002929377429973101 times=[0.3517978919990128, 0.2807794280088274, 0.2806490379880415, 0.27710555399244186, 0.27435680299822707]\r\nnumber_of_tis=3000 mean=0.9935687056015012 per=0.00033118956853383374 times=[1.2047388390055858, 0.8248025969951414, 0.8685875020019012, 0.9017027500085533, 1.1680118399963249]\r\nnumber_of_tis=5000 mean=1.5349355740036117 per=0.00030698711480072236 times=[1.8663743910001358, 1.5182018500054255, 1.5446484510030132, 1.3932801040064078, 1.3521730740030762]\r\nnumber_of_tis=10000 mean=3.7448632712010292 per=0.0003744863271201029 times=[4.135914924001554, 3.4411147559876554, 3.526543836007477, 3.7195197630062466, 3.9012230770022143]\r\nnumber_of_tis=15000 mean=6.3099766838044165 per=0.00042066511225362775 times=[6.552250057997298, 6.1369703890086384, 6.8749958210100885, 6.067943914007628, 5.917723236998427]\r\nnumber_of_tis=20000 mean=8.317583500797628 per=0.00041587917503988143 times=[8.720249108009739, 8.0188543760014, 8.328030352990027, 8.398350054994808, 8.122433611992165]\r\n```\r\n\r\nWhen using bulk_save_objects:\r\n\r\n```\r\nnumber_of_tis=20000 mean=4.678154367001843 per=0.00023390771835009216 times=[4.465847548010061, 4.571855771995615, 4.749505186002352, 4.724330568002188, 4.8792327609990025]\r\n```\r\n\r\nMySQL is only 10-15% faster (and a lot noisier)\r\n\r\nBefore:\r\n\r\n```\r\nnumber_of_tis=1 mean=0.006164804595755413 per=0.006164804595755413 times=[0.013516580002033152, 0.00427598599344492, 0.004508020996581763, 0.004067091998877004, 0.004456343987840228]\r\nnumber_of_tis=10 mean=0.007822793803643435 per=0.0007822793803643434 times=[0.0081135170039488, 0.00719467100861948, 0.009007985994685441, 0.00758794900320936, 0.007209846007754095]\r\nnumber_of_tis=50 mean=0.020377356800599954 per=0.00040754713601199905 times=[0.02612382399092894, 0.018950315003166907, 0.019109474000288174, 0.018008680999628268, 0.019694490008987486]\r\nnumber_of_tis=100 mean=0.040682651600218375 per=0.00040682651600218374 times=[0.05449078499805182, 0.037430580996442586, 0.039291110006161034, 0.03625023599306587, 0.035950546007370576]\r\nnumber_of_tis=500 mean=0.18646696420037187 per=0.00037293392840074375 times=[0.24278165798750706, 0.17090376401029062, 0.1837275660072919, 0.16893767600413412, 
0.1659841569926357]\r\nnumber_of_tis=1000 mean=0.5903461098030676 per=0.0005903461098030675 times=[0.6001852740009781, 0.5642872750031529, 0.686630773008801, 0.5578094649972627, 0.5428177620051429]\r\nnumber_of_tis=3000 mean=1.9076304554007948 per=0.0006358768184669316 times=[2.042052763994434, 2.1137778090051142, 1.7461599689995637, 1.7260139089921722, 1.9101478260126896]\r\nnumber_of_tis=5000 mean=2.9185905692051164 per=0.0005837181138410233 times=[2.9221124830073677, 3.2889883980096783, 2.7569778940087417, 2.973596281008213, 2.651277789991582]\r\nnumber_of_tis=10000 mean=8.880191986600403 per=0.0008880191986600403 times=[7.3548113360011484, 9.13715232499817, 9.568511486999341, 8.80206210000324, 9.538422685000114]\r\nnumber_of_tis=15000 mean=15.426499317999696 per=0.0010284332878666464 times=[14.944712879005237, 15.38737604500784, 15.409629273999599, 15.852925243991194, 15.53785314799461]\r\nnumber_of_tis=20000 mean=20.579332908798825 per=0.0010289666454399414 times=[20.362008597003296, 19.878823954990366, 20.73281196100288, 20.837948996995692, 21.085071034001885]\r\n```\r\n\r\nAfter:\r\n\r\n```\r\nnumber_of_tis=20000 mean=18.36637533060275 per=0.0009183187665301375 times=[17.728908119010157, 18.62269214099797, 18.936747477011522, 17.74613195299753, 18.797396962996572]\r\n```\r\n\r\n[1]: https://docs.sqlalchemy.org/en/13/dialects/postgresql.html#psycopg2-batch-mode\r\n\r\n\r\n* Use bulk_insert_mappings for even more speed where possible.\r\n\r\nIt gives us an extra speed up over bulk_save_objects, but we can't\r\nuse it when the task_instance_mutation_hook does anything, as that hook\r\nneeds an actual object.\r\n\r\nSo _when_ we know that hook won't do anything we switch in to\r\ninsert_mappings mode.\r\n\r\nNew speeds (vs baseline, not vs bulk_save_objects) when using\r\nbulk_insert_mappings\r\n\r\nPostgreSQL now 130% faster:\r\n\r\n```\r\nnumber_of_tis=1 mean=0.028053103599813767 per=0.028053103599813767 times=[0.03762496300623752, 0.02637488600157667, 0.025065611000172794, 0.024561002996051684, 0.026639054995030165]\r\nnumber_of_tis=10 mean=0.02647183560184203 per=0.002647183560184203 times=[0.02698062499985099, 0.026417658998980187, 0.027347976007149555, 0.025797458001761697, 0.025815460001467727]\r\nnumber_of_tis=50 mean=0.03149963079486042 per=0.0006299926158972085 times=[0.03810671299288515, 0.03055680700344965, 0.029733988994848914, 0.03016914198815357, 0.02893150299496483]\r\nnumber_of_tis=100 mean=0.033998635396710594 per=0.0003399863539671059 times=[0.0351028829900315, 0.03299884400621522, 0.03358584298985079, 0.03295094799250364, 0.03535465900495183]\r\nnumber_of_tis=500 mean=0.07903424859978259 per=0.00015806849719956516 times=[0.08279920800123364, 0.08588568199775182, 0.07312070899934042, 0.07360191999759991, 0.07976372400298715]\r\nnumber_of_tis=1000 mean=0.12571056479937398 per=0.00012571056479937398 times=[0.12573593499837443, 0.12141938100103289, 0.12616568499652203, 0.12907471299695317, 0.12615711000398733]\r\nnumber_of_tis=3000 mean=0.36025245799683037 per=0.00012008415266561012 times=[0.36071603700111154, 0.3470657339930767, 0.3373015969991684, 0.3337128989951452, 0.42246602299564984]\r\nnumber_of_tis=5000 mean=0.6916533229988999 per=0.00013833066459977998 times=[0.9647149289958179, 0.6451378140045563, 0.5970188640058041, 0.5849326960014878, 0.6664623119868338]\r\nnumber_of_tis=10000 mean=2.071472014003666 per=0.00020714720140036663 times=[2.957865878008306, 1.9388906149979448, 1.766649461002089, 1.8647991580073722, 1.8291549580026185]\r\nnumber_of_tis=15000 
mean=2.866650845797267 per=0.00019111005638648446 times=[3.3783503199956613, 2.657773957995232, 2.707275656008278, 2.7875704979960574, 2.802283796991105]\r\nnumber_of_tis=20000 mean=3.5886989389982773 per=0.00017943494694991387 times=[3.969436354993377, 3.436962780993781, 3.9078941010084236, 3.6387251569976797, 2.9904763009981252]\r\n```\r\n\r\nMySQL is (only) 27% faster:\r\n\r\n```\r\nnumber_of_tis=1 mean=0.035956257799989545 per=0.035956257799989545 times=[0.03932315899874084, 0.03545605999534018, 0.03535486999317072, 0.034727805003058165, 0.03491939500963781]\r\nnumber_of_tis=10 mean=0.036957260797498746 per=0.0036957260797498745 times=[0.040442515004542656, 0.0379129799985094, 0.03494819799379911, 0.03562593398964964, 0.03585667700099293]\r\nnumber_of_tis=50 mean=0.04745422120031435 per=0.0009490844240062871 times=[0.06965546800347511, 0.04221734800375998, 0.04038520700123627, 0.040363031992455944, 0.04465005100064445]\r\nnumber_of_tis=100 mean=0.0528092162014218 per=0.000528092162014218 times=[0.06113427500531543, 0.04883724599494599, 0.05276876600692049, 0.047688748003565706, 0.05361704599636141]\r\nnumber_of_tis=500 mean=0.16223246100416872 per=0.0003244649220083374 times=[0.24469116200634744, 0.1407806619972689, 0.14792052800476085, 0.14703868801007047, 0.13073126500239596]\r\nnumber_of_tis=1000 mean=0.285728433605982 per=0.00028572843360598197 times=[0.3230128890136257, 0.27035739900020417, 0.3003890450054314, 0.2638379510026425, 0.2710448840080062]\r\nnumber_of_tis=3000 mean=1.1824120475997915 per=0.0003941373491999305 times=[1.3103130240051541, 1.286688863998279, 1.1455156929878285, 1.1072918410063721, 1.062250816001324]\r\nnumber_of_tis=5000 mean=1.9416745471942705 per=0.0003883349094388541 times=[2.3746965279860888, 1.9103765429899795, 2.0542518720030785, 1.7706374429981224, 1.598410349994083]\r\nnumber_of_tis=10000 mean=5.059874459402636 per=0.0005059874459402636 times=[5.431018351999228, 5.262124675995437, 5.174487816999317, 4.423381198008428, 5.008360254010768]\r\nnumber_of_tis=15000 mean=9.717965700797503 per=0.0006478643800531668 times=[7.884617075993447, 9.466949063993525, 10.005758297003922, 10.105231182998978, 11.127272883997648]\r\nnumber_of_tis=20000 mean=16.2008618004038 per=0.00081004309002019 times=[14.645835625007749, 16.304637463006657, 16.255490412993822, 16.830263861003914, 16.968081640006858]\r\n```", "code": "def import_local_settings():\n \n try:\n import airflow_local_settings\n\n if hasattr(airflow_local_settings, \"__all__\"):\n for i in airflow_local_settings.__all__:\n globals()[i] = getattr(airflow_local_settings, i)\n else:\n for k, v in airflow_local_settings.__dict__.items():\n if not k.startswith(\"__\"):\n globals()[k] = v\n\n # TODO: Remove once deprecated\n if \"policy\" in globals() and \"task_policy\" not in globals():\n warnings.warn(\n \"Using `policy` in airflow_local_settings.py is deprecated. 
\"\n \"Please rename your `policy` to `task_policy`.\",\n DeprecationWarning,\n stacklevel=2,\n )\n globals()[\"task_policy\"] = globals()[\"policy\"]\n del globals()[\"policy\"]\n\n if not hasattr(task_instance_mutation_hook, 'is_noop'):\n task_instance_mutation_hook.is_noop = False\n\n log.info(\"Loaded airflow_local_settings from %s .\", airflow_local_settings.__file__)\n except ModuleNotFoundError as e:\n if e.name == \"airflow_local_settings\":\n log.debug(\"No airflow_local_settings to import.\", exc_info=True)\n else:\n log.critical(\n \"Failed to import airflow_local_settings due to a transitive module not found error.\",\n exc_info=True,\n )\n raise\n except ImportError:\n log.critical(\"Failed to import airflow_local_settings.\", exc_info=True)\n raise\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 464, "n_words": 115, "vocab_size": 83, "complexity": 11, "nloc": 34, "token_counts": 191, "n_ast_nodes": 336, "n_identifiers": 28, "random_cut": "def import_local_settings():\n \n try:\n import airflow_local_settings\n\n if hasattr(airflow_local_settings, \"__all__\"):\n for i in airflow_local_settings.__all__:\n globals()[i] = getattr(airflow_local_settings, i)\n else:\n for k, v in airflow_local_settings.__dict__.items():\n if not k.startswith(\"__\"):\n globals()[k] = v\n\n # TODO: Remove once deprecated\n if \"policy\" in globals() and \"task_policy\" not in globals():\n warnings.warn(\n \"Using `policy` in airflow_local_settings.py is deprecated. \"\n \"Please rename your `policy` to `task_policy`.\",\n DeprecationWarning,\n stacklevel=2,\n )\n globals()[\"task_policy\"] = globals()[\"policy\"]\n del globals()[\"policy\"]\n\n if not hasattr(task_instance_mutation_hook, 'is_noop'):\n task_instance_mutation_hook.is_noop = False\n\n log.info(\"Loaded airflow_local_settings from %s .\", airflow_local_settings.__file__)\n except ModuleNotFoundError as e:\n if e.name == \"airflow_local_settings\":\n log.debug(\"No airflow_local_settings to import.\", exc_info=True)\n else:\n log.critical(\n \"Failed to import airflow_local_settings due to a trans", "d_id": 8055, "documentation": { "docstring": "Import airflow_local_settings.py files to allow overriding any configs in settings.py file", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 195857, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/functions/elementary/complexes.py", "file_name": "complexes.py", "fun_name": "unpolarify", "commit_message": "Improved documentation formatting", "code": "def unpolarify(eq, subs=None, exponents_only=False):\n \n if isinstance(eq, bool):\n return eq\n\n eq = sympify(eq)\n if subs is not None:\n return unpolarify(eq.subs(subs))\n changed = True\n pause = False\n if exponents_only:\n pause = True\n while changed:\n changed = False\n res = _unpolarify(eq, exponents_only, pause)\n if res != eq:\n changed = True\n eq = res\n if isinstance(res, bool):\n return res\n # Finally, replacing Exp(0) by 1 is always correct.\n # So is polar_lift(0) -> 0.\n return res.subs({exp_polar(0): 1, polar_lift(0): 0})\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 190, "n_words": 75, "vocab_size": 46, "complexity": 7, "nloc": 19, "token_counts": 116, "n_ast_nodes": 184, "n_identifiers": 13, "random_cut": "def unpolarify(eq, subs=None, 
exponents_only=False):\n \n if isinstance(eq, bool):\n return eq\n\n eq = sympify(eq)\n if subs is not None:\n return unpolarify(eq.subs(subs))\n changed = True\n pause = False\n if exponents_only:\n pause = True\n while changed:\n changed = False\n res = _unpolarify(eq, exponents_only, pause)\n if res != eq:\n changed = True\n eq = res\n if isinstance(res, bool):\n return res\n # Finally, replacing Exp(0) by 1 is always correct.\n # So is polar_lift(0) -> 0.\n return res.subs({exp_pola", "d_id": 47444, "documentation": { "docstring": "\n If `p` denotes the projection from the Riemann surface of the logarithm to\n the complex line, return a simplified version `eq'` of `eq` such that\n `p(eq') = p(eq)`.\n Also apply the substitution subs in the end. (This is a convenience, since\n ``unpolarify``, in a certain sense, undoes :func:`polarify`.)\n\n Examples\n ========\n\n >>> from sympy import unpolarify, polar_lift, sin, I\n >>> unpolarify(polar_lift(I + 2))\n 2 + I\n >>> unpolarify(sin(polar_lift(I + 7)))\n sin(7 + I)\n ", "n_words": 72, "vocab_size": 56, "n_whitespaces": 112, "language": "en" } }, { "id": 101766, "commit_id": "765e385177bda9b9e99951492ef33b34b4e4773e", "repo": "faceswap", "path": "plugins/extract/_base.py", "file_name": "_base.py", "fun_name": "check_and_raise_error", "commit_message": "Extract: Typing and standardization", "code": "def check_and_raise_error(self) -> None:\n \n for thread in self._threads:\n thread.check_and_raise_error()\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 34, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 7, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 4, "random_cut": "def check_and_raise_error(self) -> None:\n \n for thread in self._th", "d_id": 21170, "documentation": { "docstring": " Check all threads for errors\n\n Exposed for :mod:`~plugins.extract.pipeline` to check plugin's threads for errors\n ", "n_words": 14, "vocab_size": 10, "n_whitespaces": 29, "language": "en" } }, { "id": 293721, "commit_id": "bc862e97ed68cce8c437327651f85892787e755e", "repo": "core", "path": "homeassistant/components/recorder/pool.py", "file_name": "pool.py", "fun_name": "recorder_or_dbworker", "commit_message": "Use a dedicated executor pool for database operations (#68105)\n\nCo-authored-by: Erik Montnemery \r\nCo-authored-by: Franck Nijhof ", "code": "def recorder_or_dbworker(self) -> bool:\n \n thread_name = threading.current_thread().name\n return bool(\n thread_name == \"Recorder\" or thread_name.startswith(DB_WORKER_PREFIX)\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 9, "random_cut": "def recorder_or_dbworker(self) -> bool:\n \n thread_name = threading.current_thread().name\n return bool(\n thread_name == \"Recorder\" or thread_name.startswith(DB_WORKER_PREFIX)\n )\n", "d_id": 92777, "documentation": { "docstring": "Check if the thread is a recorder or dbworker thread.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 335973, "commit_id": "87060e6a9c7754b648e621175b4d73161e82906e", "repo": "diffusers", "path": "scripts/convert_ldm_original_checkpoint_to_diffusers.py", "file_name": "convert_ldm_original_checkpoint_to_diffusers.py", "fun_name": "renew_resnet_paths", 
"commit_message": "LDM conversion script (#92)\n\nConversion script\r\n\r\nCo-authored-by: Patrick von Platen ", "code": "def renew_resnet_paths(old_list, n_shave_prefix_segments=0):\n \n mapping = []\n for old_item in old_list:\n new_item = old_item.replace('in_layers.0', 'norm1')\n new_item = new_item.replace('in_layers.2', 'conv1')\n\n new_item = new_item.replace('out_layers.0', 'norm2')\n new_item = new_item.replace('out_layers.3', 'conv2')\n\n new_item = new_item.replace('emb_layers.1', 'time_emb_proj')\n new_item = new_item.replace('skip_connection', 'conv_shortcut')\n\n new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)\n\n mapping.append({'old': old_item, 'new': new_item})\n\n return mapping\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 112, "n_words": 44, "vocab_size": 30, "complexity": 2, "nloc": 12, "token_counts": 105, "n_ast_nodes": 189, "n_identifiers": 9, "random_cut": "def renew_resnet_paths(old_list, n_shave_prefix_segments=0):\n \n mapping = []\n for old_item in old_list:\n new_item = old_item.replace('in_layers.0', 'norm1')\n ", "d_id": 120843, "documentation": { "docstring": "\n Updates paths inside resnets to the new naming scheme (local renaming)\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 278998, "commit_id": "3613c3defc39c236fb1592c4f7ba1a9cc887343a", "repo": "keras", "path": "keras/utils/metrics_utils.py", "file_name": "metrics_utils.py", "fun_name": "_assert_splits_match", "commit_message": "Remove pylint comments.\n\nPiperOrigin-RevId: 452353044", "code": "def _assert_splits_match(nested_splits_lists):\n \n error_msg = (\n \"Inputs must have identical ragged splits. \"\n f\"Input received: {nested_splits_lists}\"\n )\n for splits_list in nested_splits_lists:\n if len(splits_list) != len(nested_splits_lists[0]):\n raise ValueError(error_msg)\n return [\n tf.debugging.assert_equal(s1, s2, message=error_msg)\n for splits_list in nested_splits_lists[1:]\n for (s1, s2) in zip(nested_splits_lists[0], splits_list)\n ]\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 113, "n_words": 42, "vocab_size": 37, "complexity": 5, "nloc": 13, "token_counts": 78, "n_ast_nodes": 124, "n_identifiers": 13, "random_cut": "def _assert_splits_match(nested_splits_lists):\n \n error_msg = (\n \"Inputs must have identical ragged splits. 
\"\n f\"Input received: {nested_splits_lists}\"\n )\n for splits_list in nested_splits_lists:\n if len(splits_list) != len(nested_splits_lists[0]):\n rais", "d_id": 82808, "documentation": { "docstring": "Checks that the given splits lists are identical.\n\n Performs static tests to ensure that the given splits lists are identical,\n and returns a list of control dependency op tensors that check that they are\n fully identical.\n\n Args:\n nested_splits_lists: A list of nested_splits_lists, where each split_list\n is a list of `splits` tensors from a `RaggedTensor`, ordered from\n outermost ragged dimension to innermost ragged dimension.\n\n Returns:\n A list of control dependency op tensors.\n Raises:\n ValueError: If the splits are not identical.\n ", "n_words": 79, "vocab_size": 49, "n_whitespaces": 129, "language": "en" } }, { "id": 101898, "commit_id": "dab823a3eb7a5257cb1e0818ee10ed234d3de97f", "repo": "faceswap", "path": "lib/gui/display_command.py", "file_name": "display_command.py", "fun_name": "_iteration_limit_callback", "commit_message": "Typing - lib.gui.display_command", "code": "def _iteration_limit_callback(self, *args) -> None:\n \n try:\n limit = self.vars[\"display_iterations\"].get()\n except tk.TclError:\n # Don't update when there is no value in the variable\n return\n logger.debug(\"Updating graph iteration limit: (new_value: %s, args: %s)\",\n limit, args)\n for graph in self.subnotebook.children.values():\n graph.calcs.set_iterations_limit(limit)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 137, "n_words": 38, "vocab_size": 36, "complexity": 3, "nloc": 11, "token_counts": 62, "n_ast_nodes": 105, "n_identifiers": 16, "random_cut": "def _iteration_limit_callback(self, *args) -> None:\n \n try:\n limit = self.vars[\"display_iterations\"].get()\n except tk.TclError:\n ", "d_id": 21280, "documentation": { "docstring": " Limit the amount of data displayed in the live graph on a iteration slider\n variable change. 
", "n_words": 16, "vocab_size": 15, "n_whitespaces": 24, "language": "en" } }, { "id": 47512, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_queued_dagruns_stops_creating_when_max_active_is_reached", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_queued_dagruns_stops_creating_when_max_active_is_reached(self, dag_maker):\n \n with dag_maker(max_active_runs=10) as dag:\n EmptyOperator(task_id='mytask')\n\n session = settings.Session()\n self.scheduler_job = SchedulerJob(subdir=os.devnull)\n self.scheduler_job.executor = MockExecutor()\n self.scheduler_job.processor_agent = mock.MagicMock()\n\n self.scheduler_job.dagbag = dag_maker.dagbag\n\n session = settings.Session()\n orm_dag = session.query(DagModel).get(dag.dag_id)\n assert orm_dag is not None\n for _ in range(20):\n self.scheduler_job._create_dag_runs([orm_dag], session)\n drs = session.query(DagRun).all()\n assert len(drs) == 10\n\n for dr in drs:\n dr.state = State.RUNNING\n session.merge(dr)\n session.commit()\n assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10\n for _ in range(20):\n self.scheduler_job._create_dag_runs([orm_dag], session)\n assert session.query(DagRun).count() == 10\n assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10\n assert session.query(DagRun.state).filter(DagRun.state == State.QUEUED).count() == 0\n assert orm_dag.next_dagrun_create_after is None\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 290, "n_words": 88, "vocab_size": 48, "complexity": 4, "nloc": 26, "token_counts": 278, "n_ast_nodes": 448, "n_identifiers": 43, "random_cut": "def test_queued_dagruns_stops_creating_when_max_active_is_reached(self, dag_maker):\n \n with dag_maker(max_active_runs=10) as dag:\n EmptyOperator(task_id='mytask')\n\n session = settings.Session()\n self.scheduler_job = SchedulerJob(subdir=os.devnull)\n self.scheduler_job.executor = MockExecutor()\n self.scheduler_job.processor_agent = mock.MagicMock()\n\n self.scheduler_job.dagbag = dag_maker.dagbag\n\n session = settings.Session()\n orm_dag = session.query(DagModel).get(dag.dag_id)\n assert orm_dag is no", "d_id": 9138, "documentation": { "docstring": "This tests that queued dagruns stops creating once max_active_runs is reached", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 183781, "commit_id": "bfb962bacf274373e5706090cd854b6aa0857270", "repo": "textual", "path": "tests/test_xterm_parser.py", "file_name": "test_xterm_parser.py", "fun_name": "test_escape_sequence_resulting_in_multiple_keypresses", "commit_message": "Backtracking unknown escape sequences, various tests for XTermParser", "code": "def test_escape_sequence_resulting_in_multiple_keypresses(parser):\n \n events = list(parser.feed(\"\\x1b[2;4~\"))\n assert len(events) == 2\n assert events[0].key == \"escape\"\n assert events[1].key == \"shift+insert\"\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 75, "n_identifiers": 7, "random_cut": "def 
test_escape_sequence_resulting_in_multiple_keypresses(parser):\n \n events = list(parser.feed(\"\\x1b[2;4~\"))\n assert len(events) == 2\n assert events[0].key == \"escape\"\n ", "d_id": 44334, "documentation": { "docstring": "Some sequences are interpreted as more than 1 keypress", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 205271, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/autodetector.py", "file_name": "autodetector.py", "fun_name": "generate_altered_options", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def generate_altered_options(self):\n \n models_to_check = self.kept_model_keys.union(\n self.kept_proxy_keys,\n self.kept_unmanaged_keys,\n # unmanaged converted to managed\n self.old_unmanaged_keys & self.new_model_keys,\n # managed converted to unmanaged\n self.old_model_keys & self.new_unmanaged_keys,\n )\n\n for app_label, model_name in sorted(models_to_check):\n old_model_name = self.renamed_models.get(\n (app_label, model_name), model_name\n )\n old_model_state = self.from_state.models[app_label, old_model_name]\n new_model_state = self.to_state.models[app_label, model_name]\n old_options = {\n key: value\n for key, value in old_model_state.options.items()\n if key in AlterModelOptions.ALTER_OPTION_KEYS\n }\n new_options = {\n key: value\n for key, value in new_model_state.options.items()\n if key in AlterModelOptions.ALTER_OPTION_KEYS\n }\n if old_options != new_options:\n self.add_operation(\n app_label,\n operations.AlterModelOptions(\n name=model_name,\n options=new_options,\n ),\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 516, "n_words": 85, "vocab_size": 52, "complexity": 7, "nloc": 31, "token_counts": 165, "n_ast_nodes": 248, "n_identifiers": 33, "random_cut": "def generate_altered_options(self):\n \n models_to_check = self.kept_model_keys.union(\n self.kept_proxy_keys,\n self.kept_unmanaged_keys,\n # unmanaged converted to managed\n self.old_unmanaged_keys & self.new_model_keys,\n # managed converted to unmanaged\n self.old_model_keys & self.new_unmanaged_keys,\n )\n\n for app_label, model_name in sorted(models_to_check):\n old_model_name = self.renamed_models.get(\n (app_label, model_name), model_name\n )\n old_model_state = self.from_state.models[app_label, old_model_name]\n new_model_state = self.to_state.models[app_label, model_name]\n old_options = {\n key: value\n for key, value in old_model_state.options.items()\n if key in AlterModelOptions.ALTER_OPTION_KEYS\n }\n new_options = {\n key: value\n for key, value in new_model_state.options.items()\n if key in AlterModelOptions.ALTER_OPTION_KEYS\n }\n if old_options != new_options:\n self.add_operation(\n app_label,\n operations.AlterModelOptions(\n name=model_name,\n options=new_options,\n ),\n ", "d_id": 51057, "documentation": { "docstring": "\n Work out if any non-schema-affecting options have changed and make an\n operation to represent them in state changes (in case Python code in\n migrations needs them).\n ", "n_words": 26, "vocab_size": 25, "n_whitespaces": 55, "language": "en" } }, { "id": 203341, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/checks.py", "file_name": "checks.py", "fun_name": "_check_readonly_fields", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _check_readonly_fields(self, 
obj):\n \n\n if obj.readonly_fields == ():\n return []\n elif not isinstance(obj.readonly_fields, (list, tuple)):\n return must_be(\n \"a list or tuple\", option=\"readonly_fields\", obj=obj, id=\"admin.E034\"\n )\n else:\n return list(\n chain.from_iterable(\n self._check_readonly_fields_item(\n obj, field_name, \"readonly_fields[%d]\" % index\n )\n for index, field_name in enumerate(obj.readonly_fields)\n )\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 250, "n_words": 42, "vocab_size": 37, "complexity": 4, "nloc": 16, "token_counts": 85, "n_ast_nodes": 137, "n_identifiers": 16, "random_cut": "def _check_readonly_fields(self, obj):\n \n\n if obj.readonly_fields == ():\n return []\n elif not isinstance(obj.readonly_fields, (list, tuple)):\n return must_be(\n \"a list o", "d_id": 50315, "documentation": { "docstring": "Check that readonly_fields refers to proper attribute or field.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 71268, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/templatetags/wagtailadmin_tags.py", "file_name": "wagtailadmin_tags.py", "fun_name": "querystring", "commit_message": "Reformat with black", "code": "def querystring(context, **kwargs):\n \n request = context[\"request\"]\n querydict = request.GET.copy()\n # Can't do querydict.update(kwargs), because QueryDict.update() appends to\n # the list of values, instead of replacing the values.\n for key, value in kwargs.items():\n if value is None:\n # Remove the key if the value is None\n querydict.pop(key, None)\n else:\n # Set the key otherwise\n querydict[key] = str(value)\n\n return \"?\" + querydict.urlencode()\n\n\n@register.simple_tag(takes_context=True)", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "@register.simple_tag(takes_context=True)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 139, "n_words": 61, "vocab_size": 46, "complexity": 3, "nloc": 9, "token_counts": 67, "n_ast_nodes": 132, "n_identifiers": 16, "random_cut": "def querystring(context, **kwargs):\n \n request = context[\"request\"]\n querydict = request.GET.copy()\n # Can't do querydict.update(kwargs), because QueryDict.update() appends to\n # the list of values, instead of replacing the values.\n for key, value in kwar", "d_id": 15652, "documentation": { "docstring": "\n Print out the current querystring. 
Any keyword arguments to this template\n tag will be added to the querystring before it is printed out.\n\n \n\n Will result in something like:\n\n \n ", "n_words": 35, "vocab_size": 31, "n_whitespaces": 62, "language": "en" } }, { "id": 159111, "commit_id": "36eb9c9a5fcca2160e54a6cde5076c93db5bd70b", "repo": "rasa", "path": "rasa/graph_components/validators/finetuning_validator.py", "file_name": "finetuning_validator.py", "fun_name": "_get_fingerprint_of_schema_without_irrelevant_keys", "commit_message": "Update dependencies in 3.0 to align with rasa-sdk (#10667)\n\n* align dependencies\r\n* use black 21.7b0\r\n* apply black and docstring reformatting\r\n* add changelog", "code": "def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text:\n \n graph_schema = self._execution_context.graph_schema\n schema_as_dict = graph_schema.as_dict()\n for node_name, node_dict in schema_as_dict[\"nodes\"].items():\n config_copy = copy.deepcopy(node_dict[\"config\"])\n config_copy.pop(EPOCHS, None)\n # ignore default values since they're filled in anyway later and can\n # end up in configs (or not) in mysterious ways\n defaults = graph_schema.nodes[node_name].uses.get_default_config()\n for key, default_value in defaults.items():\n if key in config_copy and config_copy[key] == default_value:\n config_copy.pop(key)\n node_dict[\"config\"] = config_copy\n node_dict.pop(\"eager\")\n node_dict.pop(\"constructor_name\")\n return rasa.shared.utils.io.deep_container_fingerprint(schema_as_dict)\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 234, "n_words": 66, "vocab_size": 52, "complexity": 5, "nloc": 23, "token_counts": 129, "n_ast_nodes": 217, "n_identifiers": 26, "random_cut": "def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text:\n \n graph_schema = self._execution_context.graph_schema\n schema_as_dict = graph_schema.as_dict()\n for node_name, node_dict in schema_as_dict[\"nodes\"].items():\n config_copy = copy.deepcopy(node_dict[\"config\"])\n config_copy.pop(EPOCHS, None)\n # ignore default values since they're filled in anyway later and can\n # end up in configs (or not) in mysterious ways\n defaults = graph_schema.nodes[node_name].uses.get_default_config()\n for key, default_value in defaults.items():\n if key in config_copy and config_copy[key] == default_value:\n config_copy.pop(key)\n node_dict[\"config\"] = config_copy\n node_dict.pop(\"eager\")\n node_dict.pop(\"constructor_name\")\n return rasa.shared.utils.io.deep_container_fingerprint(schema_as_dict)\n", "d_id": 38133, "documentation": { "docstring": "Returns a fingerprint of the given schema with certain items removed.\n\n These items include specifications that do not influence actual training\n results such as \"eager\" mode. 
The only configuration (in your config) that is\n allowed to change is the number of `epochs`.\n\n Returns:\n fingerprint\n ", "n_words": 44, "vocab_size": 38, "n_whitespaces": 90, "language": "en" } }, { "id": 263829, "commit_id": "684bfac8adcf254fec5777f212c13eb62181f900", "repo": "pyinstaller", "path": "PyInstaller/utils/hooks/gi.py", "file_name": "gi.py", "fun_name": "get_gi_typelibs", "commit_message": "hooks: refactor GObject introspection (gi) hooks\n\nThe modules imported from gi.repository are marked as runtime\nmodules by their corresponding pre-safe-import-module hooks.\nTherefore, their standard hooks are always loaded and executed,\nregardless of whether the modue is actually importable or not.\n\nIn PyInstaller v5, this behavior triggers errors in hooks for\nGI modules that are not importable, because the new `isolated`\nframework propagates the errors instead of swallowing them.\nWhile these errors could be caught and demoted to warnings\nto match the old behavior, it would be better hooks checked\nwhether module is importable before doing any processing\nat all.\n\nTo that end, we introduce new class, `GiModuleInfo` that,\nas part of its initialization, allows us to:\n - perform availability check\n - obtain data previously returned by `get_gi_typelibs`\n - obtain data previously returned by `get_gi_libdir`\nusing a single isolated import attempt (instead of one\nbeing performed in each of those steps).\n\nIn addition, if passed `hook_api` as an optional argument,\nthe `GiModuleInfo` can use hook configuration API to override\nthe GI module version to be collected (which allows the\nstandard use pattern to be removed from the hook itself).\n\nThe old `get_gi_typelibs` and `get_gi_libdir` functions\nnow internally use `GiModuleInfo` to provide backward\ncompatible behavior to (potential) exetnal user.\n\nAll `gi` hooks are ported to the `GiModuleInfo` and now\nbecome no-op if the module is not available.\n\nIn addition, hooks are cleaned up/refactored so that all\nprocessing is performed either in the loading stage (\"simple\"\nhooks that do not require access to hook configuration API)\nor in the `hook()` function (hooks that require access to\nhook configuration API), but not in the mixture of the two.", "code": "def get_gi_typelibs(module, version):\n \n module_info = GiModuleInfo(module, version)\n return module_info.collect_typelib_data()\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 37, "n_identifiers": 6, "random_cut": "def get_gi_typelibs(module, version):\n \n module_info = GiModuleInfo(module, version)\n return module_info.collect_", "d_id": 77454, "documentation": { "docstring": "\n Return a tuple of (binaries, datas, hiddenimports) to be used by PyGObject related hooks. 
Searches for and adds\n dependencies recursively.\n\n :param module: GI module name, as passed to 'gi.require_version()'\n :param version: GI module version, as passed to 'gi.require_version()'\n ", "n_words": 38, "vocab_size": 30, "n_whitespaces": 54, "language": "en" } }, { "id": 43002, "commit_id": "cc35fcaf89eeff3d89e18088c2e68f01f8baad56", "repo": "airflow", "path": "airflow/www/security.py", "file_name": "security.py", "fun_name": "_sync_dag_view_permissions", "commit_message": "Fix permission issue for dag that has dot in name (#23510)\n\nHow we determine if a DAG is a subdag in airflow.security.permissions.resource_name_for_dag is not right.\r\nIf a dag_id contains a dot, the permission is not recorded correctly.\r\n\r\nThe current solution makes a query every time we check for permission for dags that has a dot in the name. Not that I like it but I think it's better than other options I considered such as changing how we name dags for subdag. That's not\r\ngood in UX. Another option I considered was making a query when parsing, that's not good and it's avoided\r\nby passing root_dag to resource_name_for_dag\r\n\r\nCo-authored-by: Ash Berlin-Taylor \r\nCo-authored-by: Tzu-ping Chung ", "code": "def _sync_dag_view_permissions(self, dag_id, access_control):\n \n\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 7, "nloc": 26, "token_counts": 116, "n_ast_nodes": 30, "n_identifiers": 7, "random_cut": "def _sync_dag_view_permissions(self, dag_id, access_control):\n \n\n dag", "d_id": 7785, "documentation": { "docstring": "\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. 
{'can_read'})\n ", "n_words": 42, "vocab_size": 33, "n_whitespaces": 82, "language": "en" } }, { "id": 172199, "commit_id": "07b363ea8eee184df30b54bfae9acd04511e1cda", "repo": "pandas", "path": "pandas/tests/util/test_assert_series_equal.py", "file_name": "test_assert_series_equal.py", "fun_name": "test_series_equal_datetime_values_mismatch", "commit_message": "ENH: Include column for ea comparison in asserters (#50323)\n\n* ENH: Include column for ea comparison in asserters\r\n\r\n* Add gh ref\r\n\r\n* Fix test\r\n\r\n* Add gh ref\r\n\r\n* Split tests", "code": "def test_series_equal_datetime_values_mismatch(rtol):\n msg = \n\n s1 = Series(pd.date_range(\"2018-01-01\", periods=3, freq=\"D\"))\n s2 = Series(pd.date_range(\"2019-02-02\", periods=3, freq=\"D\"))\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s1, s2, rtol=rtol)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 39, "n_words": 20, "vocab_size": 16, "complexity": 1, "nloc": 11, "token_counts": 70, "n_ast_nodes": 131, "n_identifiers": 16, "random_cut": "def test_series_equal_datetime_values_mismatch(rtol):\n msg = \n\n s1 = Series(pd.date_range(\"2018-01-01\", periods=3, freq=\"D\"))\n s2 = Series(pd.date_range(\"2019-02-02\", periods=3, freq=\"D\"))\n\n with pytest.raises(AssertionError, match=msg):\n tm.a", "d_id": 40788, "documentation": { "docstring": "Series are different\n\nSeries values are different \\\\(100.0 %\\\\)\n\\\\[index\\\\]: \\\\[0, 1, 2\\\\]\n\\\\[left\\\\]: \\\\[1514764800000000000, 1514851200000000000, 1514937600000000000\\\\]\n\\\\[right\\\\]: \\\\[1549065600000000000, 1549152000000000000, 1549238400000000000\\\\]", "n_words": 21, "vocab_size": 18, "n_whitespaces": 17, "language": "en" } }, { "id": 216038, "commit_id": "b856d3225ef1003cbe94499dc8bd82efffabb661", "repo": "salt", "path": "tests/pytests/functional/pillar/test_gpg.py", "file_name": "test_gpg.py", "fun_name": "test_decrypt_pillar_invalid_renderer", "commit_message": "Add tests for gpg decryption failure option\n\nTest that:\n1. Pillar registers an error when `gpg_decrypt_must_succeed` is `True` and decryption fails\n2. The GPG renderer fails silently when `gpg_decrypt_must_succeed` is `False`\n\nAlso mock `__opts__[\"gpg_decrypt_must_succeed\"]` for gpg renderer unit pytests.", "code": "def test_decrypt_pillar_invalid_renderer(salt_master, grains, pillar_homedir):\n \n opts = salt_master.config.copy()\n opts[\"decrypt_pillar\"] = [{\"secrets:vault\": \"gpg\"}]\n opts[\"decrypt_pillar_default\"] = \"foo\"\n opts[\"decrypt_pillar_renderers\"] = [\"foo\", \"bar\"]\n pillar_obj = salt.pillar.Pillar(opts, grains, \"test\", \"base\")\n ret = pillar_obj.compile_pillar()\n expected = copy.deepcopy(GPG_PILLAR_ENCRYPTED)\n expected[\"_errors\"] = [\n \"Failed to decrypt pillar key 'secrets:vault': 'gpg' is not a valid decryption\"\n \" renderer. 
Valid choices are: foo, bar\"\n ]\n assert ret[\"_errors\"] == expected[\"_errors\"]\n assert ret[\"secrets\"][\"vault\"][\"foo\"] == expected[\"secrets\"][\"vault\"][\"foo\"]\n assert ret[\"secrets\"][\"vault\"][\"bar\"] == expected[\"secrets\"][\"vault\"][\"bar\"]\n assert ret[\"secrets\"][\"vault\"][\"baz\"] == expected[\"secrets\"][\"vault\"][\"baz\"]\n assert ret[\"secrets\"][\"vault\"][\"qux\"] == expected[\"secrets\"][\"vault\"][\"qux\"]\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 132, "n_words": 73, "vocab_size": 56, "complexity": 1, "nloc": 17, "token_counts": 185, "n_ast_nodes": 346, "n_identifiers": 16, "random_cut": "def test_decrypt_pillar_invalid_renderer(salt_master, grains, pillar_homedir):\n \n opts = salt_master.config.copy()\n opts[\"decrypt_pillar\"] = [{\"secrets:vault\": \"gpg\"}]\n opts[\"dec", "d_id": 54344, "documentation": { "docstring": "\n Test decryption using a renderer which is not permitted. It should\n fail, leaving the encrypted keys intact, and add an error to the pillar\n dictionary.\n\n decrypt_pillar_default: foo\n decrypt_pillar_renderers:\n - foo\n - bar\n decrypt_pillar:\n - 'secrets:vault': gpg\n ", "n_words": 36, "vocab_size": 32, "n_whitespaces": 97, "language": "en" } }, { "id": 124104, "commit_id": "ac831fded416381ad3c7fe2ba135eaa4aaab5879", "repo": "ray", "path": "python/ray/tune/trainable/session.py", "file_name": "session.py", "fun_name": "get_trial_name", "commit_message": "[air] update documentation to use `session.report` (#26051)\n\nUpdate documentation to use `session.report`.\r\n\r\nNext steps:\r\n1. Update our internal caller to use `session.report`. Most importantly, CheckpointManager and DataParallelTrainer.\r\n2. Update `get_trial_resources` to use PGF notions to incorporate the requirement of ResourceChangingScheduler. @Yard1 \r\n3. After 2 is done, change all `tune.get_trial_resources` to `session.get_trial_resources`\r\n4. [internal implementation] remove special checkpoint handling logic from huggingface trainer. 
Optimize the flow for checkpoint conversion with `session.report`.\r\n\r\nCo-authored-by: Antoni Baum ", "code": "def get_trial_name():\n \n warnings.warn(\n _deprecation_msg,\n DeprecationWarning,\n )\n _session = get_session()\n if _session:\n return _session.trial_name\n\n\n@DeveloperAPI", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@DeveloperAPI", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 49, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 8, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 9, "random_cut": "def get_trial_name():\n \n warnings.warn(\n _deprecation_msg,\n DeprecationWarning,\n )\n _session = get_session()\n if _session:\n return _session.trial_name\n\n\n", "d_id": 27515, "documentation": { "docstring": "Trial name for the corresponding trial.\n\n For function API use only.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 17, "language": "en" } }, { "id": 156139, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/utils.py", "file_name": "utils.py", "fun_name": "get_scheduler_lock", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def get_scheduler_lock(collection=None, scheduler=None):\n \n from dask import multiprocessing\n from dask.base import get_scheduler\n\n actual_get = get_scheduler(collections=[collection], scheduler=scheduler)\n\n if actual_get == multiprocessing.get:\n return multiprocessing.get_context().Manager().Lock()\n\n return SerializableLock()\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 48, "n_words": 23, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 61, "n_ast_nodes": 100, "n_identifiers": 14, "random_cut": "def get_scheduler_lock(collection=None, scheduler=None):\n \n from dask import multiprocessing\n from dask.base import get_scheduler\n\n actual_get = get_scheduler(collections=[collection], scheduler=scheduler)\n\n if actual_get == multiprocessing.get:\n return multiprocessing.get_context().Manager().", "d_id": 36580, "documentation": { "docstring": "Get an instance of the appropriate lock for a certain situation based on\n scheduler used.", "n_words": 15, "vocab_size": 15, "n_whitespaces": 17, "language": "en" } }, { "id": 271851, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_utils_v1.py", "file_name": "training_utils_v1.py", "fun_name": "verify_dataset_shuffled", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def verify_dataset_shuffled(x):\n \n assert isinstance(x, tf.data.Dataset)\n graph_def = get_dataset_graph_def(x)\n for node in graph_def.node:\n if node.op.startswith(\"ShuffleDataset\"):\n return True\n # Also check graph_def.library.function for ds.interleave or ds.flat_map\n for function in graph_def.library.function:\n for node in function.node_def:\n if node.op.startswith(\"ShuffleDataset\"):\n return True\n logging.warning(\n \"Expected a shuffled dataset but input dataset `x` is \"\n \"not shuffled. 
Please invoke `shuffle()` on input dataset.\"\n )\n return False\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 150, "n_words": 58, "vocab_size": 45, "complexity": 6, "nloc": 15, "token_counts": 79, "n_ast_nodes": 134, "n_identifiers": 16, "random_cut": "def verify_dataset_shuffled(x):\n \n assert isinstance(x, tf.data.Dataset)\n graph_def = get_dataset_graph_def(x)\n for node in graph_def.node:\n if node.op.startswith(\"ShuffleDataset\"):\n return True\n # Also check graph_def.library.function for ds.interleave or ds.flat_map\n for function in graph_def.library.functi", "d_id": 80868, "documentation": { "docstring": "Verifies that the dataset is shuffled.\n\n Args:\n x: Dataset passed as an input to the model.\n\n Returns:\n boolean, whether the input dataset is shuffled or not.\n ", "n_words": 26, "vocab_size": 21, "n_whitespaces": 45, "language": "en" } }, { "id": 282485, "commit_id": "e1b6022b9cf156ffc0697d0d25a5ed2772ea8d68", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/due_diligence/binance_model.py", "file_name": "binance_model.py", "fun_name": "get_binance_available_quotes_for_each_coin", "commit_message": "Global plot styles (#1228)\n\n* Add default stylesheets\r\n\r\n* Add terminal style helper class and global style initialization in cfg\r\n\r\n* Style comments and docstrings\r\n\r\n* Load rich terminal theme from config file\r\n\r\n* Add application chart styles to candle charts\r\n\r\n* Add todos\r\n\r\n* Remove explicit color setting for some ta charts\r\n\r\n* Add user styles folder to gitignore\r\n\r\n* Update default stylesheets\r\n\r\n* Add matplotlib font manager support\r\n\r\n* Add matplotlib font manager support\r\n\r\n* Update docstrings and default style\r\n\r\n* Update stocks candle chart formatting (return fig to style title)\r\n\r\n* Style common ta overlap view\r\n\r\n* Make up and down market colors a part of the style helper\r\n\r\n* Update stylesheets\r\n\r\n* Style common ta volume view\r\n\r\n* Style common ta momentum view\r\n\r\n* Style common ta trend indicators view\r\n\r\n* Style common ta volatility view\r\n\r\n* Style common ta volume view\r\n\r\n* Style common ta custom indicators view\r\n\r\n* Fix styling bugs and remove the obvious time x lablel\r\n\r\n* Style charts in the covid menu\r\n\r\n* Set legend position to upper left in the mpl stylesheet\r\n\r\n* Add mpl_rcparams configs for parameters not covered by stylesheets\r\n\r\n* Remove font configuration files\r\n\r\n* Update style class utility functions\r\n\r\n* Implement passing external axes and style utility usage in ema & stoch\r\n\r\n* Add theme watermark and output helpers\r\n\r\n* Rename style to theme\r\n\r\n* Update helper usage in ta/ma and ta/stoch\r\n\r\n* Update style to theme in sample menus\r\n\r\n* Style forex (#1305)\r\n\r\n* Make tight layout optional 'cause mplfinance doesn't support it\r\n\r\n* Apply global style to the forex menu\r\n\r\n* Update code layout in oanda view and black\r\n\r\n* Style common TA (#1315)\r\n\r\n* Make tight layout optional 'cause mplfinance doesn't support it\r\n\r\n* Apply global style to the forex menu\r\n\r\n* Add linewidth to theme for use in mpf's addplots\r\n\r\n* Add vwap to the stocks notebook api\r\n\r\n* Update common/ta overlap to follow charting style\r\n\r\n* Apply style on TerminalStyle init\r\n\r\n* Enable infrastructure for excluding non-trading days from plots\r\n\r\n* Update notebook api to include there 
and resolve bandit warning\r\n\r\n* Update ta/common/overlap to exclude non-trading days\r\n\r\n* Enable external ax, style and non-trading days in common/ta/momentum\r\n\r\n* Enable external ax, style and non-trading days in common/ta/trend\r\n\r\n* Update vwap to the argument naming convention\r\n\r\n* Enable external ax, style and non-trading days in common/ta/volatility\r\n\r\n* Enable external ax, style and non-trading days in common/ta/volume\r\n\r\n* Enable external ax, style and non-trading days in common/ta/custom\r\n\r\n* Fix controller tests\r\n\r\n* Forgot to disable rewriting of the cassettes ...\r\n\r\n* Fix controller errors that came up because a merge conflict\r\n\r\n* Fix price label position on fib\r\n\r\n* Fix line having wrong x values in fib\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\n\r\n* Style economy (#1308)\r\n\r\n* Began converting\r\n\r\n* Added alphavan_view\r\n\r\n* Added CNN View\r\n\r\n* Updated nasdaq view, fixed glitch\r\n\r\n* Added fred\r\n\r\n* Refactored URL\r\n\r\n* Theo's requested changes\r\n\r\n* Updated docstrings\r\n\r\n* Updated tests\r\n\r\n* Fixed pylint\r\n\r\n* Fixed tests\r\n\r\n* Theo changes\r\n\r\n* Econ Fix\r\n\r\n* Refactor chart style for Crypto context (#1306)\r\n\r\n* Remove mock for gff\r\n\r\n* Mock visualize_output helper function\r\n\r\n* Refactor\r\n\r\n* Fix plot helper\r\n\r\n* Update legend loc\r\n\r\n* Refactor mplfinance candle plot\r\n\r\n* Fix errors in the helper function\r\n\r\n* Fix binbook having the wrong call_ function name\r\n\r\n* Remove hardcoded style params\r\n\r\n* Resolve kwargs future warning from pandas\r\n\r\n* Remove warnings import\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* funds + custom (#1311)\r\n\r\n* funds + custom\r\n\r\n* cleanup cleanup everybody everywhere\r\n\r\n* Fix external axes conditional and a typo\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Add external axes mode to covid charts (#1328)\r\n\r\n* Add portfolio menu plots (#1318)\r\n\r\n* Portfolio view plots (commenting out report stuff)\r\n\r\n* PA Menu broken. 
Commenting out and fix tests\r\n\r\n* portfolio optimization\r\n\r\n* comment out commented api line\r\n\r\n* Add notes on disabling the pa submenu\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Plot updates in common BA (#1335)\r\n\r\n* Add external axes support to common/ba/finbrain\r\n\r\n* Add external axes support to common/ba/twitter\r\n\r\n* Add external axes support to common/ba/google\r\n\r\n* Add external axes support to common/ba/sentimentinvestor\r\n\r\n* Add sentimentinvestor to the notebooks API\r\n\r\n* Fix tests\r\n\r\n* Etf refactor (#1323)\r\n\r\n* Refactored no ETF\r\n\r\n* Fixed gtff import\r\n\r\n* Fixed tests\r\n\r\n* Fix pie chart style\r\n\r\n* Refactored etf/candle\r\n\r\n* Added pylint fix\r\n\r\n* Fixed tests\r\n\r\n* Update candle chart layout\r\n\r\n* Update etf controller test\r\n\r\n* Remove strange binary file\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Expose ETF candle function in the notebooks API\r\n\r\n* Common BA and Common QA charts update (#1342)\r\n\r\n* Add external axes support to common/ba/finbrain\r\n\r\n* Add external axes support to common/ba/twitter\r\n\r\n* Add external axes support to common/ba/google\r\n\r\n* Add external axes support to common/ba/sentimentinvestor\r\n\r\n* Add sentimentinvestor to the notebooks API\r\n\r\n* Fix tests\r\n\r\n* Update stylesheet files\r\n\r\n* Refactor charts for common/qa\r\n\r\n* Update the forgotten line plot\r\n\r\n* Update tests\r\n\r\n* Add missing arg to a docstring\r\n\r\n* Remove scientific notation\r\n\r\n* Black imports\r\n\r\nCo-authored-by: Minh Hoang \r\n\r\n* Options refactor (#1324)\r\n\r\n* Fixed alphaquery_view\r\n\r\n* finished options\r\n\r\n* Fixed pylint\r\n\r\n* Fixed tests\r\n\r\n* Fixed tests\r\n\r\n* Fixed tests\r\n\r\n* update yfinance\r\n\r\n* Tradier + Chartexchange\r\n\r\n* change mocks from gtff to theme.visualize output\r\n\r\n* tests\r\n\r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: james \r\n\r\n* Refactor Stocks menu (#1325)\r\n\r\n* Fix backtesting menu\r\n\r\n* Refactor comparison analysis\r\n\r\n* Refactor Dark pool shorts\r\n\r\n* Refactor rest of menu\r\n\r\n* Fix test\r\n\r\n* Fix tests failing\r\n\r\n* Fix tests fail\r\n\r\n* Fix test failing\r\n\r\n* Remove record mode=none to record new output\r\n\r\n* Rewrite test output\r\n\r\n* Rewrite test outputs\r\n\r\n* Adding more rewritten test output\r\n\r\n* Mock plt.show\r\n\r\n* Mock missing plt.show\r\n\r\n* Missing @pytest.mark.vcr\r\n\r\n* Updating tests : common/behavioural_analysis/finbrain\r\n\r\n* Improve notebooks API coverage for CA and DPS\r\n\r\n* Silence annoying flake8 warning\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Charts update for common/pred (#1344)\r\n\r\n* Add external axes support to common/ba/finbrain\r\n\r\n* Add external axes support to common/ba/twitter\r\n\r\n* Add external axes support to common/ba/google\r\n\r\n* Add external axes support to common/ba/sentimentinvestor\r\n\r\n* Add sentimentinvestor to the notebooks API\r\n\r\n* Fix tests\r\n\r\n* Update stylesheet files\r\n\r\n* Refactor charts for common/qa\r\n\r\n* Update the forgotten line plot\r\n\r\n* Update tests\r\n\r\n* Add missing arg to a docstring\r\n\r\n* Style pred helper and controllers\r\n\r\n* Update ETS plot\r\n\r\n* Update plots in KNN and pred helper\r\n\r\n* Update plot and pretty table for arima\r\n\r\n* Update plot for common/pred/regression\r\n\r\n* Refactor mc_view\r\n\r\n* Fix linting\r\n\r\n* Fix mypy\r\n\r\n* Move plot title to the axis 
level to make more vertical space\r\n\r\nCo-authored-by: Minh Hoang \r\nCo-authored-by: jmaslek \r\n\r\n* linter\r\n\r\n* Update common/ba test data\r\n\r\n* Change etf candle to match stock candle\r\n\r\n* try updating sia test\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: jmaslek \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: Minh Hoang \r\nCo-authored-by: Chavithra PARANA ", "code": "def get_binance_available_quotes_for_each_coin() -> dict:\n \n trading_pairs = _get_trading_pairs()\n results = defaultdict(list)\n for pair in trading_pairs:\n results[pair[\"baseAsset\"]].append(pair[\"quoteAsset\"])\n return results\n\n\n@log_start_end(log=logger)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@log_start_end(log=logger)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 39, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 15, "token_counts": 40, "n_ast_nodes": 82, "n_identifiers": 12, "random_cut": "def get_binance_available_quotes_for_each_coin() -> dict:\n \n ", "d_id": 84165, "documentation": { "docstring": "Helper methods that for every coin available on Binance add all quote assets. [Source: Binance]\n\n Returns\n -------\n dict:\n All quote assets for given coin\n {'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...]\n\n ", "n_words": 34, "vocab_size": 30, "n_whitespaces": 60, "language": "en" } }, { "id": 67965, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/stock_ledger.py", "file_name": "stock_ledger.py", "fun_name": "update_qty_in_future_sle", "commit_message": "style: format code with black", "code": "def update_qty_in_future_sle(args, allow_negative_stock=False):\n\t\n\tdatetime_limit_condition = \"\"\n\tqty_shift = args.actual_qty\n\n\t# find difference/shift in qty caused by stock reconciliation\n\tif args.voucher_type == \"Stock Reconciliation\":\n\t\tqty_shift = get_stock_reco_qty_shift(args)\n\n\t# find the next nearest stock reco so that we only recalculate SLEs till that point\n\tnext_stock_reco_detail = get_next_stock_reco(args)\n\tif next_stock_reco_detail:\n\t\tdetail = next_stock_reco_detail[0]\n\t\t# add condition to update SLEs before this date & time\n\t\tdatetime_limit_condition = get_datetime_limit_condition(detail)\n\n\tfrappe.db.sql(\n\t\t.format(\n\t\t\tqty_shift=qty_shift, datetime_limit_condition=datetime_limit_condition\n\t\t),\n\t\targs,\n\t)\n\n\tvalidate_negative_qty_in_future_sle(args, allow_negative_stock)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 73, "vocab_size": 59, "complexity": 3, "nloc": 31, "token_counts": 80, "n_ast_nodes": 136, "n_identifiers": 17, "random_cut": "def update_qty_in_future_sle(args, allow_negative_stock=False):\n\t\n\tdatetime_limit_condition = \"\"\n\tqty_shift = args.actual_qty\n\n\t# find difference/shift in qty caused by stock reconciliation\n\tif args.voucher_type == \"Stock Reconciliation\":\n\t\tqty_shift = get_stock_reco_qty_shift(args)\n\n\t# find the next nearest stock reco so that we only recalculate SLEs till that point\n\tnext_stock_reco_detail = get_next_stock_reco(args)\n\tif next_stock_reco_detail:\n\t\tdetail = next_stock_reco_detail[0]\n\t\t# add condi", "d_id": 14688, "documentation": { "docstring": "Recalculate Qty after Transaction in future SLEs 
based on current SLE.\n\t\tupdate `tabStock Ledger Entry`\n\t\tset qty_after_transaction = qty_after_transaction + {qty_shift}\n\t\twhere\n\t\t\titem_code = %(item_code)s\n\t\t\tand warehouse = %(warehouse)s\n\t\t\tand voucher_no != %(voucher_no)s\n\t\t\tand is_cancelled = 0\n\t\t\tand (timestamp(posting_date, posting_time) > timestamp(%(posting_date)s, %(posting_time)s)\n\t\t\t\tor (\n\t\t\t\t\ttimestamp(posting_date, posting_time) = timestamp(%(posting_date)s, %(posting_time)s)\n\t\t\t\t\tand creation > %(creation)s\n\t\t\t\t)\n\t\t\t)\n\t\t{datetime_limit_condition}\n\t\t", "n_words": 57, "vocab_size": 43, "n_whitespaces": 42, "language": "en" } }, { "id": 203245, "commit_id": "c5cd8783825b5f6384417dac5f3889b4210b7d08", "repo": "django", "path": "django/templatetags/tz.py", "file_name": "tz.py", "fun_name": "get_current_timezone_tag", "commit_message": "Refs #33476 -- Refactored problematic code before reformatting by Black.\n\nIn these cases Black produces unexpected results, e.g.\r\n\r\ndef make_random_password(\r\n self,\r\n length=10,\r\n allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',\r\n):\r\n\r\nor\r\n\r\ncursor.execute(\"\"\"\r\nSELECT ...\r\n\"\"\",\r\n [table name],\r\n)", "code": "def get_current_timezone_tag(parser, token):\n \n # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments\n args = token.contents.split()\n if len(args) != 3 or args[1] != 'as':\n raise TemplateSyntaxError(\n \"'get_current_timezone' requires 'as variable' (got %r)\" % args\n )\n return GetCurrentTimezoneNode(args[2])\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 80, "n_words": 40, "vocab_size": 38, "complexity": 3, "nloc": 7, "token_counts": 47, "n_ast_nodes": 81, "n_identifiers": 9, "random_cut": "def get_current_timezone_tag(parser, token):\n \n # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments\n a", "d_id": 50273, "documentation": { "docstring": "\n Store the name of the current time zone in the context.\n\n Usage::\n\n {% get_current_timezone as TIME_ZONE %}\n\n This will fetch the currently active time zone and put its name\n into the ``TIME_ZONE`` context variable.\n ", "n_words": 34, "vocab_size": 27, "n_whitespaces": 57, "language": "en" } }, { "id": 194834, "commit_id": "d6773a0b4acf1027dc9b68342a1d84344f1a0d95", "repo": "ParlAI", "path": "tests/test_train_model.py", "file_name": "test_train_model.py", "fun_name": "test_save_multiple_world_logs_mutator", "commit_message": "Fixes train_model worldlogging for multitask with mutators. 
(#4414)\n\n* Fixes train_model worldlogging for multitask with mutators.\r\n\r\n* Fix bug in train_model when evaltask doesn't match task.", "code": "def test_save_multiple_world_logs_mutator(self):\n \n with testing_utils.tempdir() as tmpdir:\n log_report = os.path.join(tmpdir, 'world_logs.jsonl')\n multitask = 'integration_tests:mutators=flatten,integration_tests:ReverseTeacher:mutator=reverse'\n valid, test = testing_utils.train_model(\n {\n 'task': multitask,\n 'validation_max_exs': 10,\n 'model': 'repeat_label',\n 'short_final_eval': True,\n 'num_epochs': 1.0,\n 'world_logs': log_report,\n }\n )\n\n for task in multitask.split(','):\n task_log_report = get_task_world_logs(\n task, log_report, is_multitask=True\n )\n with PathManager.open(task_log_report) as f:\n json_lines = f.readlines()\n assert len(json_lines) == 5\n\n\n@register_agent(\"fake_report\")", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "@register_agent(\"fake_report\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 365, "n_words": 55, "vocab_size": 47, "complexity": 2, "nloc": 21, "token_counts": 113, "n_ast_nodes": 207, "n_identifiers": 25, "random_cut": "def test_save_multiple_world_logs_mutator(self):\n \n with testing_utils.tempdir() as tmpdir:\n log_report = os.path.join(tmpdir, 'world_logs.jsonl')\n multitask = 'integration_tests:mutators=flatt", "d_id": 47106, "documentation": { "docstring": "\n Test that we can save multiple world_logs from train model on multiple tasks\n with mutators present.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 141204, "commit_id": "2a5d322e705df080e9254c9c9a3e187c1ea41c4e", "repo": "ray", "path": "python/ray/tune/tests/test_trial_relative_logdir.py", "file_name": "test_trial_relative_logdir.py", "fun_name": "testDotsInLogdir", "commit_message": "[tune] Relative logdir paths in trials for ExperimentAnalysis in remote buckets (#25063)\n\nWhen running an experiment for example in the cloud and syncing to a bucket the logdir path in the trials will be changed when working with the checkpoints in the bucket. 
There are some workarounds, but the easier solution is to also add a rel_logdir containing the relative path to the trials/checkpoints that can handle any changes in the location of experiment results.\r\n\r\nAs discussed with @Yard1 and @krfricke\r\n\r\nCo-authored-by: Antoni Baum \r\nCo-authored-by: Kai Fricke ", "code": "def testDotsInLogdir(self):\n \n local_dir_path = Path(\"/tmp/test_rel_dots\")\n local_dir = str(local_dir_path)\n if local_dir_path.exists():\n local_dir = tempfile.mkdtemp(prefix=str(local_dir_path) + \"_\")\n trial = Trial(trainable_name=\"rel_logdir\", local_dir=local_dir)\n\n with self.assertRaises(ValueError):\n trial.logdir = \"/tmp/test_rel/../dots\"\n with self.assertRaises(ValueError):\n trial.logdir = local_dir + \"/../\"\n\n if shutil.rmtree.avoids_symlink_attacks:\n if local_dir_path.exists():\n shutil.rmtree(local_dir)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 151, "n_words": 36, "vocab_size": 22, "complexity": 4, "nloc": 13, "token_counts": 100, "n_ast_nodes": 179, "n_identifiers": 19, "random_cut": "def testDotsInLogdir(self):\n \n local_dir_path = Path(\"/tmp/test_", "d_id": 32293, "documentation": { "docstring": "This should result in errors as dots in paths are not allowed.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 75124, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_delete_post", "commit_message": "Reformat with black", "code": "def test_delete_post(self):\n \n # Send request\n response = self.client.post(\n reverse(\"wagtailimages:delete_multiple\", args=(self.image.id,))\n )\n\n # Check response\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[\"Content-Type\"], \"application/json\")\n\n # Make sure the image is deleted\n self.assertFalse(Image.objects.filter(id=self.image.id).exists())\n\n # Check JSON\n response_json = json.loads(response.content.decode())\n self.assertIn(\"image_id\", response_json)\n self.assertIn(\"success\", response_json)\n self.assertEqual(response_json[\"image_id\"], self.image.id)\n self.assertTrue(response_json[\"success\"])\n\n\n@override_settings(WAGTAILIMAGES_IMAGE_MODEL=\"tests.CustomImage\")", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "@override_settings(WAGTAILIMAGES_IMAGE_MODEL=\"tests.CustomImage\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 155, "n_words": 40, "vocab_size": 33, "complexity": 1, "nloc": 12, "token_counts": 128, "n_ast_nodes": 232, "n_identifiers": 25, "random_cut": "def test_delete_post(self):\n \n # Send request\n response = self.client.post(\n reverse(\"wagtailimages:delete_multiple\", args=(self.ima", "d_id": 16361, "documentation": { "docstring": "\n This tests that a POST request to the delete view deletes the image\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 189389, "commit_id": "c4217731e08470d5a56cf02cf76cae01c03fb78f", "repo": "manim", "path": "tests/utils/GraphicalUnitTester.py", "file_name": "GraphicalUnitTester.py", "fun_name": "_show_diff_helper", "commit_message": "Added MyPy Support (#1972)\n\n* MyPy Support\r\n\r\n* MyPy Hook\r\n\r\n* Removing MyPy Hook\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* 
Delete __init__.pyi\r\n\r\n* Delete color.pyi\r\n\r\n* Update .mypy.ini\r\n\r\nCo-authored-by: Christopher Besch \r\n\r\n* changes\r\n\r\n* quick fix\r\n\r\n* MyPy Hook\r\n\r\n* MyPy Hook\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Christopher Besch ", "code": "def _show_diff_helper(self, frame_data, expected_frame_data):\n \n import matplotlib.gridspec as gridspec # type: ignore\n import matplotlib.pyplot as plt\n\n gs = gridspec.GridSpec(2, 2)\n fig = plt.figure()\n fig.suptitle(f\"Test for {str(self.scene).replace('Test', '')}\", fontsize=16)\n\n ax = fig.add_subplot(gs[0, 0])\n ax.imshow(frame_data)\n ax.set_title(\"Generated :\")\n\n ax = fig.add_subplot(gs[0, 1])\n ax.imshow(expected_frame_data)\n ax.set_title(\"Expected :\")\n\n ax = fig.add_subplot(gs[1, :])\n diff_im = expected_frame_data.copy()\n diff_im = np.where(\n frame_data != np.array([0, 0, 0, 255]),\n np.array([0, 255, 0, 255], dtype=\"uint8\"),\n np.array([0, 0, 0, 255], dtype=\"uint8\"),\n ) # Set any non-black pixels to green\n np.putmask(\n diff_im,\n expected_frame_data != frame_data,\n np.array([255, 0, 0, 255], dtype=\"uint8\"),\n ) # Set any different pixels to red\n ax.imshow(diff_im, interpolation=\"nearest\")\n ax.set_title(\"Differences summary : (green = same, red = different)\")\n\n plt.show()\n plt.savefig(f\"{self.scene}.png\")\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 329, "n_words": 106, "vocab_size": 69, "complexity": 1, "nloc": 28, "token_counts": 240, "n_ast_nodes": 407, "n_identifiers": 31, "random_cut": "def _show_diff_helper(self, frame_data, expected_frame_data):\n \n import matplotlib.gridspec as gridspec # type: ignore\n import matplotlib.pyplot as plt\n\n gs = gridspec.GridSpec(2, 2)\n fig = plt.figure()\n fig.suptitle(f\"Test for {str(self.scene", "d_id": 46030, "documentation": { "docstring": "Will visually display with matplotlib differences between frame generated and the one expected.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 82188, "commit_id": "86856f242aec6051c1cace683fe1761c0775babb", "repo": "awx", "path": "awx/main/scheduler/task_manager_models.py", "file_name": "task_manager_models.py", "fun_name": "consume_capacity", "commit_message": "Add max concurrent jobs and max forks per ig\n\nThe intention of this feature is primarily to provide some notion of max\ncapacity of container groups, but the logic I've left generic. Default\nis 0, which will be interpereted as no maximum number of jobs or forks.\n\nIncludes refactor of variable and method names for clarity.\ninstances_by_hostname is an internal attribute of TaskManagerInstances.\nClarify when we are expecting the actual TaskManagerInstances object.\n\nUnify how we process running tasks and consume capacity. This has the\neffect that we do less expensive work in after_lock_init and have 1 less\nloop over all the running tasks. Previously we looped for both building\nthe dependency graph as well as for calculating the starting capacity of\nall the instances and instance groups. Now we acheive both tasks in the\nsame loop.\n\nBecause of how this changes the somewhat subtle \"do-si-do\" of how to\ninitialize the Task Manager models, introduce a wrapper class that tries\nto take some of that burden off of other areas where we re-use this like\nin the serializer and the metrics. 
Also use this wrapper class to handle\nnicities of how to track capacity consumption on instances and instance\ngroups.\n\nAdd tests for max_forks and max_concurrent_jobs\n\nFixup tests that use TaskManagerModels to accomodate changes.\n\nassign ig before call to consume capacity\n\nif we don't do it in that order, then we don't correctly account for\nthe container group jobs we are starting in the middle of the task\nmanager run", "code": "def consume_capacity(self, task):\n \n if self.is_container_group:\n self.container_group_jobs += 1\n self.container_group_consumed_forks += task.task_impact\n else:\n raise RuntimeError(\"We only track capacity for container groups at the instance group level. Otherwise, consume capacity on instances.\")\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 84, "n_words": 30, "vocab_size": 28, "complexity": 2, "nloc": 6, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 8, "random_cut": "def consume_capacity(self, task):\n \n if self.is_container_gr", "d_id": 17326, "documentation": { "docstring": "We only consume capacity on an instance group level if it is a container group. Otherwise we consume capacity on an instance level.", "n_words": 23, "vocab_size": 18, "n_whitespaces": 22, "language": "en" } }, { "id": 137681, "commit_id": "e76ccee69aaa7583be1a9d81cf7b2aa72cf25647", "repo": "ray", "path": "python/ray/util/spark/utils.py", "file_name": "utils.py", "fun_name": "get_avail_mem_per_ray_worker_node", "commit_message": "Ray on spark implementation (#28771)\n\nREP: ray-project/enhancements#14", "code": "def get_avail_mem_per_ray_worker_node(spark, object_store_memory_per_node):\n \n num_cpus_per_spark_task = int(\n spark.sparkContext.getConf().get(\"spark.task.cpus\", \"1\")\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 25, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 20, "token_counts": 83, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "def get_avail_mem_per_ray_worker_node(spark, object_store_memory_per_node):\n \n num_cpus_per_spark_task = int(\n spark.sparkContext.getConf().get(\"spark.task.cpus\", \"1\")\n )\n", "d_id": 31216, "documentation": { "docstring": "\n Return the available heap memory and object store memory for each ray worker.\n NB: We have one ray node per spark task.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 32, "language": "en" } }, { "id": 21285, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/metadata/importlib/_dists.py", "file_name": "_dists.py", "fun_name": "_iter_egg_info_dependencies", "commit_message": "Vendor in pip 22.1.2", "code": "def _iter_egg_info_dependencies(self) -> Iterable[str]:\n \n for entry in self._iter_requires_txt_entries():\n if entry.extra and entry.marker:\n marker = f'({entry.marker}) and extra == \"{safe_extra(entry.extra)}\"'\n elif entry.extra:\n marker = f'extra == \"{safe_extra(entry.extra)}\"'\n elif entry.marker:\n marker = entry.marker\n else:\n marker = \"\"\n if marker:\n yield f\"{entry.requirement} ; {marker}\"\n else:\n yield entry.requirement\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 214, "n_words": 44, "vocab_size": 30, "complexity": 7, "nloc": 26, "token_counts": 69, "n_ast_nodes": 161, 
"n_identifiers": 10, "random_cut": "def _iter_egg_info_dependencies(self) -> Iterable[str]:\n \n for entry i", "d_id": 3751, "documentation": { "docstring": "Get distribution dependencies from the egg-info directory.\n\n To ease parsing, this converts a legacy dependency entry into a PEP 508\n requirement string. Like ``_iter_requires_txt_entries()``, there is code\n in ``importlib.metadata`` that does mostly the same, but not do exactly\n what we need.\n\n Namely, ``importlib.metadata`` does not normalize the extra name before\n putting it into the requirement string, which causes marker comparison\n to fail because the dist-info format do normalize. This is consistent in\n all currently available PEP 517 backends, although not standardized.\n ", "n_words": 81, "vocab_size": 66, "n_whitespaces": 144, "language": "en" } }, { "id": 218009, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/imp.py", "file_name": "imp.py", "fun_name": "cache_from_source", "commit_message": "add python 3.10.4 for windows", "code": "def cache_from_source(path, debug_override=None):\n \n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n return util.cache_from_source(path, debug_override)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 57, "n_identifiers": 7, "random_cut": "def cache_from_source(path, debug_override=None):\n \n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n return util.cache_from_source(path, debug_override)\n\n", "d_id": 55071, "documentation": { "docstring": "**DEPRECATED**\n\n Given the path to a .py file, return the path to its .pyc file.\n\n The .py file does not need to exist; this simply returns the path to the\n .pyc file calculated as if the .py file were imported.\n\n If debug_override is not None, then it must be a boolean and is used in\n place of sys.flags.optimize.\n\n If sys.implementation.cache_tag is None then NotImplementedError is raised.\n\n ", "n_words": 66, "vocab_size": 45, "n_whitespaces": 87, "language": "en" } }, { "id": 217895, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/server.py", "file_name": "server.py", "fun_name": "_url_collapse_path", "commit_message": "add python 3.10.4 for windows", "code": "def _url_collapse_path(path):\n \n # Query component should not be involved.\n path, _, query = path.partition('?')\n path = urllib.parse.unquote(path)\n\n # Similar to os.path.split(os.path.normpath(path)) but specific to URL\n # path semantics rather than local operating system semantics.\n path_parts = path.split('/')\n head_parts = []\n for part in path_parts[:-1]:\n if part == '..':\n head_parts.pop() # IndexError if more '..' 
than prior parts\n elif part and part != '.':\n head_parts.append( part )\n if path_parts:\n tail_part = path_parts.pop()\n if tail_part:\n if tail_part == '..':\n head_parts.pop()\n tail_part = ''\n elif tail_part == '.':\n tail_part = ''\n else:\n tail_part = ''\n\n if query:\n tail_part = '?'.join((tail_part, query))\n\n splitpath = ('/' + '/'.join(head_parts), tail_part)\n collapsed_path = \"/\".join(splitpath)\n\n return collapsed_path\n\n\n\nnobody = None\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 287, "n_words": 112, "vocab_size": 71, "complexity": 10, "nloc": 25, "token_counts": 151, "n_ast_nodes": 281, "n_identifiers": 19, "random_cut": "def _url_collapse_path(path):\n \n # Query componen", "d_id": 54998, "documentation": { "docstring": "\n Given a URL path, remove extra '/'s and '.' path elements and collapse\n any '..' references and returns a collapsed path.\n\n Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.\n The utility of this function is limited to is_cgi method and helps\n preventing some security attacks.\n\n Returns: The reconstituted URL, which will always start with a '/'.\n\n Raises: IndexError if too many '..' occur within the path.\n\n ", "n_words": 70, "vocab_size": 60, "n_whitespaces": 95, "language": "en" } }, { "id": 169224, "commit_id": "5c66e65d7b9fef47ccb585ce2fd0b3ea18dc82ea", "repo": "pandas", "path": "pandas/core/arrays/sparse/accessor.py", "file_name": "accessor.py", "fun_name": "to_coo", "commit_message": "TYP: type all arguments with bool default values (#48624)\n\n* TYP: type all arguments with bool default values\r\n\r\n* bool_t\r\n\r\n* ignore type error in pandas/core/arrays/sparse/accessor.py", "code": "def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):\n \n from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo\n\n A, rows, columns = sparse_series_to_coo(\n self._parent, row_levels, column_levels, sort_labels=sort_labels\n )\n return A, rows, columns\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 72, "n_words": 26, "vocab_size": 22, "complexity": 1, "nloc": 80, "token_counts": 64, "n_ast_nodes": 89, "n_identifiers": 16, "random_cut": "def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):\n \n from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo\n\n A, rows, columns = sparse_series_to_coo(\n self._parent, row_levels, column_levels, sort_labels=sort_labels\n )\n return A, rows, columns\n", "d_id": 40402, "documentation": { "docstring": "\n Create a scipy.sparse.coo_matrix from a Series with MultiIndex.\n\n Use row_levels and column_levels to determine the row and column\n coordinates respectively. row_levels and column_levels are the names\n (labels) or numbers of the levels. 
{row_levels, column_levels} must be\n a partition of the MultiIndex level names (or numbers).\n\n Parameters\n ----------\n row_levels : tuple/list\n column_levels : tuple/list\n sort_labels : bool, default False\n Sort the row and column labels before forming the sparse matrix.\n When `row_levels` and/or `column_levels` refer to a single level,\n set to `True` for a faster execution.\n\n Returns\n -------\n y : scipy.sparse.coo_matrix\n rows : list (row labels)\n columns : list (column labels)\n\n Examples\n --------\n >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])\n >>> s.index = pd.MultiIndex.from_tuples(\n ... [\n ... (1, 2, \"a\", 0),\n ... (1, 2, \"a\", 1),\n ... (1, 1, \"b\", 0),\n ... (1, 1, \"b\", 1),\n ... (2, 1, \"b\", 0),\n ... (2, 1, \"b\", 1)\n ... ],\n ... names=[\"A\", \"B\", \"C\", \"D\"],\n ... )\n >>> s\n A B C D\n 1 2 a 0 3.0\n 1 NaN\n 1 b 0 1.0\n 1 3.0\n 2 1 b 0 NaN\n 1 NaN\n dtype: float64\n\n >>> ss = s.astype(\"Sparse\")\n >>> ss\n A B C D\n 1 2 a 0 3.0\n 1 NaN\n 1 b 0 1.0\n 1 3.0\n 2 1 b 0 NaN\n 1 NaN\n dtype: Sparse[float64, nan]\n\n >>> A, rows, columns = ss.sparse.to_coo(\n ... row_levels=[\"A\", \"B\"], column_levels=[\"C\", \"D\"], sort_labels=True\n ... )\n >>> A\n <3x4 sparse matrix of type ''\n with 3 stored elements in COOrdinate format>\n >>> A.todense()\n matrix([[0., 0., 1., 3.],\n [3., 0., 0., 0.],\n [0., 0., 0., 0.]])\n\n >>> rows\n [(1, 1), (1, 2), (2, 1)]\n >>> columns\n [('a', 0), ('a', 1), ('b', 0), ('b', 1)]\n ", "n_words": 279, "vocab_size": 148, "n_whitespaces": 936, "language": "en" } }, { "id": 195349, "commit_id": "58b6977a9cb45a91d78aabdc3c5538f873829a9f", "repo": "ParlAI", "path": "projects/bb3/agents/r2c2_bb3_agent.py", "file_name": "r2c2_bb3_agent.py", "fun_name": "_get_memory_heuristic_values", "commit_message": "[BB3] Memory Heuristics (#4770)\n\n* memory heuristics\r\n\r\n* small changes\r\n\r\n* address comments\r\n\r\n* fix config\r\n\r\n* reqs", "code": "def _get_memory_heuristic_values(self) -> Dict[str, Union[str, float, bool]]:\n \n return {\n 'ignore_in_session_memories': self.opt.get(\n 'ignore_in_session_memories_mkm', False\n ),\n 'memory_overlap_threshold': self.opt.get('memory_overlap_threshold', 0.0),\n 'memory_hard_block_for_n_turns': self.opt.get(\n 'memory_hard_block_for_n_turns', 0\n ),\n 'memory_soft_block_decay_factor': self.opt.get(\n 'memory_soft_block_decay_factor', 0.0\n ),\n }\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 171, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 16, "token_counts": 79, "n_ast_nodes": 123, "n_identifiers": 9, "random_cut": "def _get_memory_heuristic_values(self) -> Dict[str, Union[str, float, bool]]:\n \n return {\n 'ignore_in_session_memories': self.opt.get(\n 'ignore_in_session_memories_mkm', False\n ),\n 'memory_overlap_threshold': self.opt.get('memory_overlap_threshold', 0.0),\n 'memory_hard_block_for_n_turns': self.opt.get(\n 'memory_hard_block_for_n_turns', 0\n ", "d_id": 47255, "documentation": { "docstring": "\n Extract heuristics from self.opt.\n ", "n_words": 4, "vocab_size": 4, "n_whitespaces": 19, "language": "en" } }, { "id": 266099, "commit_id": "27bf7b4a9add27b4f3f8b0f4fd5dfc4cfe74a65b", "repo": "netbox", "path": "netbox/extras/templatetags/plugins.py", "file_name": "plugins.py", "fun_name": "plugin_list_buttons", "commit_message": "4751 Enable plugins to inject content within object list views (#10901)\n\n* 4751 add plugin buttons to list 
templates\r\n\r\n* 4751 add plugin buttons to list templates\r\n\r\n* 4751 add documentation\r\n\r\n* 4751 fix object reference\r\n\r\n* 4751 update docs", "code": "def plugin_list_buttons(context, model):\n \n return _get_registered_content(model, 'list_buttons', context)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 4, "random_cut": "def plugin_list_buttons(context, model):\n \n return _", "d_id": 78293, "documentation": { "docstring": "\n Render all list buttons registered by plugins\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 50215, "commit_id": "ffcde21305c61d950a9f93e57e6180c9a9665b87", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/droppath.py", "file_name": "droppath.py", "fun_name": "drop_path", "commit_message": "add disco_diffusion_ernievil_base", "code": "def drop_path(self, inputs):\n \n # if prob is 0 or eval mode, return original input\n if self.drop_prob == 0. or not self.training:\n return inputs\n keep_prob = 1 - self.drop_prob\n keep_prob = paddle.to_tensor(keep_prob, dtype='float32')\n shape = (inputs.shape[0], ) + (1, ) * (inputs.ndim - 1) # shape=(N, 1, 1, 1)\n random_tensor = keep_prob + paddle.rand(shape, dtype=inputs.dtype)\n random_tensor = random_tensor.floor() # mask\n output = inputs.divide(keep_prob) * random_tensor #divide is to keep same output expectation\n return output\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 157, "n_words": 73, "vocab_size": 48, "complexity": 3, "nloc": 10, "token_counts": 101, "n_ast_nodes": 162, "n_identifiers": 16, "random_cut": "def drop_path(self, inputs):\n \n # if prob is 0 or eval mode, return original input\n if self.drop_prob == 0. 
or not self.training:\n return inputs\n ", "d_id": 10052, "documentation": { "docstring": "drop path op\n Args:\n input: tensor with arbitrary shape\n drop_prob: float number of drop path probability, default: 0.0\n training: bool, if current mode is training, default: False\n Returns:\n output: output tensor after drop path\n ", "n_words": 34, "vocab_size": 28, "n_whitespaces": 99, "language": "en" } }, { "id": 42069, "commit_id": "34662f4be5c364e7518f9c1118c9b362038ee5dd", "repo": "seaborn", "path": "seaborn/rcmod.py", "file_name": "rcmod.py", "fun_name": "set_style", "commit_message": "Convert docs to pydata-sphinx-theme and add new material (#2842)\n\n* Do basic conversion of site to pydata_sphinx_theme\r\n\r\n* Remove some pae structure customizations we no longer need\r\n\r\n* Add some custom CSS\r\n\r\n* Tweak a few more colors\r\n\r\n* Remove vestigial div closing tag\r\n\r\n* Reorganize release notes into hierarchical pages\r\n\r\n* Rebuild full docs and fix some resulting issues\r\n\r\n* Make release note doc refs absolute\r\n\r\n* Convert homepage to use sphinx-design instead of hand-crafted html\r\n\r\n* Remove original custom css\r\n\r\n* Simplify header and put archive switcher in footer\r\n\r\n* Streamline API docs for objects\r\n\r\n* Play around with templates to fix shrinking content (not perfect yet)\r\n\r\n* Improve use of horizontal space without sidebars\r\n\r\n* Various tweaks\r\n\r\n* Convert tutorial homepage source to native sphinx-design directives\r\n\r\n* Move intro page into tutorial\r\n\r\n* More tweaks\r\n\r\n* Tweak theme colors and footer\r\n\r\n* Remove reference to navbar version\r\n\r\n* Note that error bar tutorial demonstrates new features as of v0.12\r\n\r\n* Update layout customization for new theme features\r\n\r\n* Various layout and CSS tweaks\r\n\r\n* Narrow support guidance to StackOverflow\r\n\r\n* Run all notebooks\r\n\r\n* Adapt to new dropdown navbar in pydata theme\r\n\r\n* Separate tutorial source and outputs\r\n\r\n* Separate dostring source and outputs\r\n\r\n* Add scale API template\r\n\r\n* Update API docs\r\n\r\n* Fix requirements\r\n\r\n* Add new objects\r\n\r\n* Point doc requirements at v0.10 RC for theme", "code": "def set_style(style=None, rc=None):\n \n style_object = axes_style(style, rc)\n mpl.rcParams.update(style_object)\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 46, "n_identifiers": 8, "random_cut": "def set_style(style=None, rc=None):\n \n style_object = axes_style", "d_id": 7473, "documentation": { "docstring": "\n Set the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n whether a grid is enabled by default. This is accomplished using the\n matplotlib rcParams system.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n See :func:`axes_style` to get the parameter values.\n\n Parameters\n ----------\n style : dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/set_style.rst\n\n ", "n_words": 111, "vocab_size": 76, "n_whitespaces": 185, "language": "en" } }, { "id": 34302, "commit_id": "ac227093e41cecb07c7e0f2fc9a504850907bd06", "repo": "transformers", "path": "src/transformers/models/vilt/feature_extraction_vilt.py", "file_name": "feature_extraction_vilt.py", "fun_name": "_resize", "commit_message": "Add ViLT (#14895)\n\n* First commit\r\n\r\n* Add conversion script\r\n\r\n* Make conversion script work for base model\r\n\r\n* More improvements\r\n\r\n* Update conversion script, works for vqa\r\n\r\n* Add indexing argument to meshgrid\r\n\r\n* Make conversion script work for ViltForPreTraining\r\n\r\n* Add ViltForPreTraining to docs\r\n\r\n* Fix device issue\r\n\r\n* Add processor\r\n\r\n* Add MinMaxResize to feature extractor\r\n\r\n* Implement call method of ViltProcessor\r\n\r\n* Fix tests\r\n\r\n* Add integration test\r\n\r\n* Add loss calculation for VQA\r\n\r\n* Improve tests\r\n\r\n* Improve some more tests\r\n\r\n* Debug tests\r\n\r\n* Small improvements\r\n\r\n* Add support for attention_mask\r\n\r\n* Remove mask_it\r\n\r\n* Add pixel_mask\r\n\r\n* Add tests for ViltFeatureExtractor\r\n\r\n* Improve tests\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning to conversion script\r\n\r\n* Minor fixes\r\n\r\n* Add support for image_embeds, update docstrings to markdown\r\n\r\n* Update docs to markdown\r\n\r\n* Improve conversion script\r\n\r\n* Rename ViltForPreTraining to ViltForMaskedLM\r\n\r\n* Improve conversion script\r\n\r\n* Convert docstrings to markdown\r\n\r\n* Fix code example of retrieval model\r\n\r\n* Properly convert masked language model\r\n\r\n* Add integration test for nlvr\r\n\r\n* Fix code quality\r\n\r\n* Apply suggestions from code review\r\n\r\n* Add copied from statements\r\n\r\n* Fix pretrained_config_archive_map\r\n\r\n* Fix docs\r\n\r\n* Add model to README\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply more suggestions from code review\r\n\r\n* Make code more readable\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning to the tests\r\n\r\n* Rename ViltForVisualQuestionAnswering to ViltForQuestionAnswering\r\n\r\n* Replace pixel_values_2 by single tensor\r\n\r\n* Add hidden_states and attentions\r\n\r\n* Fix one more test\r\n\r\n* Fix all tests\r\n\r\n* Update year\r\n\r\n* Fix rebase issues\r\n\r\n* Fix another rebase issue\r\n\r\n* Remove ViltForPreTraining from auto mapping\r\n\r\n* Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval\r\n\r\n* Make it possible to use BertTokenizerFast in the processor\r\n\r\n* Use BertTokenizerFast by default\r\n\r\n* Rename ViltForNaturalLanguageVisualReasoning, define custom model output\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _resize(self, image, shorter=800, longer=1333, size_divisor=32, resample=Image.BICUBIC):\n \n if not isinstance(image, Image.Image):\n image = self.to_pil_image(image)\n\n w, h = image.size\n min_size = shorter\n max_size = longer\n scale = min_size / min(w, h)\n if h < w:\n newh, neww = min_size, scale * w\n else:\n newh, neww = scale * h, min_size\n\n if max(newh, neww) > max_size:\n scale = max_size / max(newh, neww)\n newh = newh * scale\n neww = neww * scale\n\n newh, neww = int(newh + 0.5), int(neww + 0.5)\n newh, neww = newh // size_divisor * size_divisor, neww // size_divisor * size_divisor\n\n return 
self.resize(image, size=(neww, newh), resample=resample)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 247, "n_words": 97, "vocab_size": 52, "complexity": 4, "nloc": 18, "token_counts": 169, "n_ast_nodes": 266, "n_identifiers": 23, "random_cut": "def _resize(self, image, shorter=800, longer=1333, size_divisor=32, resample=Image.BICUBIC):\n \n if not isinstance(image, Image.Image):\n image = self.to_pil_image(image)\n\n w, h = image.size\n min_size = shorter\n max_size = longer\n ", "d_id": 6252, "documentation": { "docstring": "\n Resizes the shorter edge of `image` to `shorter` and limits the longer edge to under `longer`, while preserving\n the aspect ratio. Also makes sure that both the height and width can be divided by `size_divisor`.\n\n Based on original implementation:\n https://github.com/dandelin/ViLT/blob/3db8b5035464afee84d951bf6322e1b27f1d072d/vilt/transforms/utils.py#L5\n\n Args:\n image (`PIL.Image`):\n The image to resize.\n shorter (`int`, *optional*, defaults to `800`):\n The size to which to resize the shorter side of the image.\n longer (`int`, *optional*, defaults to `1333`):\n The size by which to limit the longer side of the image, while preserving the aspect ratio.\n size_divisor (`int`, *optional*, defaults to `32`):\n The size by which both the height and the width must be divisible.\n resample (`int`, *optional*, defaults to `PIL.Image.BICUBIC`):\n An optional resampling filter.\n ", "n_words": 117, "vocab_size": 61, "n_whitespaces": 290, "language": "en" } }, { "id": 139470, "commit_id": "bc3a1d35cf6e9a5fd7eef908a8e76aefb80ce6a9", "repo": "ray", "path": "rllib/policy/dynamic_tf_policy_v2.py", "file_name": "dynamic_tf_policy_v2.py", "fun_name": "extra_action_out_fn", "commit_message": "[RLlib] Introduce new policy base classes. 
(#24742)", "code": "def extra_action_out_fn(self) -> Dict[str, TensorType]:\n \n extra_action_fetches = super().extra_action_out_fn()\n extra_action_fetches.update(self._policy_extra_action_fetches)\n return extra_action_fetches\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 39, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 10, "token_counts": 32, "n_ast_nodes": 54, "n_identifiers": 9, "random_cut": "def extra_action_out_fn(self) -> Dict[str, TensorType]:\n \n extra_action_fetches = super().extra_action_out_fn()\n extra_action_fetches.update(self._policy_extra_action_fetches)\n return extra_action_fetches\n", "d_id": 31708, "documentation": { "docstring": "Extra values to fetch and return from compute_actions().\n\n Returns:\n Dict[str, TensorType]: An extra fetch-dict to be passed to and\n returned from the compute_actions() call.\n ", "n_words": 24, "vocab_size": 20, "n_whitespaces": 65, "language": "en" } }, { "id": 104904, "commit_id": "d1d4f1065fd4ab91b2c8682643dbd12f86d66fcd", "repo": "datasets", "path": "src/datasets/builder.py", "file_name": "builder.py", "fun_name": "get_all_exported_dataset_infos", "commit_message": "Add API code examples for Builder classes (#4313)\n\n* 📝 add examples for builder classes\r\n\r\n* 📝 apply quentin review", "code": "def get_all_exported_dataset_infos(cls) -> dict:\n \n dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)\n if os.path.exists(dset_infos_file_path):\n return DatasetInfosDict.from_directory(cls.get_imported_module_dir())\n return {}\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 16, "token_counts": 50, "n_ast_nodes": 96, "n_identifiers": 13, "random_cut": "def get_all_exported_dataset_infos(cls) -> dict:\n \n dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)\n if os.path.exists(dset_infos_file_path):\n return DatasetInfosDict.from_directory(cls.get_imported_module_dir()", "d_id": 22019, "documentation": { "docstring": "Empty dict if doesn't exist\n\n Example:\n\n ```py\n >>> from datasets import load_dataset_builder\n >>> ds_builder = load_dataset_builder('rotten_tomatoes')\n >>> ds_builder.get_all_exported_dataset_infos()\n {'default': DatasetInfo(description=\"Movie Review Dataset.\\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\\nsentences from Rotten Tomatoes movie reviews. 
This data was first used in Bo\\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\\nsentiment categorization with respect to rating scales.'', Proceedings of the\\nACL, 2005.\\n\", citation='@InProceedings{Pang+Lee:05a,\\n author = {Bo Pang and Lillian Lee},\\n title = {Seeing stars: Exploiting class relationships for sentiment\\n categorization with respect to rating scales},\\n booktitle = {Proceedings of the ACL},\\n year = 2005\\n}\\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)}\n ```\n ", "n_words": 140, "vocab_size": 116, "n_whitespaces": 241, "language": "en" } }, { "id": 323124, "commit_id": "44a290e94d1becd1f09fddc3d873f9e19c9d6919", "repo": "PaddleNLP", "path": "paddlenlp/trainer/trainer_args.py", "file_name": "trainer_args.py", "fun_name": "to_sanitized_dict", "commit_message": "[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. 
(#1761)\n\n* add some datasets for finetune.\r\n\r\n* support fine tune for all tastks.\r\n\r\n* add trainer prototype.\r\n\r\n* init verison for paddlenlp trainer.\r\n\r\n* refine trainer.\r\n\r\n* update for some details.\r\n\r\n* support multi-cards training evaluation.\r\n\r\n* support load from ckpt.\r\n\r\n* support for export inference model.\r\n\r\n* first version of trainer.\r\n\r\n* seq cls support clue.\r\n\r\n* trainer support for token classification and question answersing tasks.\r\n\r\n* fix as reviews.\r\n\r\nCo-authored-by: Zeyu Chen ", "code": "def to_sanitized_dict(self) -> Dict[str, Any]:\n \n d = self.to_dict()\n d = {\n ** d, ** {\n \"train_batch_size\": self.train_batch_size,\n \"eval_batch_size\": self.eval_batch_size\n }\n }\n\n valid_types = [bool, int, float, str]\n valid_types.append(paddle.Tensor)\n\n return {\n k: v if type(v) in valid_types else str(v)\n for k, v in d.items()\n }\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 174, "n_words": 44, "vocab_size": 33, "complexity": 3, "nloc": 17, "token_counts": 88, "n_ast_nodes": 138, "n_identifiers": 20, "random_cut": "def to_sanitized_dict(self) -> Dict[str, Any]:\n \n d = self.to_dict()\n d = {\n ** d, ** {\n \"train_batch_size\": self.train_batch_size,\n \"eval_batch_size\": self.eval_batch_size\n }\n }\n\n valid_types = [bool, int,", "d_id": 118370, "documentation": { "docstring": "\n Sanitized serialization to use with TensorBoard’s hparams\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 221182, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bz2.py", "file_name": "bz2.py", "fun_name": "readinto", "commit_message": "add python 3.10.4 for windows", "code": "def readinto(self, b):\n \n self._check_can_read()\n return self._buffer.readinto(b)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 38, "n_identifiers": 5, "random_cut": "def readinto(self, b):\n \n self._check_can_read()\n", "d_id": 56252, "documentation": { "docstring": "Read bytes into b.\n\n Returns the number of bytes read (0 for EOF).\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 205032, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/oracle/base.py", "file_name": "base.py", "fun_name": "_output_type_handler", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _output_type_handler(cursor, name, defaultType, length, precision, scale):\n \n if defaultType == Database.NUMBER:\n if scale == -127:\n if precision == 0:\n # NUMBER column: decimal-precision floating point.\n # This will normally be an integer from a sequence,\n # but it could be a decimal value.\n outconverter = FormatStylePlaceholderCursor._output_number_converter\n else:\n # FLOAT column: binary-precision floating point.\n # This comes from FloatField columns.\n outconverter = float\n elif precision > 0:\n # NUMBER(p,s) column: decimal-precision fixed point.\n # This comes from IntegerField and DecimalField columns.\n outconverter = FormatStylePlaceholderCursor._get_decimal_converter(\n precision, scale\n )\n else:\n # No type information. 
This normally comes from a\n # mathematical expression in the SELECT list. Guess int\n # or Decimal based on whether it has a decimal point.\n outconverter = FormatStylePlaceholderCursor._output_number_converter\n return cursor.var(\n Database.STRING,\n size=255,\n arraysize=cursor.arraysize,\n outconverter=outconverter,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 557, "n_words": 126, "vocab_size": 77, "complexity": 5, "nloc": 19, "token_counts": 90, "n_ast_nodes": 147, "n_identifiers": 18, "random_cut": "def _output_type_handler(cursor, name, defaultType, length, precision, scale):\n \n if defaultType == Database.NUMBER:\n if scale == -127:\n if precision == 0:\n # NUMBER column: decimal-precision floating point.\n # This will normally be an integer from a sequence,\n # but it could be a decimal value.\n outconverter = FormatStylePlaceholderCursor._output_number_converter\n else:\n # FLOAT column: binary-precision floating point.\n # This comes from FloatField columns.\n outconverter = float\n elif precision > 0:\n # NUMBER(p,s) column: decimal-precision fixed point.\n # This comes from IntegerField and DecimalField columns.\n outconverter = FormatStylePlaceholderCursor._get_decimal_converter(\n ", "d_id": 50997, "documentation": { "docstring": "\n Called for each db column fetched from cursors. Return numbers as the\n appropriate Python type.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 104101, "commit_id": "6ca96c707502e0689f9b58d94f46d871fa5a3c9c", "repo": "datasets", "path": "src/datasets/features/features.py", "file_name": "features.py", "fun_name": "decode_nested_example", "commit_message": "Add Arrow type casting to struct for Image and Audio + Support nested casting (#3575)\n\n* add storage cast\r\n\r\n* implement dict cast for image\r\n\r\n* factorize extension type creation for audio and image + implement type cast for thos custom types\r\n\r\n* fix tests\r\n\r\n* style\r\n\r\n* [big] allow extension array in nested arrays\r\n\r\n* docs\r\n\r\n* style\r\n\r\n* fix Features pickling\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix more tests\r\n\r\n* add base extensionarray for pyarrow<6\r\n\r\n* add extensionarray for pyarrow<6\r\n\r\n* add soundfile to tests requirements\r\n\r\n* minor\r\n\r\n* remove not implemented error for complex casting in pyarrow 3\r\n\r\n* style\r\n\r\n* style again\r\n\r\n* add casting for fixed size lists\r\n\r\n* add libsndfile1 in the linux CI\r\n\r\n* style\r\n\r\n* typo\r\n\r\n* start adding new tests just to notice the concatenation issue...\r\n\r\n* [big] remove extension types + move cast_storage to the Image and Audio classes\r\n\r\n* minor\r\n\r\n* fix test\r\n\r\n* style\r\n\r\n* add more tests to image\r\n\r\n* add audio tests\r\n\r\n* support casting from null array\r\n\r\n* fix field names verifications when casting\r\n\r\n* docs + tests\r\n\r\n* use the new table_cast on pyarrow tables\r\n\r\n* whoops forgot one line\r\n\r\n* remove unused string handling in Image.decode_example\r\n\r\n* update tests accordingly", "code": "def decode_nested_example(schema, obj):\n \n # Nested structures: we allow dict, list/tuples, sequences\n if isinstance(schema, dict):\n return {\n k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj)\n }\n elif isinstance(schema, (list, tuple)):\n sub_schema = schema[0]\n if obj is None:\n return None\n 
else:\n if len(obj) > 0:\n for first_elmt in obj:\n if _check_non_null_non_empty_recursive(first_elmt, sub_schema):\n break\n if decode_nested_example(sub_schema, first_elmt) != first_elmt:\n return [decode_nested_example(sub_schema, o) for o in obj]\n return list(obj)\n elif isinstance(schema, Sequence):\n # We allow to reverse list of dict => dict of list for compatiblity with tfds\n if isinstance(schema.feature, dict):\n return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}\n else:\n return decode_nested_example([schema.feature], obj)\n # Object with special decoding:\n elif isinstance(schema, (Audio, Image)):\n return schema.decode_example(obj) if obj is not None else None\n return obj\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 365, "n_words": 121, "vocab_size": 79, "complexity": 15, "nloc": 25, "token_counts": 207, "n_ast_nodes": 310, "n_identifiers": 21, "random_cut": "def decode_nested_example(schema, obj):\n \n # Nested structures: we allow dict, list/tuples, sequences\n if isinstance(schema, dict):\n return {\n k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj)\n }\n elif isinstance(schema, (list, tuple)):\n sub_schema = schema[0]\n if obj is None:\n return None\n else:\n if len(obj) > 0:\n for first_elmt in obj:\n if _check_non_null_non_empty_recursive(first_elmt, sub_schema):\n break\n if decode_nested_example(sub_schema, first_elmt) != first_elmt:\n return [decode_nested_example(sub_schema, o) for o in obj]\n return list(obj)\n elif isinstance(schema, Sequence):\n # We allow to reverse list of dict => dict of list for compatiblity with tfds\n if isinstance(schema.feature, dict):\n return {k: decode_nested_example([schema.feature[k]", "d_id": 21771, "documentation": { "docstring": "Decode a nested example.\n This is used since some features (in particular Audio and Image) have some logic during decoding.\n\n To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.\n If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.\n ", "n_words": 73, "vocab_size": 57, "n_whitespaces": 85, "language": "en" } }, { "id": 175013, "commit_id": "5ded5474ac9b323496506e6391e8d8c2c888d7f1", "repo": "pip", "path": "src/pip/_internal/utils/virtualenv.py", "file_name": "virtualenv.py", "fun_name": "running_under_virtualenv", "commit_message": "Name virtualenv<20 as \"legacy\"\n\nWell they are. 
At least not \"regular\" anymore.", "code": "def running_under_virtualenv() -> bool:\n \n return _running_under_venv() or _running_under_legacy_virtualenv()\n\n", "url": "https://github.com/pypa/pip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 2, "nloc": 3, "token_counts": 15, "n_ast_nodes": 29, "n_identifiers": 4, "random_cut": "def running_under_virtualenv() -> bool:\n \n return _running_und", "d_id": 41541, "documentation": { "docstring": "True if we're running inside a virtual environment, False otherwise.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 244362, "commit_id": "9c5b3331ac8edbfa328922fbab45c382380da540", "repo": "mmdetection", "path": "mmdet/models/dense_heads/base_dense_head.py", "file_name": "base_dense_head.py", "fun_name": "forward_train", "commit_message": "Simplify api of one-stage detector", "code": "def forward_train(self, x, data_samples, proposal_cfg=None, **kwargs):\n \n img_metas = [data_sample['meta'] for data_sample in data_samples]\n outs = self(x)\n gt_bboxes = [\n data_sample.gt_instances.bboxes for data_sample in data_samples\n ]\n\n if hasattr(data_samples[0].gt_instances, 'labels'):\n gt_labels = [\n data_sample.gt_instances.labels for data_sample in data_samples\n ]\n else:\n # RPN\n gt_labels = None\n\n if hasattr(data_samples[0], 'instances_ignore'):\n gt_bboxes_ignore = [\n data_sample.ignored_instances.bboxes\n for data_sample in data_samples\n ]\n else:\n gt_bboxes_ignore = None\n\n if gt_labels is None:\n loss_inputs = outs + (gt_bboxes, img_metas)\n else:\n loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)\n losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n if proposal_cfg is None:\n return losses\n else:\n results_list = self.get_results(\n *outs, img_metas=img_metas, cfg=proposal_cfg)\n return losses, results_list\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 398, "n_words": 97, "vocab_size": 51, "complexity": 9, "nloc": 30, "token_counts": 178, "n_ast_nodes": 277, "n_identifiers": 23, "random_cut": "def forward_train(self, x, data_samples, proposal_cfg=None, **kwargs):\n \n img_metas = [data_sample['meta'] for data_sample in data_samples]\n outs = self(x)\n gt_bboxes = [\n data_sample.gt_instances.bboxes for data_sample in data_samples\n ]\n\n if hasattr(data_samples[0].gt_instances, 'labels'):\n gt_labels = [\n data_sample.gt_instances.labels ", "d_id": 70351, "documentation": { "docstring": "\n Args:\n x (list[Tensor]): Features from FPN.\n data_samples (list[:obj:`GeneralData`]): Each item contains\n the meta information of each image and corresponding\n annotations.\n proposal_cfg (mmcv.Config): Test / postprocessing configuration,\n if None, test_cfg would be used\n\n Returns:\n tuple or Tensor: When `proposal_cfg` is None, the detector is a \\\n normal one-stage detector, The return value is the losses.\n\n - losses: (dict[str, Tensor]): A dictionary of loss components.\n\n When the `proposal_cfg` is not None, the head is used as a\n `rpn_head`, the return value is a tuple contains:\n\n - losses: (dict[str, Tensor]): A dictionary of loss components.\n - results_list (list[:obj:`InstanceData`]): Detection\n results of each image after the post process.\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n 
(num_instance,)\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances,).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n ", "n_words": 147, "vocab_size": 95, "n_whitespaces": 446, "language": "en" } }, { "id": 266040, "commit_id": "ea6d86e6c4bb6037465410db6205a7471bc81a6c", "repo": "netbox", "path": "netbox/netbox/models/features.py", "file_name": "features.py", "fun_name": "cf", "commit_message": "Closes #10052: The cf attribute now returns deserialized custom field data", "code": "def cf(self):\n \n return {\n cf.name: cf.deserialize(self.custom_field_data.get(cf.name))\n for cf in self.custom_fields\n }\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 34, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def cf(self):\n \n return {\n cf.name: cf.deserialize(self.custom_field_data.get(cf.name", "d_id": 78277, "documentation": { "docstring": "\n Return a dictionary mapping each custom field for this instance to its deserialized value.\n\n ```python\n >>> tenant = Tenant.objects.first()\n >>> tenant.cf\n {'primary_site': , 'cust_id': 'DMI01', 'is_active': True}\n ```\n ", "n_words": 29, "vocab_size": 28, "n_whitespaces": 79, "language": "en" } }, { "id": 177153, "commit_id": "88245f69f89dbee75cef67bdf35bbfb986a42d52", "repo": "networkx", "path": "networkx/drawing/tests/test_layout.py", "file_name": "test_layout.py", "fun_name": "test_arf_layout_negative_a_check", "commit_message": "Arf layout (#5910)\n\n* added arf_layout\r\n\r\n* reference to docstring and comparison to spring layout\r\n\r\n* rebase to origin main\r\n\r\n* black re-format\r\n\r\n* Left aligned docstring text\r\n\r\n* Cleaned up computation and update variables to new docstring\r\n\r\n* Updated naming tests. Added input check on arf_layout parameter `a`\r\n\r\n* Fixed Linter issues for py38 target\r\n\r\n* Fixed Linter issues for target p38\r\n\r\n* linter issue fixed", "code": "def test_arf_layout_negative_a_check(self):\n \n G = self.Gs\n pytest.raises(ValueError, nx.arf_layout, G=G, a=-1)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 48, "n_identifiers": 10, "random_cut": "def test_arf_layout_negative_a_check(self):\n \n G = self.Gs\n pytest.raises(ValueError, nx.arf_layout, G=G, a=-", "d_id": 42292, "documentation": { "docstring": "\n Checks input parameters correctly raises errors. 
For example, `a` should be larger than 1\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 30, "language": "en" } }, { "id": 292291, "commit_id": "a18d4c51ff3ab9afd13ee08fe8c65e2f9b77f3b1", "repo": "core", "path": "tests/components/device_tracker/test_config_entry.py", "file_name": "test_config_entry.py", "fun_name": "test_connected_device_registered", "commit_message": "Ensure dhcp can still discover new devices from device trackers (#66822)\n\nCo-authored-by: Martin Hjelmare ", "code": "async def test_connected_device_registered(hass):\n \n\n registry = mock_registry(hass)\n dispatches = []\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 50, "token_counts": 204, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "async def test_connected_device_registered(hass):\n \n\n registry = mock_registry(hass)\n dispatches = []\n", "d_id": 91386, "documentation": { "docstring": "Test dispatch on connected device being registered.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 241580, "commit_id": "650c710efacd633fa283955145342bb64063c883", "repo": "lightning", "path": "tests/strategies/test_sharded_strategy.py", "file_name": "test_sharded_strategy.py", "fun_name": "test_ddp_sharded_strategy_checkpoint_multi_gpu", "commit_message": "Rename training plugin test files & names to strategy (#11303)", "code": "def test_ddp_sharded_strategy_checkpoint_multi_gpu(tmpdir):\n \n model = BoringModel()\n trainer = Trainer(gpus=2, strategy=\"ddp_sharded_spawn\", fast_dev_run=True)\n\n trainer.fit(model)\n\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n trainer.save_checkpoint(checkpoint_path)\n saved_model = BoringModel.load_from_checkpoint(checkpoint_path)\n\n # Assert model parameters are identical after loading\n for ddp_param, shard_param in zip(model.parameters(), saved_model.parameters()):\n assert torch.equal(ddp_param.to(\"cpu\"), shard_param)\n\n\n@RunIf(min_gpus=2, skip_windows=True, fairscale=True)", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@RunIf(min_gpus=2, skip_windows=True, fairscale=True)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 72, "n_words": 39, "vocab_size": 35, "complexity": 2, "nloc": 9, "token_counts": 93, "n_ast_nodes": 177, "n_identifiers": 28, "random_cut": "def test_ddp_sharded_strategy_checkpoint_multi_gpu(tmpdir):\n \n model = BoringModel()\n trainer = Trainer(gpus=2, strategy=\"ddp_sharded_spawn\", fast_dev_run=True)\n\n trainer.fit(model)\n\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n trainer.save_checkpoint(checkpoint_path)", "d_id": 69605, "documentation": { "docstring": "Test to ensure that checkpoint is saved correctly when using multiple GPUs.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 130870, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/serve/controller.py", "file_name": "controller.py", "fun_name": "autoscale", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def autoscale(self) -> None:\n \n for deployment_name, (\n deployment_info,\n route_prefix,\n ) in self.list_deployments().items():\n deployment_config = deployment_info.deployment_config\n autoscaling_policy = deployment_info.autoscaling_policy\n\n if 
autoscaling_policy is None:\n continue\n\n replicas = self.deployment_state_manager._deployment_states[\n deployment_name\n ]._replicas\n running_replicas = replicas.get([ReplicaState.RUNNING])\n\n current_num_ongoing_requests = []\n for replica in running_replicas:\n replica_tag = replica.replica_tag\n num_ongoing_requests = self.autoscaling_metrics_store.window_average(\n replica_tag,\n time.time() - autoscaling_policy.config.look_back_period_s,\n )\n if num_ongoing_requests is not None:\n current_num_ongoing_requests.append(num_ongoing_requests)\n\n if len(current_num_ongoing_requests) == 0:\n continue\n\n new_deployment_config = deployment_config.copy()\n\n decision_num_replicas = autoscaling_policy.get_decision_num_replicas(\n current_num_ongoing_requests=current_num_ongoing_requests,\n curr_target_num_replicas=deployment_config.num_replicas,\n )\n new_deployment_config.num_replicas = decision_num_replicas\n\n new_deployment_info = copy(deployment_info)\n new_deployment_info.deployment_config = new_deployment_config\n\n goal_id, updating = self.deployment_state_manager.deploy(\n deployment_name, new_deployment_info\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 522, "n_words": 85, "vocab_size": 56, "complexity": 6, "nloc": 36, "token_counts": 180, "n_ast_nodes": 284, "n_identifiers": 38, "random_cut": "def autoscale(self) -> None:\n \n for deployment_name, (\n deployment_info,\n route_prefix,\n ) in self.list_deployments().items():\n deployment_config = deployment_info.deployment_config\n autoscaling_policy = depl", "d_id": 29408, "documentation": { "docstring": "Updates autoscaling deployments with calculated num_replicas.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 210095, "commit_id": "ef83ab8a3f7814e9886a7a22c8dcc55f506b6081", "repo": "PaddleDetection", "path": "ppdet/utils/checkpoint.py", "file_name": "checkpoint.py", "fun_name": "match_state_dict", "commit_message": "Add PP-YOLOv3 code (#5281)\n\n* [ppyolov3] add ppyolov3 base code\r\n\r\n* add ppyolov3 s/m/x\r\n\r\n* modify ema\r\n\r\n* modify code to convert onnx successfully\r\n\r\n* support arbitrary shape\r\n\r\n* update config to use amp default\r\n\r\n* refine ppyolo_head code\r\n\r\n* modify reparameter code\r\n\r\n* refine act layer\r\n\r\n* adapter pico_head and tood_head code\r\n\r\n* remove ppyolov3 yaml\r\n\r\n* fix codestyle\r\n\r\nCo-authored-by: wangxinxin08 ", "code": "def match_state_dict(model_state_dict, weight_state_dict):\n \n\n model_keys = sorted(model_state_dict.keys())\n weight_keys = sorted(weight_state_dict.keys())\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 18, "n_words": 9, "vocab_size": 8, "complexity": 11, "nloc": 46, "token_counts": 305, "n_ast_nodes": 49, "n_identifiers": 7, "random_cut": "def match_state_dict(model_state_dict, weight_state_dict):\n \n\n ", "d_id": 52855, "documentation": { "docstring": "\n Match between the model state dict and pretrained weight state dict.\n Return the matched state dict.\n\n The method supposes that all the names in pretrained weight state dict are\n subclass of the names in models`, if the prefix 'backbone.' in pretrained weight\n keys is stripped. And we could get the candidates for each model key. Then we\n select the name with the longest matched size as the final match result. 
For\n example, the model state dict has the name of\n 'backbone.res2.res2a.branch2a.conv.weight' and the pretrained weight as\n name of 'res2.res2a.branch2a.conv.weight' and 'branch2a.conv.weight'. We\n match the 'res2.res2a.branch2a.conv.weight' to the model key.\n ", "n_words": 99, "vocab_size": 55, "n_whitespaces": 133, "language": "en" } }, { "id": 176447, "commit_id": "cc1db275efc709cb964ce88abbfa877798d58c10", "repo": "networkx", "path": "networkx/algorithms/approximation/connectivity.py", "file_name": "connectivity.py", "fun_name": "local_node_connectivity", "commit_message": "Minor improvements from general code readthrough (#5414)\n\n* Add deprecated directive to reversed docstring.\r\n\r\n* Add missing dep directives to shpfiles.\r\n\r\n* Remove defn of INF sentinel.\r\n\r\n* typo.\r\n\r\n* str -> comment in forloop.\r\n\r\n* STY: appropriate casing for var name.", "code": "def local_node_connectivity(G, source, target, cutoff=None):\n \n if target == source:\n raise nx.NetworkXError(\"source and target have to be different nodes.\")\n\n # Maximum possible node independent paths\n if G.is_directed():\n possible = min(G.out_degree(source), G.in_degree(target))\n else:\n possible = min(G.degree(source), G.degree(target))\n\n K = 0\n if not possible:\n return K\n\n if cutoff is None:\n cutoff = float(\"inf\")\n\n exclude = set()\n for i in range(min(possible, cutoff)):\n try:\n path = _bidirectional_shortest_path(G, source, target, exclude)\n exclude.update(set(path))\n K += 1\n except nx.NetworkXNoPath:\n break\n\n return K\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 200, "n_words": 74, "vocab_size": 56, "complexity": 7, "nloc": 21, "token_counts": 143, "n_ast_nodes": 232, "n_identifiers": 23, "random_cut": "def local_node_connectivity(G, source, target, cutoff=None):\n \n if target == source:\n raise nx.NetworkXError(\"source and target have to be different nodes.\")\n\n # Maximum possible node independent paths\n if G.is_directed():\n possible = min(G.out_degree(source), G.in_degree(target))\n else:\n possible = min(G.degree(source), G.degree(target))\n\n K = 0\n if not possible:\n return K\n\n if cutoff is None:\n cutoff = float(\"inf\")\n\n exclude = set()\n for i in range(min(possible, cutoff)):\n try:\n path = _bidirectional_shortest_path(G, source, target, exclude)\n ", "d_id": 41908, "documentation": { "docstring": "Compute node connectivity between source and target.\n\n Pairwise or local node connectivity between two distinct and nonadjacent\n nodes is the minimum number of nodes that must be removed (minimum\n separating cutset) to disconnect them. By Menger's theorem, this is equal\n to the number of node independent paths (paths that share no nodes other\n than source and target). Which is what we compute in this function.\n\n This algorithm is a fast approximation that gives an strict lower\n bound on the actual number of node independent paths between two nodes [1]_.\n It works for both directed and undirected graphs.\n\n Parameters\n ----------\n\n G : NetworkX graph\n\n source : node\n Starting node for node connectivity\n\n target : node\n Ending node for node connectivity\n\n cutoff : integer\n Maximum node connectivity to consider. If None, the minimum degree\n of source or target is used as a cutoff. 
Default value None.\n\n Returns\n -------\n k: integer\n pairwise node connectivity\n\n Examples\n --------\n >>> # Platonic octahedral graph has node connectivity 4\n >>> # for each non adjacent node pair\n >>> from networkx.algorithms import approximation as approx\n >>> G = nx.octahedral_graph()\n >>> approx.local_node_connectivity(G, 0, 5)\n 4\n\n Notes\n -----\n This algorithm [1]_ finds node independents paths between two nodes by\n computing their shortest path using BFS, marking the nodes of the path\n found as 'used' and then searching other shortest paths excluding the\n nodes marked as used until no more paths exist. It is not exact because\n a shortest path could use nodes that, if the path were longer, may belong\n to two different node independent paths. Thus it only guarantees an\n strict lower bound on node connectivity.\n\n Note that the authors propose a further refinement, losing accuracy and\n gaining speed, which is not implemented yet.\n\n See also\n --------\n all_pairs_node_connectivity\n node_connectivity\n\n References\n ----------\n .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for\n Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035\n http://eclectic.ss.uci.edu/~drwhite/working.pdf\n\n ", "n_words": 314, "vocab_size": 192, "n_whitespaces": 494, "language": "en" } }, { "id": 153802, "commit_id": "cca9468648521e9317de1cb69cf8e6b1d5292d21", "repo": "modin", "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_copartition", "commit_message": "PERF-#4493: Use partition size caches more in Modin dataframe. (#4495)\n\nCo-authored-by: Devin Petersohn \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: mvashishtha ", "code": "def _copartition(self, axis, other, how, sort, force_repartition=False):\n \n if isinstance(other, type(self)):\n other = [other]\n\n self_index = self.axes[axis]\n others_index = [o.axes[axis] for o in other]\n joined_index, make_reindexer = self._join_index_objects(\n axis, [self_index] + others_index, how, sort\n )\n\n frames = [self] + other\n non_empty_frames_idx = [\n i for i, o in enumerate(frames) if o._partitions.size != 0\n ]\n\n # If all frames are empty\n if len(non_empty_frames_idx) == 0:\n return (\n self._partitions,\n [o._partitions for o in other],\n joined_index,\n # There are no partition sizes because the resulting dataframe\n # has no partitions.\n [],\n )\n\n base_frame_idx = non_empty_frames_idx[0]\n other_frames = frames[base_frame_idx + 1 :]\n\n # Picking first non-empty frame\n base_frame = frames[non_empty_frames_idx[0]]\n base_index = base_frame.axes[axis]\n\n # define conditions for reindexing and repartitioning `self` frame\n do_reindex_base = not base_index.equals(joined_index)\n do_repartition_base = force_repartition or do_reindex_base\n\n # Perform repartitioning and reindexing for `base_frame` if needed.\n # Also define length of base and frames. 
We will need to know the\n # lengths for alignment.\n if do_repartition_base:\n reindexed_base = base_frame._partition_mgr_cls.map_axis_partitions(\n axis,\n base_frame._partitions,\n make_reindexer(do_reindex_base, base_frame_idx),\n )\n if axis:\n base_lengths = [obj.width() for obj in reindexed_base[0]]\n else:\n base_lengths = [obj.length() for obj in reindexed_base.T[0]]\n else:\n reindexed_base = base_frame._partitions\n base_lengths = self._column_widths if axis else self._row_lengths\n\n others_lengths = [o._axes_lengths[axis] for o in other_frames]\n\n # define conditions for reindexing and repartitioning `other` frames\n do_reindex_others = [\n not o.axes[axis].equals(joined_index) for o in other_frames\n ]\n\n do_repartition_others = [None] * len(other_frames)\n for i in range(len(other_frames)):\n do_repartition_others[i] = (\n force_repartition\n or do_reindex_others[i]\n or others_lengths[i] != base_lengths\n )\n\n # perform repartitioning and reindexing for `other_frames` if needed\n reindexed_other_list = [None] * len(other_frames)\n for i in range(len(other_frames)):\n if do_repartition_others[i]:\n # indices of others frame start from `base_frame_idx` + 1\n reindexed_other_list[i] = other_frames[\n i\n ]._partition_mgr_cls.map_axis_partitions(\n axis,\n other_frames[i]._partitions,\n make_reindexer(do_repartition_others[i], base_frame_idx + 1 + i),\n lengths=base_lengths,\n )\n else:\n reindexed_other_list[i] = other_frames[i]._partitions\n reindexed_frames = (\n [frames[i]._partitions for i in range(base_frame_idx)]\n + [reindexed_base]\n + reindexed_other_list\n )\n return (reindexed_frames[0], reindexed_frames[1:], joined_index, base_lengths)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1145, "n_words": 304, "vocab_size": 163, "complexity": 21, "nloc": 68, "token_counts": 462, "n_ast_nodes": 694, "n_identifiers": 48, "random_cut": "def _copartition(self, axis, other, how, sort, force_repartition=False):\n \n if isinstance(other, type(self)):\n other = [other]\n\n self_index = self.axes[axis]\n others_index = [o.axes[axis] for o in other]\n joined_index, make_reindexer = self._join_index_objects(\n axis, [self_index] + others_index, how, sort\n )\n\n frames = [self] + other\n non_empty_frames_idx = [\n i for i, o in enumerate(frames) if o._partitions.size != 0\n ]\n\n # If all frames are empty\n if len(non_empty_frames_idx) == 0:\n return (\n self._partitions,\n [o._partitions for o in other],\n joined_index,\n # There are no partition sizes because the resulting dataframe\n # has no partitions.\n [],\n )\n\n base_frame_idx = non_empty_frames_idx[0]\n other_frames = frames[base_frame_idx + 1 :]\n\n # Picking first non-empty frame\n base_frame = frames[non_empty_frames_idx[0]]\n base_index = base_frame.axes[axis]\n\n # define conditions for reindexing and repartitioning `self` frame\n do_reindex_base = not base_index.equals(joined_index)\n do_repartition_base = force_repartition or do_reindex_base\n\n # Perform repartitioning and reindexing for `base_frame` if needed.\n # Also define length of base and fra", "d_id": 35618, "documentation": { "docstring": "\n Copartition two Modin DataFrames.\n\n Perform aligning of partitions, index and partition blocks.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to copartition along (0 - rows, 1 - columns).\n other : PandasDataframe\n Other Modin DataFrame(s) to copartition against.\n how : str\n How to manage joining the 
index object (\"left\", \"right\", etc.).\n sort : bool\n Whether sort the joined index or not.\n force_repartition : bool, default: False\n Whether force the repartitioning or not. By default,\n this method will skip repartitioning if it is possible. This is because\n reindexing is extremely inefficient. Because this method is used to\n `join` or `append`, it is vital that the internal indices match.\n\n Returns\n -------\n tuple\n Tuple containing:\n 1) 2-d NumPy array of aligned left partitions\n 2) list of 2-d NumPy arrays of aligned right partitions\n 3) joined index along ``axis``\n 4) List with sizes of partitions along axis that partitioning\n was done on. This list will be empty if and only if all\n the frames are empty.\n ", "n_words": 161, "vocab_size": 111, "n_whitespaces": 448, "language": "en" } }, { "id": 169303, "commit_id": "2fbdd1eb4ef73a470f3db60cbf38a7d9f6c3ffe1", "repo": "pandas", "path": "pandas/core/indexes/multi.py", "file_name": "multi.py", "fun_name": "size", "commit_message": "PERF: MultiIndex.size (#48723)\n\n* add MultiIndex.size\r\n\r\n* whatsnew", "code": "def size(self) -> int:\n \n # override Index.size to avoid materializing _values\n return len(self)\n\n # --------------------------------------------------------------------\n # Levels Methods\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 45, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 13, "n_ast_nodes": 27, "n_identifiers": 4, "random_cut": "def size(self) -> int:\n \n # override Index.size to avoid materializing _values\n return len(self)\n\n # -------------------------------------------", "d_id": 40414, "documentation": { "docstring": "\n Return the number of elements in the underlying data.\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 153079, "commit_id": "58bbcc37477866d19c8b092a0e1974a4f0baa586", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/omnisci_on_native/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "groupby_agg", "commit_message": "REFACTOR-#2656: Update modin to fit algebra (code only) (#3717)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: Rehan Durrani ", "code": "def groupby_agg(self, by, axis, agg, groupby_args, **kwargs):\n \n # Currently we only expect 'by' to be a projection of the same frame.\n # If 'by' holds a list of columns/series, then we create such projection\n # to re-use code.\n if not isinstance(by, DFAlgQueryCompiler):\n if is_list_like(by):\n by_cols = []\n by_frames = []\n for obj in by:\n if isinstance(obj, str):\n by_cols.append(obj)\n elif hasattr(obj, \"_modin_frame\"):\n by_frames.append(obj._modin_frame)\n else:\n raise NotImplementedError(\"unsupported groupby args\")\n by_cols = Index.__new__(Index, data=by_cols, dtype=self.columns.dtype)\n by_frame = self.mask(col_labels=by_cols)\n if by_frames:\n by_frame = by_frame.concat(\n axis=1, other_modin_frames=by_frames, ignore_index=True\n )\n else:\n raise NotImplementedError(\"unsupported groupby args\")\n else:\n by_frame = by._modin_frame\n\n if axis != 0:\n raise NotImplementedError(\"groupby is supported for axis = 0 only\")\n\n base = by_frame._find_common_projections_base(self)\n if base is None:\n raise NotImplementedError(\"unsupported groupby args\")\n\n if 
groupby_args[\"level\"] is not None:\n raise NotImplementedError(\"levels are not supported for groupby\")\n\n drop = kwargs.get(\"drop\", True)\n as_index = groupby_args.get(\"as_index\", True)\n groupby_cols = by_frame.columns\n if isinstance(agg, dict):\n agg_cols = agg.keys()\n elif not drop:\n # If 'by' data came from a different frame then 'self-aggregation'\n # columns are more prioritized.\n agg_cols = self.columns\n else:\n agg_cols = [col for col in self.columns if col not in groupby_cols]\n\n # Mimic pandas behaviour: pandas does not allow for aggregation to be empty\n # in case of multi-column 'by'.\n if not as_index and len(agg_cols) == 0 and len(groupby_cols) > 1:\n agg_cols = self.columns\n\n # Create new base where all required columns are computed. We don't allow\n # complex expressions to be a group key or an aggeregate operand.\n allowed_nodes = (FrameNode, TransformNode)\n if not isinstance(by_frame._op, allowed_nodes):\n raise NotImplementedError(\n \"OmniSci doesn't allow complex expression to be a group key. \"\n f\"The only allowed frame nodes are: {tuple(o.__name__ for o in allowed_nodes)}, \"\n f\"met '{type(by_frame._op).__name__}'.\"\n )\n\n col_to_delete_template = \"__delete_me_{name}\"\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 937, "n_words": 278, "vocab_size": 161, "complexity": 34, "nloc": 105, "token_counts": 774, "n_ast_nodes": 546, "n_identifiers": 49, "random_cut": "def groupby_agg(self, by, axis, agg, groupby_args, **kwargs):\n \n # Currently we only expect 'by' to be a projection of the same frame.\n # If 'by' holds a list of columns/series, then we create such projection\n # to re-use code.\n if not isinstance(by, DFAlgQueryCompiler):\n if is_list_like(by):\n by_cols = []\n by_frames = []\n for obj in by:\n if isinstance(obj, str):\n by_cols.append(obj)\n elif hasattr(obj, \"_modin_frame\"):\n by_frames.append(obj._modin_frame)\n else:\n raise NotImplementedError(\"unsupported groupby args\")\n by_cols = Index.__new__(Index, data=by_cols, dtype=self", "d_id": 35248, "documentation": { "docstring": "\n Groupby with aggregation operation.\n\n Parameters\n ----------\n by : DFAlgQueryCompiler or list-like of str\n Grouping keys.\n axis : {0, 1}\n Only rows groupby is supported, so should be 0.\n agg : str or dict\n Aggregates to compute.\n groupby_args : dict\n Additional groupby args.\n **kwargs : dict\n Keyword args. Currently ignored.\n\n Returns\n -------\n OmnisciOnNativeDataframe\n The new frame.\n ", "n_words": 55, "vocab_size": 45, "n_whitespaces": 206, "language": "en" } }, { "id": 92539, "commit_id": "06885ee7284a274d02a9dc1f6a0348c8edc07184", "repo": "sentry", "path": "src/sentry/snuba/tasks.py", "file_name": "tasks.py", "fun_name": "delete_subscription_from_snuba", "commit_message": "feat(mep): Restructure how we determine entity subscription for alerts (#36605)\n\nPreviously we mapped a specific `EntityKey` to all `EntitySubscription` classes. As part of\r\nintroducing metric based performance alerts, we want to have the `EntitySubscription` determine the\r\nspecific entity that the subscription will run on. 
This allows us to automatically determine the\r\ncorrect entity for metric based alerts without having to duplicate logic that parses\r\naggregates/datasets/etc.", "code": "def delete_subscription_from_snuba(query_subscription_id, **kwargs):\n \n try:\n subscription = QuerySubscription.objects.get(id=query_subscription_id)\n except QuerySubscription.DoesNotExist:\n metrics.incr(\"snuba.subscriptions.delete.subscription_does_not_exist\")\n return\n\n if subscription.status not in [\n QuerySubscription.Status.DELETING.value,\n QuerySubscription.Status.DISABLED.value,\n ]:\n metrics.incr(\"snuba.subscriptions.delete.incorrect_status\")\n return\n\n if subscription.subscription_id is not None:\n query_dataset = QueryDatasets(subscription.snuba_query.dataset)\n entity_key = get_entity_key_from_snuba_query(\n subscription.snuba_query, subscription.project.organization_id, subscription.project_id\n )\n _delete_from_snuba(\n query_dataset,\n subscription.subscription_id,\n entity_key,\n )\n\n if subscription.status == QuerySubscription.Status.DELETING.value:\n subscription.delete()\n else:\n subscription.update(subscription_id=None)\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 214, "n_words": 48, "vocab_size": 40, "complexity": 5, "nloc": 26, "token_counts": 142, "n_ast_nodes": 227, "n_identifiers": 29, "random_cut": "def delete_subscription_from_snuba(query_subscription_id, **kwargs):\n \n try:\n subscription = QuerySubscription.objects.get(id=query_subscription_id)\n except QuerySubscription.DoesNotExist:\n metrics.incr(\"snuba.subscriptions.delete.subscription_does_not_exist\")\n return\n\n if subscription.status not in [\n QuerySubscription.Status.DELETING.value,\n QuerySubscription.Status.DISABLED.value,\n ]:\n metrics.incr(\"snuba.subscriptions.delete.incorrect_status\")\n return\n\n if subscription.subscription_id is not None:\n query_dataset = QueryDatasets(subscription.snuba_query.dataset)\n entity_key = get_entity_key_from_snuba_query(\n subscription.snuba_query, subscription.project.organization_id, subscription.project_id\n )\n _delete_from_s", "d_id": 18931, "documentation": { "docstring": "\n Task to delete a corresponding subscription in Snuba from a `QuerySubscription` in\n Sentry.\n If the local subscription is marked for deletion (as opposed to disabled),\n then we delete the local subscription once we've successfully removed from Snuba.\n ", "n_words": 37, "vocab_size": 28, "n_whitespaces": 53, "language": "en" } }, { "id": 320931, "commit_id": "5616a99eff34f7074641d1391ed77d6b4b743529", "repo": "qutebrowser", "path": "tests/unit/mainwindow/test_messageview.py", "file_name": "test_messageview.py", "fun_name": "test_changing_timer_with_messages_shown", "commit_message": "Add a MessageInfo data class\n\nPreparation for #7246", "code": "def test_changing_timer_with_messages_shown(qtbot, view, config_stub):\n \n config_stub.val.messages.timeout = 900000 # 15s\n view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))\n with qtbot.wait_signal(view._clear_timer.timeout):\n config_stub.val.messages.timeout = 100\n\n\n@pytest.mark.parametrize('count, expected', [(1, 100), (3, 300),\n (5, 500), (7, 500)])", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('count, expected', [(1, 100), (3, 300),\n (5, 500), (7, 500)])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 89, "n_words": 26, 
"vocab_size": 24, "complexity": 1, "nloc": 5, "token_counts": 57, "n_ast_nodes": 143, "n_identifiers": 18, "random_cut": "def test_changing_timer_with_messages_shown(qtbot, view, config_stub):\n \n config_stub.val.messages.timeout = 900000 # 15s\n view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))\n with qtbot.wait_signal(view._clear_timer.timeout):\n config_stub.val.messages.timeout = 100\n\n\n@pytest.mark.parametrize('count, expected', [(1, 100), (3, 300),\n (5, 500), (7, 500)])", "d_id": 117443, "documentation": { "docstring": "When we change messages.timeout, the timer should be restarted.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 66132, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/interview/interview.py", "file_name": "interview.py", "fun_name": "get_events", "commit_message": "style: format code with black", "code": "def get_events(start, end, filters=None):\n\t\n\tfrom frappe.desk.calendar import get_event_conditions\n\n\tevents = []\n\n\tevent_color = {\n\t\t\"Pending\": \"#fff4f0\",\n\t\t\"Under Review\": \"#d3e8fc\",\n\t\t\"Cleared\": \"#eaf5ed\",\n\t\t\"Rejected\": \"#fce7e7\",\n\t}\n\n\tconditions = get_event_conditions(\"Interview\", filters)\n\n\tinterviews = frappe.db.sql(\n\t\t.format(\n\t\t\tconditions=conditions\n\t\t),\n\t\t{\"start\": start, \"end\": end},\n\t\tas_dict=True,\n\t\tupdate={\"allDay\": 0},\n\t)\n\n\tfor d in interviews:\n\t\tsubject_data = []\n\t\tfor field in [\"name\", \"job_applicant\", \"interview_round\"]:\n\t\t\tif not d.get(field):\n\t\t\t\tcontinue\n\t\t\tsubject_data.append(d.get(field))\n\n\t\tcolor = event_color.get(d.status)\n\t\tinterview_data = {\n\t\t\t\"from\": get_datetime(\"%s %s\" % (d.scheduled_on, d.from_time or \"00:00:00\")),\n\t\t\t\"to\": get_datetime(\"%s %s\" % (d.scheduled_on, d.to_time or \"00:00:00\")),\n\t\t\t\"name\": d.name,\n\t\t\t\"subject\": \"\\n\".join(subject_data),\n\t\t\t\"color\": color if color else \"#89bcde\",\n\t\t}\n\n\t\tevents.append(interview_data)\n\n\treturn events\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 62, "n_words": 96, "vocab_size": 75, "complexity": 7, "nloc": 45, "token_counts": 216, "n_ast_nodes": 373, "n_identifiers": 31, "random_cut": "def get_events(start, end, filters=None):\n\t\n\tfrom frappe.desk.calendar import get_event_conditions\n\n\tevents = []\n\n\tevent_color = {\n\t\t\"Pending\": \"#fff4f0\",\n\t\t\"Under Review\": \"#d3e8fc\",\n\t\t\"Cleared\": \"#eaf5ed\",\n\t\t\"Rejected\": \"#fce7e7\",\n\t}\n\n\tconditions = get_event_conditions(\"Interview\", filters)\n\n\tinterviews = frappe.db.sql(\n\t\t.format(\n\t\t\tconditions=conditions\n\t\t),\n\t\t{\"start\": start, \"end\": end},\n\t\tas_dict=True,\n\t\tupdate={\"allDay\": 0},\n\t)\n\n\tfor d in interviews:\n\t\tsubject_data = []\n\t\tfor field in [\"name\", \"job_applicant\", \"interview_round\"]:\n\t\t\tif not d.get(field):\n\t\t\t\tcontinue\n\t\t\tsubject_data.append(d.get(field))\n\n\t\tcolor = event_color.get(d.status)\n\t\tinterview_data = {\n\t\t\t\"from\": get_datetime(\"%s %s\" % (d.scheduled_on, d.from_time or \"00:00:00\")),\n\t\t\t\"to\": get_datetime(\"%s %s\" % (d.scheduled_on, d.to_time or \"00:00:00\")),\n\t\t\t\"name\": d.name,\n\t\t\t\"subject\": \"", "d_id": 14108, "documentation": { "docstring": "Returns events for Gantt / Calendar view rendering.\n\n\t:param start: Start 
date-time.\n\t:param end: End date-time.\n\t:param filters: Filters (JSON).\n\t\n\t\t\tSELECT DISTINCT\n\t\t\t\t`tabInterview`.name, `tabInterview`.job_applicant, `tabInterview`.interview_round,\n\t\t\t\t`tabInterview`.scheduled_on, `tabInterview`.status, `tabInterview`.from_time as from_time,\n\t\t\t\t`tabInterview`.to_time as to_time\n\t\t\tfrom\n\t\t\t\t`tabInterview`\n\t\t\twhere\n\t\t\t\t(`tabInterview`.scheduled_on between %(start)s and %(end)s)\n\t\t\t\tand docstatus != 2\n\t\t\t\t{conditions}\n\t\t\t", "n_words": 46, "vocab_size": 41, "n_whitespaces": 32, "language": "en" } }, { "id": 138228, "commit_id": "51b56ad0118ed3f4341410e8c75625d1ca8cd757", "repo": "ray", "path": "python/ray/tune/tests/test_experiment.py", "file_name": "test_experiment.py", "fun_name": "testFuncTrainableCheckpointConfigValidation", "commit_message": "[Tune] Fix CheckpointConfig validation for function trainables (#31255)\n\nThis fixes an issue where a ValueError wasn't being properly raised when passing in a function trainable and setting `checkpoint_at_end=True` or `checkpoint_frequency > 0`. Previously, the error was only raised for function trainables of the form `def train_func(config, checkpoint_dir):`, which is the old checkpoint dir function API.\r\n\r\nSigned-off-by: Justin Yu ", "code": "def testFuncTrainableCheckpointConfigValidation(self):\n \n with self.assertRaises(ValueError):\n Experiment(\n name=\"foo\",\n run=\"f1\", # Will point to a wrapped function trainable\n checkpoint_config=CheckpointConfig(checkpoint_at_end=True),\n )\n with self.assertRaises(ValueError):\n Experiment(\n name=\"foo\",\n run=\"f1\",\n checkpoint_config=CheckpointConfig(checkpoint_frequency=1),\n )\n with self.assertRaises(ValueError):\n Experiment(\n name=\"foo\",\n run=lambda config: 1,\n checkpoint_config=CheckpointConfig(checkpoint_at_end=True),\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 263, "n_words": 33, "vocab_size": 21, "complexity": 1, "nloc": 19, "token_counts": 93, "n_ast_nodes": 161, "n_identifiers": 12, "random_cut": "def testFuncTrainableCheckpointConfigValidation(self):\n \n with self.assertRaises(ValueError):\n Experiment(\n name=\"foo\",\n run=\"f1\", # Will point to a wrapped function trainable\n checkpoint_c", "d_id": 31364, "documentation": { "docstring": "Raise an error when trying to specify checkpoint_at_end/checkpoint_frequency\n with a function trainable.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 18, "language": "en" } }, { "id": 9551, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/utils/align2stylegan.py", "file_name": "align2stylegan.py", "fun_name": "create_perspective_transform", "commit_message": "initialize ostec", "code": "def create_perspective_transform(src, dst, round=False, splat_args=False):\n \n try:\n transform_matrix = create_perspective_transform_matrix(src, dst)\n error = None\n except np.linalg.LinAlgError as e:\n transform_matrix = np.identity(3, dtype=np.float)\n error = \"invalid input quads (%s and %s): %s\" %(src, dst, e)\n error = error.replace(\"\\n\", \"\")\n\n to_eval = \"def perspective_transform(%s):\\n\" %(\n splat_args and \"*pt\" or \"pt\",\n )\n to_eval += \" res = np.dot(transform_matrix, ((pt[0], ), (pt[1], ), (1, )))\\n\"\n to_eval += \" res = res / res[2]\\n\"\n if round:\n to_eval += \" return (int(round(res[0][0])), int(round(res[1][0])))\\n\"\n else:\n to_eval += \" return 
(res[0][0], res[1][0])\\n\"\n locals = {\n \"transform_matrix\": transform_matrix,\n }\n locals.update(globals())\n exec(to_eval,locals,locals)\n res = locals[\"perspective_transform\"]\n res.matrix = transform_matrix\n res.error = error\n return res\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 220, "n_words": 102, "vocab_size": 67, "complexity": 5, "nloc": 26, "token_counts": 144, "n_ast_nodes": 254, "n_identifiers": 23, "random_cut": "def create_perspective_transform(src, dst, round=False, splat_args=False):\n \n try:\n transform_matrix = create_perspective_transform_matrix(src, dst)\n error = None\n except np.linalg.LinAlgError as e:\n transform_matrix = np.identity(3, dtype=np.float)\n error = \"invalid input quads (%s and %s): %s\" %(src, dst, e)\n error = error.replace(\"\\n\", \"\")\n\n to_eval = \"def perspective_transform(%s):\\n\" %(\n splat_args and \"*pt\" or \"pt\",\n )\n to_eval += \" res = np.dot(transform_matrix, ((pt[0], ), (pt[1], ), (1, )))\\n\"\n to_eval += \" res = res / res[2]\\n\"\n if round:\n to_eval += \" return (int(round(r", "d_id": 1631, "documentation": { "docstring": " Returns a function which will transform points in quadrilateral\n ``src`` to the corresponding points on quadrilateral ``dst``::\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (100, 100), (50, 100)],\n ... )\n >>> transform((5, 5))\n (74.99999999999639, 74.999999999999957)\n\n If ``round`` is ``True`` then points will be rounded to the nearest\n integer and integer values will be returned.\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (100, 100), (50, 100)],\n ... round=True,\n ... )\n >>> transform((5, 5))\n (75, 75)\n\n If ``splat_args`` is ``True`` the function will accept two arguments\n instead of a tuple.\n\n >>> transform = create_perspective_transform(\n ... [(0, 0), (10, 0), (10, 10), (0, 10)],\n ... [(50, 50), (100, 50), (100, 100), (50, 100)],\n ... splat_args=True,\n ... )\n >>> transform(5, 5)\n (74.99999999999639, 74.999999999999957)\n\n If the input values yield an invalid transformation matrix an identity\n function will be returned and the ``error`` attribute will be set to a\n description of the error::\n\n >>> tranform = create_perspective_transform(\n ... np.zeros((4, 2)),\n ... np.zeros((4, 2)),\n ... 
)\n >>> transform((5, 5))\n (5.0, 5.0)\n >>> transform.error\n 'invalid input quads (...): Singular matrix\n ", "n_words": 194, "vocab_size": 84, "n_whitespaces": 606, "language": "en" } }, { "id": 321111, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/qtnetworkdownloads.py", "file_name": "qtnetworkdownloads.py", "fun_name": "get", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def get(self, url, cache=True, **kwargs):\n \n if not url.isValid():\n urlutils.invalid_url_error(url, \"start download\")\n return None\n\n req = QNetworkRequest(url)\n user_agent = websettings.user_agent(url)\n req.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, user_agent)\n\n if not cache:\n req.setAttribute(QNetworkRequest.Attribute.CacheSaveControlAttribute, False)\n\n return self.get_request(req, **kwargs)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 111, "n_words": 29, "vocab_size": 25, "complexity": 3, "nloc": 10, "token_counts": 85, "n_ast_nodes": 136, "n_identifiers": 19, "random_cut": "def get(self, url, cache=True, **kwargs):\n \n if not url.isValid():\n urlutils.invalid_url_error(url, \"start download\")\n return None\n\n req = QNetworkRequest(url)\n user_agent = websettings.user_agent(url)\n req.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, user_agent)\n\n if not cache:\n req.setAttribute(QNetworkRequest.Attribute.CacheSaveControlAttribute, False)\n\n return self.get_request(req, **kw", "d_id": 117539, "documentation": { "docstring": "Start a download with a link URL.\n\n Args:\n url: The URL to get, as QUrl\n cache: If set to False, don't cache the response.\n **kwargs: passed to get_request().\n\n Return:\n The created DownloadItem.\n ", "n_words": 32, "vocab_size": 28, "n_whitespaces": 97, "language": "en" } }, { "id": 246332, "commit_id": "63c46349c41aa967e64a5a4042ef5177f934be47", "repo": "synapse", "path": "tests/federation/test_federation_server.py", "file_name": "test_federation_server.py", "fun_name": "test_send_join_partial_state", "commit_message": "Implement MSC3706: partial state in `/send_join` response (#11967)\n\n* Make `get_auth_chain_ids` return a Set\r\n\r\nIt has a set internally, and a set is often useful where it gets used, so let's\r\navoid converting to an intermediate list.\r\n\r\n* Minor refactors in `on_send_join_request`\r\n\r\nA little bit of non-functional groundwork\r\n\r\n* Implement MSC3706: partial state in /send_join response", "code": "def test_send_join_partial_state(self):\n \n joining_user = \"@misspiggy:\" + self.OTHER_SERVER_NAME\n join_result = self._make_join(joining_user)\n\n join_event_dict = join_result[\"event\"]\n add_hashes_and_signatures(\n KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION],\n join_event_dict,\n signature_name=self.OTHER_SERVER_NAME,\n signing_key=self.OTHER_SERVER_SIGNATURE_KEY,\n )\n channel = self.make_signed_federation_request(\n \"PUT\",\n f\"/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true\",\n content=join_event_dict,\n )\n self.assertEquals(channel.code, 200, channel.json_body)\n\n # expect a reduced room state\n returned_state = [\n (ev[\"type\"], ev[\"state_key\"]) for ev in channel.json_body[\"state\"]\n ]\n self.assertCountEqual(\n returned_state,\n [\n (\"m.room.create\", \"\"),\n (\"m.room.power_levels\", \"\"),\n (\"m.room.join_rules\", \"\"),\n (\"m.room.history_visibility\", \"\"),\n 
],\n )\n\n # the auth chain should not include anything already in \"state\"\n returned_auth_chain_events = [\n (ev[\"type\"], ev[\"state_key\"]) for ev in channel.json_body[\"auth_chain\"]\n ]\n self.assertCountEqual(\n returned_auth_chain_events,\n [\n (\"m.room.member\", \"@kermit:test\"),\n ],\n )\n\n # the room should show that the new user is a member\n r = self.get_success(\n self.hs.get_state_handler().get_current_state(self._room_id)\n )\n self.assertEqual(r[(\"m.room.member\", joining_user)].membership, \"join\")\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 518, "n_words": 106, "vocab_size": 74, "complexity": 3, "nloc": 41, "token_counts": 215, "n_ast_nodes": 360, "n_identifiers": 31, "random_cut": "def test_send_join_partial_state(self):\n \n joining_user = \"@misspiggy:\" + self.OTHER_SERVER_NAME\n join_result = self._make_join(joining_user)\n\n join_event_dict = join_result[\"event\"]\n add_hashes_and_signatures(\n KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION],\n join_event_dict,\n signature_name=self.OTHER_SERVER_NAME,\n signing_key=self.OTHER_SERVER_SIGNATURE_KEY,\n )\n channel = self.make_signed_federation_request(\n \"PUT\",\n f\"/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true\",\n content=join_event_dict,\n )\n self.assertEquals(channel.code, 200, channel.json_body)\n\n # expect a reduced room state\n returned_state = [\n (ev[\"type\"], ev[\"state_key\"]) for ev in channel.json_body[\"state\"]\n ]\n self.assertCountEqual(\n returned_state,\n [\n (\"m.room.create\", \"\"),\n (\"m.room.power_levels\", \"\"),\n (\"m.room.join_rules\", \"\"),\n (\"m.room.history_visibility\", \"\"),\n ],\n )\n\n # the auth chain should not include anything already in \"state\"\n returned_auth_chain_events = [\n (ev[\"type\"], ev[\"state_key\"]) for ev in channel.json_body[\"auth_chain\"]\n ]\n self.assertCountEqual(\n returned_auth", "d_id": 71160, "documentation": { "docstring": "When MSC3706 support is enabled, /send_join should return partial state", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 220753, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/streams.py", "file_name": "streams.py", "fun_name": "drain", "commit_message": "add python 3.10.4 for windows", "code": "async def drain(self):\n \n if self._reader is not None:\n exc = self._reader.exception()\n if exc is not None:\n raise exc\n if self._transport.is_closing():\n # Wait for protocol.connection_lost() call\n # Raise connection closing error if any,\n # ConnectionResetError otherwise\n # Yield to the event loop so connection_lost() may be\n # called. 
Without this, _drain_helper() would return\n # immediately, and code that calls\n # write(...); await drain()\n # in a loop would never call connection_lost(), so it\n # would not see an error when the socket is closed.\n await sleep(0)\n await self._protocol._drain_helper()\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 267, "n_words": 87, "vocab_size": 60, "complexity": 4, "nloc": 8, "token_counts": 53, "n_ast_nodes": 100, "n_identifiers": 10, "random_cut": "async def drain(self):\n \n if self._reader is not None:\n exc = self._reader.exception()\n if exc is not None:\n raise exc\n if self._transport.is_closing():\n # Wait for protocol.connection_lost() call\n # Raise connection closing error if any,\n # ConnectionResetError otherwise\n # Yield to the event loop so connection_lost() may be\n # called. Without this, _drain_helper() would return\n # immediately, and code that calls\n # write(...); await drain()\n # in a loop would never call connection_lost(), so it\n # would not see an error when the socket is closed.\n await sleep(0)\n await self._", "d_id": 56111, "documentation": { "docstring": "Flush the write buffer.\n\n The intended use is to write\n\n w.write(data)\n await w.drain()\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 45, "language": "en" } }, { "id": 208066, "commit_id": "1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc", "repo": "celery", "path": "celery/canvas.py", "file_name": "canvas.py", "fun_name": "on_chord_header_start", "commit_message": "Canvas Header Stamping (#7384)\n\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Redo header stamping (#7341)\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Omer Katz \r\n\r\n* Added stamping mechanism\r\n\r\n* Manual stamping improved\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Add comma.\r\n\r\n* Moved groups to stamps\r\n\r\n* Fixed chord and added test for that\r\n\r\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* Fixed lint and elements\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* type -> isinstance\r\n\r\n* Added stamping mechanism\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Manual stamping improved\r\n\r\n* fail_ci_if_error uncommented\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Changes\r\n\r\n* Add comma.\r\n\r\n* Fixed chord and added test for that\r\n\r\n* canvas.py fixed\r\n\r\n* Test chord.py fixed\r\n\r\n* Fixed stamped_headers\r\n\r\n* collections import fixed\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* collections import fixed\r\n\r\n* Update celery/backends/base.py\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* ampq.py fixed\r\n\r\n* Refrain from using deprecated import path.\r\n\r\n* Fix test_complex_chain regression.\r\n\r\nWhenever we stamp a group we need to freeze it first if it wasn't already frozen.\r\nSomewhere along the line, the group id changed because we were freezing twice.\r\nThis commit places the stamping operation after preparing the chain's steps which fixes the problem somehow.\r\n\r\nWe don't know why yet.\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed issues with maybe_list. 
Add documentation\r\n\r\n* Fixed potential issue with integration tests\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed test_generator issues\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Update docs/userguide/canvas.rst\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* Fixed Couchbase\r\n\r\n* Better stamping intro\r\n\r\n* New GroupVisitor example\r\n\r\n* Adjust documentation.\r\n\r\nCo-authored-by: Naomi Elstein \r\nCo-authored-by: Omer Katz \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Asif Saif Uddin \r\nCo-authored-by: Omer Katz ", "code": "def on_chord_header_start(self, chord, **header) -> dict:\n \n if not isinstance(chord.tasks, group):\n chord.tasks = group(chord.tasks)\n return self.on_group_start(chord.tasks, **header)\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 48, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 12, "token_counts": 46, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def on_chord_header_start(self, chord, **header) -> dict:\n \n if not isinstance(chord.tasks, group):\n chord.tasks = group(c", "d_id": 52191, "documentation": { "docstring": "Method that is called on сhord header stamping start.\n\n Arguments:\n chord (chord): chord that is stamped.\n headers (Dict): Partial headers that could be merged with existing headers.\n Returns:\n Dict: headers to update.\n ", "n_words": 32, "vocab_size": 26, "n_whitespaces": 92, "language": "en" } }, { "id": 151043, "commit_id": "ec76214d023a6c53ffab0af8d43bc5b72b1d66af", "repo": "freqtrade", "path": "freqtrade/freqai/data_drawer.py", "file_name": "data_drawer.py", "fun_name": "load_historic_predictions_from_disk", "commit_message": "backup historical predictions pickle and load the backup in case of corruption", "code": "def load_historic_predictions_from_disk(self):\n \n exists = self.historic_predictions_path.is_file()\n if exists:\n try:\n with open(self.historic_predictions_path, \"rb\") as fp:\n self.historic_predictions = cloudpickle.load(fp)\n logger.info(\n f\"Found existing historic predictions at {self.full_path}, but beware \"\n \"that statistics may be inaccurate if the bot has been offline for \"\n \"an extended period of time.\"\n )\n except EOFError:\n logger.warning(\n 'Historical prediction file was corrupted. 
Trying to load backup file.')\n with open(self.historic_predictions_bkp_path, \"rb\") as fp:\n self.historic_predictions = cloudpickle.load(fp)\n logger.warning('FreqAI successfully loaded the backup historical predictions file.')\n\n elif not self.follow_mode:\n logger.info(\"Could not find existing historic_predictions, starting from scratch\")\n else:\n logger.warning(\n f\"Follower could not find historic predictions at {self.full_path} \"\n \"sending null values back to strategy\"\n )\n\n return exists\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 436, "n_words": 105, "vocab_size": 79, "complexity": 4, "nloc": 25, "token_counts": 112, "n_ast_nodes": 222, "n_identifiers": 17, "random_cut": "def load_historic_predictions_from_disk(self):\n \n exists = self.historic_predictions_path.is_file()\n if exists:\n try:\n with open(self.historic_predictions_path, \"rb\") as fp:\n self.historic_predictions = cloudpickle.load(fp)\n logger.info(\n f\"Found existing historic predictions at {self.full_path}, but beware \"\n \"that statistics may be inaccurate if the bot ha", "d_id": 34932, "documentation": { "docstring": "\n Locate and load a previously saved historic predictions.\n :return: bool - whether or not the drawer was located\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 40, "language": "en" } }, { "id": 181992, "commit_id": "1103844708c7f3a3bd1fc33cae56eb59209ef6c0", "repo": "textual", "path": "tests/test_css_parse.py", "file_name": "test_css_parse.py", "fun_name": "test_background", "commit_message": "Namespacing parsing tests into classes", "code": "def test_background(self):\n css = \n stylesheet = Stylesheet()\n stylesheet.parse(css)\n\n styles = stylesheet.rules[0].styles\n assert styles.text_background == Color(\"red\", type=ColorType.STANDARD, number=1)\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 52, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 9, "token_counts": 48, "n_ast_nodes": 79, "n_identifiers": 14, "random_cut": "def test_background(self):\n css = \n stylesheet = Stylesheet()\n stylesheet.parse(css)\n\n styles = stylesheet.rules[0].styles\n ", "d_id": 43713, "documentation": { "docstring": "#some-widget {\n text: on red;\n }\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 31, "language": "en" } }, { "id": 304403, "commit_id": "bf7239c25db06f1377a895244a906b43242c9963", "repo": "core", "path": "homeassistant/components/dte_energy_bridge/sensor.py", "file_name": "sensor.py", "fun_name": "update", "commit_message": "Improve entity type hints [d] (#77031)", "code": "def update(self) -> None:\n \n try:\n response = requests.get(self._url, timeout=5)\n except (requests.exceptions.RequestException, ValueError):\n _LOGGER.warning(\n \"Could not update status for DTE Energy Bridge (%s)\", self._attr_name\n )\n return\n\n if response.status_code != HTTPStatus.OK:\n _LOGGER.warning(\n \"Invalid status_code from DTE Energy Bridge: %s (%s)\",\n response.status_code,\n self._attr_name,\n )\n return\n\n response_split = response.text.split()\n\n if len(response_split) != 2:\n _LOGGER.warning(\n 'Invalid response from DTE Energy Bridge: \"%s\" (%s)',\n response.text,\n self._attr_name,\n )\n return\n\n val = float(response_split[0])\n\n # A workaround for a bug in the DTE energy bridge.\n # The returned value can randomly be in W or kW. 
Checking for a\n # a decimal seems to be a reliable way to determine the units.\n # Limiting to version 1 because version 2 apparently always returns\n # values in the format 000000.000 kW, but the scaling is Watts\n # NOT kWatts\n if self._version == 1 and \".\" in response_split[0]:\n self._attr_native_value = val\n else:\n self._attr_native_value = val / 1000\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 489, "n_words": 146, "vocab_size": 99, "complexity": 6, "nloc": 29, "token_counts": 141, "n_ast_nodes": 234, "n_identifiers": 24, "random_cut": "def update(self) -> None:\n \n try:\n response = requests.get(self._url, timeout=5)\n except (requests.exceptions.RequestException, ValueError):\n _LOGGER.warning(\n \"Could not update status for DTE Energy Bridge (%s)\", self._attr_name\n )\n return\n\n if response.status_code != HTTPStatus.OK:\n _LOGGER.warning(\n \"Invalid status_code from DTE Energy Bridge: %s (%s)\",\n response.status_code,\n self._attr_name,\n )\n return\n\n response_split = response.text.split()\n\n if len(response_split) != 2:\n _LOGGER.warning(\n 'Invalid response from DTE Energy Bridge: \"%s\" (%s)',\n response.text,\n self._attr_name,\n )\n return\n\n ", "d_id": 103210, "documentation": { "docstring": "Get the energy usage data from the DTE energy bridge.", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 156131, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/order.py", "file_name": "order.py", "fun_name": "ndependencies", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def ndependencies(dependencies, dependents):\n \n num_needed = {}\n result = {}\n for k, v in dependencies.items():\n num_needed[k] = len(v)\n if not v:\n result[k] = 1\n\n num_dependencies = num_needed.copy()\n current = []\n current_pop = current.pop\n current_append = current.append\n\n for key in result:\n for parent in dependents[key]:\n num_needed[parent] -= 1\n if not num_needed[parent]:\n current_append(parent)\n while current:\n key = current_pop()\n result[key] = 1 + sum(result[child] for child in dependencies[key])\n for parent in dependents[key]:\n num_needed[parent] -= 1\n if not num_needed[parent]:\n current_append(parent)\n return num_dependencies, result\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 238, "n_words": 78, "vocab_size": 45, "complexity": 10, "nloc": 24, "token_counts": 155, "n_ast_nodes": 244, "n_identifiers": 20, "random_cut": "def ndependencies(dependencies, dependents):\n \n num_needed = {}\n result = {}\n for k, v in dependencies.items():\n num_needed[k] = len(v)\n if not v:\n result[k] = 1\n\n num_dependencies = num_needed.copy()\n current = []\n current_pop = current.pop\n cur", "d_id": 36576, "documentation": { "docstring": "Number of total data elements on which this key depends\n\n For each key we return the number of tasks that must be run for us to run\n this task.\n\n Examples\n --------\n >>> inc = lambda x: x + 1\n >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dependencies, dependents = get_deps(dsk)\n >>> num_dependencies, total_dependencies = ndependencies(dependencies, dependents)\n >>> sorted(total_dependencies.items())\n [('a', 1), ('b', 2), ('c', 3)]\n\n Returns\n -------\n 
num_dependencies: Dict[key, int]\n total_dependencies: Dict[key, int]\n ", "n_words": 77, "vocab_size": 63, "n_whitespaces": 122, "language": "en" } }, { "id": 167375, "commit_id": "7d2f9b8d59908fbf57c6453bc41891efbfe981a6", "repo": "pandas", "path": "pandas/io/pytables.py", "file_name": "pytables.py", "fun_name": "validate_attr", "commit_message": "TYP: some return annotations in pytables.py (#47512)", "code": "def validate_attr(self, append) -> None:\n \n if append:\n existing_fields = getattr(self.attrs, self.kind_attr, None)\n if existing_fields is not None and existing_fields != list(self.values):\n raise ValueError(\"appended items do not match existing items in table!\")\n\n existing_dtype = getattr(self.attrs, self.dtype_attr, None)\n if existing_dtype is not None and existing_dtype != self.dtype:\n raise ValueError(\n \"appended items dtype do not match existing items dtype in table!\"\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 181, "n_words": 59, "vocab_size": 34, "complexity": 6, "nloc": 11, "token_counts": 78, "n_ast_nodes": 124, "n_identifiers": 13, "random_cut": "def validate_attr(self, append) -> None:\n \n if append:\n existing_fields = getattr(self.attrs, self.kind_attr, None)\n if existing_fields is not None and existing_fields != list(self.values):\n raise ValueError(\"appended items do not match existing items in table!\")\n\n existing_dtype = getattr(self.attrs, self.dtype_attr, None)\n if existing_dtype is not None an", "d_id": 39982, "documentation": { "docstring": "validate that we have the same order as the existing & same dtype", "n_words": 13, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 20928, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/typing_extensions.py", "file_name": "typing_extensions.py", "fun_name": "__getitem__", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def __getitem__(self, parameters):\n item = typing._type_check(parameters,\n f'{self._name} accepts only single type')\n return typing._GenericAlias(self, (item,))\n\n Final = _FinalForm('Final',\n doc=", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "Final = _FinalForm('Final',\n doc=\"\"\"A special typing construct to indicate that a name\n cannot be re-assigned or overridden in a subclass.\n For example:\n\n MAX_SIZE: Final = 9000\n MAX_SIZE += 1 # Error reported by type checker\"\"\"A special typing construct to indicate that a name\n cannot be re-assigned or overridden in a subclass.\n For example:\n\n MAX_SIZE: Final = 9000\n MAX_SIZE +=a subclass.\n For example:", "n_ast_errors": 3, "ast_levels": 11, "n_whitespaces": 101, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 103, "n_identifiers": 28, "random_cut": "def __getitem__(self, 
parameters):\n item = typing._type_check(parameters,\n f'{self._name} accepts only single type')\n return typing._GenericAlias(self, (item,))\n\n Final = _FinalForm('Final',\n d", "d_id": 3619, "documentation": { "docstring": "A special typing construct to indicate that a name\n cannot be re-assigned or overridden in a subclass.\n For example:\n\n MAX_SIZE: Final = 9000\n MAX_SIZE += 1 # Error reported by type checker", "n_words": 32, "vocab_size": 31, "n_whitespaces": 128, "language": "en" } }, { "id": 277091, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/tf_utils.py", "file_name": "tf_utils.py", "fun_name": "type_spec_from_value", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def type_spec_from_value(value):\n \n if is_extension_type(value):\n return value._type_spec # pylint: disable=protected-access\n # Get a TensorSpec for array-like data without\n # converting the data to a Tensor\n if hasattr(value, \"shape\") and hasattr(value, \"dtype\"):\n return tf.TensorSpec(value.shape, value.dtype)\n else:\n return tf.type_spec_from_value(value)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 76, "n_words": 36, "vocab_size": 28, "complexity": 4, "nloc": 7, "token_counts": 53, "n_ast_nodes": 92, "n_identifiers": 9, "random_cut": "def type_spec_from_value(value):\n \n if is_extension_type(value):\n return value._type_spec # pylint: disable=protected-access\n # Get a TensorSpec for array-like data without\n # converting the data to a Tensor\n if hasattr(value, \"shape\") and hasattr(value, \"dtype\"):\n return tf.TensorSpec(value.shape, value", "d_id": 81863, "documentation": { "docstring": "Grab type_spec without converting array-likes to tensors.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 212726, "commit_id": "cfe2c96a1fa6fc721c998179298a7d430ccbaefd", "repo": "PySimpleGUI", "path": "DemoPrograms/Demo_User_Settings_Class.py", "file_name": "Demo_User_Settings_Class.py", "fun_name": "make_window", "commit_message": "Catching up on the many many demo programs that were not checked in....", "code": "def make_window():\n \n\n sg.theme(settings.get('-theme-', 'DarkBlue2')) # set the theme\n\n layout = [[sg.Text('Settings Window')],\n [sg.Input(settings.get('-input-', ''), k='-IN-')],\n [sg.Listbox(sg.theme_list(), default_values=[settings['-theme-'],], size=(15, 10), k='-LISTBOX-')],\n [sg.CB('Option 1', settings.get('-option1-', True), k='-CB1-')],\n [sg.CB('Option 2', settings.get('-option2-', False), k='-CB2-')],\n [sg.T('Settings file = ' + settings.get_filename())],\n [sg.Button('Save'), sg.Button('Settings Dictionary'), sg.Button('Exit without saving', k='Exit')]]\n\n window = sg.Window('A Settings Window', layout)\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 141, "n_words": 49, "vocab_size": 46, "complexity": 1, "nloc": 10, "token_counts": 181, "n_ast_nodes": 304, "n_identifiers": 19, "random_cut": "def make_window():\n \n\n sg.theme(settings.get('-theme-', 'DarkBlue2')) # set the theme\n\n layout = [[sg.Text('Settings Window')],\n [sg.Input(settings.get('-input-', ''), k='-IN-')],\n [sg.Listbox(sg.theme_list(), default_values=[settings['-theme-'],], size=(15, 10), k='-LISTBOX-')],\n [sg.CB('Option 1', settings.get('-option1-', True), 
k='-CB1-')],\n [sg.CB('Option 2', settings.get('-option2-', False), k='-CB2-')],\n [sg.T('Settings file = ' + settings.get_filename())],\n [sg.Button('Save'), sg.Button('Settings Dictionar", "d_id": 53367, "documentation": { "docstring": "\n Creates a new window. The default values for some elements are pulled directly from the\n \"User Settings\" without the use of temp variables.\n\n Some get_entry calls don't have a default value, such as theme, because there was an initial call\n that would have set the default value if the setting wasn't present. Could still put the default\n value if you wanted but it would be 2 places to change if you wanted a different default value.\n\n Use of a lookup table to map between element keys and user settings could be aded. This demo\n is intentionally done without one to show how to use the settings APIs in the most basic,\n straightforward way.\n\n If your application allows changing the theme, then a make_window function is good to have\n so that you can close and re-create a window easily.\n\n :return: (sg.Window) The window that was created\n ", "n_words": 145, "vocab_size": 103, "n_whitespaces": 185, "language": "en" } }, { "id": 249450, "commit_id": "898fef2789c9b1a20ef53c7d588f536f51f0fe2f", "repo": "synapse", "path": "synapse/metrics/common_usage_metrics.py", "file_name": "common_usage_metrics.py", "fun_name": "_collect", "commit_message": "Share some metrics between the Prometheus exporter and the phone home stats (#13671)", "code": "async def _collect(self) -> CommonUsageMetrics:\n \n dau_count = await self._store.count_daily_users()\n\n return CommonUsageMetrics(\n daily_active_users=dau_count,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 52, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 8, "token_counts": 26, "n_ast_nodes": 46, "n_identifiers": 7, "random_cut": "async def _collect(self) -> CommonUsageMetrics:\n \n dau_count = await self._store.count_daily_users()\n\n return CommonUsageMetrics(\n daily_active_users=dau_count,\n )\n", "d_id": 72922, "documentation": { "docstring": "Collect the common metrics and either create the CommonUsageMetrics object to\n use if it doesn't exist yet, or update it.\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 34, "language": "en" } }, { "id": 178456, "commit_id": "11b0190a5e2d77098b16ff01ae8597428e055f53", "repo": "Nuitka", "path": "nuitka/plugins/Plugins.py", "file_name": "Plugins.py", "fun_name": "getPreprocessorSymbols", "commit_message": "Minor cleanups\n\n* Typos and minor problems only", "code": "def getPreprocessorSymbols(cls):\n \n\n if cls.preprocessor_symbols is None:\n cls.preprocessor_symbols = OrderedDict()\n\n for plugin in getActivePlugins():\n value = plugin.getPreprocessorSymbols()\n\n if value is not None:\n assert type(value) is dict, value\n\n # We order per plugin, but from the plugins, lets just take a dict\n # and achieve determinism by ordering the defines by name.\n for key, value in sorted(value.items()):\n # False alarm, pylint: disable=I0021,unsupported-assignment-operation\n cls.preprocessor_symbols[key] = value\n\n return cls.preprocessor_symbols\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 260, "n_words": 65, "vocab_size": 47, "complexity": 5, "nloc": 10, "token_counts": 75, "n_ast_nodes": 124, "n_identifiers": 12, "random_cut": "def 
getPreprocessorSymbols(cls):\n \n\n if cls.preprocessor_symbols is None:\n cls.preprocessor_symbols = OrderedDict()\n\n for plugin in getActivePlugins():\n value = plugin.getPreprocessorSymbols()\n\n if value is not None:\n assert type(value) is dict, value\n\n # We order per plugin, but from the plugins, lets just take a dict\n # and achieve determinism by ordering the defines by name.\n for key, value in sorted(value.items()):\n # False alarm, pylint", "d_id": 42702, "documentation": { "docstring": "Let plugins provide C defines to be used in compilation.\n\n Notes:\n The plugins can each contribute, but are hopefully using\n a namespace for their defines.\n\n Returns:\n OrderedDict(), where None value indicates no define value,\n i.e. \"-Dkey=value\" vs. \"-Dkey\"\n ", "n_words": 38, "vocab_size": 37, "n_whitespaces": 103, "language": "en" } }, { "id": 108585, "commit_id": "24b16804731d3a724e4ec0984da140b1a6b05c66", "repo": "matplotlib", "path": "lib/matplotlib/text.py", "file_name": "text.py", "fun_name": "_check_xy", "commit_message": "MNT: make renderer always optional", "code": "def _check_xy(self, renderer=None):\n \n if renderer is None:\n renderer = self.figure._get_renderer()\n b = self.get_annotation_clip()\n if b or (b is None and self.xycoords == \"data\"):\n # check if self.xy is inside the axes.\n xy_pixel = self._get_position_xy(renderer)\n return self.axes.contains_point(xy_pixel)\n return True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 117, "n_words": 38, "vocab_size": 29, "complexity": 5, "nloc": 8, "token_counts": 65, "n_ast_nodes": 109, "n_identifiers": 12, "random_cut": "def _check_xy(self, renderer=None):\n \n if renderer is None:\n renderer = self.figure._get_renderer()\n b = self.get_annotation_clip()\n if b or (b is None and self.xycoords == \"data\"):\n", "d_id": 23267, "documentation": { "docstring": "Check whether the annotation at *xy_pixel* should be drawn.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 266931, "commit_id": "4baf18c573c17cf9cd5716b28dbf38a32b57aaff", "repo": "ansible", "path": "lib/ansible/plugins/connection/__init__.py", "file_name": "__init__.py", "fun_name": "_split_ssh_args", "commit_message": "Remove more Python 2.x compatibility code from controller. (#77320)", "code": "def _split_ssh_args(argstring):\n \n # In Python3, shlex.split doesn't work on a byte string.\n return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 20, "vocab_size": 20, "complexity": 3, "nloc": 2, "token_counts": 32, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def _split_ssh_args(argstring):\n \n # In Python3, shlex.split doesn't work on a byte string.\n return [to_text(x.strip()) for x in shlex.split(argstring) i", "d_id": 78660, "documentation": { "docstring": "\n Takes a string like '-o Foo=1 -o Bar=\"foo bar\"' and returns a\n list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to\n the argument list. 
The list will not contain any empty elements.\n ", "n_words": 34, "vocab_size": 32, "n_whitespaces": 63, "language": "en" } }, { "id": 163605, "commit_id": "6b43a78f2f1036ebae205d2d35ab96f07549fe96", "repo": "pandas", "path": "pandas/core/indexers/utils.py", "file_name": "utils.py", "fun_name": "is_empty_indexer", "commit_message": "REF: simplify Block.setitem (#45403)", "code": "def is_empty_indexer(indexer) -> bool:\n \n if is_list_like(indexer) and not len(indexer):\n return True\n if not isinstance(indexer, tuple):\n indexer = (indexer,)\n return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)\n\n\n# -----------------------------------------------------------\n# Indexer Validation\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 34, "vocab_size": 29, "complexity": 6, "nloc": 17, "token_counts": 60, "n_ast_nodes": 98, "n_identifiers": 11, "random_cut": "def is_empty_indexer(indexer) -> bool:\n \n if is_list_like(indexer) and not len(indexer):\n return True\n if not isinstance(indexer, tuple):\n indexer = (indexer,)\n ", "d_id": 39462, "documentation": { "docstring": "\n Check if we have an empty indexer.\n\n Parameters\n ----------\n indexer : object\n\n Returns\n -------\n bool\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 40, "language": "en" } }, { "id": 107046, "commit_id": "c682ca40c647770a967b6b8a7615eb91c7cb3fc9", "repo": "matplotlib", "path": "lib/matplotlib/_constrained_layout.py", "file_name": "_constrained_layout.py", "fun_name": "make_layoutgrids_gs", "commit_message": "FIX: better repr for subgridspecs", "code": "def make_layoutgrids_gs(layoutgrids, gs):\n \n\n if gs in layoutgrids or gs.figure is None:\n return layoutgrids\n # in order to do constrained_layout there has to be at least *one*\n # gridspec in the tree:\n layoutgrids['hasgrids'] = True\n if not hasattr(gs, '_subplot_spec'):\n # normal gridspec\n parent = layoutgrids[gs.figure]\n layoutgrids[gs] = mlayoutgrid.LayoutGrid(\n parent=parent,\n parent_inner=True,\n name='gridspec',\n ncols=gs._ncols, nrows=gs._nrows,\n width_ratios=gs.get_width_ratios(),\n height_ratios=gs.get_height_ratios())\n else:\n # this is a gridspecfromsubplotspec:\n subplot_spec = gs._subplot_spec\n parentgs = subplot_spec.get_gridspec()\n # if a nested gridspec it is possible the parent is not in there yet:\n if parentgs not in layoutgrids:\n layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs)\n subspeclb = layoutgrids[parentgs]\n # get a unique representation:\n rep = object.__repr__(gs) + 'top'\n # gridspecfromsubplotspec need an outer container:\n if rep not in layoutgrids:\n layoutgrids[rep] = mlayoutgrid.LayoutGrid(\n parent=subspeclb,\n name='top',\n nrows=1, ncols=1,\n parent_pos=(subplot_spec.rowspan, subplot_spec.colspan))\n layoutgrids[gs] = mlayoutgrid.LayoutGrid(\n parent=layoutgrids[rep],\n name='gridspec',\n nrows=gs._nrows, ncols=gs._ncols,\n width_ratios=gs.get_width_ratios(),\n height_ratios=gs.get_height_ratios())\n return layoutgrids\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 510, "n_words": 134, "vocab_size": 80, "complexity": 6, "nloc": 33, "token_counts": 230, "n_ast_nodes": 361, "n_identifiers": 29, "random_cut": "def make_layoutgrids_gs(layoutgrids, gs):\n \n\n if gs in layoutgrids or gs.figure is None:\n return layoutgrids\n # in order to do 
constrained_layout there has to be at least *one*\n # gridspec in the tree:\n layoutgrids['hasgrids'] = True\n if not hasattr(gs, '_subplot_spec'):\n # normal gridspec\n parent = layoutgrids[gs.figure]\n layoutgrids[gs] = mlayoutgrid.LayoutGrid(\n parent=parent,\n parent_inner=True,\n name='gridspec',\n ncols=gs._ncols, nrows=gs._nrows,\n width_ratios=gs.get_width_ratios(),\n height_ratios=gs.get_height_ratios())\n else:\n # this is a gridspecfromsubplotspec:\n subplot_spec = g", "d_id": 22565, "documentation": { "docstring": "\n Make the layoutgrid for a gridspec (and anything nested in the gridspec)\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 19, "language": "en" } }, { "id": 82430, "commit_id": "c1290c9ff89cb00caa5469129fd527e9d82cd820", "repo": "django-cms", "path": "cms/tests/test_sitemap.py", "file_name": "test_sitemap.py", "fun_name": "test_sitemap_published_titles", "commit_message": "ci: Added codespell (#7355)\n\nCo-authored-by: Christian Clauss \r\n\r\n* ci: codespell config taken from #7292", "code": "def test_sitemap_published_titles(self):\n \n sitemap = CMSSitemap()\n locations = []\n urlset = sitemap.get_urls()\n for item in urlset:\n locations.append(item['location'])\n for title in Title.objects.public():\n page = title.page.get_public_object()\n if title.path:\n url = f'http://example.com/{title.language}/{title.path}/'\n else:\n url = f'http://example.com/{title.language}/{title.path}'\n if page.is_published('en') and not page.publisher_is_draft:\n self.assertTrue(url in locations)\n else:\n self.assertFalse(url in locations)\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 212, "n_words": 44, "vocab_size": 31, "complexity": 6, "nloc": 16, "token_counts": 102, "n_ast_nodes": 203, "n_identifiers": 22, "random_cut": "def test_sitemap_published_titles(self):\n \n sitemap = CMSSitemap()\n locations = []\n urlset = sitemap.get_urls()\n for item in urlset:\n locations.append(item['location'])\n for title in Title.objects.public():\n page = title.page.get_public_object()\n ", "d_id": 17396, "documentation": { "docstring": "\n Check that published titles are in the urls\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 155800, "commit_id": "e25284dced9749f02bd5d8c80b6225153aa282d8", "repo": "dask", "path": "dask/array/creation.py", "file_name": "creation.py", "fun_name": "eye", "commit_message": "Fix eye inconsistency with NumPy for dtype=None (#8669) (#8685)", "code": "def eye(N, chunks=\"auto\", M=None, k=0, dtype=float):\n \n eye = {}\n if M is None:\n M = N\n if dtype is None:\n dtype = float\n\n if not isinstance(chunks, (int, str)):\n raise ValueError(\"chunks must be an int or string\")\n\n vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype)\n chunks = vchunks[0]\n\n token = tokenize(N, chunks, M, k, dtype)\n name_eye = \"eye-\" + token\n\n for i, vchunk in enumerate(vchunks):\n for j, hchunk in enumerate(hchunks):\n if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks:\n eye[name_eye, i, j] = (\n np.eye,\n vchunk,\n hchunk,\n k - (j - i) * chunks,\n dtype,\n )\n else:\n eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype)\n return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype)\n\n\n@derived_from(np)", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@derived_from(np)", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 343, "n_words": 
121, "vocab_size": 80, "complexity": 7, "nloc": 25, "token_counts": 230, "n_ast_nodes": 342, "n_identifiers": 27, "random_cut": "def eye(N, chunks=\"auto\", M=None, k=0, dtype=float):\n \n eye = {}\n if M is None:\n M = N\n if dtype is None:\n dtype = float\n\n if not isinstance(chunks, (int, str)):\n raise ValueError(\"chunks must be an int or string\")\n\n vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype)\n chunks = vchunks[0]\n\n token = tokenize(N, chunks, M, k, dtype)\n name_eye = \"eye-\" + token\n\n for i, vchunk in enumerate(vchunks):\n for j, hchunk in enumerate(hchunks):\n if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks:\n eye[name_eye, i, j] = (\n np.eye,\n vchunk,\n hchunk,\n k - (j - i) * chunks,\n dtype,\n )\n else:\n eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype)\n return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype)\n\n\n", "d_id": 36471, "documentation": { "docstring": "\n Return a 2-D Array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n chunks : int, str\n How to chunk the array. Must be one of the following forms:\n\n - A blocksize like 1000.\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n M : int, optional\n Number of columns in the output. If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n I : Array of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n ", "n_words": 162, "vocab_size": 103, "n_whitespaces": 295, "language": "en" } }, { "id": 271876, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_utils_v1.py", "file_name": "training_utils_v1.py", "fun_name": "is_composite_or_composite_value", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def is_composite_or_composite_value(tensor):\n \n # TODO(b/125094323): This should be isinstance(CompositeTensor) or\n # isinstance(CompositeTensorValue) once we support that.\n return isinstance(\n tensor,\n (\n tf.__internal__.CompositeTensor,\n tf.compat.v1.SparseTensorValue,\n tf.compat.v1.ragged.RaggedTensorValue,\n ),\n )\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 93, "n_words": 24, "vocab_size": 23, "complexity": 1, "nloc": 9, "token_counts": 39, "n_ast_nodes": 61, "n_identifiers": 11, "random_cut": "def is_composite_or_composite_value(tensor):\n \n # TODO(b/125094323): This sho", "d_id": 80885, "documentation": { "docstring": "Returns true if 'tensor' is a CompositeTensor or a CT Value object.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 80990, "commit_id": "24152555c5d1b52d5024197bcaf80fdb87b8b14e", "repo": "awx", "path": "awx/main/utils/common.py", "file_name": "common.py", "fun_name": "create_partition", "commit_message": "Handle error for create_partition\n\nOccasionally the create_partition will error with,\nrelation \"main_projectupdateevent_20220323_19\" 
already exists\n\nThis change wraps the db command into a try except block with its\nown transaction", "code": "def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False):\n \n current_time = now()\n if not start:\n if minutely:\n start = current_time.replace(microsecond=0, second=0)\n else:\n start = current_time.replace(microsecond=0, second=0, minute=0)\n if not end:\n if minutely:\n end = start.replace(microsecond=0, second=0) + timedelta(minutes=1)\n else:\n end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1)\n start_timestamp = str(start)\n end_timestamp = str(end)\n\n if not partition_label:\n if minutely:\n partition_label = start.strftime('%Y%m%d_%H%M')\n else:\n partition_label = start.strftime('%Y%m%d_%H')\n\n try:\n with transaction.atomic():\n with connection.cursor() as cursor:\n cursor.execute(\n f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} '\n f'PARTITION OF {tblname} '\n f'FOR VALUES FROM (\\'{start_timestamp}\\') to (\\'{end_timestamp}\\');'\n )\n except ProgrammingError as e:\n logger.debug(f'Caught known error due to existing partition: {e}')\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 346, "n_words": 99, "vocab_size": 67, "complexity": 8, "nloc": 29, "token_counts": 201, "n_ast_nodes": 360, "n_identifiers": 28, "random_cut": "def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False):\n \n current_time = now()\n if not start:\n if minutely:\n start = current_time.replace(microsecond=0, second=0)\n else:\n start = current_time.replace(microsecond=0, second=0, minute=0)\n if not end:\n if minutely:\n end = start.replace(microsecond=0, second=0) + timedelta(minutes=1)\n else:\n end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1)\n start_timestamp = str(start)\n end_timestamp = str(end)\n\n if not partition_label:\n if minutely:\n partition_label = start.strftime('%Y%m%d_%H%M')\n else:\n partition_label = start.strftime('%Y%m%d_%H')\n\n try:\n with transaction.atomic():\n with connection.cursor() as cursor:\n cursor.execute(\n f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} '\n f'PARTITION OF {tblname} '\n f'FOR VALUES FROM (\\'{start_timestamp}\\') to (\\'{end_timestamp}\\');'\n )\n exce", "d_id": 17125, "documentation": { "docstring": "Creates new partition table for events.\n - start defaults to beginning of current hour\n - end defaults to end of current hour\n - partition_label defaults to YYYYMMDD_HH\n\n - minutely will create partitions that span _a single minute_ for testing purposes\n ", "n_words": 40, "vocab_size": 28, "n_whitespaces": 55, "language": "en" } }, { "id": 260529, "commit_id": "095e46670a1e21e8c49972b23e75f2d2a48c6c93", "repo": "scikit-learn", "path": "sklearn/metrics/pairwise.py", "file_name": "pairwise.py", "fun_name": "rbf_kernel", "commit_message": "DOC Ensure `rbf_kernel` passes numpydoc validation (#23954)\n\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def rbf_kernel(X, Y=None, gamma=None):\n \n X, Y = check_pairwise_arrays(X, Y)\n if gamma is None:\n gamma = 1.0 / X.shape[1]\n\n K = euclidean_distances(X, Y, squared=True)\n K *= -gamma\n np.exp(K, K) # exponentiate K in-place\n return K\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 34, 
"vocab_size": 28, "complexity": 2, "nloc": 8, "token_counts": 68, "n_ast_nodes": 101, "n_identifiers": 11, "random_cut": "def rbf_kernel(X, Y=None, gamma=None):\n \n X, Y = check_pairwise_arrays(X, Y)\n", "d_id": 76319, "documentation": { "docstring": "Compute the rbf (gaussian) kernel between X and Y.\n\n K(x, y) = exp(-gamma ||x-y||^2)\n\n for each pair of rows x in X and y in Y.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples_X, n_features)\n A feature array.\n\n Y : ndarray of shape (n_samples_Y, n_features), default=None\n An optional second feature array. If `None`, uses `Y=X`.\n\n gamma : float, default=None\n If None, defaults to 1.0 / n_features.\n\n Returns\n -------\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\n The RBF kernel.\n ", "n_words": 85, "vocab_size": 63, "n_whitespaces": 153, "language": "en" } }, { "id": 266698, "commit_id": "4f48f375a0203b0d09c55522a86300a52da5b24a", "repo": "ansible", "path": "lib/ansible/module_utils/common/parameters.py", "file_name": "parameters.py", "fun_name": "_validate_argument_values", "commit_message": "parameters: handle blank values when argument is a list (#77119)\n\nFixes: #77108\r\n\r\nSigned-off-by: Abhijeet Kasurde ", "code": "def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None):\n \n\n if errors is None:\n errors = AnsibleValidationErrorMultiple()\n\n for param, spec in argument_spec.items():\n choices = spec.get('choices')\n if choices is None:\n continue\n\n if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)):\n if param in parameters:\n # Allow one or more when type='list' param with choices\n if isinstance(parameters[param], list):\n diff_list = [item for item in parameters[param] if item not in choices]\n if diff_list:\n choices_str = \", \".join([to_native(c) for c in choices])\n diff_str = \", \".join(diff_list)\n msg = \"value of %s must be one or more of: %s. Got no match for: %s\" % (param, choices_str, diff_str)\n if options_context:\n msg = \"{0} found in {1}\".format(msg, \" -> \".join(options_context))\n errors.append(ArgumentValueError(msg))\n elif parameters[param] not in choices:\n # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking\n # the value. 
If we can't figure this out, module author is responsible.\n if parameters[param] == 'False':\n overlap = BOOLEANS_FALSE.intersection(choices)\n if len(overlap) == 1:\n # Extract from a set\n (parameters[param],) = overlap\n\n if parameters[param] == 'True':\n overlap = BOOLEANS_TRUE.intersection(choices)\n if len(overlap) == 1:\n (parameters[param],) = overlap\n\n if parameters[param] not in choices:\n choices_str = \", \".join([to_native(c) for c in choices])\n msg = \"value of %s must be one of: %s, got: %s\" % (param, choices_str, parameters[param])\n if options_context:\n msg = \"{0} found in {1}\".format(msg, \" -> \".join(options_context))\n errors.append(ArgumentValueError(msg))\n else:\n msg = \"internal error: choices for argument %s are not iterable: %s\" % (param, choices)\n if options_context:\n msg = \"{0} found in {1}\".format(msg, \" -> \".join(options_context))\n errors.append(ArgumentTypeError(msg))\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 974, "n_words": 248, "vocab_size": 128, "complexity": 22, "nloc": 38, "token_counts": 356, "n_ast_nodes": 578, "n_identifiers": 35, "random_cut": "def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None):\n \n\n if errors is None:\n errors = AnsibleValidationErrorMultiple()\n\n for param, spec in argument_spec.items():\n choices = spec.get('choices')\n if choices is None:\n continue\n\n if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)):\n if param in parameters:\n # Allow one or more when type='list' param with choices\n if isinstance(parameters[param], list):\n ", "d_id": 78517, "documentation": { "docstring": "Ensure all arguments have the requested values, and there are no stray arguments", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 19145, "commit_id": "4c58179509e6f6047789efb0a95c2b0e20cb6c8f", "repo": "mlflow", "path": "mlflow/models/evaluation/base.py", "file_name": "base.py", "fun_name": "content", "commit_message": "Improve evaluation api (#5256)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap limitation on value type\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix format\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def content(self):\n \n if self._content is None:\n self._load()\n return self._content\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 4, "token_counts": 22, "n_ast_nodes": 39, "n_identifiers": 4, "random_cut": "def content(self):\n \n if self._content is None:\n self._load()\n ", "d_id": 2892, "documentation": { "docstring": "\n The content of the artifact (representation varies)\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": 
"en" } }, { "id": 64540, "commit_id": "b2755f6fdddd3e1b0a305b57c18651c98fee8f7e", "repo": "erpnext", "path": "erpnext/e_commerce/api.py", "file_name": "api.py", "fun_name": "get_product_filter_data", "commit_message": "feat: Include child item group products in Item Group Page & cleanup\n\n- Added 'Include descendants' checkbox, which will pull child item group products too\n- Build item group filters in query engine file\n- Include logic in filter engine\n- Clean up Website section of Item Group page (UX)\n- Add util to fetch child item groups including self", "code": "def get_product_filter_data(query_args=None):\n\t\n\tif isinstance(query_args, str):\n\t\tquery_args = json.loads(query_args)\n\n\tquery_args = frappe._dict(query_args)\n\tif query_args:\n\t\tsearch = query_args.get(\"search\")\n\t\tfield_filters = query_args.get(\"field_filters\", {})\n\t\tattribute_filters = query_args.get(\"attribute_filters\", {})\n\t\tstart = cint(query_args.start) if query_args.get(\"start\") else 0\n\t\titem_group = query_args.get(\"item_group\")\n\t\tfrom_filters = query_args.get(\"from_filters\")\n\telse:\n\t\tsearch, attribute_filters, item_group, from_filters = None, None, None, None\n\t\tfield_filters = {}\n\t\tstart = 0\n\n\t# if new filter is checked, reset start to show filtered items from page 1\n\tif from_filters:\n\t\tstart = 0\n\n\tsub_categories = []\n\tif item_group:\n\t\tsub_categories = get_child_groups_for_website(item_group, immediate=True)\n\n\tengine = ProductQuery()\n\ttry:\n\t\tresult = engine.query(\n\t\t\tattribute_filters,\n\t\t\tfield_filters,\n\t\t\tsearch_term=search,\n\t\t\tstart=start,\n\t\t\titem_group=item_group\n\t\t)\n\texcept Exception:\n\t\ttraceback = frappe.get_traceback()\n\t\tfrappe.log_error(traceback, frappe._(\"Product Engine Error\"))\n\t\treturn {\"exc\": \"Something went wrong!\"}\n\n\t# discount filter data\n\tfilters = {}\n\tdiscounts = result[\"discounts\"]\n\n\tif discounts:\n\t\tfilter_engine = ProductFiltersBuilder()\n\t\tfilters[\"discount_filters\"] = filter_engine.get_discount_filters(discounts)\n\n\treturn {\n\t\t\"items\": result[\"items\"] or [],\n\t\t\"filters\": filters,\n\t\t\"settings\": engine.settings,\n\t\t\"sub_categories\": sub_categories,\n\t\t\"items_count\": result[\"items_count\"]\n\t}\n\n@frappe.whitelist(allow_guest=True)", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist(allow_guest=True)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 95, "n_words": 143, "vocab_size": 100, "complexity": 9, "nloc": 46, "token_counts": 271, "n_ast_nodes": 464, "n_identifiers": 37, "random_cut": "def get_product_filter_data(query_args=None):\n\t\n\tif isinstance(query_args, str):\n\t\tquery_args = json.loads(query_args)\n\n\tquery_args = frappe._dict(query_args)\n\tif query_args:\n\t\tsearch = query_args.get(\"search\")\n\t\tfield_filters = query_args.get(\"field_filters\", {})\n\t\tattribute_filters = query_args.get(\"attribute_filters\", {})\n\t\tstart = cint(query_args.start) if query_args.get(\"start\") else 0\n\t\titem_group = query_args.get(\"item_group\")\n\t\tfrom_filters = query_args.get(\"from_filters\")\n\telse:\n\t\tsearch, attri", "d_id": 13657, "documentation": { "docstring": "\n\t\tReturns filtered products and discount filters.\n\t\t:param query_args (dict): contains filters to get products list\n\n\t\tQuery Args filters:\n\t\tsearch (str): Search Term.\n\t\tfield_filters (dict): Keys include item_group, brand, etc.\n\t\tattribute_filters(dict): Keys include Color, 
Size, etc.\n\t\tstart (int): Offset items by\n\t\titem_group (str): Valid Item Group\n\t\tfrom_filters (bool): Set as True to jump to page 1\n\t", "n_words": 55, "vocab_size": 47, "n_whitespaces": 46, "language": "en" } }, { "id": 290303, "commit_id": "d66d079330b92c02c38fb1c9dca539617161fdbc", "repo": "core", "path": "homeassistant/components/mqtt/light/schema_basic.py", "file_name": "schema_basic.py", "fun_name": "async_turn_on", "commit_message": "Use `_attr_` for MQTT light (#81465)\n\n* Schema basic\r\n\r\n* Schema json\r\n\r\n* Schema template\r\n\r\n* add color_mode - follow up comments\r\n\r\n* Fix regression\r\n\r\n* Follow up comments 2\r\n\r\n* Fix mypy errors\r\n\r\n* Update homeassistant/components/mqtt/light/schema_template.py\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>", "code": "async def async_turn_on(self, **kwargs): # noqa: C901\n \n should_update = False\n on_command_type = self._config[CONF_ON_COMMAND_TYPE]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 13, "vocab_size": 12, "complexity": 43, "nloc": 126, "token_counts": 909, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "async def async_turn_on(self, **kwargs): # noqa: C901\n \n should", "d_id": 89421, "documentation": { "docstring": "Turn the device on.\n\n This method is a coroutine.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 23, "language": "en" } }, { "id": 321374, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "tests/unit/keyinput/test_keyutils.py", "file_name": "test_keyutils.py", "fun_name": "test_fake_mac", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def test_fake_mac(self, modifiers, expected):\n \n seq = keyutils.KeySequence()\n info = keyutils.KeyInfo(key=Qt.Key.Key_A, modifiers=modifiers)\n new = seq.append_event(info.to_event())\n assert new[0] == keyutils.KeyInfo(Qt.Key.Key_A, expected)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 65, "n_ast_nodes": 102, "n_identifiers": 16, "random_cut": "def test_fake_mac(self, modifiers, expected):\n \n seq = keyutils.KeySequence()\n info = keyutils.KeyInfo(key=Qt.K", "d_id": 117683, "documentation": { "docstring": "Make sure Control/Meta are swapped with a simulated Mac.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 4171, "commit_id": "63af98e3b999d4b223237b51472a819915c5a558", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-recurly/source_recurly/streams.py", "file_name": "streams.py", "fun_name": "default_params", "commit_message": "🎉 Recurly Schema Revamp (#9866)\n\n* Cleanup Recurly connector schemas\r\n\r\n* Add more Recurly schemas to the connector\r\n\r\n- `billing_infos`\r\n- `shipping_addresses`\r\n- `shipping_methods`\r\n- `subscription_changes`\r\n\r\n* Add Recurly `add-on` resouce\r\n\r\n* Add Recurly's account notes resource schema\r\n\r\n* Add unique coupons to Recurly source\r\n\r\n* Add credit payments to Recurly connector\r\n\r\n* Add Recurly resources to integration tests configurations\r\n\r\n* Bump Recurly source version to `0.4.0`\r\n\r\n* Add `line_items` Recurly resource\r\n\r\n* Add `line_items` to Recurly 
documentation\r\n\r\n* Add missing `line_items` JSON schema\r\n\r\n* Replace Subscription Change Recurly API call with Subscription `pending_changes` field\r\n\r\n* Replace Recurly unique coupon codes API call with coupons `unique_coupon` field\r\n\r\nTo avoid the extra API call to import unique coupon calls\r\n\r\n* Revert \"Replace Recurly unique coupon codes API call with coupons `unique_coupon` field\"\r\n\r\nThis reverts commit 1c4592d82da3c5e5e0026dda8eb2ed7a896ac5b8.\r\n\r\n* Add `end_time` parameter to Recurly connector\r\n\r\n* Order Recurly specs\r\n\r\n* Set the Recurly `begin_time` and `end_time` to be optional\r\n\r\n* Add `order` to Recurly `source_spec.yaml`\r\n\r\n* Add `maxLength` to Recurly source schemas\r\n\r\n* Set `maxLength` for Recurly Subscription and Transaction `uuid`\r\n\r\n* Fix Recurly `export_dates` acceptance tests", "code": "def default_params(self) -> dict:\n \n return {\"order\": \"asc\", \"sort\": self.sort_key, \"limit\": self.limit}\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 5, "random_cut": "def default_params(self) -> dict:\n \n return {\"order\": \"asc\", \"sort\": self.sort_key, \"limit\": self.limit}\n", "d_id": 628, "documentation": { "docstring": "\n Returns the parameters to be sent together with the API call to Recurly\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 28, "language": "en" } }, { "id": 144810, "commit_id": "610930ae6aeafb37be75851a8c1b9ff39d5f7d22", "repo": "ray", "path": "python/ray/serve/deployment_state.py", "file_name": "deployment_state.py", "fun_name": "_should_start_new_health_check", "commit_message": "[serve] Improve health check failure semantics (#22297)", "code": "def _should_start_new_health_check(self) -> bool:\n \n if self._health_check_ref is not None:\n # There's already an active health check.\n return False\n\n # If there's no active health check, kick off another and reset\n # the timer if it's been long enough since the last health\n # check. Add some randomness to avoid synchronizing across all\n # replicas.\n time_since_last = time.time() - self._last_health_check_time\n randomized_period = self._health_check_period_s * random.uniform(0.9, 1.1)\n return time_since_last > randomized_period\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 154, "n_words": 69, "vocab_size": 55, "complexity": 2, "nloc": 16, "token_counts": 51, "n_ast_nodes": 81, "n_identifiers": 11, "random_cut": "def _should_start_new_health_check(self) -> bool:\n \n if self._health_check_ref is not None:\n # There's already an active health check.\n return False\n\n # If there's no active health check, kick off another and reset\n # the timer if it's been long enough since the last health\n # check. 
Add some randomness to avo", "d_id": 33312, "documentation": { "docstring": "Determines if a new health check should be kicked off.\n\n A health check will be started if:\n 1) There is not already an active health check.\n 2) It has been more than self._health_check_period_s since the\n previous health check was *started*.\n\n This assumes that self._health_check_ref is reset to `None` when an\n active health check succeeds or fails (due to returning or timeout).\n ", "n_words": 61, "vocab_size": 48, "n_whitespaces": 125, "language": "en" } }, { "id": 37910, "commit_id": "47412c7d434f6ddfc02a9b7ecd6182b86ae0a164", "repo": "transformers", "path": "src/transformers/trainer_pt_utils.py", "file_name": "trainer_pt_utils.py", "fun_name": "numpy_pad_and_concatenate", "commit_message": "Ensure tensors are at least 1d for pad and concat (#17179)\n\n* Ensure tensors are at least 1d for pad and concat\r\n\r\n* Compatibility\r\n\r\n* Fix\r\n\r\n* Fix\r\n\r\n* Add test\r\n\r\n* Retrigger CI\r\n\r\n* Consistency with master\r\n\r\n* Retrigger CI", "code": "def numpy_pad_and_concatenate(array1, array2, padding_index=-100):\n \n array1 = atleast_1d(array1)\n array2 = atleast_1d(array2)\n\n if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]:\n return np.concatenate((array1, array2), axis=0)\n\n # Let's figure out the new shape\n new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:]\n\n # Now let's fill the result tensor\n result = np.full_like(array1, padding_index, shape=new_shape)\n result[: array1.shape[0], : array1.shape[1]] = array1\n result[array1.shape[0] :, : array2.shape[1]] = array2\n return result\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 104, "n_words": 64, "vocab_size": 49, "complexity": 3, "nloc": 10, "token_counts": 162, "n_ast_nodes": 242, "n_identifiers": 14, "random_cut": "def numpy_pad_and_concatenate(array1, array2, padding_index=-100):\n \n array1 = atleas", "d_id": 6885, "documentation": { "docstring": "Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 53859, "commit_id": "f97603bba836c215e153d7d3d5b3b9de4d0ae822", "repo": "prefect", "path": "src/prefect/task_runners.py", "file_name": "task_runners.py", "fun_name": "_ray", "commit_message": "First draft `RayTaskRunner` implementation", "code": "def _ray(self) -> \"ray\":\n \n global ray\n\n if ray is None:\n try:\n import ray\n except ImportError as exc:\n raise RuntimeError(\n \"Using the `RayTaskRunner` requires `ray` to be installed.\"\n ) from exc\n\n return ray\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 146, "n_words": 32, "vocab_size": 29, "complexity": 3, "nloc": 14, "token_counts": 33, "n_ast_nodes": 61, "n_identifiers": 6, "random_cut": "def _ray(self) -> \"ray\":\n \n global ray\n\n if r", "d_id": 10926, "documentation": { "docstring": "\n Delayed import of `ray` allowing configuration of the task runner\n without the extra installed and improves `prefect` import times.\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 41, "language": "en" } }, { "id": 290231, "commit_id": "9a747bafa398185eb3d4fe041c52acfbb8264372", "repo": "core", "path": "homeassistant/components/zwave_js/climate.py", "file_name": 
"climate.py", "fun_name": "temperature_unit", "commit_message": "Use enums instead of deprecated constants (#81591)", "code": "def temperature_unit(self) -> str:\n \n if (\n self._unit_value\n and self._unit_value.metadata.unit\n and \"f\" in self._unit_value.metadata.unit.lower()\n ):\n return UnitOfTemperature.FAHRENHEIT\n return UnitOfTemperature.CELSIUS\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 90, "n_words": 18, "vocab_size": 16, "complexity": 4, "nloc": 9, "token_counts": 45, "n_ast_nodes": 75, "n_identifiers": 10, "random_cut": "def temperature_unit(self) -> str:\n \n if (\n self._unit_value\n and self._unit_v", "d_id": 89349, "documentation": { "docstring": "Return the unit of measurement used by the platform.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 109993, "commit_id": "f15aeee5e8d380c2ea04bcbed202a8940a7db1d0", "repo": "matplotlib", "path": "examples/text_labels_and_annotations/angles_on_bracket_arrows.py", "file_name": "angles_on_bracket_arrows.py", "fun_name": "get_point_of_rotated_vertical", "commit_message": "Updated Angles on Bracket arrow styles example to make angles clear #23176 (#24145)\n\n* removed AngleAnnotation from angle_on_bracket_arrow example\r\n\r\n* Fixes indentation mistake.\r\n\r\n* rebase to main, remove conflicting commit", "code": "def get_point_of_rotated_vertical(origin, line_length, degrees):\n \n rad = np.deg2rad(-degrees)\n return [origin[0] + line_length * np.sin(rad),\n origin[1] + line_length * np.cos(rad)]\n\n\nfig, ax = plt.subplots(figsize=(8, 7))\nax.set(xlim=(0, 6), ylim=(-1, 4))\nax.set_title(\"Orientation of the bracket arrows relative to angleA and angleB\")\n\nfor i, style in enumerate([\"]-[\", \"|-|\"]):\n for j, angle in enumerate([-40, 60]):\n y = 2*i + j\n arrow_centers = ((1, y), (5, y))\n vlines = ((1, y + 0.5), (5, y + 0.5))\n anglesAB = (angle, -angle)\n bracketstyle = f\"{style}, angleA={anglesAB[0]}, angleB={anglesAB[1]}\"\n bracket = FancyArrowPatch(*arrow_centers, arrowstyle=bracketstyle,\n mutation_scale=42)\n ax.add_patch(bracket)\n ax.text(3, y + 0.05, bracketstyle, ha=\"center\", va=\"bottom\")\n ax.vlines([i[0] for i in vlines], [y, y], [i[1] for i in vlines],\n linestyles=\"--\", color=\"C0\")\n # Get the top coordinates for the drawn patches at A and B\n patch_tops = [get_point_of_rotated_vertical(center, 0.5, angle)\n for center, angle in zip(arrow_centers, anglesAB)]\n # Define the connection directions for the annotation arrows\n connection_dirs = (1, -1) if angle > 0 else (-1, 1)\n # Add arrows and annotation text\n arrowstyle = \"Simple, tail_width=0.5, head_width=4, head_length=8\"\n for vline, dir, patch_top, angle in zip(vlines, connection_dirs,\n patch_tops, anglesAB):\n kw = dict(connectionstyle=f\"arc3,rad={dir * 0.5}\",\n arrowstyle=arrowstyle, color=\"C0\")\n ax.add_patch(FancyArrowPatch(vline, patch_top, **kw))\n ax.text(vline[0] - dir * 0.15, y + 0.3, f'{angle}°', ha=\"center\",\n va=\"center\")\n\n#############################################################################\n#\n# .. 
admonition:: References\n#\n# The use of the following functions, methods, classes and modules is shown\n# in this example:\n#\n# - `matplotlib.patches.ArrowStyle`\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 544, "n_words": 221, "vocab_size": 150, "complexity": 1, "nloc": 4, "token_counts": 49, "n_ast_nodes": 608, "n_identifiers": 48, "random_cut": "def get_point_of_rotated_vertical(origin, line_length, degrees):\n \n rad = np.deg2rad(-degrees)\n return [origin[0] + line_length * np.sin(rad),\n origin[1] + line_length * np.cos(rad)]\n\n\nfig, ax = plt.subplots(figsize=(8, 7))\nax.set(xlim=(0, 6), ylim=(-1, 4))\nax.set_title(\"Orientation of the bracket arrows relative to angleA and angleB\")\n\nfor i, style in enumerate([\"]-[\", \"|-|\"]):\n for j, angle in enumerate([-40, 60]):\n y = 2*i + j\n arrow_centers = ((1, y), (5, y))\n vlines = ((1, y + 0.5), (5, y + 0.5))\n anglesAB = (angle, -angle)\n bracketstyle = f\"{style}, angleA={anglesAB[0]}, angleB={anglesAB[1]}\"\n bracket = FancyArrowPatch(*arrow_centers, arrowstyle=bracketstyle,\n mutation_scale=42)\n ax.add_patch(bracket)\n ax.text(3, y + 0.05, bracketstyle, ha=\"center\", va=\"bottom\")\n ax.vlines([i[0] for i in vlines], [y, y], [i[1] for i in vlines],\n linestyles=\"--\", color=\"C0\")\n # Get the top coordinates for the drawn patches at A and B\n patch_tops = [get_point_of_rotated_vertical(center, 0.5, angle)\n for center, angle in zip(arrow_centers, anglesAB)]\n # Define the connection directions for the annotation arrows\n connection_dirs = (1, -1) if angle > 0 else (-1, 1)\n # Add arrows and annotation text\n arrowstyle = \"Simple, tail_width=0.5, head_width=4, head_length=8\"\n for vline, dir, patch_top, angle in zip(vlines, connection_dirs,\n patch_tops, anglesAB):\n kw = dict(connectionstyle=f\"arc3,rad={dir * 0.5}\",\n arrowst", "d_id": 23870, "documentation": { "docstring": "Return xy coordinates of the vertical line end rotated by degrees.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 62436, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/_tokenizer.py", "file_name": "_tokenizer.py", "fun_name": "processEntityInAttribute", "commit_message": "upd; format", "code": "def processEntityInAttribute(self, allowedChar):\n \n self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 5, "random_cut": "def processEntityInAttribute(self, allowedChar):\n \n self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)\n", "d_id": 12977, "documentation": { "docstring": "This method replaces the need for \"entityInAttributeValueState\".\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 200424, "commit_id": "24f1e7730119fe958cc8e28411f790c9a5ec04eb", "repo": "sympy", "path": "sympy/physics/secondquant.py", "file_name": "secondquant.py", "fun_name": "_get_ordered_dummies", "commit_message": "Fix various typos\n\nFound via `codespell -q 3 -L 
aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`", "code": "def _get_ordered_dummies(mul, verbose=False):\n \n # setup dicts to avoid repeated calculations in key()\n args = Mul.make_args(mul)\n fac_dum = { fac: fac.atoms(Dummy) for fac in args }\n fac_repr = { fac: __kprint(fac) for fac in args }\n all_dums = set().union(*fac_dum.values())\n mask = {}\n for d in all_dums:\n if d.assumptions0.get('below_fermi'):\n mask[d] = '0'\n elif d.assumptions0.get('above_fermi'):\n mask[d] = '1'\n else:\n mask[d] = '2'\n dum_repr = {d: __kprint(d) for d in all_dums}\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 148, "n_words": 67, "vocab_size": 43, "complexity": 13, "nloc": 26, "token_counts": 258, "n_ast_nodes": 211, "n_identifiers": 21, "random_cut": "def _get_ordered_dummies(mul, verbose=False):\n \n # setup dicts to avoid repeated calculations in key()\n args = Mul.make_args(mul)\n fac_dum = { fac: fac.atoms(Dummy) for fac in args }\n fac_repr = { fac: __kprint(fac) for fac in args }\n all_dums = set().union(*fac_dum.values())\n mask = {}\n for d in all_dums:\n if d.assumptions0.get('below_fermi'):\n mask[d] = '0'\n elif d.assumptions0.get('above_fermi'):\n mask[d] = '1'\n else:\n mask[d] = '2'\n d", "d_id": 49638, "documentation": { "docstring": "Returns all dummies in the mul sorted in canonical order.\n\n Explanation\n ===========\n\n The purpose of the canonical ordering is that dummies can be substituted\n consistently across terms with the result that equivalent terms can be\n simplified.\n\n It is not possible to determine if two terms are equivalent based solely on\n the dummy order. However, a consistent substitution guided by the ordered\n dummies should lead to trivially (non-)equivalent terms, thereby revealing\n the equivalence. This also means that if two terms have identical sequences of\n dummies, the (non-)equivalence should already be apparent.\n\n Strategy\n --------\n\n The canonical order is given by an arbitrary sorting rule. A sort key\n is determined for each dummy as a tuple that depends on all factors where\n the index is present. The dummies are thereby sorted according to the\n contraction structure of the term, instead of sorting based solely on the\n dummy symbol itself.\n\n After all dummies in the term has been assigned a key, we check for identical\n keys, i.e. unorderable dummies. If any are found, we call a specialized\n method, _determine_ambiguous(), that will determine a unique order based\n on recursive calls to _get_ordered_dummies().\n\n Key description\n ---------------\n\n A high level description of the sort key:\n\n 1. Range of the dummy index\n 2. Relation to external (non-dummy) indices\n 3. Position of the index in the first factor\n 4. Position of the index in the second factor\n\n The sort key is a tuple with the following components:\n\n 1. A single character indicating the range of the dummy (above, below\n or general.)\n 2. A list of strings with fully masked string representations of all\n factors where the dummy is present. By masked, we mean that dummies\n are represented by a symbol to indicate either below fermi, above or\n general. No other information is displayed about the dummies at\n this point. 
The list is sorted stringwise.\n 3. An integer number indicating the position of the index, in the first\n factor as sorted in 2.\n 4. An integer number indicating the position of the index, in the second\n factor as sorted in 2.\n\n If a factor is either of type AntiSymmetricTensor or SqOperator, the index\n position in items 3 and 4 is indicated as 'upper' or 'lower' only.\n (Creation operators are considered upper and annihilation operators lower.)\n\n If the masked factors are identical, the two factors cannot be ordered\n unambiguously in item 2. In this case, items 3, 4 are left out. If several\n indices are contracted between the unorderable factors, it will be handled by\n _determine_ambiguous()\n\n\n ", "n_words": 415, "vocab_size": 207, "n_whitespaces": 650, "language": "en" } }, { "id": 48153, "commit_id": "46af5baba810a07eec395e89db08fc5dab175e23", "repo": "airflow", "path": "airflow/providers/amazon/aws/example_dags/example_athena.py", "file_name": "example_athena.py", "fun_name": "read_results_from_s3", "commit_message": "Update the Athena Sample DAG and Docs (#23428)\n\n* Update the Athena Sample DAG and Docs", "code": "def read_results_from_s3(query_execution_id):\n s3_hook = S3Hook()\n file_obj = s3_hook.get_conn().get_object(Bucket=S3_BUCKET, Key=f'{S3_KEY}/{query_execution_id}.csv')\n file_content = file_obj['Body'].read().decode('utf-8')\n print(file_content)\n\n\nQUERY_CREATE_TABLE = f\n\nQUERY_READ_TABLE = f\n\nQUERY_DROP_TABLE = f\n\nwith DAG(\n dag_id='example_athena',\n schedule_interval=None,\n start_date=datetime(2021, 1, 1),\n tags=['example'],\n catchup=False,\n) as dag:\n\n upload_sample_data = S3CreateObjectOperator(\n task_id='upload_sample_data',\n s3_bucket=S3_BUCKET,\n s3_key=f'{S3_KEY}/{ATHENA_TABLE}/{SAMPLE_FILENAME}',\n data=SAMPLE_DATA,\n replace=True,\n )\n\n create_table = AthenaOperator(\n task_id='create_table',\n query=QUERY_CREATE_TABLE,\n database=ATHENA_DATABASE,\n output_location=f's3://{S3_BUCKET}/{S3_KEY}',\n )\n\n # [START howto_athena_operator]\n read_table = AthenaOperator(\n task_id='read_table',\n query=QUERY_READ_TABLE,\n database=ATHENA_DATABASE,\n output_location=f's3://{S3_BUCKET}/{S3_KEY}',\n )\n # [END howto_athena_operator]\n\n # [START howto_athena_sensor]\n await_query = AthenaSensor(\n task_id='await_query',\n query_execution_id=read_table.output,\n )\n # [END howto_athena_sensor]\n\n drop_table = AthenaOperator(\n task_id='drop_table',\n query=QUERY_DROP_TABLE,\n database=ATHENA_DATABASE,\n output_location=f's3://{S3_BUCKET}/{S3_KEY}',\n )\n\n remove_s3_files = S3DeleteObjectsOperator(\n task_id='remove_s3_files',\n bucket=S3_BUCKET,\n prefix=S3_KEY,\n )\n\n (\n upload_sample_data\n >> create_table\n >> read_table\n >> await_query\n >> read_results_from_s3(read_table.output)\n >> drop_table\n >> remove_s3_files\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 385, "n_words": 107, "vocab_size": 63, "complexity": 1, "nloc": 5, "token_counts": 48, "n_ast_nodes": 462, "n_identifiers": 51, "random_cut": "def read_results_from_s3(query_execution_id):\n s3_hook = S3Hook()\n file_obj = s3_hook.get_conn().get_object(Bucket=S3_BUCKET, Key=f'{S3_KEY}/{query_execution_id}.csv')\n file_content = file_obj['Body'].read().decode('utf-8')\n print(file_content)\n\n\nQUERY_CREATE_TABLE = f\n\nQUERY_READ_TABLE = f\n\nQUERY_DROP_TABLE = f\n\nwith DAG(\n dag_id='example_athena',\n schedule_interval=None,\n start_date=datetime(2021, 1, 1),\n 
tags=['example'],\n catchup=False,\n) as dag:\n\n upload_sample_data = S3CreateObjectOperator(\n task_id='upload_sample_data',\n s3_bucket=S3_BUCKET,\n s3_key=f'{S3_KEY}/{ATHENA_TABLE}/{SAMPLE_FILENAME}',\n data=SAMPLE_DATA,\n replace=True,\n )\n\n create_table = AthenaOperator(\n task_id='create_table',\n query=QUERY_CREATE_TABLE,\n database=ATHENA_DATABASE,\n output_location=f's3://{S3_BUCKET}/{S3_KEY}',\n )\n\n # [START howto_athena_operator]\n read_table = AthenaOperator(\n task_id='read_table',\n query=QUERY_READ_TABLE,\n database=ATHENA_DATABASE,\n output_location=f's3://{S3_BUCKET}/{", "d_id": 9378, "documentation": { "docstring": "\nCREATE EXTERNAL TABLE IF NOT EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE} ( `name` string, `age` int )\nROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'\nWITH SERDEPROPERTIES ( 'serialization.format' = ',', 'field.delim' = ','\n) LOCATION 's3://{S3_BUCKET}/{S3_KEY}/{ATHENA_TABLE}'\nTBLPROPERTIES ('has_encrypted_data'='false')\n\nSELECT * from {ATHENA_DATABASE}.{ATHENA_TABLE}\n\nDROP TABLE IF EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE}\n", "n_words": 40, "vocab_size": 32, "n_whitespaces": 33, "language": "en" } }, { "id": 167667, "commit_id": "67e8c4c3761ab1da4b0a341a472c0fe2ea393e8b", "repo": "pandas", "path": "pandas/core/dtypes/common.py", "file_name": "common.py", "fun_name": "is_datetime64_ns_dtype", "commit_message": "ENH: DTI/DTA.astype support non-nano (#47579)\n\n* ENH: DTI/DTA.astype support non-nano\r\n\r\n* whatsnew\r\n\r\n* GH ref\r\n\r\n* pyright fixup", "code": "def is_datetime64_ns_dtype(arr_or_dtype) -> bool:\n \n if arr_or_dtype is None:\n return False\n try:\n tipo = get_dtype(arr_or_dtype)\n except TypeError:\n if is_datetime64tz_dtype(arr_or_dtype):\n tipo = get_dtype(arr_or_dtype.dtype)\n else:\n return False\n return tipo == DT64NS_DTYPE or (\n isinstance(tipo, DatetimeTZDtype) and tipo._unit == \"ns\"\n )\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 112, "n_words": 37, "vocab_size": 29, "complexity": 6, "nloc": 47, "token_counts": 63, "n_ast_nodes": 106, "n_identifiers": 12, "random_cut": "def is_datetime64_ns_dtype(arr_or_dtype) -> bool:\n \n if arr_or_dtype is None:\n return False\n try:\n tipo = get_dtype(arr_or_dtype)\n except TypeError:\n ", "d_id": 40074, "documentation": { "docstring": "\n Check whether the provided array or dtype is of the datetime64[ns] dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like or dtype\n The array or dtype to check.\n\n Returns\n -------\n bool\n Whether or not the array or dtype is of the datetime64[ns] dtype.\n\n Examples\n --------\n >>> is_datetime64_ns_dtype(str)\n False\n >>> is_datetime64_ns_dtype(int)\n False\n >>> is_datetime64_ns_dtype(np.datetime64) # no unit\n False\n >>> is_datetime64_ns_dtype(DatetimeTZDtype(\"ns\", \"US/Eastern\"))\n True\n >>> is_datetime64_ns_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime64_ns_dtype(np.array([1, 2]))\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype=\"datetime64\")) # no unit\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype=\"datetime64[ps]\")) # wrong unit\n False\n >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype=\"datetime64[ns]\"))\n True\n ", "n_words": 86, "vocab_size": 49, "n_whitespaces": 188, "language": "en" } }, { "id": 203239, "commit_id": "c5cd8783825b5f6384417dac5f3889b4210b7d08", "repo": "django", "path": "django/template/defaulttags.py", "file_name": "defaulttags.py", 
"fun_name": "regroup", "commit_message": "Refs #33476 -- Refactored problematic code before reformatting by Black.\n\nIn these cases Black produces unexpected results, e.g.\r\n\r\ndef make_random_password(\r\n self,\r\n length=10,\r\n allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',\r\n):\r\n\r\nor\r\n\r\ncursor.execute(\"\"\"\r\nSELECT ...\r\n\"\"\",\r\n [table name],\r\n)", "code": "def regroup(parser, token):\n \n bits = token.split_contents()\n if len(bits) != 6:\n raise TemplateSyntaxError(\"'regroup' tag takes five arguments\")\n target = parser.compile_filter(bits[1])\n if bits[2] != 'by':\n raise TemplateSyntaxError(\"second argument to 'regroup' tag must be 'by'\")\n if bits[4] != 'as':\n raise TemplateSyntaxError(\n \"next-to-last argument to 'regroup' tag must be 'as'\"\n )\n var_name = bits[5]\n # RegroupNode will take each item in 'target', put it in the context under\n # 'var_name', evaluate 'var_name'.'expression' in the current context, and\n # group by the resulting value. After all items are processed, it will\n # save the final result in the context under 'var_name', thus clearing the\n # temporary values. This hack is necessary because the template engine\n # doesn't provide a context-aware equivalent of Python's getattr.\n expression = parser.compile_filter(var_name +\n VARIABLE_ATTRIBUTE_SEPARATOR +\n bits[3])\n return RegroupNode(target, expression, var_name)\n\n\n@register.tag", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@register.tag", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 290, "n_words": 131, "vocab_size": 95, "complexity": 4, "nloc": 16, "token_counts": 95, "n_ast_nodes": 172, "n_identifiers": 15, "random_cut": "def regroup(parser, token):\n \n bits = token.split_contents()\n if len(bits) != 6:\n raise TemplateSyntaxError(\"'regroup' tag takes five arguments\")\n target = parser.compile_filter(bits[1])\n if bits[2] != 'by':\n raise TemplateSyntaxError(\"second argument to 'regroup' tag must be 'by'\")\n if bits[4] != 'as':\n raise Template", "d_id": 50267, "documentation": { "docstring": "\n Regroup a list of alike objects by a common attribute.\n\n This complex tag is best illustrated by use of an example: say that\n ``musicians`` is a list of ``Musician`` objects that have ``name`` and\n ``instrument`` attributes, and you'd like to display a list that\n looks like:\n\n * Guitar:\n * Django Reinhardt\n * Emily Remler\n * Piano:\n * Lovie Austin\n * Bud Powell\n * Trumpet:\n * Duke Ellington\n\n The following snippet of template code would accomplish this dubious task::\n\n {% regroup musicians by instrument as grouped %}\n
      \n {% for group in grouped %}\n
    • {{ group.grouper }}\n
        \n {% for musician in group.list %}\n
      • {{ musician.name }}
      • \n {% endfor %}\n
    • \n {% endfor %}\n
    
    \n\n As you can see, ``{% regroup %}`` populates a variable with a list of\n objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the\n item that was grouped by; ``list`` contains the list of objects that share\n that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano``\n and ``Trumpet``, and ``list`` is the list of musicians who play this\n instrument.\n\n Note that ``{% regroup %}`` does not work when the list to be grouped is not\n sorted by the key you are grouping by! This means that if your list of\n musicians was not sorted by instrument, you'd need to make sure it is sorted\n before using it, i.e.::\n\n {% regroup musicians|dictsort:\"instrument\" by instrument as grouped %}\n ", "n_words": 230, "vocab_size": 128, "n_whitespaces": 478, "language": "en" } }, { "id": 30242, "commit_id": "773398048b7990ab58e2998fe4d15355f7998774", "repo": "spotify-downloader", "path": "spotdl/types/saved.py", "file_name": "saved.py", "fun_name": "create_basic_list", "commit_message": "fixed arguments for frozen env\n\nfixed pylint errors\n\nfixed arguments\n\nblack\n\nfixed argument parser for all scenarios\n\nblack\n\ndocs\n\nblack", "code": "def create_basic_list(cls) -> \"Saved\":\n \n\n metadata = cls.get_metadata(\"saved\")\n urls = cls.get_urls(\"saved\")\n\n return cls(**metadata, urls=urls, songs=[])\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 10, "token_counts": 39, "n_ast_nodes": 70, "n_identifiers": 7, "random_cut": "def create_basic_list(cls) -> \"Saved\":\n \n\n metadata ", "d_id": 5427, "documentation": { "docstring": "\n Create a basic list with only the required metadata and urls.\n\n ### Returns\n - The Saved object.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 46, "language": "en" } }, { "id": 296701, "commit_id": "64381acbaf2930cda5dfa538d00bfa9f5172e690", "repo": "core", "path": "tests/common.py", "file_name": "common.py", "fun_name": "assert_lists_same", "commit_message": "Mark device actions from hidden or auxiliary entities as secondary (#70278)", "code": "def assert_lists_same(a, b):\n \n assert len(a) == len(b)\n for i in a:\n assert i in b\n for i in b:\n assert i in a\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 23, "vocab_size": 14, "complexity": 3, "nloc": 6, "token_counts": 36, "n_ast_nodes": 57, "n_identifiers": 5, "random_cut": "def assert_lists_same(a, b):\n \n assert len(a) == len(b)\n for i in a:\n assert i in b\n for i in b:\n assert i in a\n\n", "d_id": 95675, "documentation": { "docstring": "Compare two lists, ignoring order.\n\n Check both that all items in a are in b and that all items in b are in a,\n otherwise assert_lists_same([\"1\", \"1\"], [\"1\", \"2\"]) could be True.\n ", "n_words": 32, "vocab_size": 24, "n_whitespaces": 41, "language": "en" } }, { "id": 156780, "commit_id": "ddcb841903f8f180aa359bd8db0054aa3b5964e3", "repo": "dask", "path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "fun_name": "test_dataframe_format_with_index", "commit_message": "Change repr methods to avoid Layer materialization (#9289)\n\n* change task count to layer count in DataFrame and Array reprs\r\n\r\n* add test\r\n\r\n* address doctest failure\r\n\r\n* simplify test\r\n\r\n* support pluralization\r\n\r\n* use 
'graph layers' instead of 'layers' to be more explicit", "code": "def test_dataframe_format_with_index():\n pytest.importorskip(\"jinja2\")\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"B\": list(\"ABCDEFGH\"),\n \"C\": pd.Categorical(list(\"AAABBBCC\")),\n },\n index=list(\"ABCDEFGH\"),\n )\n ddf = dd.from_pandas(df, 3)\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \"A int64 object category[known]\\n\"\n \"D ... ... ...\\n\"\n \"G ... ... ...\\n\"\n \"H ... ... ...\\n\"\n \"Dask Name: from_pandas, 1 graph layer\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp_table = \n\n exp = .format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div and has style\n exp = .format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 453, "n_words": 100, "vocab_size": 70, "complexity": 1, "nloc": 79, "token_counts": 145, "n_ast_nodes": 259, "n_identifiers": 20, "random_cut": "def test_dataframe_format_with_index():\n pytest.importorskip(\"jinja2\")\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"B\": list(\"ABCDEFGH\"),\n \"C\": pd.Categorical(list(\"AAABBBCC\")),\n },\n index=list(\"ABCDEFGH\"),\n )\n ddf = dd.from_pandas(df, 3)\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \"A int64 object category[known]\\n\"\n \"D ... ... ...\\n\"\n \"G ... ... ...\\n\"\n \"H ... ... ...\\n\"\n \"Dask Name: from_pandas, 1 graph layer\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp_table = \n\n exp = .format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div and has style\n exp = .format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp\n\n", "d_id": 36771, "documentation": { "docstring": "
    ABC
    npartitions=3
    Aint64objectcategory[known]
    D.........
    G.........
    H.........
    \n
    \n{exp_table}\n
    Dask Name: from_pandas, 1 graph layer
    \n
    \nDask DataFrame Structure:\n
    \n{style}{exp_table}\n
    \nDask Name: from_pandas, 1 graph layer\n
    
    ", "n_words": 66, "vocab_size": 38, "n_whitespaces": 218, "language": "en" } }, { "id": 207372, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_scripts/tests.py", "file_name": "tests.py", "fun_name": "test_run_from_argv_closes_connections", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_run_from_argv_closes_connections(self):\n \n command = BaseCommand()\n command.check = lambda: []\n command.handle = lambda *args, **kwargs: args\n with mock.patch(\"django.core.management.base.connections\") as mock_connections:\n command.run_from_argv([\"\", \"\"])\n # Test connections have been closed\n self.assertTrue(mock_connections.close_all.called)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 88, "n_words": 28, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 61, "n_ast_nodes": 111, "n_identifiers": 15, "random_cut": "def test_run_from_argv_closes_connections(self):\n ", "d_id": 51942, "documentation": { "docstring": "\n A command called from the command line should close connections after\n being executed (#21255).\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 117547, "commit_id": "3f1a5c30c2ccbd78b21f1f41b7dfdfca87bb7135", "repo": "mindsdb", "path": "tests/unit/test_project_structure.py", "file_name": "test_project_structure.py", "fun_name": "test_version_managing", "commit_message": "update and delete model version\nrenaming (predictor->model)", "code": "def test_version_managing(self, data_handler):\n # set up\n\n df = pd.DataFrame([\n {'a': 1, 'b': dt.datetime(2020, 1, 1)},\n {'a': 2, 'b': dt.datetime(2020, 1, 2)},\n {'a': 1, 'b': dt.datetime(2020, 1, 3)},\n ])\n self.set_handler(data_handler, name='pg', tables={'tasks': df})\n\n # ================= retrain cycles =====================\n\n # create folder\n self.run_sql('create database proj')\n\n # -- create model --\n self.run_sql(\n \n )\n self.wait_predictor('proj', 'task_model')\n\n assert data_handler().native_query.call_args[0][0] == 'select * from tasks'\n\n # tag works in create model\n ret = self.run_sql('select * from proj.models')\n assert ret['TAG'][0] == 'first'\n\n # use model\n ret = self.run_sql()\n\n assert len(ret) == 3\n assert ret.predicted[0] == 42\n\n # -- retrain predictor with tag --\n data_handler.reset_mock()\n self.run_sql(\n \n )\n self.wait_predictor('proj', 'task_model', {'tag': 'second'})\n\n # get current model\n ret = self.run_sql('select * from proj.models')\n\n # check target\n assert ret['PREDICT'][0] == 'b'\n\n # check label\n assert ret['TAG'][0] == 'second'\n\n # check integration sql\n assert data_handler().native_query.call_args[0][0] == 'select * from tasks where a=2'\n\n # use model\n ret = self.run_sql()\n assert ret.predicted[0] == 42\n\n # used model has tag 'second'\n models = self.get_models()\n model_id = ret.predictor_id[0]\n assert models[model_id].label == 'second'\n\n # -- retrain again with active=0 --\n data_handler.reset_mock()\n self.run_sql(\n \n )\n self.wait_predictor('proj', 'task_model', {'tag': 'third'})\n\n ret = self.run_sql('select * from proj.models')\n\n # check target is from previous retrain\n assert ret['PREDICT'][0] == 'b'\n\n # use model\n ret = self.run_sql()\n\n # used model has tag 'second' (previous)\n models = self.get_models()\n model_id = ret.predictor_id[0]\n assert models[model_id].label == 'second'\n\n # ================ 
working with inactive versions =================\n\n # run 3st version model and check used model version\n ret = self.run_sql()\n\n models = self.get_models()\n model_id = ret.predictor_id[0]\n assert models[model_id].label == 'third'\n\n # one-line query model by version\n ret = self.run_sql('SELECT * from proj.task_model.3 where a=1 and b=2')\n model_id = ret.predictor_id[0]\n assert models[model_id].label == 'third'\n\n # not existing version\n with pytest.raises(Exception) as exc_info:\n self.run_sql(\n 'SELECT * from proj.task_model.4 where a=1 and b=2',\n )\n assert 'does not exists' in str(exc_info.value)\n\n # ================== managing versions =========================\n\n # show models command\n # Show models where \n ret = self.run_sql('Show models')\n assert len(ret) == 1 and ret['NAME'][0] == 'task_model'\n\n ret = self.run_sql('Show models from proj')\n assert len(ret) == 1 and ret['NAME'][0] == 'task_model'\n\n ret = self.run_sql('Show models in proj')\n assert len(ret) == 1 and ret['NAME'][0] == 'task_model'\n\n ret = self.run_sql(\"Show models where name='task_model'\")\n assert len(ret) == 1 and ret['NAME'][0] == 'task_model'\n\n ret = self.run_sql(\"Show models from proj where name='xxx'\")\n assert len(ret) == 0\n\n # ----------------\n\n # See all versions\n ret = self.run_sql('select * from proj.models_versions')\n # we have all tags in versions\n assert set(ret['TAG']) == {'first', 'second', 'third'}\n\n # Set active selected version\n self.run_sql()\n\n # get active version\n ret = self.run_sql('select * from proj.models_versions where active = 1')\n assert ret['TAG'][0] == 'first'\n\n # use active version ?\n\n # Delete specific version\n self.run_sql()\n\n # deleted version not in list\n ret = self.run_sql('select * from proj.models_versions')\n assert len(ret) == 2\n assert 'second' not in ret['TAG']\n\n # try to use deleted version\n with pytest.raises(Exception) as exc_info:\n self.run_sql(\n 'SELECT * from proj.task_model.2 where a=1',\n )\n assert 'does not exists' in str(exc_info.value)\n\n # exception with deleting active version\n with pytest.raises(Exception) as exc_info:\n self.run_sql()\n assert 'is not found' in str(exc_info.value)\n\n # drop predictor and check model is deleted and no versions\n self.run_sql('drop predictor proj.task_model')\n ret = self.run_sql('select * from proj.models')\n assert len(ret) == 0\n\n ret = self.run_sql('select * from proj.models_versions')\n assert len(ret) == 0\n\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 1445, "n_words": 536, "vocab_size": 173, "complexity": 5, "nloc": 130, "token_counts": 716, "n_ast_nodes": 1293, "n_identifiers": 31, "random_cut": "def test_version_managing(self, data_handler):\n # set up\n\n df = pd.DataFrame([\n {'a': 1, 'b': dt.datetime(2020, 1, 1)},\n {'a': 2, 'b': dt.datetime(2020, 1, 2)},\n {'a': 1, 'b': dt.datetime(2020, 1, 3)},\n ])\n self.set_handler(data_handler, name='pg', tables={'tasks': df})\n\n # ================= retrain cycles =====================\n\n # create folder\n self.run_sql('create database proj')\n\n # -- create model --\n self.run_sql(\n \n )\n self.wait_predictor('proj', 'task_model')\n\n assert data_handler().native_query.call_args[0][0] == 'select * from tasks'\n\n # tag works in create model\n ret = self.run_sql('select * from proj.models')\n assert ret['TAG'][0] == 'first'\n\n # use model\n ret = self.run_sql()\n\n assert len(ret) == 3\n assert 
ret.predicted[0] == 42\n\n # -- retrain predictor with tag --\n data_handler.reset_mock()\n self.run_sql(\n \n )\n self.wait_predictor('proj', 'task_model', {'tag': 'second'})\n\n # get current model\n ret = self.run_sql('select * from proj.models')\n\n # check target\n assert ret['PREDICT'][0] == 'b'\n\n # check label\n a", "d_id": 26025, "documentation": { "docstring": "\n CREATE PREDICTOR proj.task_model\n from pg (select * from tasks)\n PREDICT a\n using engine='dummy_ml', tag = 'first'\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n retrain proj.task_model\n from pg (select * from tasks where a=2)\n PREDICT b\n using tag = 'second'\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n retrain proj.task_model\n from pg (select * from tasks where a=2)\n PREDICT a\n using tag='third', active=0\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model.3 as m\n \n update proj.models_versions \n set active=1\n where version=1 and name='task_model' \n \n delete from proj.models_versions \n where version=2 \n and name='task_model'\n \n delete from proj.models_versions \n where version=3 \n and model='task_model'\n ", "n_words": 109, "vocab_size": 43, "n_whitespaces": 654, "language": "en" } }, { "id": 19462, "commit_id": "7e33fcae4384563b4c927fd44318c29dd524a097", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/locations/__init__.py", "file_name": "__init__.py", "fun_name": "_looks_like_red_hat_lib", "commit_message": "Vendor in pip 21.2.4 release (from pip 21.2.2 prior). (#5009)\n\n* Vendor in pip 21.2.4 release (from pip 21.2.2 prior).\r\n\r\n* Add news fragment for pip 21.2.4 vendor update.\r\n\r\n* Add potentially missing LICENSE files", "code": "def _looks_like_red_hat_lib() -> bool:\n \n from distutils.command.install import INSTALL_SCHEMES # type: ignore\n\n return all(\n k in INSTALL_SCHEMES\n and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k])\n for k in (\"unix_prefix\", \"unix_home\")\n )\n\n\n@functools.lru_cache(maxsize=None)", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "@functools.lru_cache(maxsize=None)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 58, "n_words": 25, "vocab_size": 22, "complexity": 3, "nloc": 11, "token_counts": 38, "n_ast_nodes": 79, "n_identifiers": 12, "random_cut": "def _looks_like_red_hat_lib() -> bool:\n \n from distutils.command.install import INSTALL_SCHEMES # type: ignore\n\n return all(\n k in INSTALL_SCHEMES\n and _looks_like_red_hat_patched_platlib", "d_id": 2979, "documentation": { "docstring": "Red Hat patches platlib in unix_prefix and unix_home, but not purelib.\n\n This is the only way I can see to tell a Red Hat-patched Python.\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 31, "language": "en" } }, { "id": 73144, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/helpers/permission.py", "file_name": "permission.py", "fun_name": "get_valid_parent_pages", "commit_message": "Reformat with black", "code": "def get_valid_parent_pages(self, user):\n \n # Get queryset of pages where this page type can be added\n allowed_parent_page_content_types = list(\n ContentType.objects.get_for_models(\n *self.model.allowed_parent_page_models()\n ).values()\n )\n allowed_parent_pages = Page.objects.filter(\n content_type__in=allowed_parent_page_content_types\n )\n\n # Get queryset of pages where the user has 
permission to add subpages\n if user.is_superuser:\n pages_where_user_can_add = Page.objects.all()\n else:\n pages_where_user_can_add = Page.objects.none()\n user_perms = UserPagePermissionsProxy(user)\n\n for perm in user_perms.permissions.filter(permission_type=\"add\"):\n # user has add permission on any subpage of perm.page\n # (including perm.page itself)\n pages_where_user_can_add |= Page.objects.descendant_of(\n perm.page, inclusive=True\n )\n\n # Combine them\n return allowed_parent_pages & pages_where_user_can_add\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 331, "n_words": 83, "vocab_size": 58, "complexity": 3, "nloc": 19, "token_counts": 109, "n_ast_nodes": 184, "n_identifiers": 27, "random_cut": "def get_valid_parent_pages(self, user):\n \n # Get queryset of pages where this page type can be added\n allowed_parent_page_content_types = list(\n ContentType.objects.get_for_models(\n *self.model.allowed_parent_page_models()\n ).values()\n )\n allowed_parent_pages = Page.objects.filter(\n content_type__in=allowed_parent_page_content_types\n )\n\n # Get queryset of pages where the user has permission to add subpages\n if user.is_superuser:\n pages_where_user_can_add = Page.objects.all()\n else:\n pages_where_user_can_add = Page.objects.none()\n user_perms = UserPagePermissionsProxy(user)\n\n ", "d_id": 15958, "documentation": { "docstring": "\n Identifies possible parent pages for the current user by first looking\n at allowed_parent_page_models() on self.model to limit options to the\n correct type of page, then checking permissions on those individual\n pages to make sure we have permission to add a subpage to it.\n ", "n_words": 43, "vocab_size": 36, "n_whitespaces": 79, "language": "en" } }, { "id": 66255, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py", "file_name": "monthly_attendance_sheet.py", "fun_name": "get_attendance_list", "commit_message": "style: format code with black", "code": "def get_attendance_list(conditions, filters):\n\tattendance_list = frappe.db.sql(\n\t\t\n\t\t% conditions,\n\t\tfilters,\n\t\tas_dict=1,\n\t)\n\n\tif not attendance_list:\n\t\tmsgprint(_(\"No attendance record found\"), alert=True, indicator=\"orange\")\n\n\tatt_map = {}\n\tfor d in attendance_list:\n\t\tatt_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, \"\")\n\t\tatt_map[d.employee][d.day_of_month] = d.status\n\n\treturn att_map\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 22, "n_words": 35, "vocab_size": 31, "complexity": 3, "nloc": 15, "token_counts": 94, "n_ast_nodes": 149, "n_identifiers": 19, "random_cut": "def get_attendance_list(conditions, filters):\n\tattendance_list = frapp", "d_id": 14149, "documentation": { "docstring": "select employee, day(attendance_date) as day_of_month,\n\t\tstatus from tabAttendance where docstatus = 1 %s order by employee, attendance_date", "n_words": 17, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 35987, "commit_id": "70203b59379b1841013980b6941bddfd34bfe816", "repo": "transformers", "path": "src/transformers/models/rembert/modeling_tf_rembert.py", "file_name": "modeling_tf_rembert.py", "fun_name": "_reorder_cache", "commit_message": "TF generate refactor - past without encoder outputs (#15944)\n\n* 
Remove packed past from generation_tf_utils\r\n\r\n* update models with the new past format\r\n\r\n* update template accordingly", "code": "def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),)\n return reordered_past\n\n\n@add_start_docstrings(\n ,\n REMBERT_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"\"\"\n RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.\n \"\"\",\n REMBERT_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 60, "n_words": 25, "vocab_size": 21, "complexity": 3, "nloc": 5, "token_counts": 42, "n_ast_nodes": 76, "n_identifiers": 12, "random_cut": "def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(tf.gather(past_state, bea", "d_id": 6559, "documentation": { "docstring": "\n RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 21, "language": "en" } }, { "id": 100089, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_team_details.py", "file_name": "test_team_details.py", "fun_name": "test_remove_as_admin_not_in_team", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_remove_as_admin_not_in_team(self):\n \n\n # an org with closed membership (byproduct of flags=0)\n org = self.create_organization(owner=self.user, flags=0)\n team = self.create_team(organization=org)\n admin_user = self.create_user(email=\"foo@example.com\", is_superuser=False)\n\n self.create_member(\n organization=org,\n user=admin_user,\n role=\"admin\",\n teams=[], # note that admin_user isn't a member of `team`\n )\n\n self.login_as(admin_user)\n\n # first, try deleting the team with open membership off\n self.get_error_response(team.organization.slug, team.slug, status_code=403)\n self.assert_team_not_deleted(team.id)\n\n # now, with open membership on\n org.flags.allow_joinleave = True\n org.save()\n\n self.get_success_response(team.organization.slug, team.slug, status_code=204)\n self.assert_team_deleted(team.id)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 223, "n_words": 66, "vocab_size": 49, "complexity": 1, "nloc": 17, "token_counts": 138, "n_ast_nodes": 221, "n_identifiers": 27, "random_cut": "def test_remove_as_admin_not_in_team(self):\n \n\n # an org with closed membership (byproduct of flags=0)\n org = self.create_organization(owner=self.user, flags=0)\n team = self.create_team(organization=org)\n admin_user = self.create_user(email=\"foo@example.com\", is_superuser=False)\n\n self.create_member(\n organization=org,\n user=admin_user,\n role=\"admin\",\n teams=[], # note that admin_user isn't a member of `team`\n )\n\n self.login_as(admin_user)\n\n # first, try deleting the team with open membership off\n self.get_error_response(team.organization.slug, team.slug, status_code=403)\n self.assert_team_not_deleted(", "d_id": 19756, "documentation": { "docstring": "Admins can't remove teams of which they're not a part, unless\n open membership is on.", "n_words": 15, "vocab_size": 15, "n_whitespaces": 21, "language": "en" } }, { 
"id": 131087, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/aws/test_aws_batch_tag_update.py", "file_name": "test_aws_batch_tag_update.py", "fun_name": "batch_test", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def batch_test(num_threads, delay):\n \n with mock.patch(\n \"ray.autoscaler._private.aws.node_provider.make_ec2_client\"\n ), mock.patch.object(AWSNodeProvider, \"_create_tags\", mock_create_tags):\n provider = AWSNodeProvider(\n provider_config={\"region\": \"nowhere\"}, cluster_name=\"default\"\n )\n provider.batch_counter = 0\n provider.tag_update_counter = 0\n provider.tag_cache = {str(x): {} for x in range(num_threads)}\n\n threads = []\n for x in range(num_threads):\n thread = threading.Thread(\n target=provider.set_node_tags, args=(str(x), {\"foo\": \"bar\"})\n )\n threads.append(thread)\n\n for thread in threads:\n thread.start()\n time.sleep(delay)\n for thread in threads:\n thread.join()\n\n return provider.batch_counter, provider.tag_update_counter\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 239, "n_words": 61, "vocab_size": 43, "complexity": 5, "nloc": 22, "token_counts": 154, "n_ast_nodes": 256, "n_identifiers": 29, "random_cut": "def batch_test(num_threads, delay):\n \n with mock.patch(\n \"ray.autoscaler._private.aws.node_provider.make_ec2_client\"\n ), mock.patch.object(AWSNodeProvider, \"_create_tags\", mock_create_tags):\n provider = AWSNodeProvider(\n provider_config={\"region\": \"nowhere\"}, cluster_name=\"default\"\n )\n provider.batch_counter = 0\n provider.tag_update_count", "d_id": 29474, "documentation": { "docstring": "Run AWSNodeProvider.set_node_tags in several threads, with a\n specified delay between thread launches.\n\n Return the number of batches of tag updates and the number of tags\n updated.\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 38, "language": "en" } }, { "id": 244048, "commit_id": "cac356380d505bf15587f07c0529218cc36b9652", "repo": "mmdetection", "path": "mmdet/models/plugins/pixel_decoder.py", "file_name": "pixel_decoder.py", "fun_name": "forward", "commit_message": "[Feature] Add Maskformer to mmdet (#7212)\n\n* first commit\r\n\r\n* add README\r\n\r\n* move model description from config to readme\r\n\r\nadd description for binary_input\r\n\r\nadd description for dice loss\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nremove compatibility of pretrain in maskformer\r\n\r\n* update comments in maskformer_head\r\n\r\n* update docs format", "code": "def forward(self, feats, img_metas):\n \n y = self.last_feat_conv(feats[-1])\n for i in range(self.num_inputs - 2, -1, -1):\n x = feats[i]\n cur_fpn = self.lateral_convs[i](x)\n y = cur_fpn + \\\n F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest')\n y = self.output_convs[i](y)\n\n mask_feature = self.mask_feature(y)\n memory = feats[-1]\n return mask_feature, memory\n\n\n@PLUGIN_LAYERS.register_module()", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "@PLUGIN_LAYERS.register_module()", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 142, "n_words": 42, "vocab_size": 32, "complexity": 2, "nloc": 11, "token_counts": 113, "n_ast_nodes": 186, "n_identifiers": 22, "random_cut": "def forward(self, feats, 
img_metas):\n \n y = self.last_feat_conv(feats[-1])\n for i in range(self.num_inputs - 2, -1, -1):\n x = feats[i]\n cur_fpn = self.lateral_convs[i](x)\n y = cur_fpn + \\\n F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest')\n y = self.output_convs[i](y)\n\n mask_feature = self.mask_feature(y)\n memory = feats[-1]\n return mask_feature, memory\n\n\n@PLUGIN_LAYERS.register_module()", "d_id": 70215, "documentation": { "docstring": "\n Args:\n feats (list[Tensor]): Feature maps of each level. Each has\n shape of (batch_size, c, h, w).\n img_metas (list[dict]): List of image information. Pass in\n for creating more accurate padding mask. Not used here.\n\n Returns:\n tuple: a tuple containing the following:\n\n - mask_feature (Tensor): Shape (batch_size, c, h, w).\n - memory (Tensor): Output of last stage of backbone.\\\n Shape (batch_size, c, h, w).\n ", "n_words": 62, "vocab_size": 47, "n_whitespaces": 196, "language": "en" } }, { "id": 290594, "commit_id": "f584efa0c24df19ef1f805ecf95a95cecec5ff99", "repo": "core", "path": "tests/components/bluetooth/test_models.py", "file_name": "test_models.py", "fun_name": "test_remote_scanner_expires_non_connectable", "commit_message": "Move bluetooth remote scanner implementation into a base class (#82012)", "code": "async def test_remote_scanner_expires_non_connectable(hass):\n \n manager = _get_manager()\n\n switchbot_device = BLEDevice(\n \"44:44:33:11:23:45\",\n \"wohand\",\n {},\n rssi=-100,\n )\n switchbot_device_adv = generate_advertisement_data(\n local_name=\"wohand\",\n service_uuids=[],\n manufacturer_data={1: b\"\\x01\"},\n rssi=-100,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 97, "n_words": 23, "vocab_size": 19, "complexity": 1, "nloc": 64, "token_counts": 301, "n_ast_nodes": 95, "n_identifiers": 12, "random_cut": "async def test_remote_scanner_expires_non_connectable(hass):\n \n manager = _get_manager()\n\n switchbot_device = BLEDevice(\n ", "d_id": 89709, "documentation": { "docstring": "Test the remote scanner expires stale non connectable data.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 267605, "commit_id": "bcdc2e167af61cf978e589c753764f76e301a6fa", "repo": "ansible", "path": "lib/ansible/plugins/inventory/toml.py", "file_name": "toml.py", "fun_name": "convert_yaml_objects_to_native", "commit_message": "Support for Python 3.11+ tomllib for inventory (#77435)", "code": "def convert_yaml_objects_to_native(obj):\n \n if isinstance(obj, dict):\n return dict((k, convert_yaml_objects_to_native(v)) for k, v in obj.items())\n elif isinstance(obj, list):\n return [convert_yaml_objects_to_native(v) for v in obj]\n elif isinstance(obj, text_type):\n return text_type(obj)\n else:\n return obj\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 73, "n_words": 30, "vocab_size": 21, "complexity": 6, "nloc": 9, "token_counts": 72, "n_ast_nodes": 113, "n_identifiers": 9, "random_cut": "def convert_yaml_objects_to_native(obj):\n \n if isinstance(obj, dict):\n ", "d_id": 78972, "documentation": { "docstring": "Older versions of the ``toml`` python library, and tomllib, don't have\n a pluggable way to tell the encoder about custom types, so we need to\n ensure objects that we pass are native types.\n\n Used with:\n - ``toml<0.10.0`` where ``toml.TomlEncoder`` is missing\n - ``tomli`` or ``tomllib``\n\n 
This function recurses an object and ensures we cast any of the types from\n ``ansible.parsing.yaml.objects`` into their native types, effectively cleansing\n the data before we hand it over to the toml library.\n\n This function doesn't directly check for the types from ``ansible.parsing.yaml.objects``\n but instead checks for the types those objects inherit from, to offer more flexibility.\n ", "n_words": 101, "vocab_size": 76, "n_whitespaces": 138, "language": "en" } }, { "id": 3302, "commit_id": "8fbf8ba2a5bfcdb892e8ca596e338894614000b5", "repo": "prophet", "path": "python/prophet/forecaster.py", "file_name": "forecaster.py", "fun_name": "make_future_dataframe", "commit_message": "Speed Up Uncertainty Predictions (#2186)", "code": "def make_future_dataframe(self, periods, freq='D', include_history=True):\n \n if self.history_dates is None:\n raise Exception('Model has not been fit.')\n if freq is None:\n # taking the tail makes freq inference more reliable\n freq = pd.infer_freq(self.history_dates.tail(5))\n # returns None if inference failed\n if freq is None:\n raise Exception('Unable to infer `freq`')\n last_date = self.history_dates.max()\n dates = pd.date_range(\n start=last_date,\n periods=periods + 1, # An extra in case we include start\n freq=freq)\n dates = dates[dates > last_date] # Drop start if equals last_date\n dates = dates[:periods] # Return correct number of periods\n\n if include_history:\n dates = np.concatenate((np.array(self.history_dates), dates))\n\n return pd.DataFrame({'ds': dates})\n", "url": "https://github.com/facebook/prophet.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 274, "n_words": 94, "vocab_size": 66, "complexity": 5, "nloc": 17, "token_counts": 135, "n_ast_nodes": 223, "n_identifiers": 19, "random_cut": "def make_future_dataframe(self, periods, freq='D', include_history=True):\n \n if self.history_dates is None:\n raise Exception('Model has not been fit.')\n if freq is None:\n # taking the tail makes freq inference more reliable\n freq = pd.infer_freq(self.history_dates.tail(5))\n # returns None if inference failed\n if freq is None:\n raise Exception('Unable to infer `freq`')\n last_date = self.history_dates.max()\n dates = pd.date_range(\n start=last_date,\n periods=periods + 1, # An extra in case we include start\n freq=freq)\n dates = dates[dates > last_date] # Drop start if equals last_date\n dates = dates[:per", "d_id": 437, "documentation": { "docstring": "Simulate the trend using the extrapolated generative model.\n\n Parameters\n ----------\n periods: Int number of periods to forecast forward.\n freq: Any valid frequency for pd.date_range, such as 'D' or 'M'.\n include_history: Boolean to include the historical dates in the data\n frame for predictions.\n\n Returns\n -------\n pd.Dataframe that extends forward from the end of self.history for the\n requested number of periods.\n ", "n_words": 59, "vocab_size": 48, "n_whitespaces": 140, "language": "en" } }, { "id": 60251, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/io.py", "file_name": "io.py", "fun_name": "array_to_blobproto", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def array_to_blobproto(arr, diff=None):\n \n blob = caffe_pb2.BlobProto()\n blob.shape.dim.extend(arr.shape)\n blob.data.extend(arr.astype(float).flat)\n if diff is not None:\n blob.diff.extend(diff.astype(float).flat)\n return 
blob\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 41, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 7, "token_counts": 67, "n_ast_nodes": 109, "n_identifiers": 13, "random_cut": "def array_to_blobproto(arr, diff=None):\n \n blob = caffe_pb2.BlobProt", "d_id": 12044, "documentation": { "docstring": "Converts a N-dimensional array to blob proto. If diff is given, also\n convert the diff. You need to make sure that arr and diff have the same\n shape, and this function does not do sanity check.\n ", "n_words": 36, "vocab_size": 32, "n_whitespaces": 45, "language": "en" } }, { "id": 248122, "commit_id": "78b99de7c206b106340e12cdee0af9aa246bd5ad", "repo": "synapse", "path": "tests/test_federation.py", "file_name": "test_federation.py", "fun_name": "test_cross_signing_keys_retry", "commit_message": "Prefer `make_awaitable` over `defer.succeed` in tests (#12505)\n\nWhen configuring the return values of mocks, prefer awaitables from\r\n`make_awaitable` over `defer.succeed`. `Deferred`s are only awaitable\r\nonce, so it is inappropriate for a mock to return the same `Deferred`\r\nmultiple times.\r\n\r\nAlso update `run_in_background` to support functions that return\r\narbitrary awaitables.\r\n\r\nSigned-off-by: Sean Quah ", "code": "def test_cross_signing_keys_retry(self):\n \n remote_user_id = \"@john:test_remote\"\n remote_master_key = \"85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY\"\n remote_self_signing_key = \"QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ\"\n\n # Register mock device list retrieval on the federation client.\n federation_client = self.homeserver.get_federation_client()\n federation_client.query_user_devices = Mock(\n return_value=make_awaitable(\n {\n \"user_id\": remote_user_id,\n \"stream_id\": 1,\n \"devices\": [],\n \"master_key\": {\n \"user_id\": remote_user_id,\n \"usage\": [\"master\"],\n \"keys\": {\"ed25519:\" + remote_master_key: remote_master_key},\n },\n \"self_signing_key\": {\n \"user_id\": remote_user_id,\n \"usage\": [\"self_signing\"],\n \"keys\": {\n \"ed25519:\"\n + remote_self_signing_key: remote_self_signing_key\n },\n },\n }\n )\n )\n\n # Resync the device list.\n device_handler = self.homeserver.get_device_handler()\n self.get_success(\n device_handler.device_list_updater.user_device_resync(remote_user_id),\n )\n\n # Retrieve the cross-signing keys for this user.\n keys = self.get_success(\n self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]),\n )\n self.assertTrue(remote_user_id in keys)\n\n # Check that the master key is the one returned by the mock.\n master_key = keys[remote_user_id][\"master\"]\n self.assertEqual(len(master_key[\"keys\"]), 1)\n self.assertTrue(\"ed25519:\" + remote_master_key in master_key[\"keys\"].keys())\n self.assertTrue(remote_master_key in master_key[\"keys\"].values())\n\n # Check that the self-signing key is the one returned by the mock.\n self_signing_key = keys[remote_user_id][\"self_signing\"]\n self.assertEqual(len(self_signing_key[\"keys\"]), 1)\n self.assertTrue(\n \"ed25519:\" + remote_self_signing_key in self_signing_key[\"keys\"].keys(),\n )\n self.assertTrue(remote_self_signing_key in self_signing_key[\"keys\"].values())\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 767, "n_words": 145, "vocab_size": 87, "complexity": 1, "nloc": 45, "token_counts": 263, "n_ast_nodes": 
464, "n_identifiers": 27, "random_cut": "def test_cross_signing_keys_retry(self):\n \n remote_user_id = \"@john:test_remote\"\n remote_master_key = \"85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY\"\n remote_self_signing_key = \"QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ\"\n\n # Register mock device list retrieval on the federation client.\n federation_client = self.homeserver.get_federation_client()\n federation_client.query_user_devices = Mock(\n return_value=make_awaitable(\n {\n \"user_id\": remote_user_id,\n \"stream_id\": 1,\n \"devices\": [],\n \"master_key\": {\n \"user_id\": remote_user_id,\n \"usage\": [\"master\"],\n \"keys\": {\"ed25519:\" + remote_master_key: remote_master_key},\n },\n \"self_signing_key\": {\n \"user_id\": remote_user_id,\n \"usage\": [\"self_signing\"],\n \"keys\": {\n \"ed25", "d_id": 72114, "documentation": { "docstring": "Tests that resyncing a device list correctly processes cross-signing keys from\n the remote server.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 28, "language": "en" } }, { "id": 46113, "commit_id": "12e9e2c695f9ebb9d3dde9c0f7dfaa112654f0d6", "repo": "airflow", "path": "tests/providers/databricks/operators/test_databricks.py", "file_name": "test_databricks.py", "fun_name": "test_exec_success", "commit_message": "Databricks hook - retry on HTTP Status 429 as well (#21852)\n\n* Databricks hook - retry on HTTP Status 429 as well\r\n\r\nthis fixes #21559\r\n\r\n* Reimplement retries using tenacity\r\n\r\nit's now uses exponential backoff by default", "code": "def test_exec_success(self, db_mock_class):\n \n run = {\n 'new_cluster': NEW_CLUSTER,\n 'notebook_task': NOTEBOOK_TASK,\n }\n op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)\n db_mock = db_mock_class.return_value\n db_mock.submit_run.return_value = 1\n db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '')\n\n op.execute(None)\n\n expected = databricks_operator._deep_string_coerce(\n {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID}\n )\n db_mock_class.assert_called_once_with(\n DEFAULT_CONN_ID,\n retry_limit=op.databricks_retry_limit,\n retry_delay=op.databricks_retry_delay,\n retry_args=None,\n )\n\n db_mock.submit_run.assert_called_once_with(expected)\n db_mock.get_run_page_url.assert_called_once_with(RUN_ID)\n db_mock.get_run_state.assert_called_once_with(RUN_ID)\n assert RUN_ID == op.run_id\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 239, "n_words": 50, "vocab_size": 41, "complexity": 1, "nloc": 23, "token_counts": 137, "n_ast_nodes": 224, "n_identifiers": 30, "random_cut": "def test_exec_success(self, db_mock_class):\n \n run = {\n 'new_cluster': NEW_CLUSTER,\n 'notebook_task': NOTEBOOK_TASK,\n }\n op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)\n db_mock = db_mock_class.return_value\n db_mock.submit_run.return_value = 1\n db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '')\n\n op.execute(None)\n\n expected = databricks_operator._deep_string_coerce(\n {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID}\n )\n db_mock_class.assert_called_once_with(\n DEFAULT_CONN_ID,\n retry_limit=op.databricks_retry_limit,\n retry_delay=op.databricks_retry_delay,\n retry_args=None,\n )\n\n db_mock.submit_run.assert_called_once_with(expected)\n db_mock.get_run_page_url.assert_called_once_with(RUN_ID)\n db_mock.get_run_state.assert_called_once_with(RUN_ID)\n assert 
RUN_ID == op.run_", "d_id": 8783, "documentation": { "docstring": "\n Test the execute function in case where the run is successful.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 42253, "commit_id": "e644793f0ac2b1be178425f20f529121f37f29de", "repo": "seaborn", "path": "seaborn/palettes.py", "file_name": "palettes.py", "fun_name": "dark_palette", "commit_message": "Convert color palette docstrings to notebooks (#3034)\n\n* Convert color palette docstrings to notebooks and rerun all with py310 kernel\r\n\r\n* Add v0.12.1 release notes to index\r\n\r\n* Improve failure mode when ipywidgets is not involved\r\n\r\n* Update palettes docstrings\r\n\r\n* Remove all other doctest-style examples\r\n\r\n* Remove doctest-oriented testing infrastructure\r\n\r\n* Mention in release notes\r\n\r\n* Skip colormap patch test on matplotlib's where it's not relevant\r\n\r\n* Use more robust approach to mpl backcompat", "code": "def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 15\n gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n colors = [rgb, gray] if reverse else [gray, rgb]\n return blend_palette(colors, n_colors, as_cmap)\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 62, "n_words": 41, "vocab_size": 35, "complexity": 2, "nloc": 7, "token_counts": 93, "n_ast_nodes": 137, "n_identifiers": 18, "random_cut": "def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 15\n gray = _color_to_rgb", "d_id": 7513, "documentation": { "docstring": "Make a sequential palette that blends from dark to ``color``.\n\n This kind of palette is good for data that range between relatively\n uninteresting low values and interesting high values.\n\n The ``color`` parameter can be specified in a number of ways, including\n all options for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_dark_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex, rgb-tuple, or html color name\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n input : {'rgb', 'hls', 'husl', xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n .. 
include:: ../docstrings/dark_palette.rst\n\n ", "n_words": 201, "vocab_size": 128, "n_whitespaces": 328, "language": "en" } }, { "id": 106834, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/__init__.py", "file_name": "__init__.py", "fun_name": "contour", "commit_message": "apply black py to all python files", "code": "def contour(self, X, win=None, env=None, opts=None):\n \n\n return self._surface(X=X, stype=\"contour\", opts=opts, win=win, env=env)\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 26, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 45, "n_ast_nodes": 66, "n_identifiers": 8, "random_cut": "def contour(self, X, win=None, env=None, opts=None):\n \n\n return self._surface(X=X, stype=\"contour\", opts=opts, win=win, env=env", "d_id": 22462, "documentation": { "docstring": "\n This function draws a contour plot. It takes as input an `NxM` tensor\n `X` that specifies the value at each location in the contour plot.\n\n The following `opts` are supported:\n\n - `opts.colormap`: colormap (`string`; default = `'Viridis'`)\n - `opts.xmin` : clip minimum value (`number`; default = `X:min()`)\n - `opts.xmax` : clip maximum value (`number`; default = `X:max()`)\n ", "n_words": 57, "vocab_size": 43, "n_whitespaces": 113, "language": "en" } }, { "id": 224060, "commit_id": "e7f07cc82ab2be920ab426ba07456d8b2592714d", "repo": "mkdocs", "path": "mkdocs/utils/__init__.py", "file_name": "__init__.py", "fun_name": "get_themes", "commit_message": "Remove spaces at the ends of docstrings, normalize quotes", "code": "def get_themes():\n \n\n themes = {}\n eps = set(importlib_metadata.entry_points(group='mkdocs.themes'))\n builtins = {ep.name for ep in eps if ep.dist.name == 'mkdocs'}\n\n for theme in eps:\n\n if theme.name in builtins and theme.dist.name != 'mkdocs':\n raise exceptions.ConfigurationError(\n f\"The theme '{theme.name}' is a builtin theme but the package '{theme.dist.name}' \"\n \"attempts to provide a theme with the same name.\"\n )\n elif theme.name in themes:\n log.warning(\n f\"A theme named '{theme.name}' is provided by the Python packages '{theme.dist.name}' \"\n f\"and '{themes[theme.name].dist.name}'. 
The one in '{theme.dist.name}' will be used.\"\n )\n\n themes[theme.name] = theme\n\n return themes\n\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 230, "n_words": 87, "vocab_size": 60, "complexity": 7, "nloc": 17, "token_counts": 96, "n_ast_nodes": 224, "n_identifiers": 16, "random_cut": "def get_themes():\n \n\n themes = {}\n eps = set(importlib_metadata.entry_points(group='mkdocs.themes'))\n builtins = {ep.name for ep in eps if ep.dist.name == 'mkdocs'}\n\n for theme in eps:\n\n if theme.name in builtins and", "d_id": 57207, "documentation": { "docstring": "Return a dict of all installed themes as {name: EntryPoint}.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 100127, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_user_notification_details.py", "file_name": "test_user_notification_details.py", "fun_name": "test_subscribe_by_default", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_subscribe_by_default(self):\n \n NotificationSetting.objects.update_settings(\n ExternalProviders.EMAIL,\n NotificationSettingTypes.ISSUE_ALERTS,\n NotificationSettingOptionValues.NEVER,\n user=self.user,\n )\n\n response = self.get_success_response(\"me\")\n assert response.data.get(\"subscribeByDefault\") is False\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 94, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 9, "token_counts": 50, "n_ast_nodes": 82, "n_identifiers": 16, "random_cut": "def test_subscribe_by_default(self):\n \n NotificationSetting", "d_id": 19757, "documentation": { "docstring": "\n Test that we expect project-independent issue alert preferences to be\n returned as `subscribe_by_default`.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 73918, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/permission_policies/base.py", "file_name": "base.py", "fun_name": "_get_users_with_any_permission_codenames_filter", "commit_message": "Reformat with black", "code": "def _get_users_with_any_permission_codenames_filter(self, permission_codenames):\n \n permissions = Permission.objects.filter(\n content_type=self._content_type, codename__in=permission_codenames\n )\n return (\n Q(is_superuser=True)\n | Q(user_permissions__in=permissions)\n | Q(groups__permissions__in=permissions)\n ) & Q(is_active=True)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 98, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 9, "token_counts": 56, "n_ast_nodes": 89, "n_identifiers": 15, "random_cut": "def _get_users_with_any_permission_codenames_filter(self, permission_codenames):\n \n permissions = Permission.objects.filter(\n content_type=self._content_type, codename__in=permission_codenames\n )\n return (\n Q(is_superuser=True)\n ", "d_id": 16175, "documentation": { "docstring": "\n Given a list of permission codenames, return a filter expression which\n will find all users which have any of those permissions - either\n through group permissions, user permissions, or implicitly through\n being a superuser.\n ", "n_words": 34, "vocab_size": 28, "n_whitespaces": 70, 
"language": "en" } }, { "id": 289847, "commit_id": "e15f2e050e7afadbb19d32973104e4e2f5a172ae", "repo": "core", "path": "homeassistant/components/ibeacon/coordinator.py", "file_name": "coordinator.py", "fun_name": "_async_update_rssi_and_transients", "commit_message": "Update ibeacon-ble to 1.0.1 (#80785)", "code": "def _async_update_rssi_and_transients(self) -> None:\n \n for (\n unique_id,\n ibeacon_advertisement,\n ) in self._last_ibeacon_advertisement_by_unique_id.items():\n address = unique_id.split(\"_\")[-1]\n service_info = bluetooth.async_last_service_info(\n self.hass, address, connectable=False\n )\n if not service_info:\n continue\n\n if address in self._transient_seen_count:\n self._transient_seen_count[address] += 1\n if self._transient_seen_count[address] == MIN_SEEN_TRANSIENT_NEW:\n self._transient_seen_count.pop(address)\n _async_dispatch_update(\n self.hass,\n unique_id,\n service_info,\n ibeacon_advertisement,\n True,\n True,\n )\n continue\n\n if service_info.rssi != ibeacon_advertisement.rssi:\n ibeacon_advertisement.update_rssi(service_info.rssi)\n async_dispatcher_send(\n self.hass,\n signal_seen(unique_id),\n ibeacon_advertisement,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 541, "n_words": 56, "vocab_size": 39, "complexity": 6, "nloc": 40, "token_counts": 139, "n_ast_nodes": 213, "n_identifiers": 21, "random_cut": "def _async_update_rssi_and_transients(self) -> None:\n \n for (\n unique_id,\n ibeacon_advert", "d_id": 88977, "documentation": { "docstring": "Check to see if the rssi has changed and update any devices.\n\n We don't callback on RSSI changes so we need to check them\n here and send them over the dispatcher periodically to\n ensure the distance calculation is update.\n\n If the transient flag is set we also need to check to see\n if the device is still transmitting and increment the counter\n ", "n_words": 62, "vocab_size": 43, "n_whitespaces": 104, "language": "en" } }, { "id": 207935, "commit_id": "fbda0089f08d7f2a8f00925dbc0b6e10bd779251", "repo": "celery", "path": "celery/contrib/testing/worker.py", "file_name": "worker.py", "fun_name": "setup_app_for_worker", "commit_message": "Add `mypy` to the pipeline (#7383)\n\n* Add typing to Celery\r\n\r\nThis is a simple bootstrap of the process, adding some types to a\r\nfew selected functions, based on comment annotations. 
MyPy is chosen as the\r\ndefault static analyzer for the types.\r\n\r\n* Add mypy to the pipeline\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Remove unused command from tox\r\n\r\n* Install mypy only on CPython\r\n\r\n* Remove wrong annotations\r\n\r\n* Update celery/utils/saferepr.py\r\n\r\nCo-authored-by: Mads Jensen \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def setup_app_for_worker(app, loglevel, logfile) -> None:\n # type: (Celery, Union[str, int], str) -> None\n \n app.finalize()\n app.set_current()\n app.set_default()\n type(app.log)._setup = False\n app.log.setup(loglevel=loglevel, logfile=logfile)\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 85, "n_identifiers": 11, "random_cut": "def setup_app_for_worker(app, loglevel, logfile) -> None:\n # type: (Celery, Union[str, int], str) -> None\n \n app.finalize()\n app.set_current()\n app.set_default()\n type(app.log)._setup = False\n app.log.setup(loglevel=loglevel, logfile=logfile)\n", "d_id": 52162, "documentation": { "docstring": "Setup the app to be used for starting an embedded worker.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 196098, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/graycode.py", "file_name": "graycode.py", "fun_name": "rank", "commit_message": "Updated import locations", "code": "def rank(self):\n \n if self._rank is None:\n self._rank = int(gray_to_bin(self.current), 2)\n return self._rank\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 44, "n_words": 12, "vocab_size": 10, "complexity": 2, "nloc": 4, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 6, "random_cut": "def rank(self):\n \n if self._rank is None:\n self._rank ", "d_id": 47598, "documentation": { "docstring": "\n Ranks the Gray code.\n\n A ranking algorithm determines the position (or rank)\n of a combinatorial object among all the objects w.r.t.\n a given order. For example, the 4 bit binary reflected\n Gray code (BRGC) '0101' has a rank of 6 as it appears in\n the 6th position in the canonical ordering of the family\n of 4 bit Gray codes.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> a = GrayCode(3)\n >>> list(a.generate_gray())\n ['000', '001', '011', '010', '110', '111', '101', '100']\n >>> GrayCode(3, start='100').rank\n 7\n >>> GrayCode(3, rank=7).current\n '100'\n\n See Also\n ========\n\n unrank\n\n References\n ==========\n\n .. 
[1] http://statweb.stanford.edu/~susan/courses/s208/node12.html\n\n ", "n_words": 97, "vocab_size": 73, "n_whitespaces": 266, "language": "en" } }, { "id": 289315, "commit_id": "599d61a4da096227ce4d5ba1dc0eaabceea56f49", "repo": "core", "path": "homeassistant/components/rest/data.py", "file_name": "data.py", "fun_name": "async_update", "commit_message": "Fix payload in rest (#80544)", "code": "async def async_update(self, log_errors=True):\n \n if not self._async_client:\n self._async_client = get_async_client(\n self._hass, verify_ssl=self._verify_ssl\n )\n\n rendered_headers = template.render_complex(self._headers, parse_result=False)\n rendered_params = template.render_complex(self._params)\n\n _LOGGER.debug(\"Updating from %s\", self._resource)\n try:\n response = await self._async_client.request(\n self._method,\n self._resource,\n headers=rendered_headers,\n params=rendered_params,\n auth=self._auth,\n content=self._request_data,\n timeout=self._timeout,\n follow_redirects=True,\n )\n self.data = response.text\n self.headers = response.headers\n except httpx.TimeoutException as ex:\n if log_errors:\n _LOGGER.error(\"Timeout while fetching data: %s\", self._resource)\n self.last_exception = ex\n self.data = None\n self.headers = None\n except httpx.RequestError as ex:\n if log_errors:\n _LOGGER.error(\n \"Error fetching data: %s failed with %s\", self._resource, ex\n )\n self.last_exception = ex\n self.data = None\n self.headers = None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 500, "n_words": 91, "vocab_size": 56, "complexity": 6, "nloc": 35, "token_counts": 202, "n_ast_nodes": 317, "n_identifiers": 38, "random_cut": "async def async_update(self, log_errors=True):\n \n if not self._async_client:\n self._async_client = get_async_client(\n self._hass, verify_ssl=self._verify_ssl\n )\n\n rendered_headers = template.render_complex(self._headers, parse_result=False)\n rendered_params = template.render_complex(self._params)\n\n _LOGGER.debug(\"Updating from %s\", self._resource)\n try:\n response = await self._async_client.request(\n self._method,\n self._resource,\n headers=rendered_headers,\n params=rendered_params,\n auth=self._auth,\n content=self._request_data,\n timeout=self._timeout,\n follow_redirects=True,\n )\n self.data = response.text\n self.headers = response.headers\n except httpx.TimeoutException as ex:\n if log_errors:\n _LOGGER.error(\"Timeout while fetching data: %s\", self._resource)\n self.last_exception = ex\n self.data = None\n self.headers = None\n except httpx.RequestError as ex:\n ", "d_id": 88457, "documentation": { "docstring": "Get the latest data from REST service with provided method.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 286285, "commit_id": "47549cbd9f52a436c06b040fda5b88a7d2bf700a", "repo": "OpenBBTerminal", "path": "openbb_terminal/helper_funcs.py", "file_name": "helper_funcs.py", "fun_name": "set_default_timezone", "commit_message": "[SDK] Allow silencing verbose output in commands that use stocks/load (#3180)\n\n* remove verbose on load\r\n\r\n* Revert implementation of the verbosity setting in stocks controller\r\n\r\n* Edit docstrings to comply with pydocstyle linting rules\r\n\r\n* Fix typos in variable names and help text\r\n\r\n* Add verbosity setting to forex load helper as it uses the stocks helper\r\n\r\n* Update docstrings to comply with pydocstyle linting rules\r\n\r\n* Update tests\r\n\r\n* 
Fix test relying on local sources settings\r\n\r\n* Remove old test cassettes\r\n\r\n* Add new test data\r\n\r\n* WIP: Fix futures tests\r\n\r\n* Clean up test file\r\n\r\n* Fix futures tests having a time component\r\n\r\n* Fix futures model tests\r\n\r\nCo-authored-by: James Maslek \r\nCo-authored-by: Theodore Aptekarev ", "code": "def set_default_timezone() -> None:\n \n dotenv.load_dotenv(USER_ENV_FILE)\n user_tz = os.getenv(\"OPENBB_TIMEZONE\")\n if not user_tz:\n dotenv.set_key(USER_ENV_FILE, \"OPENBB_TIMEZONE\", \"America/New_York\")\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 35, "n_ast_nodes": 65, "n_identifiers": 8, "random_cut": "def set_default_timezone() -> None:\n \n dotenv.load_dotenv(USER_ENV_FILE)\n user_tz = os.ge", "d_id": 85683, "documentation": { "docstring": "Set a default (America/New_York) timezone if one doesn't exist.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 191733, "commit_id": "d0f194de73c942cb89d731dbfa5ae809111fb07a", "repo": "langchain", "path": "langchain/agents/agent.py", "file_name": "agent.py", "fun_name": "return_stopped_response", "commit_message": "add logic for agent stopping (#420)", "code": "def return_stopped_response(self) -> dict:\n \n return {k: \"Agent stopped due to max iterations.\" for k in self.return_values}\n\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 3, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def return_stopped_response(self) -> dict:\n \n return {k: \"Agent stopped due to max iterations.\" for k in self.return_values}\n\n", "d_id": 46839, "documentation": { "docstring": "Return response when agent has been stopped due to max iterations.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 264354, "commit_id": "cf3ca5a661cc015baf4ef462be07e91c09db0ede", "repo": "netbox", "path": "netbox/utilities/forms/fields/dynamic.py", "file_name": "dynamic.py", "fun_name": "clean", "commit_message": "Refactor & document supported form fields", "code": "def clean(self, value):\n \n if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE:\n return None\n return super().clean(value)\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 48, "n_words": 16, "vocab_size": 14, "complexity": 3, "nloc": 4, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 7, "random_cut": "def clean(self, value):\n \n if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE:\n return Non", "d_id": 77696, "documentation": { "docstring": "\n When null option is enabled and \"None\" is sent as part of a form to be submitted, it is sent as the\n string 'null'. 
This will check for that condition and gracefully handle the conversion to a NoneType.\n ", "n_words": 38, "vocab_size": 30, "n_whitespaces": 61, "language": "en" } }, { "id": 248647, "commit_id": "d4b1c0d800eaa83c4d56a9cf17881ad362b9194b", "repo": "synapse", "path": "tests/test_event_auth.py", "file_name": "test_event_auth.py", "fun_name": "test_unexpected_auth_events", "commit_message": "Fix inconsistencies in event validation (#13088)", "code": "def test_unexpected_auth_events(self):\n \n creator = \"@creator:example.com\"\n\n create_event = _create_event(RoomVersions.V9, creator)\n join_event = _join_event(RoomVersions.V9, creator)\n pl_event = _power_levels_event(\n RoomVersions.V9,\n creator,\n {\"state_default\": 30, \"users\": {\"creator\": 100}},\n )\n join_rules_event = _join_rules_event(RoomVersions.V9, creator, \"public\")\n\n event_store = _StubEventSourceStore()\n event_store.add_events([create_event, join_event, pl_event, join_rules_event])\n\n good_event = _random_state_event(\n RoomVersions.V9, creator, [create_event, join_event, pl_event]\n )\n # join rules should *not* be included in the auth events.\n bad_event = _random_state_event(\n RoomVersions.V9,\n creator,\n [create_event, join_event, pl_event, join_rules_event],\n )\n\n get_awaitable_result(\n event_auth.check_state_independent_auth_rules(event_store, good_event)\n )\n with self.assertRaises(AuthError):\n get_awaitable_result(\n event_auth.check_state_independent_auth_rules(event_store, bad_event)\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 320, "n_words": 76, "vocab_size": 52, "complexity": 1, "nloc": 27, "token_counts": 154, "n_ast_nodes": 239, "n_identifiers": 24, "random_cut": "def test_unexpected_auth_events(self):\n \n creator = \"@creator:example.com\"\n\n create_event = _create_event(RoomVersions.V9, creator)\n join_event = _join_event(RoomVersions.V9, creator)\n pl_event = _power_levels_event(\n RoomVersions.V9,\n creator,\n {\"state_default\": 30, \"users\": {\"creator\": 100}},\n )\n join_rules_event = _join_rules_event(RoomVersions.V9, creator, \"public\")\n\n event_store = _StubEventSourceStore()\n event_store.add_events([create_event, join_event, pl_event, join_rules_event])\n\n good_event = _random_state_event(\n RoomVersions.V9, creator, [create_event, join_event, pl_event]\n )\n # join rules", "d_id": 72397, "documentation": { "docstring": "Events with excess auth_events should be rejected\n\n https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules\n 2. Reject if event has auth_events that:\n 2. 
have entries whose type and state_key don’t match those specified by the\n auth events selection algorithm described in the server specification.\n ", "n_words": 37, "vocab_size": 34, "n_whitespaces": 81, "language": "en" } }, { "id": 182900, "commit_id": "a72e347ed99333a090377ee438eaf63477cbf98b", "repo": "textual", "path": "src/textual/devtools/service.py", "file_name": "service.py", "fun_name": "_consume_incoming", "commit_message": "Seperate server and client handling logic into classes for devtools", "code": "async def _consume_incoming(self) -> None:\n \n while True:\n message_json = await self.incoming_queue.get()\n if message_json is None:\n self.incoming_queue.task_done()\n break\n\n type = message_json[\"type\"]\n if type == \"client_log\":\n path = message_json[\"payload\"][\"path\"]\n line_number = message_json[\"payload\"][\"line_number\"]\n timestamp = message_json[\"payload\"][\"timestamp\"]\n encoded_segments = message_json[\"payload\"][\"encoded_segments\"]\n decoded_segments = base64.b64decode(encoded_segments)\n segments = pickle.loads(decoded_segments)\n self.service.console.print(\n DevtoolsLogMessage(\n segments=segments,\n path=path,\n line_number=line_number,\n unix_timestamp=timestamp,\n )\n )\n elif type == \"client_spillover\":\n spillover = int(message_json[\"payload\"][\"spillover\"])\n info_renderable = DevtoolsInternalMessage(\n f\"Discarded {spillover} messages\", level=\"warning\"\n )\n self.service.console.print(info_renderable)\n self.incoming_queue.task_done()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 506, "n_words": 67, "vocab_size": 49, "complexity": 5, "nloc": 32, "token_counts": 170, "n_ast_nodes": 299, "n_identifiers": 27, "random_cut": "async def _consume_incoming(self) -> None:\n \n while True:\n message_json = await self.incoming_queue.get()\n if message_json is None:\n self.incoming_queue.task_done()\n break\n\n type = message_json[\"type\"]\n if type == \"client_log\":\n path = message_json[\"payload\"][\"path\"]\n line_number = message_json[\"payload\"][\"line_number\"]\n timestamp = message_json[\"payload\"][\"timestamp\"]\n encoded_segments = message_json[\"payload\"][\"encoded_segments\"]\n decoded_segments = base64.b64decode(encoded_segments)\n segments = pickle.loads(decoded_segments)\n self.service.console.print(\n DevtoolsLogMessage(\n segments=segments,\n path=path,\n line_number=line_number,\n unix_timestamp=timestamp,\n )\n )\n elif type == \"client_spillover\":\n ", "d_id": 44000, "documentation": { "docstring": "Consume messages from the incoming (client -> server) Queue, and print\n the corresponding renderables to the console for each message.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 34, "language": "en" } }, { "id": 145231, "commit_id": "a402e956a4e1ebe9bc4e2b404536466967c497af", "repo": "ray", "path": "docker/kuberay-autoscaler/test_autoscaling_config.py", "file_name": "test_autoscaling_config.py", "fun_name": "_get_basic_ray_cr", "commit_message": "[KubeRay] Format autoscaling config based on RayCluster CR (#22348)\n\nCloses #21655. 
At the start of each autoscaler iteration, we read the Ray Cluster CR from K8s and use it to extract the autoscaling config.", "code": "def _get_basic_ray_cr() -> dict:\n \n cr_path = str(\n Path(__file__).resolve().parents[2]\n / \"python\"\n / \"ray\"\n / \"autoscaler\"\n / \"kuberay\"\n / \"ray-cluster.complete.yaml\"\n )\n return yaml.safe_load(open(cr_path).read())\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 75, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 11, "token_counts": 49, "n_ast_nodes": 92, "n_identifiers": 12, "random_cut": "def _get_basic_ray_cr() -> dict:\n \n cr_path = str(\n Path(__file__).resolve().paren", "d_id": 33397, "documentation": { "docstring": "Returns the example Ray CR included in the Ray documentation.", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 176595, "commit_id": "8bea55e3071ed71eab4fb6650a45f0cdf5c911d4", "repo": "networkx", "path": "networkx/generators/spectral_graph_forge.py", "file_name": "spectral_graph_forge.py", "fun_name": "spectral_graph_forge", "commit_message": "Remove `_mat_spect_approx` in favor of simpler procedure (#5624)\n\n* Replace _mat_spect_approx func internal usage.\r\n\r\n* Rm _mat_spect_approx helper function.", "code": "def spectral_graph_forge(G, alpha, transformation=\"identity\", seed=None):\n \n import numpy as np\n import scipy as sp\n import scipy.stats # call as sp.stats\n\n available_transformations = [\"identity\", \"modularity\"]\n alpha = np.clip(alpha, 0, 1)\n A = nx.to_numpy_array(G)\n n = A.shape[1]\n level = int(round(n * alpha))\n\n if transformation not in available_transformations:\n msg = f\"{transformation!r} is not a valid transformation. \"\n msg += f\"Transformations: {available_transformations}\"\n raise nx.NetworkXError(msg)\n\n K = np.ones((1, n)) @ A\n\n B = A\n if transformation == \"modularity\":\n B -= K.T @ K / K.sum()\n\n # Compute low-rank approximation of B\n evals, evecs = np.linalg.eigh(B)\n k = np.argsort(np.abs(evals))[::-1] # indices of evals in descending order\n evecs[:, k[np.arange(level, n)]] = 0 # set smallest eigenvectors to 0\n B = evecs @ np.diag(evals) @ evecs.T\n\n if transformation == \"modularity\":\n B += K.T @ K / K.sum()\n\n B = np.clip(B, 0, 1)\n np.fill_diagonal(B, 0)\n\n for i in range(n - 1):\n B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed)\n B[i + 1 :, i] = np.transpose(B[i, i + 1 :])\n\n H = nx.from_numpy_array(B)\n\n return H\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 293, "n_words": 169, "vocab_size": 105, "complexity": 5, "nloc": 30, "token_counts": 306, "n_ast_nodes": 494, "n_identifiers": 45, "random_cut": "def spectral_graph_forge(G, alpha, transformation=\"identity\", seed=None):\n \n import numpy as np\n import scipy as sp\n import scipy.stats # call as sp.stats\n\n available_transformations = [\"identity\", \"modularity\"]\n alpha = np.clip(alpha, 0, 1)\n A = nx.to_numpy_array(G)\n n = A.shape[1]\n level = int(round(n * alpha))\n\n if transformation not in available_transformations:\n msg = f\"{transformation!r} is not a valid transformation. 
\"\n msg += f\"Transformations: {available_transformations}\"\n raise nx.NetworkXError(msg)\n\n K = np.ones((1, n)) @ A\n\n B = A\n if transformation == \"modularity\":\n B -= K.T @ K / K.sum()\n\n # Compute low-rank approximation of B\n evals, evecs = np.", "d_id": 41994, "documentation": { "docstring": "Returns a random simple graph with spectrum resembling that of `G`\n\n This algorithm, called Spectral Graph Forge (SGF), computes the\n eigenvectors of a given graph adjacency matrix, filters them and\n builds a random graph with a similar eigenstructure.\n SGF has been proved to be particularly useful for synthesizing\n realistic social networks and it can also be used to anonymize\n graph sensitive data.\n\n Parameters\n ----------\n G : Graph\n alpha : float\n Ratio representing the percentage of eigenvectors of G to consider,\n values in [0,1].\n transformation : string, optional\n Represents the intended matrix linear transformation, possible values\n are 'identity' and 'modularity'\n seed : integer, random_state, or None (default)\n Indicator of numpy random number generation state.\n See :ref:`Randomness`.\n\n Returns\n -------\n H : Graph\n A graph with a similar eigenvector structure of the input one.\n\n Raises\n ------\n NetworkXError\n If transformation has a value different from 'identity' or 'modularity'\n\n Notes\n -----\n Spectral Graph Forge (SGF) generates a random simple graph resembling the\n global properties of the given one.\n It leverages the low-rank approximation of the associated adjacency matrix\n driven by the *alpha* precision parameter.\n SGF preserves the number of nodes of the input graph and their ordering.\n This way, nodes of output graphs resemble the properties of the input one\n and attributes can be directly mapped.\n\n It considers the graph adjacency matrices which can optionally be\n transformed to other symmetric real matrices (currently transformation\n options include *identity* and *modularity*).\n The *modularity* transformation, in the sense of Newman's modularity matrix\n allows the focusing on community structure related properties of the graph.\n\n SGF applies a low-rank approximation whose fixed rank is computed from the\n ratio *alpha* of the input graph adjacency matrix dimension.\n This step performs a filtering on the input eigenvectors similar to the low\n pass filtering common in telecommunications.\n\n The filtered values (after truncation) are used as input to a Bernoulli\n sampling for constructing a random adjacency matrix.\n\n References\n ----------\n .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, \"Spectral Graph Forge:\n Graph Generation Targeting Modularity\", IEEE Infocom, '18.\n https://arxiv.org/abs/1801.01715\n .. [2] M. 
Newman, \"Networks: an introduction\", Oxford university press,\n 2010\n\n Examples\n --------\n >>> G = nx.karate_club_graph()\n >>> H = nx.spectral_graph_forge(G, 0.3)\n >>>\n ", "n_words": 358, "vocab_size": 213, "n_whitespaces": 582, "language": "en" } }, { "id": 106051, "commit_id": "c78559cacbb0ca6e0bc8bfc313cc0359f8c23ead", "repo": "datasets", "path": "src/datasets/features/features.py", "file_name": "features.py", "fun_name": "encode_example", "commit_message": "Clean up remaining Main Classes docstrings (#5349)\n\nclean up docstrings", "code": "def encode_example(self, example):\n \n example = cast_to_python_objects(example)\n return encode_nested_example(self, example)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def encode_example(self, example):\n \n example = cast_to_python_objects(example)\n return encode_nested_exa", "d_id": 22260, "documentation": { "docstring": "\n Encode example into a format for Arrow.\n\n Args:\n example (`dict[str, Any]`):\n Data in a Dataset row.\n\n Returns:\n `dict[str, Any]`\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 85, "language": "en" } }, { "id": 271980, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_v1.py", "file_name": "training_v1.py", "fun_name": "_add_unique_metric_name", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _add_unique_metric_name(self, metric_name, metric_fn, output_index):\n \n # For multi-output models, prepend the output names to the metric name.\n if len(self.output_names) > 1:\n # If we're loading from an already-serialized model, we've already\n # prepended the output name, and we don't want to do it again.\n #\n # Alternatively, we may be receiving a stateless metric (e.g. the string\n # \"accuracy\") rather than a `Metric` object, in which case we want to\n # prepend the output name even if we are loading a serialized model.\n if not getattr(metric_fn, \"_from_serialized\", False):\n metric_name = \"%s_%s\" % (\n self.output_names[output_index],\n metric_name,\n )\n\n j = 1\n base_metric_name = metric_name\n while metric_name in self.metrics_names:\n metric_name = \"%s_%d\" % (base_metric_name, j)\n j += 1\n\n return metric_name\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 333, "n_words": 117, "vocab_size": 80, "complexity": 4, "nloc": 13, "token_counts": 75, "n_ast_nodes": 127, "n_identifiers": 11, "random_cut": "def _add_unique_metric_name(self, metric_name, metric_fn, output_index):\n \n # For multi-output models, prepend the output names to the metric name.\n if len(self.output_names) > 1:\n # If we're loading from an already-serialized model, we've already\n # prepended the output name, and we don't want to do it again.\n #\n # Alternatively, we may be receiving a stateless metric (e.g. 
the string\n # \"accuracy\") rather than a `Metric` object, in which case we want to\n # prepend the output name even if we are loading a serialized model.\n if not getattr(metric_fn", "d_id": 80920, "documentation": { "docstring": "Makes the metric name unique.\n\n If there are multiple outputs for which the metrics are calculated, the\n metric names have to be made unique by appending an integer.\n\n Args:\n metric_name: Metric name that corresponds to the metric specified by the\n user. For example: 'acc'.\n metric_fn: The Metric object.\n output_index: The index of the model output for which the metric name is\n being added.\n\n Returns:\n string, name of the model's unique metric name\n ", "n_words": 72, "vocab_size": 48, "n_whitespaces": 171, "language": "en" } }, { "id": 176253, "commit_id": "77c49c16e10693dbe566d20601b28dd2b1e8df03", "repo": "networkx", "path": "networkx/algorithms/components/strongly_connected.py", "file_name": "strongly_connected.py", "fun_name": "strongly_connected_components", "commit_message": "Fixing Tarjan's strongly connected components algorithm implementation to have O(|E|+|V|) time complexity instead of O(|V|^3). (#5288)\n\nPrevent unnecessary traversal of edges multiple times", "code": "def strongly_connected_components(G):\n \n preorder = {}\n lowlink = {}\n scc_found = set()\n scc_queue = []\n i = 0 # Preorder counter\n neighbors = {v: iter(G[v]) for v in G}\n for source in G:\n if source not in scc_found:\n queue = [source]\n while queue:\n v = queue[-1]\n if v not in preorder:\n i = i + 1\n preorder[v] = i\n done = True\n for w in neighbors[v]:\n if w not in preorder:\n queue.append(w)\n done = False\n break\n if done:\n lowlink[v] = preorder[v]\n for w in G[v]:\n if w not in scc_found:\n if preorder[w] > preorder[v]:\n lowlink[v] = min([lowlink[v], lowlink[w]])\n else:\n lowlink[v] = min([lowlink[v], preorder[w]])\n queue.pop()\n if lowlink[v] == preorder[v]:\n scc = {v}\n while scc_queue and preorder[scc_queue[-1]] > preorder[v]:\n k = scc_queue.pop()\n scc.add(k)\n scc_found.update(scc)\n yield scc\n else:\n scc_queue.append(v)\n\n\n@not_implemented_for(\"undirected\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"undirected\")", "n_ast_errors": 1, "ast_levels": 25, "n_whitespaces": 783, "n_words": 126, "vocab_size": 66, "complexity": 15, "nloc": 39, "token_counts": 257, "n_ast_nodes": 413, "n_identifiers": 23, "random_cut": "def strongly_connected_components(G):\n \n preorder = {}\n lowlink = {}\n scc_found = set()\n scc_queue = []\n i = 0 # Preorder counter\n neighbors = {v: iter(G[v]) for v in G}\n for source in G:\n if source not in scc_found:\n queue = [source]\n while queue:\n v = queue[-1]\n if v not in preorder:\n i = i + 1\n preorder[v] = i\n done = True\n for w in neighbors[v]:\n if w not in preorder:\n queue.append(w)\n done = False\n break\n if done:\n lowlink[v] = preorder[v]\n for w in G[v]:\n if w not in scc_found:\n if preorder[w] > preorder[v]:\n lowlink[v] = min([lowlink[v], lowlink[w]])\n else:\n ", "d_id": 41793, "documentation": { "docstring": "Generate nodes in strongly connected components of graph.\n\n Parameters\n ----------\n G : NetworkX Graph\n A directed graph.\n\n Returns\n -------\n comp : generator of sets\n A generator of sets of nodes, one for each strongly connected\n component of G.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is undirected.\n\n Examples\n --------\n Generate a sorted list of strongly connected components, largest 
first.\n\n >>> G = nx.cycle_graph(4, create_using=nx.DiGraph())\n >>> nx.add_cycle(G, [10, 11, 12])\n >>> [\n ... len(c)\n ... for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True)\n ... ]\n [4, 3]\n\n If you only want the largest component, it's more efficient to\n use max instead of sort.\n\n >>> largest = max(nx.strongly_connected_components(G), key=len)\n\n See Also\n --------\n connected_components\n weakly_connected_components\n kosaraju_strongly_connected_components\n\n Notes\n -----\n Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_.\n Nonrecursive version of algorithm.\n\n References\n ----------\n .. [1] Depth-first search and linear graph algorithms, R. Tarjan\n SIAM Journal of Computing 1(2):146-160, (1972).\n\n .. [2] On finding the strongly connected components in a directed graph.\n E. Nuutila and E. Soisalon-Soinen\n Information Processing Letters 49(1): 9-14, (1994)..\n\n ", "n_words": 162, "vocab_size": 118, "n_whitespaces": 324, "language": "en" } }, { "id": 259437, "commit_id": "75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/glm.py", "file_name": "glm.py", "fun_name": "score", "commit_message": "ENH migrate GLMs / TweedieRegressor to linear loss (#22548)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. Fan ", "code": "def score(self, X, y, sample_weight=None):\n \n # TODO: Adapt link to User Guide in the docstring, once\n # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged.\n #\n # Note, default score defined in RegressorMixin is R^2 score.\n # TODO: make D^2 a score function in module metrics (and thereby get\n # input validation and so on)\n raw_prediction = self._linear_predictor(X) # validates X\n # required by losses\n y = check_array(y, dtype=raw_prediction.dtype, order=\"C\", ensure_2d=False)\n\n if sample_weight is not None:\n # Note that _check_sample_weight calls check_array(order=\"C\") required by\n # losses.\n sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)\n\n base_loss = self._linear_loss.base_loss\n\n if not base_loss.in_y_true_range(y):\n raise ValueError(\n \"Some value(s) of y are out of the valid range of the loss\"\n f\" {self._base_loss.__name__}.\"\n )\n\n # Note that constant_to_optimal_zero is already multiplied by sample_weight.\n constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y))\n if sample_weight is not None:\n constant *= sample_weight.shape[0] / np.sum(sample_weight)\n\n # Missing factor of 2 in deviance cancels out.\n deviance = base_loss(\n y_true=y,\n raw_prediction=raw_prediction,\n sample_weight=sample_weight,\n n_threads=1,\n )\n y_mean = base_loss.link.link(np.average(y, weights=sample_weight))\n deviance_null = base_loss(\n y_true=y,\n raw_prediction=np.tile(y_mean, y.shape[0]),\n sample_weight=sample_weight,\n n_threads=1,\n )\n return 1 - (deviance + constant) / (deviance_null + constant)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 524, "n_words": 172, "vocab_size": 115, "complexity": 4, "nloc": 28, "token_counts": 209, "n_ast_nodes": 340, "n_identifiers": 33, "random_cut": "def score(self, X, y, sample_weight=None):\n \n # TODO: Adapt link to User Guide in the docstring, once\n # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged.\n #\n # Note, default score defined in RegressorMixin is R^2 score.\n # TODO: make D^2 a score function in module metrics 
(and thereby get\n # input validation and so on)\n raw_prediction = self._linear_predictor(X) # validates X\n # required by losses\n y = check_array(y, dtype=raw_prediction.dtype, order=\"C\", ensure_2d=False)\n\n if sample_weight is not None:\n # Note that _check_sample_weight calls check_array(order=\"C\") required by\n # losses.\n sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)\n\n base_loss = self._linear_loss.base_loss\n\n if not base_loss.in_y_true_range(y):\n raise ValueError(\n \"Some value(s) of y are out of the valid range of the loss\"\n f\" {self._base_loss.__name__}.\"\n )\n\n # Note that constant_to_optimal_zero is already multiplied by sample_weight.\n constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y))\n if sample_weight is not None:\n constant *= sample_weight.shape[0] / np.sum(sample_weight)\n\n # Missing factor of 2 in deviance cancels out.\n deviance = base_loss(\n y_true=y,\n raw_prediction=raw_prediction,\n sample_weight=sample_weight,\n n_threads=1,\n )\n y_mean = base_loss.link.link(np.average(y, weights=sample_weight))\n deviance_null = base_loss(\n ", "d_id": 75771, "documentation": { "docstring": "Compute D^2, the percentage of deviance explained.\n\n D^2 is a generalization of the coefficient of determination R^2.\n R^2 uses squared error and D^2 uses the deviance of this GLM, see the\n :ref:`User Guide `.\n\n D^2 is defined as\n :math:`D^2 = 1-\\\\frac{D(y_{true},y_{pred})}{D_{null}}`,\n :math:`D_{null}` is the null deviance, i.e. the deviance of a model\n with intercept alone, which corresponds to :math:`y_{pred} = \\\\bar{y}`.\n The mean :math:`\\\\bar{y}` is averaged by sample_weight.\n Best possible score is 1.0 and it can be negative (because the model\n can be arbitrarily worse).\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Test samples.\n\n y : array-like of shape (n_samples,)\n True values of target.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n D^2 of self.predict(X) w.r.t. 
y.\n ", "n_words": 127, "vocab_size": 89, "n_whitespaces": 304, "language": "en" } }, { "id": 9407, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py", "file_name": "upfirdn_2d.py", "fun_name": "upsample_conv_2d", "commit_message": "initialize ostec", "code": "def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):\n r\n\n assert isinstance(factor, int) and factor >= 1\n\n # Check weight shape.\n w = tf.convert_to_tensor(w)\n assert w.shape.rank == 4\n convH = w.shape[0].value\n convW = w.shape[1].value\n inC = _shape(w, 2)\n outC = _shape(w, 3)\n assert convW == convH\n\n # Setup filter kernel.\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * (gain * (factor ** 2))\n p = (k.shape[0] - factor) - (convW - 1)\n\n # Determine data dimensions.\n if data_format == 'NCHW':\n stride = [1, 1, factor, factor]\n output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW]\n num_groups = _shape(x, 1) // inC\n else:\n stride = [1, factor, factor, 1]\n output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC]\n num_groups = _shape(x, 3) // inC\n\n # Transpose weights.\n w = tf.reshape(w, [convH, convW, inC, num_groups, -1])\n w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2])\n w = tf.reshape(w, [convH, convW, -1, num_groups * inC])\n\n # Execute.\n x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format)\n return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl)\n\n#----------------------------------------------------------------------------\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 317, "n_words": 198, "vocab_size": 110, "complexity": 4, "nloc": 48, "token_counts": 387, "n_ast_nodes": 602, "n_identifiers": 34, "random_cut": "def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):\n r\n\n assert isinstance(factor, int) and factor >= 1\n\n # Check weight shape.", "d_id": 1607, "documentation": { "docstring": "Fused `upsample_2d()` followed by `tf.nn.conv2d()`.\n\n Padding is performed only once at the beginning, not between the operations.\n The fused op is considerably more efficient than performing the same calculation\n using standard TensorFlow ops. It supports gradients of arbitrary order.\n\n Args:\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`.\n Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).\n The default is `[1] * factor`, which corresponds to nearest-neighbor\n upsampling.\n factor: Integer upsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).\n impl: Name of the implementation to use. 
Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or\n `[N, H * factor, W * factor, C]`, and same datatype as `x`.\n ", "n_words": 158, "vocab_size": 114, "n_whitespaces": 358, "language": "en" } }, { "id": 7491, "commit_id": "7ec0cd13cf5e77d6fe68acbbeef9a7c694fc83c2", "repo": "ludwig", "path": "ludwig/contribs/aim.py", "file_name": "aim.py", "fun_name": "normalize_config", "commit_message": "Fixes to serialization, and update to allow set repo location. (#2367)\n\n* Updating AimCallback to add init for optional repo.\r\n* Fixed numpy serialization for config objects.\r\n* Removed print statements, added logging for progress tracker.", "code": "def normalize_config(config):\n \n return json.loads(json.dumps(config, cls=NumpyEncoder))\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def normalize_config(config):\n \n return json.loads(json.dumps(config, cls=NumpyEn", "d_id": 1218, "documentation": { "docstring": "Convert to json string and back again to remove numpy types.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 191243, "commit_id": "1d9deef4e99f52a08eed9aa973572c4567754f5a", "repo": "thumbor", "path": "thumbor/utils.py", "file_name": "utils.py", "fun_name": "ensure_srgb", "commit_message": "feat: Support AVIF format encoding (#1476)\n\n* feat: Support AVIF format encoding\r\n\r\n* Increase test coverage\r\n\r\n* test coverage for remaining uncovered lines\r\n\r\n* Add simple integration test\r\n\r\n* Add \"filters:format(avif)\" integration test\r\n\r\n* Gracefully handle AVIF encoding when codec unavailable\r\n\r\n* Don't pass quality=\"keep\" to AVIF save\r\n\r\n* Fix no-member pylint error", "code": "def ensure_srgb(img, srgb_profile=None):\n \n img_info = dict(img.info)\n icc = img_info.pop(\"icc_profile\", None)\n if not icc:\n return img\n\n if ImageCms is None:\n raise RuntimeError(\"ImageCms is required for color profile utilities\")\n\n if srgb_profile is not None:\n srgb_profile = ImageCms.ImageCmsProfile(srgb_profile)\n else:\n srgb_profile = DEFAULT_SRGB_PROFILE\n\n buf = BytesIO(icc)\n try:\n orig_profile = ImageCms.ImageCmsProfile(buf)\n color_space = orig_profile.profile.xcolor_space\n except (AttributeError, OSError, TypeError, ValueError):\n return None\n finally:\n buf.close()\n\n if color_space == \"RGB \":\n logger.debug(\"Returning img (RGB)\")\n return img\n\n if color_space not in (\"GRAY\", \"CMYK\"):\n # Other color spaces are rare, but best not to try to convert them.\n # Upstream understands a None return as meaning it should not\n # use it for the target encoder.\n logger.debug(\n \"Cannot convert to sRGB; color space = %s\",\n (color_space.strip()),\n )\n return None\n\n # Probably not possible to have an animated image with CMYK or GRAY icc\n # profile, but best leave it alone if we have one\n if getattr(img, \"is_animated\", False):\n return None\n\n if color_space == \"GRAY\":\n pil_mode = \"L\"\n else:\n pil_mode = \"CMYK\"\n\n logger.debug(\"Converting from %s to sRGB\", color_space)\n\n transform = ImageCms.ImageCmsTransform(\n orig_profile,\n srgb_profile,\n pil_mode,\n \"RGBA\",\n intent=ImageCms.INTENT_RELATIVE_COLORIMETRIC,\n flags=TRANSFORM_FLAGS,\n )\n\n src_im = Image.new(pil_mode, img.size, \"white\")\n 
src_im.paste(img)\n\n dst_im = Image.new(\"RGBA\", img.size, \"white\")\n dst_im.info = img_info\n dst_im = transform.apply(src_im, dst_im)\n dst_im = dst_im.convert(\"RGB\")\n dst_im.info = img_info\n return dst_im\n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 487, "n_words": 203, "vocab_size": 126, "complexity": 10, "nloc": 51, "token_counts": 268, "n_ast_nodes": 452, "n_identifiers": 42, "random_cut": "def ensure_srgb(img, srgb_profile=None):\n \n img_info = dict(img.info)\n icc = img_info.pop(\"icc_profile\", None)\n if not icc:\n return img\n\n if ImageCms is None:\n raise RuntimeError(\"ImageCms is required for color profile utilities\")\n\n if srgb_profile is not None:\n srgb_profile = ImageCms.ImageCmsProfile(srgb_profile)\n else:\n srgb_profile = DEFAULT_SRGB_PROFILE\n\n buf = BytesIO(icc)\n try:\n orig_profile = ImageCms.ImageCmsProfile(buf)\n color_space = orig_profile.profile.xcolor_space\n except (AttributeError, OSError, TypeError, ValueError):\n return None\n finally:\n buf.close()\n\n if color_space == \"RGB \":\n logger.debug(\"Returning img (RGB)\")\n return img\n\n if color_space not in (\"GRAY\", \"CMYK", "d_id": 46467, "documentation": { "docstring": "\n Ensures that an image either has no ICC profile (and so is implicitly\n sRGB) or has an sRGB color profile. If the image is sRGB, it is returned\n unchanged. If it has a CMYK or Gray color profile, this function will\n return an image converted to sRGB. Any color profiles in other color\n spaces will return None.\n ", "n_words": 57, "vocab_size": 41, "n_whitespaces": 76, "language": "en" } }, { "id": 270733, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_cast_single_input", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _cast_single_input(self, x):\n \n if self._should_cast_single_input(x):\n return tf.cast(x, self._compute_dtype_object)\n else:\n return x\n\n # _dtype used to be an attribute set in the constructor. 
We still expose it\n # because some clients still use it.\n # TODO(reedwm): Deprecate, then remove the _dtype property.", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 93, "n_words": 41, "vocab_size": 35, "complexity": 2, "nloc": 5, "token_counts": 31, "n_ast_nodes": 54, "n_identifiers": 7, "random_cut": "def _cast_single_input(self, x):\n \n if self._should_cast_single_input(x):\n return tf.cast(x, self._compute_dtype_obje", "d_id": 80550, "documentation": { "docstring": "Cast a single Tensor or TensorSpec to the compute dtype.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 25698, "commit_id": "f5a45de4a22fecacfcd5b2cd18c07e5cf95ce27c", "repo": "saleor", "path": "saleor/csv/utils/export.py", "file_name": "export.py", "fun_name": "queryset_in_batches", "commit_message": "Feature/gift cards post mvp (#7977)\n\n* Add giftCardBulkCreate mutation\r\n\r\n* Extend OrderFilter with giftCardUsed and giftCardBought fields\r\n\r\n* Allow exporting gift cards\r\n\r\n* Update the name of the email template for export\r\n\r\n* Add exportGiftCards muttaion\r\n\r\n* Add used gift card filter\r\n\r\n* Export only unused gift cards\r\n\r\n* Block mutations for expired gift cards (#8115)\r\n\r\n* Block mutations for expired gift cards\r\n\r\n* Block only resending and activating expired gift cards\r\n\r\n* Add celery schedule task for deactivate expired cards (#8100)\r\n\r\n* Add gift card section to invoice (#8148)\r\n\r\n* Add filtering on gift card events (#8090)\r\n\r\n* Add filtering on gift card events\r\n\r\n* Filter gift card events by orders instead of order_id\r\n\r\n* Update populatedb with gift card data (#8016)\r\n\r\n* Generate gift cards with events in populate db\r\n\r\n* Set product types kinds and add placeholder for gift card product\r\n\r\n* Add dedicated gift card product images\r\n\r\n* Change order of order emails (#8168)\r\n\r\n* Drop duplicated kind field from producttype in populatedb (#8224)\r\n\r\n* Change gift card display_code field to last_4 (#8445)\r\n\r\n* Change gift card display_code field to last_4\r\n\r\n* Change last4 to last4CodeChars\r\n\r\n* Fix github test env action configuration\r\n\r\n* Drop filtering gift cards by tag\r\n\r\n* Fix export gift card tags test\r\n\r\n* Re-add gift card tags query (#8412)\r\n\r\n* Update populatedb with gift card data (#8016)\r\n\r\n* Generate gift cards with events in populate db\r\n\r\n* Set product types kinds and add placeholder for gift card product\r\n\r\n* Add dedicated gift card product images\r\n\r\n* Add giftCardTags model\r\n\r\n* Add giftCardTags query\r\n\r\nCo-authored-by: Iga Karbowiak <40886528+IKarbowiak@users.noreply.github.com>\r\nCo-authored-by: IKarbowiak \r\n\r\n* Do not create EXPIRY_DATE_UPDATED gift card event when expiry date is not changed (#8882)\r\n\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", "code": "def queryset_in_batches(queryset):\n \n start_pk = 0\n\n while True:\n qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE]\n pks = list(qs.values_list(\"pk\", flat=True))\n\n if not pks:\n break\n\n yield pks\n\n start_pk = pks[-1]\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 78, "n_words": 23, "vocab_size": 18, "complexity": 3, "nloc": 9, "token_counts": 55, "n_ast_nodes": 94, "n_identifiers": 11, "random_cut": "def 
queryset_in_batches(queryset):\n \n start_pk = 0\n\n while True:\n qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE]\n pks = list(qs.values_list(\"pk\", flat=True))\n\n if not pks:\n break\n\n yield pks\n\n ", "d_id": 4915, "documentation": { "docstring": "Slice a queryset into batches.\n\n Input queryset should be sorted be pk.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 18, "language": "en" } }, { "id": 66831, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/update_returned_qty_in_pr_dn.py", "file_name": "update_returned_qty_in_pr_dn.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"purchase_receipt\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"purchase_receipt_item\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"delivery_note\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"delivery_note_item\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"stock_settings\")\n\n\tdef update_from_return_docs(doctype):\n\t\tfor return_doc in frappe.get_all(\n\t\t\tdoctype, filters={\"is_return\": 1, \"docstatus\": 1, \"return_against\": (\"!=\", \"\")}\n\t\t):\n\t\t\t# Update original receipt/delivery document from return\n\t\t\treturn_doc = frappe.get_cached_doc(doctype, return_doc.name)\n\t\t\ttry:\n\t\t\t\treturn_doc.update_prevdoc_status()\n\t\t\texcept OverAllowanceError:\n\t\t\t\tfrappe.db.rollback()\n\t\t\t\tcontinue\n\n\t\t\treturn_against = frappe.get_doc(doctype, return_doc.return_against)\n\t\t\treturn_against.update_billing_status()\n\t\t\tfrappe.db.commit()\n\n\t# Set received qty in stock uom in PR, as returned qty is checked against it\n\tfrappe.db.sql(\n\t\t\n\t)\n\n\tfor doctype in (\"Purchase Receipt\", \"Delivery Note\"):\n\t\tupdate_from_return_docs(doctype)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 56, "n_words": 81, "vocab_size": 63, "complexity": 2, "nloc": 14, "token_counts": 77, "n_ast_nodes": 297, "n_identifiers": 19, "random_cut": "def execute():\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"purchase_receipt\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"purchase_receipt_item\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"delivery_note\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"delivery_note_item\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"stock_settings\")\n\n\tdef update_from_return_docs(doctype):\n\t\tfor return_doc in frappe.get_all(\n\t\t\tdoctype, filters={\"is_return\": 1, \"docstatus\":", "d_id": 14353, "documentation": { "docstring": " update `tabPurchase Receipt Item`\n\t\tset received_stock_qty = received_qty * conversion_factor\n\t\twhere docstatus = 1 ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 286204, "commit_id": "38a53e5f43bccb716e6a6494605f97293077a679", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/discovery/discovery_controller.py", "file_name": "discovery_controller.py", "fun_name": "call_dex", "commit_message": "Combining commands and renaming others (#3000)\n\n* Combining commands and renaming others\r\n\r\nCombining commands with different sources but the same functionality. 
I also removed indications of source from command names\r\n\r\n* Fix tests and hugo\r\n\r\n* Test fixes\r\n\r\nCo-authored-by: james ", "code": "def call_dex(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"dex\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=check_positive,\n help=\"Number of records to display\",\n default=15,\n )\n parser.add_argument(\n \"-s\",\n \"--sort\",\n dest=\"sortby\",\n nargs=\"+\",\n help=\"Sort by given column. Default: Daily Volume [$]\",\n default=\"Daily Volume [$]\",\n )\n ns_parser = self.parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n if ns_parser:\n dappradar_view.display_top_dexes(\n sortby=\" \".join(ns_parser.sortby),\n limit=ns_parser.limit,\n export=ns_parser.export,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 379, "n_words": 55, "vocab_size": 47, "complexity": 2, "nloc": 36, "token_counts": 126, "n_ast_nodes": 205, "n_identifiers": 27, "random_cut": "def call_dex(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"dex\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=check_positive,\n help=\"Number of records to display\",\n default=15,\n )\n parser.add_argument(\n \"-s\",\n \"--sort\",\n dest=\"sortby\",\n nargs=\"+\",\n help=\"Sort by given column. Default: Daily Volume [$]\",\n default=\"Daily Volume [", "d_id": 85618, "documentation": { "docstring": "Process dex command\n Shows top decentralized exchanges [Source: https://dappradar.com/]\n Accepts --sort {Name,Daily Users,Daily Volume [$]}\n to sort by column\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 63, "language": "en" } }, { "id": 61419, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py", "file_name": "versioncontrol.py", "fun_name": "get_backend_for_dir", "commit_message": "upd; format", "code": "def get_backend_for_dir(self, location):\n # type: (str) -> Optional[VersionControl]\n \n vcs_backends = {}\n for vcs_backend in self._registry.values():\n repo_path = vcs_backend.get_repository_root(location)\n if not repo_path:\n continue\n logger.debug('Determine that %s uses VCS: %s',\n location, vcs_backend.name)\n vcs_backends[repo_path] = vcs_backend\n\n if not vcs_backends:\n return None\n\n # Choose the VCS in the inner-most directory. Since all repository\n # roots found here would be either `location` or one of its\n # parents, the longest path should have the most path components,\n # i.e. 
the backend representing the inner-most repository.\n inner_most_repo_path = max(vcs_backends, key=len)\n return vcs_backends[inner_most_repo_path]\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 257, "n_words": 86, "vocab_size": 67, "complexity": 4, "nloc": 13, "token_counts": 75, "n_ast_nodes": 126, "n_identifiers": 16, "random_cut": "def get_backend_for_dir(self, location):\n # type: (str) -> Optional[VersionControl]\n \n vcs_backends = {}\n for vcs_backend in self._registry.values():\n repo_path = vcs_backend.get_repository_root(location)\n if not repo_path:\n continue\n logger.debug('Determine that %s uses VCS: %s',\n location, vcs_backend.name)\n vcs_backends[repo_path] = vcs_backend\n\n if not vcs_backends:\n return None\n\n # Choose the VCS in the inner-most directory. Since all repository\n # roots found here would be eith", "d_id": 12562, "documentation": { "docstring": "\n Return a VersionControl object if a repository of that type is found\n at the given directory.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 44123, "commit_id": "602abe8394fafe7de54df7e73af56de848cdf617", "repo": "airflow", "path": "airflow/www/security.py", "file_name": "security.py", "fun_name": "has_access", "commit_message": "Remove `:type` lines now sphinx-autoapi supports typehints (#20951)\n\n* Remove `:type` lines now sphinx-autoapi supports typehints\r\n\r\nSince we have no updated sphinx-autoapi to a more recent version it\r\nsupports showing type hints in the documentation, so we don't need to\r\nhave the type hints _and_ the `:type` lines -- which is good, as the\r\nones in the doc strings are easy to get out of date!\r\n\r\nThe following settings have been set:\r\n\r\n`autodoc_typehints = 'description'` -- show types in description (where\r\nprevious `:type` used to show up)\r\n\r\n`autodoc_typehints_description_target = 'documented'` -- only link to\r\ntypes that are documented. (Without this we have some missing return\r\ntypes that aren't documented, and aren't linked to in our current python\r\nAPI docs, so this caused a build failure)\r\n\r\n`autodoc_typehints_format = 'short'` -- Shorten type hints where\r\npossible, i.e. `StringIO` instead of `io.StringIO`\r\n\r\n* Add argument type names to local spelling dictionary\r\n\r\nNow that we are using the type hints in the docs, sphinxcontrib-spelling\r\npicks them up as words to be checked, so we have to ignore them.\r\n\r\nI've chosen to add the provider specific ones to local dictionary files\r\nrather than the global, as for example, `mgmt` is an error in most\r\nplaces, but not in some of the Azure provider.", "code": "def has_access(self, action_name, resource_name, user=None) -> bool:\n \n if not user:\n user = g.user\n\n if user.is_anonymous:\n user.roles = self.get_user_roles(user)\n\n has_access = self._has_access(user, action_name, resource_name)\n # FAB built-in view access method. 
Won't work for AllDag access.\n if self.is_dag_resource(resource_name):\n if action_name == permissions.ACTION_CAN_READ:\n has_access |= self.can_read_dag(resource_name, user)\n elif action_name == permissions.ACTION_CAN_EDIT:\n has_access |= self.can_edit_dag(resource_name, user)\n\n return has_access\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 177, "n_words": 54, "vocab_size": 41, "complexity": 6, "nloc": 25, "token_counts": 96, "n_ast_nodes": 150, "n_identifiers": 17, "random_cut": "def has_access(self, action_name, resource_name, user=None) -> bool:\n \n if not user:\n user = g.user\n\n if ", "d_id": 8173, "documentation": { "docstring": "\n Verify whether a given user could perform a certain action\n (e.g can_read, can_write) on the given resource.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n ", "n_words": 48, "vocab_size": 30, "n_whitespaces": 105, "language": "en" } }, { "id": 118557, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/streamlit/forward_msg_cache.py", "file_name": "forward_msg_cache.py", "fun_name": "has_refs", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def has_refs(self) -> bool:\n \n return len(self._session_report_run_counts) > 0\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 6, "token_counts": 17, "n_ast_nodes": 30, "n_identifiers": 5, "random_cut": "def has_refs(self) -> bool:\n \n return len(self._session_report_run_counts) > 0\n", "d_id": 26290, "documentation": { "docstring": "True if this Entry has references from any AppSession.\n\n If not, it can be removed from the cache.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 195092, "commit_id": "2ef5586ed0d644abe18cd3ff45ef9fa01981e87c", "repo": "ParlAI", "path": "projects/director/director_agent.py", "file_name": "director_agent.py", "fun_name": "_reshape_to_record_metrics", "commit_message": "Added director agent and safety experiment commands. 
(#4602)\n\n* Added director agent and safety.\r\n\r\n* ran autoformat.sh", "code": "def _reshape_to_record_metrics(self, batch, losses, num_target_tokens, indices):\n \n val_id_shape = batch.valid_indices.shape\n reshaped_losses = torch.zeros(\n val_id_shape, device=losses.device, dtype=losses.dtype\n )\n reshaped_num_target_tokens = torch.zeros(\n val_id_shape, device=num_target_tokens.device, dtype=num_target_tokens.dtype\n )\n\n reshaped_losses[indices] = losses\n reshaped_num_target_tokens[indices] = num_target_tokens\n\n return (reshaped_losses, reshaped_num_target_tokens)\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 117, "n_words": 32, "vocab_size": 25, "complexity": 1, "nloc": 11, "token_counts": 79, "n_ast_nodes": 116, "n_identifiers": 15, "random_cut": "def _reshape_to_record_metrics(self, batch, losses, num_target_tokens, indices):\n \n val_id_shape = batch.", "d_id": 47191, "documentation": { "docstring": "\n MultitaskAgent shuffles and combines examples from both classifier and the\n generator tasks in a single batch. We compute losses only for those exs in the\n batch resulting in losses and num_target_tokens vectors that are smaller than\n the.\n\n This method reshapes the losses and num_target_tokens vectors back to the batch size. This is needed to record local metrics as the metrics need to be of batch size.\n\n Args:\n batch: batch being processed in this iteration.\n losses: classifier or generator loss vector (shape: b' X 1), where b' <= b.\n num_target_tokens: number of tokens in each examples for classification or generation tasks. (shape: b' X 1), where b' <= b.\n indices: indices of (either classification or generation) exs for which the loss was computed.\n\n Returns:\n A tuple of reshaped losses and num_target_tokens, both of shape: b X 1.\n ", "n_words": 136, "vocab_size": 85, "n_whitespaces": 248, "language": "en" } }, { "id": 198126, "commit_id": "158f441d4fae4bd406597a41ba8af142e5eeb593", "repo": "sympy", "path": "sympy/physics/continuum_mechanics/truss.py", "file_name": "truss.py", "fun_name": "add_support", "commit_message": "Truss class initialized and documentation added", "code": "def add_support(self, location, type):\n \n if location not in self._node_labels:\n raise ValueError(\"Support must be added on a known node\")\n\n else:\n self._supports[location] = type\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 22, "vocab_size": 22, "complexity": 2, "nloc": 5, "token_counts": 33, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def add_support(self, location, type):\n \n if location not in s", "d_id": 48805, "documentation": { "docstring": "\n This method adds a pinned or roller support at a particular node\n\n Parameters\n ==========\n\n location: String or Symbol\n Label of the Node at which support is added.\n\n type: String\n Type of the support being provided at the node.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> from sympy import symbols\n >>> t = Truss()\n >>> t.add_node('A', 0, 0)\n >>> t.add_node('B', 3, 0)\n >>> t.add_support('A', 'pinned')\n >>> t.supports\n {'A': 'pinned', 'B': 'none'}\n ", "n_words": 71, "vocab_size": 52, "n_whitespaces": 206, "language": "en" } }, { "id": 217444, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": 
"python3.10.4/Lib/ftplib.py", "file_name": "ftplib.py", "fun_name": "makeport", "commit_message": "add python 3.10.4 for windows", "code": "def makeport(self):\n \n sock = socket.create_server((\"\", 0), family=self.af, backlog=1)\n port = sock.getsockname()[1] # Get proper port\n host = self.sock.getsockname()[0] # Get proper host\n if self.af == socket.AF_INET:\n resp = self.sendport(host, port)\n else:\n resp = self.sendeprt(host, port)\n if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(self.timeout)\n return sock\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 132, "n_words": 43, "vocab_size": 30, "complexity": 3, "nloc": 11, "token_counts": 99, "n_ast_nodes": 159, "n_identifiers": 18, "random_cut": "def makeport(self):\n \n sock = socket.create_server((\"\", 0), family=self.af, backlog=1)\n port = sock.getsockname()[1] # Get proper port\n host = ", "d_id": 54789, "documentation": { "docstring": "Create a new socket and send a PORT command for it.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 320967, "commit_id": "21419c9ef5a90ea36a27afaf2503a57f8f9f8536", "repo": "qutebrowser", "path": "tests/unit/javascript/test_greasemonkey.py", "file_name": "test_greasemonkey.py", "fun_name": "test_all", "commit_message": "greasemonkey: Don't implicitly load scripts\n\nNeeded for #7245 and also seems like cleaner code.", "code": "def test_all(gm_manager):\n \n _save_script(test_gm_script, 'test.user.js')\n gm_manager.load_scripts()\n\n assert (gm_manager.all_scripts()[0].name ==\n \"qutebrowser test userscript\")\n\n\n@pytest.mark.parametrize(\"url, expected_matches\", [\n # included\n ('http://trolol.com/', 1),\n # neither included nor excluded\n ('http://aaaaaaaaaa.com/', 0),\n # excluded\n ('https://badhost.xxx/', 0),\n])", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"url, expected_matches\", [\n # included\n ('http://trolol.com/', 1),\n # neither included nor excluded\n ('http://aaaaaaaaaa.com/', 0),\n # excluded\n ('https://badhost.xxx/', 0),\n])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 69, "n_words": 30, "vocab_size": 25, "complexity": 1, "nloc": 5, "token_counts": 32, "n_ast_nodes": 109, "n_identifiers": 10, "random_cut": "def test_all(gm_manager):\n \n _save_script(test_gm_script, 'test.user.js')\n gm_manager.load_scripts()\n\n assert (gm_manager.all_scripts()[0].name ==\n \"qutebrowser test userscript\")\n\n\n@pytest.mark.parametrize(\"url, expected_matches\", [\n # included\n ('http://trolol.com/', 1),\n # neither included nor excluded\n ('http://aaaaaaaaaa.com/', 0)", "d_id": 117470, "documentation": { "docstring": "Test that a script gets read from file, parsed and returned.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 263700, "commit_id": "64ccb7aea824fbec57f7ed1bbe483ec486183c13", "repo": "pyinstaller", "path": "bootloader/waflib/Utils.py", "file_name": "Utils.py", "fun_name": "split_path_msys", "commit_message": "Bootloader: Building: Unpack waf's lib archive.\n\nDoing so makes it easier to modify. 
This is a temporary measure until the next\nwaf version is released (although I'm tempted to keep it since it's much more\nIDE completion friendly).", "code": "def split_path_msys(path):\n if path.startswith(('/', '\\\\')) and not path.startswith(('//', '\\\\\\\\')):\n global msysroot\n if not msysroot:\n msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1')\n msysroot = msysroot.strip()\n path = os.path.normpath(msysroot + os.sep + path)\n return split_path_win32(path)\n\n\nif sys.platform == 'cygwin':\n split_path = split_path_cygwin\nelif is_win32:\n if os.environ.get('MSYSTEM') and sys.executable.startswith('/'):\n split_path = split_path_msys\n else:\n split_path = split_path_win32\nelse:\n split_path = split_path_unix\nsplit_path.__doc__ = \n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 130, "n_words": 59, "vocab_size": 40, "complexity": 5, "nloc": 8, "token_counts": 88, "n_ast_nodes": 247, "n_identifiers": 24, "random_cut": "def split_path_msys(path):\n if path.startswith(('/', '\\\\')) and not path.startswith(('//', '\\\\\\\\')):\n global msysroot\n if not msysroot:\n msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1')\n msysroot = msysroot.strip()\n path = os.path.normpath(msysroot + os.sep + path)\n return split_path_win32(path)\n\n\nif sys.platform == 'cygwin':\n split_path = split_path", "d_id": 77443, "documentation": { "docstring": "\nSplits a path by / or \\\\; do not confuse this function with with ``os.path.split``\n\n:type path: string\n:param path: path to split\n:return: list of string\n", "n_words": 27, "vocab_size": 23, "n_whitespaces": 28, "language": "en" } }, { "id": 77104, "commit_id": "c136f461bc052cef362991458e1bd1fca37a3da9", "repo": "wagtail", "path": "wagtail/images/utils.py", "file_name": "utils.py", "fun_name": "find_image_duplicates", "commit_message": "Add duplicate detection to multiple image upload view\n\nAdd utility function to find an image's potential duplicates\n\nAdd logic to detect duplicates on multiple images upload view\n\nAdd template shown when a user is prompted to confirm a duplicate upload\n\nAdd client-side logic to confirm a duplicate upload\n\nAdd/update styles\n\nAdd tests for duplicate image uploads\n\nIndex Image file_hash field\n\nEnsure that a user can choose an image from duplicates returned by find_image_duplicates\n\nUse CSS classes instead of HTML elements to hide edit form on duplicate upload\n\nAdd ImagesPermissionPolicy helper to retrieve the permission policy dynamically\n\nThis allows test cases that override the base image model to pick up the corresponding permission policy, should they need it.\n\nRemove usage of sibling selector\n\nUse wagtail image templatetag to generate image\n\nRenamed ImagesPermissionPolicy to ImagesPermissionPolicyGetter\n\nFail loudly when setting permission policy and a wromg image model is provided\n\nAdd decorator to disconnect a signal's receiver during a test execution and use it in get_image_model tests\n\nImprove warning message on duplicate upload in multiple upload view\n\nShow matching form when confirming a duplicate upload", "code": "def find_image_duplicates(image, user, permission_policy):\n \n\n instances = permission_policy.instances_user_has_permission_for(user, \"choose\")\n return instances.exclude(pk=image.pk).filter(file_hash=image.file_hash)\n", "url": 
"https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 19, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 10, "random_cut": "def find_image_duplicates(image, user, permission_policy):\n \n\n instances = permi", "d_id": 16631, "documentation": { "docstring": "\n Finds all the duplicates of a given image.\n To keep things simple, two images are considered to be duplicates if they have the same `file_hash` value.\n This function also ensures that the `user` can choose one of the duplicate images returned (if any).\n ", "n_words": 43, "vocab_size": 37, "n_whitespaces": 56, "language": "en" } }, { "id": 20562, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/core.py", "file_name": "core.py", "fun_name": "_trim_arity", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _trim_arity(func, maxargs=2):\n \n global _trim_arity_call_line\n\n if func in _single_arg_builtins:\n return lambda s, l, t: func(t)\n\n limit = 0\n found_arity = False\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 21, "vocab_size": 20, "complexity": 3, "nloc": 19, "token_counts": 100, "n_ast_nodes": 56, "n_identifiers": 10, "random_cut": "def _trim_arity(func, maxargs=2):\n \n global _trim_arity_call_line\n\n if f", "d_id": 3427, "documentation": { "docstring": "decorator to trim function calls to match the arity of the target", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 286407, "commit_id": "09f753da1c2a2f03c41fe6a3ca2eb79f6ea58995", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/overview/overview_controller.py", "file_name": "overview_controller.py", "fun_name": "call_exmarkets", "commit_message": "More Fixes to Crypto + key sort (#3244)\n\n* fix #3095 - autocomplete and command working + key sort\r\n\r\n* fix #3056\r\n\r\n* fix [Bug] bugs #3048\r\n\r\n* fix [Bug] bug #3017\r\n\r\n* sort -> sortby, not ascend, tests\r\n\r\n* fix my goof ups\r\n\r\nCo-authored-by: james ", "code": "def call_exmarkets(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"exmarkets\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-e\",\n \"--exchange\",\n help=\"Identifier of exchange e.g for Binance Exchange -> binance\",\n dest=\"exchange\",\n default=\"binance\",\n type=str,\n )\n\n parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=check_positive,\n help=\"display N number records\",\n default=10,\n )\n\n parser.add_argument(\n \"-s\",\n \"--sortby\",\n dest=\"sortby\",\n type=str,\n help=\"Sort by given column. 
Default: reported_volume_24h_share\",\n default=\"reported_volume_24h_share\",\n choices=coinpaprika_model.EXMARKETS_FILTERS,\n )\n\n parser.add_argument(\n \"--descend\",\n action=\"store_false\",\n help=\"Flag to sort in descending order (lowest first)\",\n dest=\"descend\",\n default=False,\n )\n\n parser.add_argument(\n \"-u\",\n \"--urls\",\n dest=\"urls\",\n action=\"store_true\",\n help=,\n default=False,\n )\n\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-e\")\n\n ns_parser = self.parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n if ns_parser:\n coinpaprika_view.display_exchange_markets(\n exchange=ns_parser.exchange,\n limit=ns_parser.limit,\n export=ns_parser.export,\n sortby=ns_parser.sortby,\n ascend=not ns_parser.descend,\n links=ns_parser.urls,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 728, "n_words": 101, "vocab_size": 85, "complexity": 4, "nloc": 69, "token_counts": 241, "n_ast_nodes": 388, "n_identifiers": 36, "random_cut": "def call_exmarkets(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"exmarkets\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-e\",\n \"--exchange\",\n help=\"Identifier of exchange e.g for Binance Exchange -> binance\",\n dest=\"exchange\",\n default=\"binance\",\n type=str,\n )\n\n parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=check_positive,\n help=\"display N number records\",\n default=10,\n )\n\n parser.add_argument(\n \"-s\",\n \"--sortby\",\n dest=\"sortby\",\n type=str,\n help=\"Sort by given column. Default: reported_volume_24h_share\",\n default=\"reported_volume_24h_share\",\n choices=coinpaprika_model.EXMARKETS_FILTERS,\n )\n\n parser.add_argument(\n \"--descend\",\n action=\"store_false\",\n help=\"Flag to sort in descending order (lowest first)\",\n dest=\"descend\",\n default=False,\n )\n\n parser.add_argument(\n \"-u\",\n \"--urls\",\n dest=\"urls\",\n action=\"store_true\",\n help=,\n default=False,\n )\n\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-e\")\n\n ns_pa", "d_id": 85794, "documentation": { "docstring": "Process exmarkets commandGet all exchange markets found for given exchange\n You can display only N number of records with --limit parameter.\n You can sort data by pair, base_currency_name, quote_currency_name, market_url, category,\n reported_volume_24h_share, trust_score --sortby parameter and also with --descend flag to sort descending.\n You can use additional flag --urls to see urls for each market\n Displays:\n exchange_id, pair, base_currency_name, quote_currency_name, market_url,\n category, reported_volume_24h_share, trust_score,Flag to show urls. 
If you will use that flag you will see only:\n exchange, pair, trust_score, market_url columns", "n_words": 82, "vocab_size": 59, "n_whitespaces": 209, "language": "en" } }, { "id": 292803, "commit_id": "684f01f4664ad490a314ae983194c0f439445a16", "repo": "core", "path": "tests/components/lcn/test_cover.py", "file_name": "test_cover.py", "fun_name": "test_unload_config_entry", "commit_message": "Add tests for LCN cover platform (#64832)", "code": "async def test_unload_config_entry(hass, entry, lcn_connection):\n \n await hass.config_entries.async_unload(entry.entry_id)\n assert hass.states.get(\"cover.cover_outputs\").state == STATE_UNAVAILABLE\n assert hass.states.get(\"cover.cover_relays\").state == STATE_UNAVAILABLE\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 27, "n_words": 15, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 47, "n_ast_nodes": 80, "n_identifiers": 11, "random_cut": "async def test_unload_config_entry(hass, entry, lcn_connection):\n \n await hass.config_entries.async_unload(entry.entry_id)\n assert hass.states.get(\"cover.cover_outputs\").state == STATE_UNAVAILABLE\n assert hass.s", "d_id": 91872, "documentation": { "docstring": "Test the cover is removed when the config entry is unloaded.", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 156029, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "topk", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def topk(self, k, axis=-1, split_every=None):\n \n from dask.array.reductions import topk\n\n return topk(self, k, axis=axis, split_every=split_every)\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 40, "n_ast_nodes": 58, "n_identifiers": 8, "random_cut": "def topk(self, k, axis=-1, split_every=None):\n \n from dask.array.reductions import topk\n\n return topk(self, k, axis=axis, split_every", "d_id": 36511, "documentation": { "docstring": "The top k elements of an array.\n\n See :func:`dask.array.topk` for docstring.\n\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 25, "language": "en" } }, { "id": 240287, "commit_id": "c95b4fa4388f29e50b6966e45c94c5980013a01d", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_figure.py", "file_name": "_figure.py", "fun_name": "for_each_ternary", "commit_message": "type annotations for chainable Figure methods", "code": "def for_each_ternary(self, fn, selector=None, row=None, col=None) -> \"Figure\":\n \n for obj in self.select_ternaries(selector=selector, row=row, col=col):\n fn(obj)\n\n return self\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 49, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 32, "token_counts": 48, "n_ast_nodes": 73, "n_identifiers": 8, "random_cut": "def for_each_ternary(self, fn, selector=None, row=None, col=None) -> \"Figure\":\n \n for obj in self.select_ternaries(selector=selector, row=row, col=col):\n fn(obj)\n\n r", "d_id": 68398, "documentation": { "docstring": "\n Apply a function to all ternary objects that satisfy 
the\n specified selection criteria\n\n Parameters\n ----------\n fn:\n Function that inputs a single ternary object.\n selector: dict, function, or None (default None)\n Dict to use as selection criteria.\n ternary objects will be selected if they contain\n properties corresponding to all of the dictionary's keys, with\n values that exactly match the supplied values. If None\n (the default), all ternary objects are selected. If a\n function, it must be a function accepting a single argument and\n returning a boolean. The function will be called on each\n ternary and those for which the function returned True will\n be in the selection.\n row, col: int or None (default None)\n Subplot row and column index of ternary objects to select.\n To select ternary objects by row and column, the Figure\n must have been created using plotly.subplots.make_subplots.\n If None (the default), all ternary objects are selected.\n Returns\n -------\n self\n Returns the Figure object that the method was called on\n ", "n_words": 161, "vocab_size": 95, "n_whitespaces": 404, "language": "en" } }, { "id": 63798, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py", "file_name": "__init__.py", "fun_name": "call", "commit_message": "upd; format", "code": "def call(self, *args, **kwargs):\n \n warnings.warn(\n \"'call()' method is deprecated. \" + \"Use '__call__()' instead\",\n DeprecationWarning,\n )\n return self.__call__(*args, **kwargs)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 69, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 6, "token_counts": 34, "n_ast_nodes": 58, "n_identifiers": 8, "random_cut": "def call(self, *args, **kwargs):\n \n warnings.warn(\n ", "d_id": 13507, "documentation": { "docstring": "Use ``__call__`` instead because this method is deprecated.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 269121, "commit_id": "2d7dc6080f0824200e317f255e3290da60e0f98a", "repo": "keras", "path": "keras/distribute/distributed_training_utils_v1.py", "file_name": "distributed_training_utils_v1.py", "fun_name": "validate_per_replica_inputs", "commit_message": "Rework a test to avoid instantiating DistributedValues directly.\n\nPiperOrigin-RevId: 438824819", "code": "def validate_per_replica_inputs(distribution_strategy, x):\n \n # Convert the inputs and targets into a list of PerReplica objects.\n per_replica_list = tf.nest.flatten(x)\n x_values_list = []\n for x in per_replica_list:\n # At this point x should contain only tensors.\n x_values = distribution_strategy.unwrap(x)\n for value in x_values:\n if not tf.is_tensor(value):\n raise ValueError('Dataset input to the model should be tensors instead '\n 'they are of type {}'.format(type(value)))\n\n if not tf.executing_eagerly():\n # Validate that the shape and dtype of all the elements in x are the same.\n validate_all_tensor_shapes(x, x_values)\n validate_all_tensor_types(x, x_values)\n\n x_values_list.append(x_values[0])\n return x_values_list\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 156, "n_words": 86, "vocab_size": 64, "complexity": 5, "nloc": 14, "token_counts": 94, "n_ast_nodes": 159, "n_identifiers": 19, "random_cut": "def validate_per_replica_inputs(distribution_strategy, 
x):\n \n # Convert the inputs and targets into a list of PerReplica objects.\n per_replica_list = tf.nest.flatten(x)\n x_values_list = []\n for x in per_replica_list:\n # At this point x should contain only tensors.\n x_values = distribution_strategy.unwrap(x)\n for value in x_values:\n if not tf.is_tensor(value):\n raise ValueError('Dataset input to the model should be tensors instead '\n 'they are of type {}'.format(type(value)))\n\n if not tf.executing_eagerly():\n # Validate that the shape and dtype of all the elements in x are ", "d_id": 79913, "documentation": { "docstring": "Validates PerReplica dataset input list.\n\n Args:\n distribution_strategy: The current DistributionStrategy used to call\n `fit`, `evaluate` and `predict`.\n x: A list of PerReplica objects that represent the input or\n target values.\n\n Returns:\n List containing the first element of each of the PerReplica objects in\n the input list.\n\n Raises:\n ValueError: If any of the objects in the `per_replica_list` is not a tensor.\n\n ", "n_words": 60, "vocab_size": 44, "n_whitespaces": 89, "language": "en" } }, { "id": 313595, "commit_id": "a0974e0c7297537149985f93544dd6f8ed8cfded", "repo": "core", "path": "homeassistant/components/lifx/light.py", "file_name": "light.py", "fun_name": "get_mac_addr", "commit_message": "Refactor LIFX discovery to prevent duplicate discovery response handling (#72213)\n\n* Partially revert #70458 and allow duplicate LIFX discoveries\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Only process one discovery at a time\r\n\r\n* Revert all LIFX duplicate/inflight discovery checks\r\n\r\nAlso remember LIFX Switches and do as little processing for them\r\nas possible.\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Bump aiolifx version to support the latest LIFX devices\r\n\r\nLIFX added 22 new product definitions to their public product\r\nlist at the end of January and those new products are defined in\r\naiolifx v0.8.1, so bump the dependency version.\r\n\r\nAlso switched to testing for relays instead of maintaining a\r\nseperate list of switch product IDs.\r\n\r\nFixes #72894.\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Refactor LIFX discovery to better handle duplicate responses\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Update clear_inflight_discovery with review suggestion\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Move the existing entity check to before the asyncio lock\r\n\r\nSigned-off-by: Avi Miller \r\n\r\n* Bail out of discovery early and if an entity was created\r\n\r\nAlso ensure that the entity always has a unique ID even if the bulb was\r\nnot successfully discovered.\r\n\r\nSigned-off-by: Avi Miller \r\n\r\nCo-authored-by: J. 
Nick Koston ", "code": "def get_mac_addr(self):\n \n if (\n self.bulb.host_firmware_version\n and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW\n ):\n octets = [int(octet, 16) for octet in self.mac_addr.split(\":\")]\n octets[5] = (octets[5] + 1) % 256\n return \":\".join(f\"{octet:02x}\" for octet in octets)\n return self.mac_addr\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 116, "n_words": 33, "vocab_size": 28, "complexity": 5, "nloc": 9, "token_counts": 78, "n_ast_nodes": 131, "n_identifiers": 12, "random_cut": "def get_mac_addr(self):\n \n if (\n self.bulb.host_firmware_version\n and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW\n ):\n octets = [int(octet, 16) for octet in self.mac_addr.split(\":\")]\n octets[5] = (octets[5] + 1) % 256\n return \":\".join(f\"{octet:02x}\" for octet in octets)\n return self.ma", "d_id": 112213, "documentation": { "docstring": "Increment the last byte of the mac address by one for FW>3.70.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 181595, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/driver_tests.py", "file_name": "driver_tests.py", "fun_name": "test_positive_integer_or_none_4", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_positive_integer_or_none_4():\n \n assert positive_integer_or_none('none') is None\n assert positive_integer_or_none('None') is None\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 19, "n_words": 10, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 38, "n_identifiers": 2, "random_cut": "def test_positive_integer_or_none_4():\n \n assert positive_integer_or_none('none') is None", "d_id": 43384, "documentation": { "docstring": "Assert that the TPOT CLI interface's positive_integer_or_none parsing return None when value is string 'None' or 'none'.", "n_words": 17, "vocab_size": 17, "n_whitespaces": 16, "language": "en" } }, { "id": 223511, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "get_local_part", "commit_message": "add python 3.10.4 for windows", "code": "def get_local_part(value):\n \n local_part = LocalPart()\n leader = None\n if value[0] in CFWS_LEADER:\n leader, value = get_cfws(value)\n if not value:\n raise errors.HeaderParseError(\n \"expected local-part but found '{}'\".format(value))\n try:\n token, value = get_dot_atom(value)\n except errors.HeaderParseError:\n try:\n token, value = get_word(value)\n except errors.HeaderParseError:\n if value[0] != '\\\\' and value[0] in PHRASE_ENDS:\n raise\n token = TokenList()\n if leader is not None:\n token[:0] = [leader]\n local_part.append(token)\n if value and (value[0]=='\\\\' or value[0] not in PHRASE_ENDS):\n obs_local_part, value = get_obs_local_part(str(local_part) + value)\n if obs_local_part.token_type == 'invalid-obs-local-part':\n local_part.defects.append(errors.InvalidHeaderDefect(\n \"local-part is not dot-atom, quoted-string, or obs-local-part\"))\n else:\n local_part.defects.append(errors.ObsoleteHeaderDefect(\n \"local-part is not a dot-atom (contains CFWS)\"))\n 
local_part[0] = obs_local_part\n try:\n local_part.value.encode('ascii')\n except UnicodeEncodeError:\n local_part.defects.append(errors.NonASCIILocalPartDefect(\n \"local-part contains non-ASCII characters)\"))\n return local_part, value\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 361, "n_words": 112, "vocab_size": 71, "complexity": 13, "nloc": 35, "token_counts": 222, "n_ast_nodes": 377, "n_identifiers": 26, "random_cut": "def get_local_part(value):\n \n local_part = LocalPart()\n leader = None\n if value[0] in CFWS_LEADER:\n leader, value = get_cfws(value)\n if not value:\n raise errors.HeaderParseError(\n \"expected local-part but found '{}'\".format(value))\n try:\n token, value = get_dot_atom(value)\n except errors.HeaderParseError:\n try:\n token, value = get_word(value)\n except errors.HeaderParseError:\n if value[0] != '\\\\' and value[0] in PHRASE_ENDS:\n raise\n token = TokenList()\n if leader is not None:\n token[:0] = [leader]\n local_part.append(token)\n if value and (value[0]=='\\\\' or value[0] not in PHRASE_ENDS):\n obs_local_part, value = get", "d_id": 56941, "documentation": { "docstring": " local-part = dot-atom / quoted-string / obs-local-part\n\n ", "n_words": 7, "vocab_size": 6, "n_whitespaces": 11, "language": "en" } }, { "id": 32803, "commit_id": "f9a0008d2d3082a665f711b24f5314e4a8205fab", "repo": "transformers", "path": "tests/test_feature_extraction_common.py", "file_name": "test_feature_extraction_common.py", "fun_name": "prepare_video_inputs", "commit_message": "Add VideoMAE (#17821)\n\n* First draft\r\n\r\n* Add VideoMAEForVideoClassification\r\n\r\n* Improve conversion script\r\n\r\n* Add VideoMAEForPreTraining\r\n\r\n* Add VideoMAEFeatureExtractor\r\n\r\n* Improve VideoMAEFeatureExtractor\r\n\r\n* Improve docs\r\n\r\n* Add first draft of model tests\r\n\r\n* Improve VideoMAEForPreTraining\r\n\r\n* Fix base_model_prefix\r\n\r\n* Make model take pixel_values of shape (B, T, C, H, W)\r\n\r\n* Add loss computation of VideoMAEForPreTraining\r\n\r\n* Improve tests\r\n\r\n* Improve model testsé\r\n\r\n* Make all tests pass\r\n\r\n* Add VideoMAE to main README\r\n\r\n* Add tests for VideoMAEFeatureExtractor\r\n\r\n* Add integration test\r\n\r\n* Improve conversion script\r\n\r\n* Rename patch embedding class\r\n\r\n* Remove VideoMAELayer from init\r\n\r\n* Update design of patch embeddings\r\n\r\n* Improve comments\r\n\r\n* Improve conversion script\r\n\r\n* Improve conversion script\r\n\r\n* Add conversion of pretrained model\r\n\r\n* Add loss verification of pretrained model\r\n\r\n* Add loss verification of unnormalized targets\r\n\r\n* Add integration test for pretraining model\r\n\r\n* Apply suggestions from code review\r\n\r\n* Fix bug to make feature extractor resize only shorter edge\r\n\r\n* Address more comments\r\n\r\n* Improve normalization of videos\r\n\r\n* Add doc examples\r\n\r\n* Move constants to dedicated script\r\n\r\n* Remove scripts\r\n\r\n* Transfer checkpoints, fix docs\r\n\r\n* Update script\r\n\r\n* Update image mean and std\r\n\r\n* Fix doc tests\r\n\r\n* Set return_tensors to NumPy by default\r\n\r\n* Revert the previous change\r\n\r\nCo-authored-by: Niels Rogge ", "code": "def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False):\n \n\n assert not (numpify and torchify), \"You cannot specify both numpy and PyTorch tensors at the same time\"\n\n video_inputs = []\n for i in 
range(feature_extract_tester.batch_size):\n if equal_resolution:\n width = height = feature_extract_tester.max_resolution\n else:\n width, height = np.random.choice(\n np.arange(feature_extract_tester.min_resolution, feature_extract_tester.max_resolution), 2\n )\n video = prepare_video(\n feature_extract_tester=feature_extract_tester,\n width=width,\n height=height,\n numpify=numpify,\n torchify=torchify,\n )\n video_inputs.append(video)\n\n return video_inputs\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 238, "n_words": 57, "vocab_size": 49, "complexity": 4, "nloc": 19, "token_counts": 111, "n_ast_nodes": 169, "n_identifiers": 20, "random_cut": "def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False):\n \n\n assert not (numpify and torchify), \"You cannot specify both numpy and PyTorch tensors at the same time\"\n\n video_inputs = []\n for i in range(feature_extract_te", "d_id": 5984, "documentation": { "docstring": "This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if\n one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True.\n\n One can specify whether the videos are of the same resolution or not.\n ", "n_words": 51, "vocab_size": 30, "n_whitespaces": 60, "language": "en" } }, { "id": 186372, "commit_id": "eeca208c8f57304590ac1af80b496e61021aaa45", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/configurator.py", "file_name": "configurator.py", "fun_name": "_verify_no_matching_http_header", "commit_message": "Various clean-ups in certbot-apache. Use f-strings. (#9132)\n\n* Various clean-ups in certbot-apache. 
Use f-strings.\r\n\r\n* Smaller tweaks", "code": "def _verify_no_matching_http_header(self, ssl_vhost, header_substring):\n \n header_path = self.parser.find_dir(\"Header\", None,\n start=ssl_vhost.path)\n if header_path:\n # \"Existing Header directive for virtualhost\"\n pat = '(?:[ \"]|^)(%s)(?:[ \"]|$)' % (header_substring.lower())\n for match in header_path:\n if re.search(pat, self.parser.aug.get(match).lower()):\n raise errors.PluginEnhancementAlreadyPresent(\n \"Existing %s header\" % header_substring)\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 191, "n_words": 38, "vocab_size": 32, "complexity": 4, "nloc": 9, "token_counts": 79, "n_ast_nodes": 130, "n_identifiers": 18, "random_cut": "def _verify_no_matching_http_header(self, ssl_vhost, header_substring):\n \n header_path = self.parser.find_dir(\"Header\", None,\n start=ssl_vhost.path)\n if header_path:\n # \"Existing Header directive for virtualhost\"\n pat = '(?:[ \"]|^)(%s)(?:[ \"]|$)' % (header_substring.lower())\n for match in header_path:\n if re.search(pat, self.parser.aug.get(matc", "d_id": 45468, "documentation": { "docstring": "Checks to see if there is an existing Header directive that\n contains the string header_substring.\n\n :param ssl_vhost: vhost to check\n :type vhost: :class:`~certbot_apache._internal.obj.VirtualHost`\n\n :param header_substring: string that uniquely identifies a header.\n e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.\n :type str\n\n :returns: boolean\n :rtype: (bool)\n\n :raises errors.PluginEnhancementAlreadyPresent When header\n header_substring exists\n\n ", "n_words": 46, "vocab_size": 41, "n_whitespaces": 139, "language": "en" } }, { "id": 120281, "commit_id": "667d63aa2d4fbf7c9da73aab0e24c5c4c33cb5ba", "repo": "jax", "path": "jax/_src/numpy/lax_numpy.py", "file_name": "lax_numpy.py", "fun_name": "indices", "commit_message": "replace int with operator.index part2\n\nThis change align the behavior of `ravel_multi_index`, `split` and `indices` to their `numpy` counterparts.\nAlso ensure size argument of `nonzero` should be integer.\nThe changes with `*space` are only simplification", "code": "def indices(dimensions, dtype=int32, sparse=False):\n dimensions = tuple(\n core.concrete_or_error(operator.index, d, \"dimensions argument of jnp.indices\")\n for d in dimensions)\n N = len(dimensions)\n output = []\n s = dimensions\n for i, dim in enumerate(dimensions):\n idx = lax.iota(dtype, dim)\n if sparse:\n s = (1,)*i + (dim,) + (1,)*(N - i - 1)\n output.append(lax.broadcast_in_dim(idx, s, (i,)))\n if sparse:\n return tuple(output)\n return stack(output, 0) if output else array([], dtype=dtype)\n\n\n_TOTAL_REPEAT_LENGTH_DOC = \n\n\n@_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 99, "n_words": 67, "vocab_size": 50, "complexity": 6, "nloc": 15, "token_counts": 141, "n_ast_nodes": 237, "n_identifiers": 30, "random_cut": "def indices(dimensions, dtype=int32, sparse=False):\n dimensions = tuple(\n core.concrete_or_error(operator.index, d, \"dimensions argument of jnp.indices\")\n for d in dimensions)\n N = len(dimensions)\n output = []\n s = dimensio", "d_id": 26809, "documentation": { "docstring": "\\\nJax adds the optional `total_repeat_length` parameter which specifies the 
total\nnumber of repeat, and defaults to sum(repeats). It must be specified for repeat\nto be compilable. If `sum(repeats)` is larger than the specified\n`total_repeat_length` the remaining values will be discarded. In the case of\n`sum(repeats)` being smaller than the specified target length, the final value\nwill be repeated.\n", "n_words": 59, "vocab_size": 42, "n_whitespaces": 52, "language": "en" } }, { "id": 251914, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/test_tcp.py", "file_name": "test_tcp.py", "fun_name": "test_open_connection", "commit_message": "make it black!", "code": "def test_open_connection(tctx):\n \n assert Playbook(tcp.TCPLayer(tctx, True)) << OpenConnection(tctx.server)\n\n tctx.server.timestamp_start = 1624544785\n assert Playbook(tcp.TCPLayer(tctx, True)) << None\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 27, "n_words": 15, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 48, "n_ast_nodes": 74, "n_identifiers": 8, "random_cut": "def test_open_connection(tctx):\n \n assert Playbook(tcp.TCPLayer(tctx,", "d_id": 73887, "documentation": { "docstring": "\n If there is no server connection yet, establish one,\n because the server may send data first.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 26, "language": "en" } }, { "id": 264900, "commit_id": "4c51dbba809b6b199a96da30f32f4dd3cd6ea6ed", "repo": "netbox", "path": "netbox/dcim/api/serializers.py", "file_name": "serializers.py", "fun_name": "get_connected_endpoints", "commit_message": "Update connected_endpoint serializer field to support multiple objects", "code": "def get_connected_endpoints(self, obj):\n \n endpoints = obj.connected_endpoints\n if endpoints:\n serializer = get_serializer_for_model(endpoints[0], prefix='Nested')\n context = {'request': self.context['request']}\n return serializer(endpoints, many=True, context=context).data\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 56, "n_ast_nodes": 92, "n_identifiers": 11, "random_cut": "def get_connected_endpoints(self, obj):\n \n endpoints = obj.connected_endpoints\n if endpoints:\n serializer = get_serializer_for_model(endpoints[0], prefix='Nested')\n context = {'request': self.context['request']}\n ", "d_id": 77903, "documentation": { "docstring": "\n Return the appropriate serializer for the type of connected object.\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 176485, "commit_id": "f6755ffa00211b523c6c0bec5398bc6c3c43c8b1", "repo": "networkx", "path": "networkx/algorithms/tree/tests/test_operations.py", "file_name": "test_operations.py", "fun_name": "test_basic", "commit_message": "Update black (#5438)\n\n* CI: sync up black dev requirements version with precommit\r\n\r\n* Run black\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def test_basic(self):\n \n trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)]\n actual = nx.join(trees)\n expected = nx.full_rary_tree(2, 2**3 - 1)\n assert nx.is_isomorphic(actual, expected)\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 25, "vocab_size": 22, 
"complexity": 2, "nloc": 5, "token_counts": 64, "n_ast_nodes": 99, "n_identifiers": 11, "random_cut": "def test_basic(self):\n \n trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)]\n actual = nx.join(trees)\n expected = nx.full_rary_tree(2, 2**3 - 1)\n ", "d_id": 41930, "documentation": { "docstring": "Tests for joining multiple subtrees at a root node.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 206949, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_changelist/tests.py", "file_name": "tests.py", "fun_name": "test_result_list_html", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_result_list_html(self):\n \n new_parent = Parent.objects.create(name=\"parent\")\n new_child = Child.objects.create(name=\"name\", parent=new_parent)\n request = self.factory.get(\"/child/\")\n request.user = self.superuser\n m = ChildAdmin(Child, custom_site)\n cl = m.get_changelist_instance(request)\n cl.formset = None\n template = Template(\n \"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}\"\n )\n context = Context({\"cl\": cl, \"opts\": Child._meta})\n table_output = template.render(context)\n link = reverse(\"admin:admin_changelist_child_change\", args=(new_child.id,))\n row_html = build_tbody_html(\n new_child.id, link, '%s' % new_parent\n )\n self.assertNotEqual(\n table_output.find(row_html),\n -1,\n \"Failed to find expected row element: %s\" % table_output,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 251, "n_words": 77, "vocab_size": 59, "complexity": 1, "nloc": 22, "token_counts": 150, "n_ast_nodes": 251, "n_identifiers": 36, "random_cut": "def test_result_list_html(self):\n \n new_parent = Parent.objects.create(name=\"parent\")\n new_child = Child.objects.create(name=\"name\", parent=new_parent)\n request = self.factory.get(\"/child/\")\n request.user = self.superuser\n m = ChildAdmin(Child, custom_site)\n cl = m.get_changelist_instance(request)\n cl.formset = None\n template = Template(\n \"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}", "d_id": 51805, "documentation": { "docstring": "\n Inclusion tag result_list generates a table when with default\n ModelAdmin settings.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 75179, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_delete_uploaded_image", "commit_message": "Reformat with black", "code": "def test_delete_uploaded_image(self):\n \n # Send request\n response = self.client.post(\n reverse(\n \"wagtailimages:delete_upload_multiple\", args=(self.uploaded_image.id,)\n )\n )\n\n # Check response\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[\"Content-Type\"], \"application/json\")\n\n # Make sure the image is deleted\n self.assertFalse(\n UploadedImage.objects.filter(id=self.uploaded_image.id).exists()\n )\n\n # Check JSON\n response_json = json.loads(response.content.decode())\n self.assertTrue(response_json[\"success\"])\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 176, "n_words": 37, "vocab_size": 29, "complexity": 1, "nloc": 13, "token_counts": 97, 
"n_ast_nodes": 166, "n_identifiers": 22, "random_cut": "def test_delete_uploaded_image(self):\n \n # Send request\n response = self.client.post(\n reverse(\n \"wagtailimages:delete_upload_multiple\", args=(self.uploaded_image.id,)\n )\n )\n\n ", "d_id": 16376, "documentation": { "docstring": "\n This tests that a POST request to the delete view deletes the UploadedImage\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 101204, "commit_id": "a2de4a97985dc62db3b140a924aeac2be733abf8", "repo": "faceswap", "path": "lib/align/aligned_face.py", "file_name": "aligned_face.py", "fun_name": "matrix", "commit_message": "lib.align.aligned_face updates\n - Typing\n - Legacy support for pre-aligned faces\n - Coverage support for pre-aligned faces\n - Standardized retrieval of sub-crops", "code": "def matrix(self) -> np.ndarray:\n \n if not np.any(self._matrices[self._centering]):\n matrix = self._matrices[\"legacy\"].copy()\n matrix[:, 2] -= self.pose.offset[self._centering]\n self._matrices[self._centering] = matrix\n logger.trace(\"original matrix: %s, new matrix: %s\", # type: ignore\n self._matrices[\"legacy\"], matrix)\n return self._matrices[self._centering]\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 120, "n_words": 30, "vocab_size": 26, "complexity": 2, "nloc": 11, "token_counts": 89, "n_ast_nodes": 144, "n_identifiers": 12, "random_cut": "def matrix(self) -> np.ndarray:\n \n ", "d_id": 20625, "documentation": { "docstring": " :class:`numpy.ndarray`: The 3x2 transformation matrix for extracting and aligning the\n core face area out of the original frame, with no padding or sizing applied. The returned\n matrix is offset for the given :attr:`centering`. 
", "n_words": 33, "vocab_size": 28, "n_whitespaces": 48, "language": "en" } }, { "id": 267137, "commit_id": "b439e41a915ccec0ccbabecc966919ea406db74e", "repo": "ansible", "path": "lib/ansible/parsing/plugin_docs.py", "file_name": "plugin_docs.py", "fun_name": "read_docstub", "commit_message": "expand ansible-doc coverage (#74963)\n\n* Expand ansible-doc to tests/filters and fix existing issues\r\n\r\n enable filter/test docs if in single file or companion yaml\r\n add docs for several filters/tests plugins\r\n allow .yml companion for docs for other plugins, must be colocated\r\n verify plugins are valid (not modules, cannot)\r\n fix 'per collection' filtering\r\n limit old style deprecation (_ prefix) to builtin/legacy\r\n start move to pathlib for saner path handling\r\n moved some funcitons, kept backwards compat shims with deprecation notice\r\n\r\n Co-authored-by: Abhijeet Kasurde \r\n Co-authored-by: Felix Fontein \r\n Co-authored-by: Sandra McCann ", "code": "def read_docstub(filename):\n \n\n in_documentation = False\n capturing = False\n indent_detection = ''\n doc_stub = []\n\n with open(filename, 'r') as t_module_data:\n for line in t_module_data:\n if in_documentation:\n # start capturing the stub until indentation returns\n if capturing and line.startswith(indent_detection):\n doc_stub.append(line)\n\n elif capturing and not line.startswith(indent_detection):\n break\n\n elif line.lstrip().startswith('short_description:'):\n capturing = True\n # Detect that the short_description continues on the next line if it's indented more\n # than short_description itself.\n indent_detection = ' ' * (len(line) - len(line.lstrip()) + 1)\n doc_stub.append(line)\n\n elif line.startswith('DOCUMENTATION') and ('=' in line or ':' in line):\n in_documentation = True\n\n short_description = r''.join(doc_stub).strip().rstrip('.')\n data = AnsibleLoader(short_description, file_name=filename).get_single_data()\n\n return data\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 364, "n_words": 100, "vocab_size": 63, "complexity": 11, "nloc": 21, "token_counts": 162, "n_ast_nodes": 287, "n_identifiers": 21, "random_cut": "def read_docstub(filename):\n \n\n in_documentation = False\n capturing = False\n indent_detection = ''\n doc_stub = []\n\n with open(filename, 'r') as t_module_data:\n for line in t_module_data:\n if in_documentation:\n ", "d_id": 78755, "documentation": { "docstring": "\n Quickly find short_description using string methods instead of node parsing.\n This does not return a full set of documentation strings and is intended for\n operations like ansible-doc -l.\n ", "n_words": 28, "vocab_size": 27, "n_whitespaces": 41, "language": "en" } }, { "id": 115740, "commit_id": "91e73cdd2402a12373379b85ef1934d8ecfa364e", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py", "file_name": "test_lightwood_handler.py", "fun_name": "test_02_train_predictor", "commit_message": "lw handler tests", "code": "def test_02_train_predictor(self):\n query = f\n response = self.handler.native_query(query)\n self.assertTrue(response.type == RESPONSE_TYPE.OK)\n\n # def test_03_retrain_predictor(self):\n # query = f\"RETRAIN {self.test_model_name_1}\"\n # response = self.handler.native_query(query)\n # self.assertTrue(response.type == RESPONSE_TYPE.OK)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 9, "n_whitespaces": 71, "n_words": 27, "vocab_size": 14, "complexity": 1, "nloc": 8, "token_counts": 31, "n_ast_nodes": 66, "n_identifiers": 12, "random_cut": "def test_02_train_predictor(self):\n query = f\n response = self.handler.native_query(query)\n self.assertTrue(response.type == R", "d_id": 25535, "documentation": { "docstring": "\n CREATE PREDICTOR {self.test_model_name_1}\n FROM {PG_HANDLER_NAME} (SELECT * FROM demo_data.home_rentals limit 50)\n PREDICT rental_price\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 54, "language": "en" } }, { "id": 156079, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/core.py", "file_name": "core.py", "fun_name": "get_dependencies", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def get_dependencies(dsk, key=None, task=no_default, as_list=False):\n \n if key is not None:\n arg = dsk[key]\n elif task is not no_default:\n arg = task\n else:\n raise ValueError(\"Provide either key or task\")\n\n return keys_in_tasks(dsk, [arg], as_list=as_list)\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 32, "vocab_size": 26, "complexity": 3, "nloc": 8, "token_counts": 59, "n_ast_nodes": 92, "n_identifiers": 9, "random_cut": "def get_dependencies(dsk, key=None, task=no_default, as_list=False):\n \n if key is not None:\n arg = dsk[key]\n elif task is not no_default:\n arg = task\n else:\n raise ValueError(\"Provide either key or task\")\n\n return keys_in_tasks(dsk, [arg], as_list=as_list)\n\n", "d_id": 36541, "documentation": { "docstring": "Get the immediate tasks on which this task depends\n\n Examples\n --------\n >>> inc = lambda x: x + 1\n >>> add = lambda x, y: x + y\n >>> dsk = {'x': 1,\n ... 'y': (inc, 'x'),\n ... 'z': (add, 'x', 'y'),\n ... 'w': (inc, 'z'),\n ... 'a': (add, (inc, 'x'), 1)}\n\n >>> get_dependencies(dsk, 'x')\n set()\n\n >>> get_dependencies(dsk, 'y')\n {'x'}\n\n >>> get_dependencies(dsk, 'z') # doctest: +SKIP\n {'x', 'y'}\n\n >>> get_dependencies(dsk, 'w') # Only direct dependencies\n {'z'}\n\n >>> get_dependencies(dsk, 'a') # Ignore non-keys\n {'x'}\n\n >>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly\n {'x'}\n ", "n_words": 92, "vocab_size": 61, "n_whitespaces": 190, "language": "en" } }, { "id": 146016, "commit_id": "b267be475863a66e9feedb2be5f0a30a2ed8c493", "repo": "ray", "path": "python/ray/ml/tests/test_checkpoints.py", "file_name": "test_checkpoints.py", "fun_name": "test_dict_checkpoint_fs", "commit_message": "[ml] Add Ray ML / AIR checkpoint implementation (#22691)\n\nThis PR splits up the changes in #22393 and introduces an implementation of the ML Checkpoint interface used by Ray Tune.\r\n\r\nThis means, the TuneCheckpoint class implements the to/from_[bytes|dict|directory|object_ref|uri] conversion functions, as well as more high-level functions to transition between the different TuneCheckpoint classes. It also includes test cases for Tune's main conversion modes, i.e. 
dict - intermediate - dict and fs - intermediate - fs.\r\n\r\nThese changes will be the basis for refactoring the tune interface to use TuneCheckpoint objects instead of TrialCheckpoints (externally) and instead of paths/objects (internally).", "code": "def test_dict_checkpoint_fs(self):\n \n checkpoint = self._prepare_dict_checkpoint()\n\n # Convert into fs checkpoint\n path = checkpoint.to_directory()\n self.assertIsInstance(path, str)\n\n # Create from path\n checkpoint = Checkpoint.from_directory(path)\n self.assertTrue(checkpoint._local_path)\n\n self._assert_dict_checkpoint(checkpoint)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 87, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 50, "n_ast_nodes": 87, "n_identifiers": 13, "random_cut": "def test_dict_checkpoint_fs(self):\n \n checkpoint = self._prepare_dict_checkpoint()\n\n # Convert into fs c", "d_id": 33589, "documentation": { "docstring": "Test conversion from dict to FS checkpoint and back.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 211511, "commit_id": "92078713cced4f0d9450a6fc80a449fa75fd8c10", "repo": "PaddleDetection", "path": "ppdet/modeling/rbox_utils.py", "file_name": "rbox_utils.py", "fun_name": "box2corners", "commit_message": "add fcosr model (#6765)\n\n* add fcosr\r\n\r\n* fix some problem\r\n\r\n* add docs for fcosr\r\n\r\n* modify code\r\n\r\n* modify focsr reader\r\n\r\n* finish tensorrt deployment with dynamic shape\r\n\r\n* modify according to review comment\r\n\r\nCo-authored-by: wangxinxin08 <>", "code": "def box2corners(box):\n \n B = box.shape[0]\n x, y, w, h, alpha = paddle.split(box, 5, axis=-1)\n x4 = paddle.to_tensor(\n [0.5, 0.5, -0.5, -0.5], dtype=paddle.float32).reshape(\n (1, 1, 4)) # (1,1,4)\n x4 = x4 * w # (B, N, 4)\n y4 = paddle.to_tensor(\n [-0.5, 0.5, 0.5, -0.5], dtype=paddle.float32).reshape((1, 1, 4))\n y4 = y4 * h # (B, N, 4)\n corners = paddle.stack([x4, y4], axis=-1) # (B, N, 4, 2)\n sin = paddle.sin(alpha)\n cos = paddle.cos(alpha)\n row1 = paddle.concat([cos, sin], axis=-1)\n row2 = paddle.concat([-sin, cos], axis=-1) # (B, N, 2)\n rot_T = paddle.stack([row1, row2], axis=-2) # (B, N, 2, 2)\n rotated = paddle.bmm(corners.reshape([-1, 4, 2]), rot_T.reshape([-1, 2, 2]))\n rotated = rotated.reshape([B, -1, 4, 2]) # (B*N, 4, 2) -> (B, N, 4, 2)\n rotated[..., 0] += x\n rotated[..., 1] += y\n return rotated\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 214, "n_words": 128, "vocab_size": 71, "complexity": 1, "nloc": 21, "token_counts": 287, "n_ast_nodes": 403, "n_identifiers": 28, "random_cut": "def box2corners(box):\n \n B = box.shape[0]\n x, y, w, h, alpha = paddle.split(box, 5, axis=-1)\n x4 = paddle.to_tensor(\n [0.5, 0.5, -0.5, -0.5], dtype=paddle.float32).reshape(\n (1, 1, 4)) # (1,1,4)\n x4 = x4 * w # (B, N, 4)\n y4 = paddle.to_tensor(\n [-0.5, 0.5, 0.5, -0.5], dtype=paddle.float32).reshape((1, 1, 4))\n y4 = y4 * h # (B, N, 4)\n corners = paddle.stack([x4, y4], axis=-1) # (B, N, 4, 2)\n sin = paddle.sin(alpha)\n cos = paddle.cos(alpha)\n row1 = paddle.concat([cos, sin], axis=-1)\n row2 = paddle.concat([-sin, cos], axis=-1) # (B, N, 2)\n rot_T = paddle.stack([row1, row2], axis=-2) # (B, N, 2, 2)\n rotated = paddle.bmm(corners.reshape([-1, 4, 2]), rot_T.reshape([-1, 2, 2]))\n rotated = rotated.reshape([B, -1, 
4, 2]) # (B*N, 4, 2) -> (B, N, 4, 2)\n rotated[..., 0] += x\n rotated[..., 1] += y\n return rotated\n\n", "d_id": 53111, "documentation": { "docstring": "convert box coordinate to corners\n Args:\n box (Tensor): (B, N, 5) with (x, y, w, h, alpha) angle is in [0, 90)\n Returns:\n corners (Tensor): (B, N, 4, 2) with (x1, y1, x2, y2, x3, y3, x4, y4)\n ", "n_words": 38, "vocab_size": 32, "n_whitespaces": 61, "language": "en" } }, { "id": 91256, "commit_id": "65f43fd4e0f1821b468547fc08136bbad9cd8446", "repo": "sentry", "path": "src/sentry/incidents/subscription_processor.py", "file_name": "subscription_processor.py", "fun_name": "get_crash_rate_alert_metrics_aggregation_value", "commit_message": "fix(cra-metrics): Count all users in metrics alerts (#34957)\n\nUse conditional aggregates in order to get both the total user count and\r\nthe number of crashed users in the same snuba query.\r\n\r\nTo maintain compatibility until existing subscriptions have been\r\nmigrated, make the subscription processor able to handle both the old\r\nand the new format. The actual migration of existing subscriptions will\r\nbe in a separate PR.", "code": "def get_crash_rate_alert_metrics_aggregation_value(self, subscription_update):\n \n rows = subscription_update[\"values\"][\"data\"]\n if BaseMetricsEntitySubscription.is_crash_rate_format_v2(rows):\n version = \"v2\"\n result = self._get_crash_rate_alert_metrics_aggregation_value_v2(subscription_update)\n else:\n version = \"v1\"\n result = self._get_crash_rate_alert_metrics_aggregation_value_v1(subscription_update)\n\n metrics.incr(\n \"incidents.alert_rules.get_crash_rate_alert_metrics_aggregation_value\",\n tags={\"format\": version},\n sample_rate=1.0,\n )\n return result\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 155, "n_words": 29, "vocab_size": 22, "complexity": 2, "nloc": 14, "token_counts": 72, "n_ast_nodes": 123, "n_identifiers": 14, "random_cut": "def get_crash_rate_alert_metrics_aggregation_value(self, subscription_update):\n \n rows = subscription_update[\"values\"][\"data\"]\n if BaseMetricsEntitySubscription.is_crash_rate_format_v2(rows):\n version = \"v2\"\n result = self._get_crash_rate_alert_metrics_aggregation_value_v2(subscription_update)\n else:\n version = \"v1\"\n result = self._get_crash_rate_alert_metrics_aggregation_value_v1(subscription_update)\n\n metrics.incr(\n \"incidents.alert_rules.get_crash_rate_alert_metrics_aggregation_value\",\n tags={\"format\": version},\n sample_rate=1.0,\n )\n ret", "d_id": 18753, "documentation": { "docstring": "Handle both update formats. 
Once all subscriptions have been updated\n to v2, we can remove v1 and replace this function with current v2.\n ", "n_words": 23, "vocab_size": 23, "n_whitespaces": 37, "language": "en" } }, { "id": 120945, "commit_id": "4c0d61a1435b70760814f1f678cb041d36b8408d", "repo": "jax", "path": "jax/_src/test_util.py", "file_name": "test_util.py", "fun_name": "strict_promotion_if_dtypes_match", "commit_message": "Add jtu.strict_promotion_if_dtypes_match utility", "code": "def strict_promotion_if_dtypes_match(dtypes):\n \n if all(dtype == dtypes[0] for dtype in dtypes):\n return jax.numpy_dtype_promotion('strict')\n return jax.numpy_dtype_promotion('standard')\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 20, "n_words": 14, "vocab_size": 13, "complexity": 3, "nloc": 4, "token_counts": 35, "n_ast_nodes": 61, "n_identifiers": 6, "random_cut": "def strict_promotion_if_dtypes_match(dtypes):\n \n if all(dtype == dtypes[0] for dtype in dtypes):\n return jax.", "d_id": 26998, "documentation": { "docstring": "\n Context manager to enable strict promotion if all dtypes match,\n and enable standard dtype promotion otherwise.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 20, "language": "en" } }, { "id": 260170, "commit_id": "de659b9dee2054efb4830eff8e98ece60f4a1758", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_param_validation.py", "file_name": "test_param_validation.py", "fun_name": "test_stroptions_deprecated_subset", "commit_message": "MNT Param validation: Make it possible to mark a constraint as hidden (#23558)", "code": "def test_stroptions_deprecated_subset():\n \n with pytest.raises(ValueError, match=\"deprecated options must be a subset\"):\n StrOptions({\"a\", \"b\", \"c\"}, deprecated={\"a\", \"d\"})\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 28, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 35, "n_ast_nodes": 68, "n_identifiers": 7, "random_cut": "def test_stroptions_deprecated_subset():\n \n with pytest.raises(ValueError, match=\"deprecated options must be a subset\"):\n StrOptions({\"a\", \"b\", \"c\"}, deprecated={\"a\", \"d\"})\n\n", "d_id": 76104, "documentation": { "docstring": "Check that the deprecated parameter must be a subset of options.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 271557, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "_validate_target_and_loss", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _validate_target_and_loss(self, y, loss):\n \n\n # `self.loss` references the loss added via `compile` call. If users have\n # provided such, the target must be provided; otherwise it's a user error.\n # Note that `self.loss` does not include losses added via `add_loss`, and it\n # is a valid use when such loss from `add_loss` exists and target does not.\n if self.loss and y is None:\n raise ValueError(\n \"Target data is missing. Your model was compiled with \"\n f\"loss={self.loss}, \"\n \"and therefore expects target data to be provided in `fit()`.\"\n )\n\n # For training, there must be compiled loss or regularization loss to exist\n # in order to apply the gradients. 
If one is not found, it means no loss\n # was supplied via `compile` or `add_loss`.\n elif loss is None:\n raise ValueError(\n \"No loss found. You may have forgotten to provide a `loss` argument \"\n \"in the `compile()` method.\"\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 337, "n_words": 148, "vocab_size": 95, "complexity": 4, "nloc": 12, "token_counts": 38, "n_ast_nodes": 84, "n_identifiers": 5, "random_cut": "def _validate_target_and_loss(self, y, loss):\n \n\n # `self.loss` references the loss added via `compile` call. If users have\n # provided such, the target must be provided; otherwise it's a user error.\n # Note that `self.loss` does not include losses added via `add_loss`, and it\n # is a valid use when such loss from `add_loss` exists and target does not.\n if self.loss and y is None:\n raise ValueError(\n \"Target data is missing. Your model was compiled with \"\n f\"loss={self.loss}, \"\n \"and therefore expects target data to be provided in `fit()`.\"\n )\n\n # For training, there must be compiled loss or regularization loss to exist\n # in ord", "d_id": 80793, "documentation": { "docstring": "Raises error if target or loss is not found.\n\n This method verifies that the target and loss are properly populated\n when applicable, or raises errors.\n\n Args:\n y: the target for training.\n loss: the total loss tensor including loss added via `compile` and\n `add_loss`.\n ", "n_words": 43, "vocab_size": 34, "n_whitespaces": 100, "language": "en" } }, { "id": 165883, "commit_id": "d2aa44f50f6ac4789d4e351e4e52a53a358da42e", "repo": "pandas", "path": "pandas/core/window/rolling.py", "file_name": "rolling.py", "fun_name": "_validate_datetimelike_monotonic", "commit_message": "BUG: groupby().rolling(freq) with monotonic dates within groups #46065 (#46567)", "code": "def _validate_datetimelike_monotonic(self):\n \n # GH 46061\n if self._on.hasnans:\n self._raise_monotonic_error(\"values must not have NaT\")\n for group_indices in self._grouper.indices.values():\n group_on = self._on.take(group_indices)\n if not (\n group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing\n ):\n on = \"index\" if self.on is None else self.on\n raise ValueError(\n f\"Each group within {on} must be monotonic. 
\"\n f\"Sort the values in {on} first.\"\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 222, "n_words": 52, "vocab_size": 44, "complexity": 6, "nloc": 13, "token_counts": 75, "n_ast_nodes": 135, "n_identifiers": 15, "random_cut": "def _validate_datetimelike_monotonic(self):\n \n # GH 46061\n if self._on.hasnans:\n self._raise_monoton", "d_id": 39728, "documentation": { "docstring": "\n Validate that each group in self._on is monotonic\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 221825, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ctypes/macholib/framework.py", "file_name": "framework.py", "fun_name": "framework_info", "commit_message": "add python 3.10.4 for windows", "code": "def framework_info(filename):\n \n is_framework = STRICT_FRAMEWORK_RE.match(filename)\n if not is_framework:\n return None\n return is_framework.groupdict()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 46, "n_identifiers": 6, "random_cut": "def framework_info(filename):\n \n is_framework = STRICT_FRAMEWORK_RE.match(filename)\n if not is_framework:\n return None\n return is_framewo", "d_id": 56528, "documentation": { "docstring": "\n A framework name can take one of the following four forms:\n Location/Name.framework/Versions/SomeVersion/Name_Suffix\n Location/Name.framework/Versions/SomeVersion/Name\n Location/Name.framework/Name_Suffix\n Location/Name.framework/Name\n\n returns None if not found, or a mapping equivalent to:\n dict(\n location='Location',\n name='Name.framework/Versions/SomeVersion/Name_Suffix',\n shortname='Name',\n version='SomeVersion',\n suffix='Suffix',\n )\n\n Note that SomeVersion and Suffix are optional and may be None\n if not present\n ", "n_words": 46, "vocab_size": 42, "n_whitespaces": 159, "language": "en" } }, { "id": 62994, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pep517/in_process/_in_process.py", "file_name": "_in_process.py", "fun_name": "contained_in", "commit_message": "upd; format", "code": "def contained_in(filename, directory):\n \n filename = os.path.normcase(os.path.abspath(filename))\n directory = os.path.normcase(os.path.abspath(directory))\n return os.path.commonprefix([filename, directory]) == directory\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 26, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 57, "n_ast_nodes": 91, "n_identifiers": 8, "random_cut": "def contained_in(filename, directory):\n \n filename = os.path.normcase(os.path.abspath(filename))\n directory = os.path.normcase(os.path.abspath(directory))\n return os.path.commonprefix([f", "d_id": 13090, "documentation": { "docstring": "Test if a file is located within the given directory.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 34064, "commit_id": "c4f7eb124b218741d66dd1d86b5d744024a78f6f", "repo": "transformers", "path": "src/transformers/activations_tf.py", "file_name": "activations_tf.py", "fun_name": "glu", "commit_message": "add 
TF glu activation function (#15146)", "code": "def glu(x, axis=-1):\n \n a, b = tf.split(x, 2, axis=axis)\n return a * tf.math.sigmoid(b)\n\n\nif version.parse(tf.version.VERSION) >= version.parse(\"2.4\"):\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 3, "token_counts": 38, "n_ast_nodes": 92, "n_identifiers": 12, "random_cut": "def glu(x, axis=-1):\n \n a, b = tf.split(x, 2, axis=axis)\n return a * ", "d_id": 6193, "documentation": { "docstring": "\n Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where\n the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B).\n\n Args:\n `x`: float Tensor to perform activation\n `axis`: dimension across which `x` be split in half\n\n Returns:\n `x` with the GLU activation applied (with its size halved across the dimension `axis`).\n ", "n_words": 63, "vocab_size": 49, "n_whitespaces": 100, "language": "en" } }, { "id": 106848, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/__init__.py", "file_name": "__init__.py", "fun_name": "matplot", "commit_message": "apply black py to all python files", "code": "def matplot(self, plot, opts=None, env=None, win=None):\n \n opts = {} if opts is None else opts\n _title2str(opts)\n _assert_opts(opts)\n\n # write plot to SVG buffer:\n buffer = StringIO()\n plot.savefig(buffer, format=\"svg\")\n buffer.seek(0)\n svg = buffer.read()\n buffer.close()\n\n if opts.get(\"resizable\", False):\n if not BS4_AVAILABLE:\n raise ImportError(\"No module named 'bs4'\")\n else:\n try:\n soup = bs4.BeautifulSoup(svg, \"xml\")\n except bs4.FeatureNotFound as e:\n import six\n\n six.raise_from(ImportError(\"No module named 'lxml'\"), e)\n height = soup.svg.attrs.pop(\"height\", None)\n width = soup.svg.attrs.pop(\"width\", None)\n svg = str(soup)\n else:\n height = None\n width = None\n\n # show SVG:\n if \"height\" not in opts:\n height = height or re.search(r'height\\=\"([0-9\\.]*)pt\"', svg)\n if height is not None:\n if not isstr(height):\n height = height.group(1)\n height = height.replace(\"pt\", \"00\")\n opts[\"height\"] = 1.4 * int(math.ceil(float(height)))\n if \"width\" not in opts:\n width = width or re.search(r'width\\=\"([0-9\\.]*)pt\"', svg)\n if width is not None:\n if not isstr(width):\n width = width.group(1)\n width = width.replace(\"pt\", \"00\")\n opts[\"width\"] = 1.35 * int(math.ceil(float(width)))\n return self.svg(svgstr=svg, opts=opts, env=env, win=win)\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 624, "n_words": 149, "vocab_size": 88, "complexity": 13, "nloc": 39, "token_counts": 329, "n_ast_nodes": 543, "n_identifiers": 41, "random_cut": "def matplot(self, plot, opts=None, env=None, win=None):\n \n opts = {} if opts is None else opts\n _title2str(opts)\n _assert_opts(opts)\n\n # write plot to SVG buffer:\n buffer = StringIO()\n plot.savefig(buffer, format=\"svg\")\n buffer.seek(0)\n svg = buffer.read()\n buffer.close()\n\n if opts.get(\"resizable\", False):\n if not BS4_AVAILABLE:\n raise ImportError(\"No module named 'bs4'\")\n else:\n try:\n soup = bs4.BeautifulSoup(svg, \"x", "d_id": 22471, "documentation": { "docstring": "\n This function draws a Matplotlib `plot`. 
The function supports\n one plot-specific option: `resizable`. When set to `True` the plot\n is resized with the pane. You need `beautifulsoup4` and `lxml`\n packages installed to use this option.\n ", "n_words": 35, "vocab_size": 32, "n_whitespaces": 71, "language": "en" } }, { "id": 216309, "commit_id": "50a17432015fb712ec4dc7d3ead79e8939e2bf96", "repo": "salt", "path": "salt/modules/consul.py", "file_name": "consul.py", "fun_name": "acl_clone", "commit_message": "fix(consul): serialize to JSON only non string objects.\n\nFixes 35215", "code": "def acl_clone(consul_url=None, token=None, **kwargs):\n \n ret = {}\n data = {}\n if not consul_url:\n consul_url = _get_config()\n if not consul_url:\n log.error(\"No Consul URL found.\")\n ret[\"message\"] = \"No Consul URL found.\"\n ret[\"res\"] = False\n return ret\n\n if \"id\" not in kwargs:\n ret[\"message\"] = 'Required parameter \"id\" is missing.'\n ret[\"res\"] = False\n return ret\n\n function = \"acl/clone/{}\".format(kwargs[\"id\"])\n res = _query(\n consul_url=consul_url, token=token, data=data, method=\"PUT\", function=function\n )\n if res[\"res\"]:\n ret[\"res\"] = True\n ret[\"message\"] = \"ACL {} cloned.\".format(kwargs[\"name\"])\n ret[\"ID\"] = res[\"data\"]\n else:\n ret[\"res\"] = False\n ret[\"message\"] = \"Cloning ACL item {} failed.\".format(kwargs[\"name\"])\n return ret\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 243, "n_words": 89, "vocab_size": 51, "complexity": 5, "nloc": 26, "token_counts": 170, "n_ast_nodes": 306, "n_identifiers": 14, "random_cut": "def acl_clone(consul_url=None, token=None, **kwargs):\n \n ret = {}\n data = {}\n if not consul_url:\n consul_url = _get_config()\n if not consul_url:\n log.error(\"No Consul URL found.\")\n ret[\"message\"] = \"No Consul URL found.\"\n ret[\"res\"] = False\n return ret\n\n if \"id\" not in kwargs:\n ret[\"message\"] = 'Required parameter \"id\" is missing.'\n ret[\"res\"] = False\n return ret\n\n function = \"acl/clone/{}\".format(kwargs[\"id\"])\n res = _query(\n consul_url=consul_url, token=token, data=data, method=\"PUT\", function=function\n )\n if res[\"res\"]:\n ret[\"res\"] = True\n ret[\"message\"] = \"ACL {} cloned.\".format(kwargs[\"name\"])\n ret[\"ID\"] = res[\"data\"]\n else:\n ret[\"res\"] = False\n ret[\"message\"] = \"Cloning ACL item {} failed.\".format(kwargs[\"name\"])\n return ret\n\n", "d_id": 54514, "documentation": { "docstring": "\n Information about an ACL token.\n\n :param consul_url: The Consul server URL.\n :param id: Unique identifier for the ACL to update.\n :return: Boolean, message of success or\n failure, and new ID of cloned ACL.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716'\n\n ", "n_words": 42, "vocab_size": 39, "n_whitespaces": 83, "language": "en" } }, { "id": 168242, "commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", "repo": "pandas", "path": "pandas/core/indexes/datetimes.py", "file_name": "datetimes.py", "fun_name": "slice_indexer", "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", "code": "def slice_indexer(self, start=None, end=None, step=None, kind=lib.no_default):\n \n self._deprecated_arg(kind, \"kind\", \"slice_indexer\")\n\n # For historical reasons DatetimeIndex supports slices between two\n # instances of datetime.time as if it were applying a slice mask to\n # an array of (self.hour, self.minute, self.seconds, self.microsecond).\n if isinstance(start, time) and isinstance(end, time):\n if step is not None and step != 1:\n raise ValueError(\"Must have step size of 1 with time slices\")\n return self.indexer_between_time(start, end)\n\n if isinstance(start, time) or isinstance(end, time):\n raise KeyError(\"Cannot mix time and non-time slice keys\")\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 178, "n_words": 81, "vocab_size": 63, "complexity": 14, "nloc": 38, "token_counts": 269, "n_ast_nodes": 149, "n_identifiers": 14, "random_cut": "def slice_indexer(self, start=None, end=None, step=None, kind=lib.no_default):\n \n self._deprecated_arg(kind, \"kind\", \"slice_indexer\")\n\n # For historical reasons DatetimeIndex supports slices between two\n # instances of datetime.time as if it were applying a slice mask to\n # an ", "d_id": 40253, "documentation": { "docstring": "\n Return indexer for specified label slice.\n Index.slice_indexer, customized to handle time slicing.\n\n In addition to functionality provided by Index.slice_indexer, does the\n following:\n\n - if both `start` and `end` are instances of `datetime.time`, it\n invokes `indexer_between_time`\n - if `start` and `end` are both either string or None perform\n value-based selection in non-monotonic cases.\n\n ", "n_words": 52, "vocab_size": 43, "n_whitespaces": 120, "language": "en" } }, { "id": 149967, "commit_id": "c0ff554d5be871098cd10424fdd579322b5370df", "repo": "freqtrade", "path": "freqtrade/persistence/migrations.py", "file_name": "migrations.py", "fun_name": "fix_old_dry_orders", "commit_message": "Cleanup old, left open dry-run orders", "code": "def fix_old_dry_orders(engine):\n with engine.begin() as connection:\n connection.execute(\n text(\n \n )\n )\n connection.execute(\n text(\n \n )\n )\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 120, "n_words": 14, "vocab_size": 9, "complexity": 1, "nloc": 27, "token_counts": 32, "n_ast_nodes": 61, "n_identifiers": 6, "random_cut": "def fix_old_dry_orders(engine):\n", "d_id": 34618, "documentation": { "docstring": "\n update orders\n set ft_is_open = 0\n where ft_is_open = 1 and (ft_trade_id, order_id) not in (\n select id, stoploss_order_id from trades where stoploss_order_id is not null\n ) and ft_order_side = 'stoploss'\n and order_id like 'dry_%'\n \n update orders\n set ft_is_open = 0\n where ft_is_open = 1\n and (ft_trade_id, order_id) not in (\n select id, open_order_id from trades where open_order_id is not null\n ) and ft_order_side != 'stoploss'\n and order_id like 'dry_%'\n ", "n_words": 70, 
"vocab_size": 29, "n_whitespaces": 305, "language": "en" } }, { "id": 251829, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/http/hyper_h2_test_helpers.py", "file_name": "hyper_h2_test_helpers.py", "fun_name": "build_data_frame", "commit_message": "make it black!", "code": "def build_data_frame(self, data, flags=None, stream_id=1, padding_len=0):\n \n flags = set(flags) if flags is not None else set()\n f = DataFrame(stream_id)\n f.data = data\n f.flags = flags\n\n if padding_len:\n flags.add(\"PADDED\")\n f.pad_length = padding_len\n\n return f\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 104, "n_words": 33, "vocab_size": 25, "complexity": 3, "nloc": 9, "token_counts": 67, "n_ast_nodes": 107, "n_identifiers": 11, "random_cut": "def build_data_frame(self, data, flags=None, stream_id=1, padding_len=0):\n \n flags = set(flags) if flags is not None else set()\n f = DataFrame(stream_id)\n f.data = data\n f.flags = flags\n\n if padding_len:\n flags.add(\"PADDED\")\n f.pad_length = padding_len\n\n return f\n", "d_id": 73833, "documentation": { "docstring": "\n Builds a single data frame out of a chunk of data.\n ", "n_words": 11, "vocab_size": 9, "n_whitespaces": 26, "language": "en" } }, { "id": 12837, "commit_id": "124045351137d80d118f9692de4295d50561f1e1", "repo": "jina", "path": "jina/parsers/dryrun.py", "file_name": "dryrun.py", "fun_name": "set_dryrun_parser", "commit_message": "feat: add dryrun to cli (#5050)\n\n* feat: add dryrun to cli\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: add parsing for dryrun\r\n\r\n* feat: update checker dryrun\r\n\r\n* docs: add dryrun cli to healt check page\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: add exit\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def set_dryrun_parser(parser=None):\n \n if not parser:\n parser = set_base_parser()\n\n parser.add_argument(\n 'host',\n type=str,\n help='The full host address of the Gateway, e.g. grpc://localhost:12345',\n )\n\n parser.add_argument(\n '--timeout',\n type=int,\n default=3000,\n help=,\n )\n\n return parser\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 106, "n_words": 29, "vocab_size": 26, "complexity": 2, "nloc": 18, "token_counts": 53, "n_ast_nodes": 90, "n_identifiers": 9, "random_cut": "def set_dryrun_parser(parser=None):\n \n if not parser:\n parser = set_base_parser()\n\n parser.add_argument(\n 'host',\n type=str,\n help='The full host address of the Gateway, e.g. 
grpc://localhost:12345',\n )\n\n parser.add_argument(\n '--timeout',\n type=int,\n default=3000,\n help=,\n )\n\n return parser\n", "d_id": 2421, "documentation": { "docstring": "Set the parser for `dryrun`\n\n :param parser: an existing parser to build upon\n :return: the parser\n \nTimeout in millisecond of one check\n-1 for waiting forever\n", "n_words": 26, "vocab_size": 22, "n_whitespaces": 33, "language": "en" } }, { "id": 61339, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py", "file_name": "wheel.py", "fun_name": "wheel_dist_info_dir", "commit_message": "upd; format", "code": "def wheel_dist_info_dir(source, name):\n # type: (ZipFile, str) -> str\n \n # Zip file path separators must be /\n subdirs = {p.split(\"/\", 1)[0] for p in source.namelist()}\n\n info_dirs = [s for s in subdirs if s.endswith(\".dist-info\")]\n\n if not info_dirs:\n raise UnsupportedWheel(\".dist-info directory not found\")\n\n if len(info_dirs) > 1:\n raise UnsupportedWheel(\n \"multiple .dist-info directories found: {}\".format(\", \".join(info_dirs))\n )\n\n info_dir = info_dirs[0]\n\n info_dir_name = canonicalize_name(info_dir)\n canonical_name = canonicalize_name(name)\n if not info_dir_name.startswith(canonical_name):\n raise UnsupportedWheel(\n \".dist-info directory {!r} does not start with {!r}\".format(\n info_dir, canonical_name\n )\n )\n\n return info_dir\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 202, "n_words": 83, "vocab_size": 61, "complexity": 7, "nloc": 19, "token_counts": 120, "n_ast_nodes": 204, "n_identifiers": 19, "random_cut": "def wheel_dist_info_dir(source, name):\n # type: (ZipFile, str) -> str\n \n # Zip file path separators must be /\n subdirs = {p.split(\"/\", 1)[0] for p in source.namelist()}\n\n info_dirs = [s for s in subdirs if s.endswith(\".dist-info\")]\n\n if not info_dirs:\n raise UnsupportedWheel(\".dist-info directory not found\")\n\n if len(info_dirs) > 1:\n raise UnsupportedWheel(\n \"multiple .dist-info directories found: {}\".format(\", \".join(info_dirs))\n )\n\n info_dir = info_dirs[0]\n\n info_dir_name = canonicalize_name(info_dir)\n canonical_name = canonicalize_name(name)\n if not info_dir_name.startswith(canonical_name):\n raise Unsupported", "d_id": 12521, "documentation": { "docstring": "Returns the name of the contained .dist-info directory.\n\n Raises AssertionError or UnsupportedWheel if not found, >1 found, or\n it doesn't match the provided name.\n ", "n_words": 24, "vocab_size": 20, "n_whitespaces": 33, "language": "en" } }, { "id": 160611, "commit_id": "cedba623b110caf83f46edfa38cb4fbc0191e285", "repo": "numpy", "path": "numpy/lib/arraysetops.py", "file_name": "arraysetops.py", "fun_name": "in1d", "commit_message": "MAINT: Optimize np.isin for integer arrays\n\n- This optimization indexes with an intermediary boolean array to speed up\nnumpy.isin and numpy.in1d for integer arrays over a range of optimal parameters\nwhich are calculated.", "code": "def in1d(ar1, ar2, assume_unique=False, invert=False):\n \n # Ravel both arrays, behavior for the first array could be different\n ar1 = np.asarray(ar1).ravel()\n ar2 = np.asarray(ar2).ravel()\n\n # Ensure that iteration through object arrays yields size-1 arrays\n if ar2.dtype == object:\n ar2 = ar2.reshape(-1, 1)\n # Check if we can use a fast integer algorithm:\n integer_arrays = 
(np.issubdtype(ar1.dtype, np.integer) and\n np.issubdtype(ar2.dtype, np.integer))\n\n if integer_arrays:\n ar2_min = np.min(ar2)\n ar2_max = np.max(ar2)\n ar2_range = ar2_max - ar2_min\n ar2_size = ar2.size\n\n # Optimal performance is for approximately\n # log10(size) > (log10(range) - 2.27) / 0.927, see discussion on\n # https://github.com/numpy/numpy/pull/12065\n optimal_parameters = (\n np.log10(ar2_size + 1) >\n ((np.log10(ar2_range + 1) - 2.27) / 0.927)\n )\n\n if optimal_parameters:\n\n if invert:\n outgoing_array = np.ones_like(ar1, dtype=np.bool_)\n else:\n outgoing_array = np.zeros_like(ar1, dtype=np.bool_)\n\n # Make elements 1 where the integer exists in ar2\n if invert:\n isin_helper_ar = np.ones(ar2_range + 1, dtype=np.bool_)\n isin_helper_ar[ar2 - ar2_min] = 0\n else:\n isin_helper_ar = np.zeros(ar2_range + 1, dtype=np.bool_)\n isin_helper_ar[ar2 - ar2_min] = 1\n\n # Mask out elements we know won't work\n basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)\n outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -\n ar2_min]\n\n return outgoing_array\n\n\n # Check if one of the arrays may contain arbitrary objects\n contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject\n\n # This code is run when\n # a) the first condition is true, making the code significantly faster\n # b) the second condition is true (i.e. `ar1` or `ar2` may contain\n # arbitrary objects), since then sorting is not guaranteed to work\n if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:\n if invert:\n mask = np.ones(len(ar1), dtype=bool)\n for a in ar2:\n mask &= (ar1 != a)\n else:\n mask = np.zeros(len(ar1), dtype=bool)\n for a in ar2:\n mask |= (ar1 == a)\n return mask\n\n # Otherwise use sorting\n if not assume_unique:\n ar1, rev_idx = np.unique(ar1, return_inverse=True)\n ar2 = np.unique(ar2)\n\n ar = np.concatenate((ar1, ar2))\n # We need this to be a stable sort, so always use 'mergesort'\n # here. The values from the first array should always come before\n # the values from the second array.\n order = ar.argsort(kind='mergesort')\n sar = ar[order]\n if invert:\n bool_ar = (sar[1:] != sar[:-1])\n else:\n bool_ar = (sar[1:] == sar[:-1])\n flag = np.concatenate((bool_ar, [invert]))\n ret = np.empty(ar.shape, dtype=bool)\n ret[order] = flag\n\n if assume_unique:\n return ret[:len(ar1)]\n else:\n return ret[rev_idx]\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 976, "n_words": 367, "vocab_size": 205, "complexity": 16, "nloc": 59, "token_counts": 504, "n_ast_nodes": 792, "n_identifiers": 51, "random_cut": "def in1d(ar1, ar2, assume_unique=False, invert=False):\n \n # Ravel both arrays, behavior for the first array could be different\n ar1 = np.asarray(ar1).ravel()\n ar2 = np.asarray(ar2).ravel()\n\n # Ensure that iteration through object arrays yields size", "d_id": 38667, "documentation": { "docstring": "\n Test whether each element of a 1-D array is also present in a second array.\n\n Returns a boolean array the same length as `ar1` that is True\n where an element of `ar1` is in `ar2` and False otherwise.\n\n We recommend using :func:`isin` instead of `in1d` for new code.\n\n Parameters\n ----------\n ar1 : (M,) array_like\n Input array.\n ar2 : array_like\n The values against which to test each value of `ar1`.\n assume_unique : bool, optional\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. 
Default is False.\n invert : bool, optional\n If True, the values in the returned array are inverted (that is,\n False where an element of `ar1` is in `ar2` and True otherwise).\n Default is False. ``np.in1d(a, b, invert=True)`` is equivalent\n to (but is faster than) ``np.invert(in1d(a, b))``.\n\n .. versionadded:: 1.8.0\n\n Returns\n -------\n in1d : (M,) ndarray, bool\n The values `ar1[in1d]` are in `ar2`.\n\n See Also\n --------\n isin : Version of this function that preserves the\n shape of ar1.\n numpy.lib.arraysetops : Module with a number of other functions for\n performing set operations on arrays.\n\n Notes\n -----\n `in1d` can be considered as an element-wise function version of the\n python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly\n equivalent to ``np.array([item in b for item in a])``.\n However, this idea fails if `ar2` is a set, or similar (non-sequence)\n container: As ``ar2`` is converted to an array, in those cases\n ``asarray(ar2)`` is an object array rather than the expected array of\n contained values.\n\n .. versionadded:: 1.4.0\n\n Examples\n --------\n >>> test = np.array([0, 1, 2, 5, 0])\n >>> states = [0, 2]\n >>> mask = np.in1d(test, states)\n >>> mask\n array([ True, False, True, False, True])\n >>> test[mask]\n array([0, 2, 0])\n >>> mask = np.in1d(test, states, invert=True)\n >>> mask\n array([False, True, False, True, False])\n >>> test[mask]\n array([1, 5])\n ", "n_words": 303, "vocab_size": 181, "n_whitespaces": 577, "language": "en" } }, { "id": 176973, "commit_id": "b8d1438e4ea3d8190c650110b3b7d7c141224842", "repo": "networkx", "path": "networkx/algorithms/centrality/degree_alg.py", "file_name": "degree_alg.py", "fun_name": "out_degree_centrality", "commit_message": "added examples to degree_alg.py (#5644)\n\n* added example on degree centrality\r\n\r\n* added example on in degree centrality\r\n\r\n* added example on out degree centrality\r\n\r\n* added opening braces", "code": "def out_degree_centrality(G):\n \n if len(G) <= 1:\n return {n: 1 for n in G}\n\n s = 1.0 / (len(G) - 1.0)\n centrality = {n: d * s for n, d in G.out_degree()}\n return centrality\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 55, "n_words": 33, "vocab_size": 25, "complexity": 4, "nloc": 6, "token_counts": 61, "n_ast_nodes": 91, "n_identifiers": 8, "random_cut": "def out_degree_centrality(G):\n \n if len(G) <= 1:\n return {n: 1 for n in G}\n\n s = 1.0 / (len(G) - 1.0)\n centrality = {n: d * s for n, d in G.out_degree()}\n return centralit", "d_id": 42201, "documentation": { "docstring": "Compute the out-degree centrality for nodes.\n\n The out-degree centrality for a node v is the fraction of nodes its\n outgoing edges are connected to.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with out-degree centrality as values.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is undirected.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])\n >>> nx.out_degree_centrality(G)\n {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0}\n\n See Also\n --------\n degree_centrality, in_degree_centrality\n\n Notes\n -----\n The degree centrality values are normalized by dividing by the maximum\n possible degree in a simple graph n-1 where n is the number of nodes in G.\n\n For multigraphs or graphs with self loops the maximum degree might\n be higher than n-1 and values of 
degree centrality greater than 1\n are possible.\n ", "n_words": 136, "vocab_size": 93, "n_whitespaces": 238, "language": "en" } }, { "id": 235950, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/tests/test_optional/test_offline/test_offline.py", "file_name": "test_offline.py", "fun_name": "_read_html", "commit_message": "switch to black .22", "code": "def _read_html(self, file_url):\n \n with open(file_url.replace(\"file://\", \"\").replace(\" \", \"\")) as f:\n return f.read()\n\n if matplotlylib:\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 42, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 74, "n_identifiers": 8, "random_cut": "def _read_html(self, file_url):\n \n with open(file_url.replace(\"file://\", \"\").replace(\" \", \"\")) as f:\n return f.read()\n\n if matplotlylib:\n", "d_id": 67386, "documentation": { "docstring": "Read and return the HTML contents from a file_url in the\n form e.g. file:///Users/chriddyp/Repos/plotly.py/plotly-temp.html\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 136708, "commit_id": "7c8859f1428224710e4c2db2abf0d9ec28536301", "repo": "ray", "path": "python/ray/_private/utils.py", "file_name": "utils.py", "fun_name": "set_omp_num_threads_if_unset", "commit_message": "[core] Set OMP_NUM_THREADS to `num_cpus` required by task/actors by default (#30496)\n\nRay currently sets OMP_NUM_THREADS=1 when the environ variable is not set.\r\nThis PR:\r\n\r\nSets OMP_NUM_THREADS to the number of cpus assigned to the worker that runs a task before running, and reset it after running.\r\nIf num_cpus is a fractional smaller than 1, it will set OMP_NUM_THREADS to 1.\r\nDoesn't override OMP_NUM_THREADS if it's already being specified in runtime env or through os.environ.\r\nSigned-off-by: Ricky Xu \r\nCo-authored-by: Eric Liang \r\nCo-authored-by: Simon Mo ", "code": "def set_omp_num_threads_if_unset() -> bool:\n \n num_threads_from_env = os.environ.get(\"OMP_NUM_THREADS\")\n if num_threads_from_env is not None:\n # No ops if it's set\n return False\n\n # If unset, try setting the correct CPU count assigned.\n runtime_ctx = ray.get_runtime_context()\n if runtime_ctx.worker.mode != ray._private.worker.WORKER_MODE:\n # Non worker mode, no ops.\n return False\n\n num_assigned_cpus = runtime_ctx.get_assigned_resources().get(\"CPU\")\n\n if num_assigned_cpus is None:\n # This is an actor task w/o any num_cpus specified, set it to 1\n logger.debug(\n \"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n \"degradation with many workers (issue #6998). 
You can override this \"\n \"by explicitly setting OMP_NUM_THREADS, or changing num_cpus.\"\n )\n num_assigned_cpus = 1\n\n import math\n\n # For num_cpu < 1: Set to 1.\n # For num_cpus >= 1: Set to the floor of the actual assigned cpus.\n omp_num_threads = max(math.floor(num_assigned_cpus), 1)\n os.environ[\"OMP_NUM_THREADS\"] = str(omp_num_threads)\n return True\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 260, "n_words": 129, "vocab_size": 94, "complexity": 4, "nloc": 27, "token_counts": 105, "n_ast_nodes": 189, "n_identifiers": 22, "random_cut": "def set_omp_num_threads_if_unset() -> bool:\n \n num_threads_from_env = os.environ.get(\"OMP_NUM_THREADS\")\n if num_threads_from_env is not None:\n # No ops if it's set\n return False\n\n # If unset, try setting the correct CPU count assigned.\n runtime_ctx = ray.get_runtime_context()\n if runtime_ctx.worker.mode != ray._private.worker.WORKER_MODE:\n # Non worker mode, no ops.\n return False\n\n num_assigned_cpus = runtime_ctx.get_assigned_resources().get(\"CPU\")\n\n if num_assigned_cpus is None:\n # This is an acto", "d_id": 30974, "documentation": { "docstring": "Set the OMP_NUM_THREADS to default to num cpus assigned to the worker\n\n This function sets the environment variable OMP_NUM_THREADS for the worker,\n if the env is not previously set and it's running in worker (WORKER_MODE).\n\n Returns True if OMP_NUM_THREADS is set in this function.\n\n ", "n_words": 44, "vocab_size": 31, "n_whitespaces": 56, "language": "en" } }, { "id": 289145, "commit_id": "3b33e0d832b238b40360383099391e2093ea05cb", "repo": "core", "path": "tests/components/homekit/test_type_sensors.py", "file_name": "test_type_sensors.py", "fun_name": "test_binary_device_classes", "commit_message": "Add support for restoring HomeKit IIDs (#79913)", "code": "async def test_binary_device_classes(hass, hk_driver):\n \n entity_id = \"binary_sensor.demo\"\n aid = 1\n\n for device_class, (service, char, _) in BINARY_SENSOR_SERVICE_MAP.items():\n hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: device_class})\n await hass.async_block_till_done()\n\n aid += 1\n acc = BinarySensor(hass, hk_driver, \"Binary Sensor\", entity_id, aid, None)\n assert acc.get_service(service).display_name == service\n assert acc.char_detected.display_name == char\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 97, "n_words": 43, "vocab_size": 37, "complexity": 2, "nloc": 10, "token_counts": 91, "n_ast_nodes": 142, "n_identifiers": 21, "random_cut": "async def test_binary_device_classes(hass, hk_driver):\n \n entity_id = \"binary_sensor.demo\"\n aid = 1\n\n for device_class, (service, char, _) in BINARY_SENSOR_SERVICE_MAP.items():\n hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: device_class})\n await hass.async_block_till_done()\n\n aid += 1\n acc = BinarySensor(hass, hk_driver, \"Binary Sensor\", entity_id, aid, None)\n assert acc.get_service(", "d_id": 88292, "documentation": { "docstring": "Test if services and characteristics are assigned correctly.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 217902, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/imaplib.py", "file_name": "imaplib.py", "fun_name": "Internaldate2tuple", "commit_message": "add python 3.10.4 for windows", 
"code": "def Internaldate2tuple(resp):\n \n\n mo = InternalDate.match(resp)\n if not mo:\n return None\n\n mon = Mon2num[mo.group('mon')]\n zonen = mo.group('zonen')\n\n day = int(mo.group('day'))\n year = int(mo.group('year'))\n hour = int(mo.group('hour'))\n min = int(mo.group('min'))\n sec = int(mo.group('sec'))\n zoneh = int(mo.group('zoneh'))\n zonem = int(mo.group('zonem'))\n\n # INTERNALDATE timezone must be subtracted to get UT\n\n zone = (zoneh*60 + zonem)*60\n if zonen == b'-':\n zone = -zone\n\n tt = (year, mon, day, hour, min, sec, -1, -1, -1)\n utc = calendar.timegm(tt) - zone\n\n return time.localtime(utc)\n\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 144, "n_words": 76, "vocab_size": 57, "complexity": 3, "nloc": 19, "token_counts": 178, "n_ast_nodes": 302, "n_identifiers": 24, "random_cut": "def Internaldate2tuple(resp):\n \n\n mo = InternalDate.match(resp)\n if not mo:\n return ", "d_id": 55002, "documentation": { "docstring": "Parse an IMAP4 INTERNALDATE string.\n\n Return corresponding local time. The return value is a\n time.struct_time tuple or None if the string has wrong format.\n ", "n_words": 24, "vocab_size": 24, "n_whitespaces": 34, "language": "en" } }, { "id": 74795, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_delete_get", "commit_message": "Reformat with black", "code": "def test_delete_get(self):\n \n # Send request\n response = self.client.get(\n reverse(\"wagtaildocs:delete_multiple\", args=(self.doc.id,))\n )\n\n # Check response\n self.assertEqual(response.status_code, 405)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 69, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 40, "n_ast_nodes": 68, "n_identifiers": 11, "random_cut": "def test_delete_get(self):\n \n # Send request\n response = self.client.get(\n reverse(\"wagtaildocs:delete_multiple\", args=(self.doc.id,))\n )\n\n # Check response\n self.assertEqual(response.status_code, 405)\n", "d_id": 16319, "documentation": { "docstring": "\n This tests that a GET request to the delete view returns a 405 \"METHOD NOT ALLOWED\" response\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 32, "language": "en" } }, { "id": 83840, "commit_id": "803982e87254e3b1ebcb16ed795e224afceea3a3", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_stream_admin_remove_others_from_public_stream", "commit_message": "message_flags: Short-circuit if no messages changed.\n\nOmit sending an event, and updating the database, if there are no\nmatching messages.", "code": "def test_stream_admin_remove_others_from_public_stream(self) -> None:\n \n result = self.attempt_unsubscribe_of_principal(\n query_count=15,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_stream_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 141, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 16, 
"token_counts": 80, "n_ast_nodes": 125, "n_identifiers": 15, "random_cut": "def test_stream_admin_remove_others_from_public_stream(self) -> None:\n \n result = self.attempt_unsubscribe_of_principal(\n query_count=15,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=False,\n is_stream_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"re", "d_id": 17733, "documentation": { "docstring": "\n You can remove others from public streams you're a stream administrator of.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 244539, "commit_id": "c407e970a8ee2544f27d1c233855a31129dca158", "repo": "mmdetection", "path": "mmdet/datasets/pipelines/transforms.py", "file_name": "transforms.py", "fun_name": "_mosaic_combine", "commit_message": "Refactor RandomCrop and SegRescale", "code": "def _mosaic_combine(self, loc, center_position_xy, img_shape_wh):\n \n assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n if loc == 'top_left':\n # index0 to top left part of image\n x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \\\n max(center_position_xy[1] - img_shape_wh[1], 0), \\\n center_position_xy[0], \\\n center_position_xy[1]\n crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - (\n y2 - y1), img_shape_wh[0], img_shape_wh[1]\n\n elif loc == 'top_right':\n # index1 to top right part of image\n x1, y1, x2, y2 = center_position_xy[0], \\\n max(center_position_xy[1] - img_shape_wh[1], 0), \\\n min(center_position_xy[0] + img_shape_wh[0],\n self.img_scale[1] * 2), \\\n center_position_xy[1]\n crop_coord = 0, img_shape_wh[1] - (y2 - y1), min(\n img_shape_wh[0], x2 - x1), img_shape_wh[1]\n\n elif loc == 'bottom_left':\n # index2 to bottom left part of image\n x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \\\n center_position_xy[1], \\\n center_position_xy[0], \\\n min(self.img_scale[0] * 2, center_position_xy[1] +\n img_shape_wh[1])\n crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min(\n y2 - y1, img_shape_wh[1])\n\n else:\n # index3 to bottom right part of image\n x1, y1, x2, y2 = center_position_xy[0], \\\n center_position_xy[1], \\\n min(center_position_xy[0] + img_shape_wh[0],\n self.img_scale[1] * 2), \\\n min(self.img_scale[0] * 2, center_position_xy[1] +\n img_shape_wh[1])\n crop_coord = 0, 0, min(img_shape_wh[0],\n x2 - x1), min(y2 - y1, img_shape_wh[1])\n\n paste_coord = x1, y1, x2, y2\n return paste_coord, crop_coord\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 724, "n_words": 201, "vocab_size": 71, "complexity": 4, "nloc": 36, "token_counts": 406, "n_ast_nodes": 562, "n_identifiers": 14, "random_cut": "def _mosaic_combine(self, loc, center_position_xy, img_shape_wh):\n \n assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n if loc == 'top_left':\n # index0 to top left part of image\n x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \\\n max(center_position_xy[1] - img_shape_wh[1], 0), \\\n center_position_xy[0], \\\n center_position_xy[1]\n crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - (\n y2 - y1), img_shape_wh[0], img_shape_wh[1]\n\n elif loc == 'top_right':\n ", "d_id": 70422, "documentation": { "docstring": "Calculate global coordinate of mosaic image and local coordinate of\n cropped sub-image.\n\n Args:\n 
loc (str): Index for the sub-image, loc in ('top_left',\n 'top_right', 'bottom_left', 'bottom_right').\n center_position_xy (Sequence[float]): Mixing center for 4 images,\n (x, y).\n img_shape_wh (Sequence[int]): Width and height of sub-image\n\n Returns:\n tuple[tuple[float]]: Corresponding coordinate of pasting and\n cropping\n - paste_coord (tuple): paste corner coordinate in mosaic image.\n - crop_coord (tuple): crop corner coordinate in mosaic image.\n ", "n_words": 67, "vocab_size": 48, "n_whitespaces": 212, "language": "en" } }, { "id": 196357, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/common.py", "file_name": "common.py", "fun_name": "permute", "commit_message": "Moved imports to higher level", "code": "def permute(self, perm, orientation='rows', direction='forward'):\n r\n from sympy.combinatorics import Permutation\n\n # allow british variants and `columns`\n if direction == 'forwards':\n direction = 'forward'\n if direction == 'backwards':\n direction = 'backward'\n if orientation == 'columns':\n orientation = 'cols'\n\n if direction not in ('forward', 'backward'):\n raise TypeError(\"direction='{}' is an invalid kwarg. \"\n \"Try 'forward' or 'backward'\".format(direction))\n if orientation not in ('rows', 'cols'):\n raise TypeError(\"orientation='{}' is an invalid kwarg. \"\n \"Try 'rows' or 'cols'\".format(orientation))\n\n if not isinstance(perm, (Permutation, Iterable)):\n raise ValueError(\n \"{} must be a list, a list of lists, \"\n \"or a SymPy permutation object.\".format(perm))\n\n # ensure all swaps are in range\n max_index = self.rows if orientation == 'rows' else self.cols\n if not all(0 <= t <= max_index for t in flatten(list(perm))):\n raise IndexError(\"`swap` indices out of range.\")\n\n if perm and not isinstance(perm, Permutation) and \\\n isinstance(perm[0], Iterable):\n if direction == 'forward':\n perm = list(reversed(perm))\n perm = Permutation(perm, size=max_index+1)\n else:\n perm = Permutation(perm, size=max_index+1)\n\n if orientation == 'rows':\n return self._eval_permute_rows(perm)\n if orientation == 'cols':\n return self._eval_permute_cols(perm)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 517, "n_words": 164, "vocab_size": 95, "complexity": 16, "nloc": 128, "token_counts": 238, "n_ast_nodes": 413, "n_identifiers": 25, "random_cut": "def permute(self, perm, orientation='rows', direction='forward'):\n r\n from sympy.combinatorics import Permutation\n\n # allow british variants and `columns`\n if direction == 'forwards':\n direction = 'forward'\n if direction == 'backwards':\n direction = 'backward'\n if orientation == 'columns':\n orientation = 'cols'\n\n if direction not in ('forward', 'backward'):\n raise TypeError(\"direction='{}' is an invalid kwarg. \"\n \"Try 'forward' or 'backward'\".format(direction))\n if orientation not in ('rows', 'cols'):\n raise TypeError(\"orientation='{}' is an invalid kwarg. 
\"\n \"Try 'rows' or 'cols'\".format(orientation))\n\n if not isinstance(perm, (Permutation, Iterable)):\n raise ValueError(\n \"{} must be a list, a list of lists, \"\n \"or a SymPy permutation object.\".format(perm))\n\n # ensure all swaps are in range\n max_index = self.rows if orientation == 'rows' else self.cols\n if not all(0 <= t <= max_index for t in flatten(list(perm))):\n raise IndexError(\"`swap` indices out of range.\")\n\n if perm and not isinstance(perm, Permutation) and \\\n isinstance(perm[0], Iterable):\n if direction == 'forward':\n perm = list(reversed(perm))\n perm = Permutation(perm, size=max_index+1)\n else:\n perm = Permutation(perm, size=max_index+1)\n\n if orientation == 'rows':\n return self._eval_permute_rows(perm)\n if orientation == 'cols':\n return self._eval_permute_cols(perm)\n", "d_id": 47857, "documentation": { "docstring": "Permute the rows or columns of a matrix by the given list of\n swaps.\n\n Parameters\n ==========\n\n perm : Permutation, list, or list of lists\n A representation for the permutation.\n\n If it is ``Permutation``, it is used directly with some\n resizing with respect to the matrix size.\n\n If it is specified as list of lists,\n (e.g., ``[[0, 1], [0, 2]]``), then the permutation is formed\n from applying the product of cycles. The direction how the\n cyclic product is applied is described in below.\n\n If it is specified as a list, the list should represent\n an array form of a permutation. (e.g., ``[1, 2, 0]``) which\n would would form the swapping function\n `0 \\mapsto 1, 1 \\mapsto 2, 2\\mapsto 0`.\n\n orientation : 'rows', 'cols'\n A flag to control whether to permute the rows or the columns\n\n direction : 'forward', 'backward'\n A flag to control whether to apply the permutations from\n the start of the list first, or from the back of the list\n first.\n\n For example, if the permutation specification is\n ``[[0, 1], [0, 2]]``,\n\n If the flag is set to ``'forward'``, the cycle would be\n formed as `0 \\mapsto 2, 2 \\mapsto 1, 1 \\mapsto 0`.\n\n If the flag is set to ``'backward'``, the cycle would be\n formed as `0 \\mapsto 1, 1 \\mapsto 2, 2 \\mapsto 0`.\n\n If the argument ``perm`` is not in a form of list of lists,\n this flag takes no effect.\n\n Examples\n ========\n\n >>> from sympy import eye\n >>> M = eye(3)\n >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward')\n Matrix([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]])\n\n >>> from sympy import eye\n >>> M = eye(3)\n >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='backward')\n Matrix([\n [0, 1, 0],\n [0, 0, 1],\n [1, 0, 0]])\n\n Notes\n =====\n\n If a bijective function\n `\\sigma : \\mathbb{N}_0 \\rightarrow \\mathbb{N}_0` denotes the\n permutation.\n\n If the matrix `A` is the matrix to permute, represented as\n a horizontal or a vertical stack of vectors:\n\n .. math::\n A =\n \\begin{bmatrix}\n a_0 \\\\ a_1 \\\\ \\vdots \\\\ a_{n-1}\n \\end{bmatrix} =\n \\begin{bmatrix}\n \\alpha_0 & \\alpha_1 & \\cdots & \\alpha_{n-1}\n \\end{bmatrix}\n\n If the matrix `B` is the result, the permutation of matrix rows\n is defined as:\n\n .. math::\n B := \\begin{bmatrix}\n a_{\\sigma(0)} \\\\ a_{\\sigma(1)} \\\\ \\vdots \\\\ a_{\\sigma(n-1)}\n \\end{bmatrix}\n\n And the permutation of matrix columns is defined as:\n\n .. 
math::\n B := \\begin{bmatrix}\n \\alpha_{\\sigma(0)} & \\alpha_{\\sigma(1)} &\n \\cdots & \\alpha_{\\sigma(n-1)}\n \\end{bmatrix}\n ", "n_words": 395, "vocab_size": 170, "n_whitespaces": 1054, "language": "en" } }, { "id": 42082, "commit_id": "949dec3666ab12a366d2fc05ef18d6e90625b5fa", "repo": "seaborn", "path": "seaborn/axisgrid.py", "file_name": "axisgrid.py", "fun_name": "apply", "commit_message": "Add apply and pipe methods to Grid objects for fluent customization (#2928)\n\n* Return self from tight_layout and refline\r\n\r\n* Add apply and pipe methods to FacetGrid for fluent customization\r\n\r\n* Move apply/pipe down to base class so JointGrid/PaiGrid get them too\r\n\r\n* Tweak docstrings", "code": "def apply(self, func, *args, **kwargs):\n \n func(self, *args, **kwargs)\n return self\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 41, "n_identifiers": 5, "random_cut": "def apply(self, func, *args, **kwargs):\n \n ", "d_id": 7482, "documentation": { "docstring": "\n Pass the grid to a user-supplied function and return self.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` is ignored; this method returns self.\n See the `pipe` method if you want the return value.\n\n Added in v0.12.0.\n\n ", "n_words": 53, "vocab_size": 43, "n_whitespaces": 103, "language": "en" } }, { "id": 81766, "commit_id": "33c0fb79d66f56374d7c042ba79887faa85e2885", "repo": "awx", "path": "awx/main/utils/common.py", "file_name": "common.py", "fun_name": "copy_m2m_relationships", "commit_message": "JT param everything (#12646)\n\n* Making almost all fields promptable on job templates and config models\r\n* Adding EE, IG and label access checks\r\n* Changing jobs preferred instance group function to handle the new IG cache field\r\n* Adding new ask fields to job template modules\r\n* Address unit/functional tests\r\n* Adding migration file", "code": "def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):\n \n for field_name in fields:\n if hasattr(obj1, field_name):\n try:\n field_obj = obj1._meta.get_field(field_name)\n except FieldDoesNotExist:\n continue\n if isinstance(field_obj, ManyToManyField):\n # Many to Many can be specified as field_name\n src_field_value = getattr(obj1, field_name)\n if kwargs and field_name in kwargs:\n override_field_val = kwargs[field_name]\n # TODO: Should we spike this our or just put the for loop inside the next if and make everything respect order?\n if field_name == 'instance_groups':\n # instance_groups are a list but we need to preserve the order\n for ig_id in override_field_val:\n getattr(obj2, field_name).add(ig_id)\n continue\n if isinstance(override_field_val, (set, list, QuerySet)):\n getattr(obj2, field_name).add(*override_field_val)\n continue\n if override_field_val.__class__.__name__ == 'ManyRelatedManager':\n src_field_value = override_field_val\n dest_field = getattr(obj2, field_name)\n dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 521, "n_words": 110, "vocab_size": 77, "complexity": 11, "nloc": 22, "token_counts": 164, "n_ast_nodes": 263, "n_identifiers": 27, "random_cut": "def 
copy_m2m_relationships(obj1, obj2, fields, kwargs=None):\n \n for field_name in fields:\n if hasattr(obj1, field_name):\n try:\n field_obj = obj1._meta.get_field(field_name)\n except FieldDoesNotExist:\n continue\n if isinstance(field_obj, ManyToManyField):\n # Many to Many can be specified as field_name\n src_field_value = getattr(obj1, field_name)\n if kwargs and field_name in kwargs:\n override_field_val = kwargs[field_name]\n # TODO: Should we spike this our or just put the for loop inside the next if and make everything respect order?\n if field_name == 'instance_groups':\n # instance_groups are a list but we need to preserve the order\n for ig_id in override_field_val:\n getattr(obj2, field_name).add(ig_id)\n continue\n", "d_id": 17256, "documentation": { "docstring": "\n In-place operation.\n Given two saved objects, copies related objects from obj1\n to obj2 to field of same name, if field occurs in `fields`\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 36, "language": "en" } }, { "id": 179238, "commit_id": "cc0cff893f9d7d472788adc2510c123967b384fe", "repo": "gradio", "path": "gradio/external.py", "file_name": "external.py", "fun_name": "load_from_pipeline", "commit_message": "Format The Codebase\n- black formatting\n- isort formatting", "code": "def load_from_pipeline(pipeline):\n \n try:\n import transformers\n except ImportError:\n raise ImportError(\n \"transformers not installed. Please try `pip install transformers`\"\n )\n if not isinstance(pipeline, transformers.Pipeline):\n raise ValueError(\"pipeline must be a transformers.Pipeline\")\n\n # Handle the different pipelines. The has_attr() checks to make sure the pipeline exists in the\n # version of the transformers library that the user has installed.\n if hasattr(transformers, \"AudioClassificationPipeline\") and isinstance(\n pipeline, transformers.AudioClassificationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Audio(label=\"Input\", source=\"microphone\", type=\"filepath\"),\n \"outputs\": outputs.Label(label=\"Class\", type=\"confidences\"),\n \"preprocess\": lambda i: {\"inputs\": i},\n \"postprocess\": lambda r: {i[\"label\"].split(\", \")[0]: i[\"score\"] for i in r},\n }\n elif hasattr(transformers, \"AutomaticSpeechRecognitionPipeline\") and isinstance(\n pipeline, transformers.AutomaticSpeechRecognitionPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Audio(label=\"Input\", source=\"microphone\", type=\"filepath\"),\n \"outputs\": outputs.Textbox(label=\"Output\"),\n \"preprocess\": lambda i: {\"inputs\": i},\n \"postprocess\": lambda r: r[\"text\"],\n }\n elif hasattr(transformers, \"FeatureExtractionPipeline\") and isinstance(\n pipeline, transformers.FeatureExtractionPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\"),\n \"outputs\": outputs.Dataframe(label=\"Output\"),\n \"preprocess\": lambda x: {\"inputs\": x},\n \"postprocess\": lambda r: r[0],\n }\n elif hasattr(transformers, \"FillMaskPipeline\") and isinstance(\n pipeline, transformers.FillMaskPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\"),\n \"outputs\": outputs.Label(label=\"Classification\", type=\"confidences\"),\n \"preprocess\": lambda x: {\"inputs\": x},\n \"postprocess\": lambda r: {i[\"token_str\"]: i[\"score\"] for i in r},\n }\n elif hasattr(transformers, \"ImageClassificationPipeline\") and isinstance(\n pipeline, transformers.ImageClassificationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Image(label=\"Input Image\", type=\"filepath\"),\n 
\"outputs\": outputs.Label(label=\"Classification\", type=\"confidences\"),\n \"preprocess\": lambda i: {\"images\": i},\n \"postprocess\": lambda r: {i[\"label\"].split(\", \")[0]: i[\"score\"] for i in r},\n }\n elif hasattr(transformers, \"QuestionAnsweringPipeline\") and isinstance(\n pipeline, transformers.QuestionAnsweringPipeline\n ):\n pipeline_info = {\n \"inputs\": [\n inputs.Textbox(label=\"Context\", lines=7),\n inputs.Textbox(label=\"Question\"),\n ],\n \"outputs\": [outputs.Textbox(label=\"Answer\"), outputs.Label(label=\"Score\")],\n \"preprocess\": lambda c, q: {\"context\": c, \"question\": q},\n \"postprocess\": lambda r: (r[\"answer\"], r[\"score\"]),\n }\n elif hasattr(transformers, \"SummarizationPipeline\") and isinstance(\n pipeline, transformers.SummarizationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\", lines=7),\n \"outputs\": outputs.Textbox(label=\"Summary\"),\n \"preprocess\": lambda x: {\"inputs\": x},\n \"postprocess\": lambda r: r[0][\"summary_text\"],\n }\n elif hasattr(transformers, \"TextClassificationPipeline\") and isinstance(\n pipeline, transformers.TextClassificationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\"),\n \"outputs\": outputs.Label(label=\"Classification\", type=\"confidences\"),\n \"preprocess\": lambda x: [x],\n \"postprocess\": lambda r: {i[\"label\"].split(\", \")[0]: i[\"score\"] for i in r},\n }\n elif hasattr(transformers, \"TextGenerationPipeline\") and isinstance(\n pipeline, transformers.TextGenerationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\"),\n \"outputs\": outputs.Textbox(label=\"Output\"),\n \"preprocess\": lambda x: {\"text_inputs\": x},\n \"postprocess\": lambda r: r[0][\"generated_text\"],\n }\n elif hasattr(transformers, \"TranslationPipeline\") and isinstance(\n pipeline, transformers.TranslationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\"),\n \"outputs\": outputs.Textbox(label=\"Translation\"),\n \"preprocess\": lambda x: [x],\n \"postprocess\": lambda r: r[0][\"translation_text\"],\n }\n elif hasattr(transformers, \"Text2TextGenerationPipeline\") and isinstance(\n pipeline, transformers.Text2TextGenerationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\"),\n \"outputs\": outputs.Textbox(label=\"Generated Text\"),\n \"preprocess\": lambda x: [x],\n \"postprocess\": lambda r: r[0][\"generated_text\"],\n }\n elif hasattr(transformers, \"ZeroShotClassificationPipeline\") and isinstance(\n pipeline, transformers.ZeroShotClassificationPipeline\n ):\n pipeline_info = {\n \"inputs\": [\n inputs.Textbox(label=\"Input\"),\n inputs.Textbox(label=\"Possible class names (\" \"comma-separated)\"),\n inputs.Checkbox(label=\"Allow multiple true classes\"),\n ],\n \"outputs\": outputs.Label(label=\"Classification\", type=\"confidences\"),\n \"preprocess\": lambda i, c, m: {\n \"sequences\": i,\n \"candidate_labels\": c,\n \"multi_label\": m,\n },\n \"postprocess\": lambda r: {\n r[\"labels\"][i]: r[\"scores\"][i] for i in range(len(r[\"labels\"]))\n },\n }\n else:\n raise ValueError(\"Unsupported pipeline type: {}\".format(type(pipeline)))\n\n # define the function that will be called by the Interface", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1541, "n_words": 440, "vocab_size": 165, "complexity": 32, "nloc": 139, "token_counts": 1075, "n_ast_nodes": 1781, 
"n_identifiers": 43, "random_cut": "def load_from_pipeline(pipeline):\n \n try:\n import transformers\n except ImportError:\n raise ImportError(\n \"transformers not installed. Please try `pip install transformers`\"\n )\n if not isinstance(pipeline, transformers.Pipeline):\n raise ValueError(\"pipeline must be a transformers.Pipeline\")\n\n # Handle the different pipelines. The has_attr() checks to make sure the pipeline exists in the\n # version of the transformers library that the user has installed.\n if hasattr(transformers, \"AudioClassificationPipeline\") and isinstance(\n pipeline, transformers.AudioClassificationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Audio(label=\"Input\", source=\"microphone\", type=\"filepath\"),\n \"outputs\": outputs.Label(label=\"Class\", type=\"confidences\"),\n \"preprocess\": lambda i: {\"inputs\": i},\n \"postprocess\": lambda r: {i[\"label\"].split(\", \")[0]: i[\"score\"] for i in r},\n }\n elif hasattr(transformers, \"AutomaticSpeechRecognitionPipeline\") and isinstance(\n pipeline, transformers.AutomaticSpeechRecognitionPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Audio(label=\"Input\", source=\"microphone\", type=\"filepath\"),\n \"outputs\": outputs.Textbox(label=\"Output\"),\n \"preprocess\": lambda i: {\"inputs\": i},\n \"postprocess\": lambda r: r[\"text\"],\n }\n elif hasattr(transformers, \"FeatureExtractionPipeline\") and isinstance(\n pipeline, transformers.FeatureExtractionPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\"),\n \"outputs\": outputs.Dataframe(label=\"Output\"),\n \"preprocess\": lambda x: {\"inputs\": x},\n \"postprocess\": lambda r: r[0],\n }\n elif hasattr(transformers, \"FillMaskPipeline\") and isinstance(\n pipeline, transformers.FillMaskPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\"),\n \"outputs\": outputs.Label(label=\"Classification\", type=\"confidences\"),\n \"preprocess\": lambda x: {\"inputs\": x},\n \"postprocess\": lambda r: {i[\"token_str\"]: i[\"score\"] for i in r},\n }\n elif hasattr(transformers, \"ImageClassificationPipeline\") and isinstance(\n pipeline, transformers.ImageClassificationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Image(label=\"Input Image\", type=\"filepath\"),\n \"outputs\": outputs.Label(label=\"Classification\", type=\"confidences\"),\n \"preprocess\": lambda i: {\"images\": i},\n \"postprocess\": lambda r: {i[\"label\"].split(\", \")[0]: i[\"score\"] for i in r},\n }\n elif hasattr(transformers, \"QuestionAnsweringPipeline\") and isinstance(\n pipeline, transformers.QuestionAnsweringPipeline\n ):\n pipeline_info = {\n \"inputs\": [\n inputs.Textbox(label=\"Context\", lines=7),\n inputs.Textbox(label=\"Question\"),\n ],\n \"outputs\": [outputs.Textbox(label=\"Answer\"), outputs.Label(label=\"Score\")],\n \"preprocess\": lambda c, q: {\"context\": c, \"question\": q},\n \"postprocess\": lambda r: (r[\"answer\"], r[\"score\"]),\n }\n elif hasattr(transformers, \"SummarizationPipeline\") and isinstance(\n pipeline, transformers.SummarizationPipeline\n ):\n pipeline_info = {\n \"inputs\": inputs.Textbox(label=\"Input\", lines=7),\n \"outputs\": outputs.Textbox(label=\"Summary\"),\n \"preprocess\": lambda x: {\"inputs\": x},\n \"postprocess\": lambda r: r[0][\"summary_text\"],\n }\n elif hasattr(transformers, \"TextClassificationPipeline\") and isinstance(\n pipeline, transformers.TextClassificationPipeline\n ):\n ", "d_id": 42923, "documentation": { "docstring": "\n Gets the appropriate Interface 
kwargs for a given Hugging Face transformers.Pipeline.\n pipeline (transformers.Pipeline): the transformers.Pipeline from which to create an interface\n Returns:\n (dict): a dictionary of kwargs that can be used to construct an Interface object\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 52, "language": "en" } }, { "id": 819, "commit_id": "56137bacda6fea5a0053c65eb6fd88688f5298cc", "repo": "PySyft", "path": "packages/syft/src/syft/core/adp/vectorized_publish.py", "file_name": "vectorized_publish.py", "fun_name": "calculate_bounds_for_mechanism", "commit_message": "Implemented working vectorized_publish method into codebase\n\nTook 26 minutes", "code": "def calculate_bounds_for_mechanism(value_array, min_val_array, max_val_array):\n \n \n\n # TODO: Double check whether the iDPGaussianMechanism class squares its squared_l2_norm values!!\n worst_case_l2_norm = np.sqrt(np.sum(np.square(max_val_array - min_val_array))) * np.ones_like(value_array)\n l2_norm = np.sqrt(np.sum(np.square(value_array))) * np.ones_like(value_array)\n # print(l2_norm.shape, worst_case_l2_norm.shape)\n # print(l2_norm.shape)\n return l2_norm, worst_case_l2_norm\n\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 61, "n_words": 36, "vocab_size": 30, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 113, "n_identifiers": 11, "random_cut": "def calculate_bounds_for_mechanism(value_array, min_val_array, max_val_array):\n \n \n\n # TODO: Double check whether the iDPGaussianMechanism class squares its squared_l2_no", "d_id": 126, "documentation": { "docstring": "Calculates the squared L2 norm values needed to create a Mechanism, and calculate privacy budget + spend If you calculate the privacy budget spend with the worst case bound, you can show this number to the D.S.\n If you calculate it with the regular value (the value computed below when public_only = False, you cannot show the \n privacy budget to the DS because this violates privacy.\n ", "n_words": 66, "vocab_size": 43, "n_whitespaces": 76, "language": "en" } }, { "id": 65572, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/buying/report/procurement_tracker/procurement_tracker.py", "file_name": "procurement_tracker.py", "fun_name": "get_po_entries", "commit_message": "style: format code with black", "code": "def get_po_entries(conditions):\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tconditions=conditions\n\t\t),\n\t\tas_dict=1,\n\t) # nosec\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 5, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 34, "token_counts": 26, "n_ast_nodes": 43, "n_identifiers": 7, "random_cut": "def get_po_entries(conditions):\n\tret", "d_id": 13947, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\tchild.name,\n\t\t\tchild.parent,\n\t\t\tchild.cost_center,\n\t\t\tchild.project,\n\t\t\tchild.warehouse,\n\t\t\tchild.material_request,\n\t\t\tchild.material_request_item,\n\t\t\tchild.item_code,\n\t\t\tchild.stock_uom,\n\t\t\tchild.qty,\n\t\t\tchild.amount,\n\t\t\tchild.base_amount,\n\t\t\tchild.schedule_date,\n\t\t\tparent.transaction_date,\n\t\t\tparent.supplier,\n\t\t\tparent.status,\n\t\t\tparent.owner\n\t\tFROM `tabPurchase Order` parent, `tabPurchase Order Item` child\n\t\tWHERE\n\t\t\tparent.docstatus = 1\n\t\t\tAND parent.name = child.parent\n\t\t\tAND parent.status not 
in (\"Closed\",\"Completed\",\"Cancelled\")\n\t\t\t{conditions}\n\t\tGROUP BY\n\t\t\tparent.name, child.item_code\n\t\t", "n_words": 44, "vocab_size": 41, "n_whitespaces": 19, "language": "en" } }, { "id": 206849, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/views/generic/dates.py", "file_name": "dates.py", "fun_name": "get_year", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_year(self):\n \n year = self.year\n if year is None:\n try:\n year = self.kwargs[\"year\"]\n except KeyError:\n try:\n year = self.request.GET[\"year\"]\n except KeyError:\n raise Http404(_(\"No year specified\"))\n return year\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 160, "n_words": 27, "vocab_size": 17, "complexity": 4, "nloc": 11, "token_counts": 54, "n_ast_nodes": 96, "n_identifiers": 9, "random_cut": "def get_year(self):\n \n year = self.year\n if year is None:\n try:\n year = self.kwargs[\"year\"]\n ", "d_id": 51751, "documentation": { "docstring": "Return the year for which this view should display data.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 319934, "commit_id": "c8e838e3a0828e82efac1fd93ebb9aba6a000ff8", "repo": "paperless-ngx", "path": "src/documents/tests/test_management_retagger.py", "file_name": "test_management_retagger.py", "fun_name": "test_overwrite_storage_path", "commit_message": "Adds the storage paths to the re-tagger command", "code": "def test_overwrite_storage_path(self):\n \n call_command(\"document_retagger\", \"--storage_path\", \"--overwrite\")\n d_first, d_second, d_unrelated, d_auto = self.get_updated_docs()\n\n self.assertEqual(d_first.storage_path, self.sp2)\n self.assertEqual(d_auto.storage_path, self.sp1)\n self.assertIsNone(d_second.storage_path)\n self.assertEqual(d_unrelated.storage_path, self.sp2)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 67, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 71, "n_ast_nodes": 116, "n_identifiers": 13, "random_cut": "def test_overwrite_storage_path(self):\n \n call_command(\"document_retagger\", \"--storage_path\", \"--overwrite\")\n d_first, d_second, d_unrelated, d_auto = self.get_updated_docs()\n\n self.assertEqual(d_first.storage_path, self.sp2)\n self.assertEqual(d_auto.storage_path, self.sp1)\n self.assertIsNone(d_second.storage_path)\n self.assertEqual(d_unrelated.storage_path, self.sp2)\n", "d_id": 117022, "documentation": { "docstring": "\n GIVEN:\n - 2 storage paths with documents which match them\n - 1 document which matches but has a storage path\n WHEN:\n - document retagger is called with overwrite\n THEN:\n - Matching document's storage paths updated\n - Non-matching documents have no storage path\n - Existing storage patch overwritten\n ", "n_words": 47, "vocab_size": 32, "n_whitespaces": 142, "language": "en" } }, { "id": 153820, "commit_id": "57e29bc5d82348006c5170ef9ac0a9eedcd9acf9", "repo": "modin", "path": "modin/core/storage_formats/base/query_compiler.py", "file_name": "query_compiler.py", "fun_name": "invert", "commit_message": "REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514)\n\nCo-authored-by: Rehan Sohail Durrani \r\nSigned-off-by: jeffreykennethli ", "code": "def invert(self):\n \n return 
DataFrameDefault.register(pandas.DataFrame.__invert__)(self)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 7, "random_cut": "def invert(self):\n \n return DataFrameDefault.regis", "d_id": 35635, "documentation": { "docstring": "\n Apply bitwise inversion for each element of the QueryCompiler.\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing bitwise inversion for each value.\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 67, "language": "en" } }, { "id": 244739, "commit_id": "96aa909c19dbe753852ac6dba13bbbc35329b99f", "repo": "mmdetection", "path": "tests/test_models/test_dense_heads/test_centernet_head.py", "file_name": "test_centernet_head.py", "fun_name": "test_center_head_loss", "commit_message": "[Refactor] CenterNet", "code": "def test_center_head_loss(self):\n \n s = 256\n img_metas = [{'batch_input_shape': (s, s, 3)}]\n test_cfg = dict(topK=100, max_per_img=100)\n centernet_head = CenterNetHead(\n num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)\n\n feat = [torch.rand(1, 1, s, s)]\n center_out, wh_out, offset_out = centernet_head.forward(feat)\n # Test that empty ground truth encourages the network to\n # predict background\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.empty((0, 4))\n gt_instances.labels = torch.LongTensor([])\n empty_gt_losses = centernet_head.loss(center_out, wh_out, offset_out,\n [gt_instances], img_metas)\n loss_center = empty_gt_losses['loss_center_heatmap']\n loss_wh = empty_gt_losses['loss_wh']\n loss_offset = empty_gt_losses['loss_offset']\n assert loss_center.item() > 0, 'loss_center should be non-zero'\n assert loss_wh.item() == 0, (\n 'there should be no loss_wh when there are no true boxes')\n assert loss_offset.item() == 0, (\n 'there should be no loss_offset when there are no true boxes')\n\n # When truth is non-empty then both cls and box loss\n # should be nonzero for random inputs\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.Tensor(\n [[23.6667, 23.8757, 238.6326, 151.8874]])\n gt_instances.labels = torch.LongTensor([2])\n one_gt_losses = centernet_head.loss(center_out, wh_out, offset_out,\n [gt_instances], img_metas)\n loss_center = one_gt_losses['loss_center_heatmap']\n loss_wh = one_gt_losses['loss_wh']\n loss_offset = one_gt_losses['loss_offset']\n assert loss_center.item() > 0, 'loss_center should be non-zero'\n assert loss_wh.item() > 0, 'loss_wh should be non-zero'\n assert loss_offset.item() > 0, 'loss_offset should be non-zero'\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 532, "n_words": 183, "vocab_size": 101, "complexity": 1, "nloc": 33, "token_counts": 295, "n_ast_nodes": 457, "n_identifiers": 34, "random_cut": "def test_center_head_loss(self):\n \n s = 256\n img_metas = [{'batch_input_shape': (s, s, 3)}]\n test_cfg = dict(topK=100, max_per_img=100)\n centernet_head = CenterNetHead(\n num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)\n\n feat = [torch.rand(1, 1, s, s)]\n center_out, wh_out, offset_out = centernet_head.forward(feat)\n # Test that empty ground truth encourages the network to\n # predict background\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.empty((0, 4))\n gt_instances.labels = 
torch.LongTensor([])\n empty_gt_losses = centernet_head.loss(center_out, wh_out, offset_out,\n [gt_instances], img_metas)\n loss_center = empty_gt_losses['loss_center_heatmap']\n loss_wh = empty_gt_losses['loss_wh']\n loss_offset = empty_gt_losses['loss_offset']\n assert loss_center.item() > 0, 'loss_center should be non-zero'\n assert loss_wh.item() == 0, (\n 'there should be no loss_wh when there are no true", "d_id": 70506, "documentation": { "docstring": "Tests center head loss when truth is empty and non-empty.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 218181, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/_common.py", "file_name": "_common.py", "fun_name": "from_package", "commit_message": "add python 3.10.4 for windows", "code": "def from_package(package):\n \n spec = wrap_spec(package)\n reader = spec.loader.get_resource_reader(spec.name)\n return reader.files()\n\n\n@contextlib.contextmanager", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "@contextlib.contextmanager", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 22, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 59, "n_identifiers": 11, "random_cut": "def from_package(package):\n \n spec = wrap_spec(package)\n reader = spec.loader.get_resource_reader(spec.name)\n return reader.files()\n\n\n@contextlib.contex", "d_id": 55183, "documentation": { "docstring": "\n Return a Traversable object for the given package.\n\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 26683, "commit_id": "0881beec1ac02dfa97525c5173687defb356d85c", "repo": "saleor", "path": "saleor/checkout/complete_checkout.py", "file_name": "complete_checkout.py", "fun_name": "_is_refund_ongoing", "commit_message": "Fix payment flow (#9504)\n\n* Do not capture payment again when it should be refunded or voided\r\n\r\n* Do not create order when then is ongoing refund", "code": "def _is_refund_ongoing(payment):\n \n return (\n payment.transactions.filter(\n kind=TransactionKind.REFUND_ONGOING, is_success=True\n ).exists()\n if payment\n else False\n )\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 61, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 8, "token_counts": 33, "n_ast_nodes": 53, "n_identifiers": 9, "random_cut": "def _is_refund_ongoing(payment):\n \n return (\n payment.transactions.filter(\n kind=TransactionKind.REFUND_ONGOING, is_s", "d_id": 5042, "documentation": { "docstring": "Return True if refund is ongoing for given payment.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 216011, "commit_id": "52c922760e8447f0c9efd23b12481ba1a7509dcd", "repo": "salt", "path": "salt/states/win_wua.py", "file_name": "win_wua.py", "fun_name": "installed", "commit_message": "Remove 40 character limit to update Title", "code": "def installed(name, updates=None):\n \n if isinstance(updates, str):\n updates = [updates]\n\n if not updates:\n updates = name\n\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n wua = salt.utils.win_update.WindowsUpdateAgent()\n\n # Search for updates\n install_list = wua.search(updates)\n\n # No updates found\n if install_list.count() == 0:\n ret[\"comment\"] = \"No updates found\"\n return ret\n\n # List of updates to download\n download = 
salt.utils.win_update.Updates()\n for item in install_list.updates:\n if not salt.utils.data.is_true(item.IsDownloaded):\n download.updates.Add(item)\n\n # List of updates to install\n install = salt.utils.win_update.Updates()\n installed_updates = []\n for item in install_list.updates:\n if not salt.utils.data.is_true(item.IsInstalled):\n install.updates.Add(item)\n else:\n installed_updates.extend(\"KB\" + kb for kb in item.KBArticleIDs)\n\n if install.count() == 0:\n ret[\"comment\"] = \"Updates already installed: \"\n ret[\"comment\"] += \"\\n - \".join(installed_updates)\n return ret\n\n # Return comment of changes if test.\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n ret[\"comment\"] = \"Updates will be installed:\"\n for update in install.updates:\n ret[\"comment\"] += \"\\n\"\n ret[\"comment\"] += \": \".join([update.Identity.UpdateID, update.Title])\n return ret\n\n # Download updates\n wua.download(download)\n\n # Install updates\n wua.install(install)\n\n # Refresh windows update info\n wua.refresh()\n post_info = wua.updates().list()\n\n # Verify the installation\n for item in install.list():\n if not salt.utils.data.is_true(post_info[item][\"Installed\"]):\n ret[\"changes\"][\"failed\"] = {\n item: {\n \"Title\": post_info[item][\"Title\"],\n \"KBs\": post_info[item][\"KBs\"],\n }\n }\n ret[\"result\"] = False\n else:\n ret[\"changes\"][\"installed\"] = {\n item: {\n \"Title\": post_info[item][\"Title\"],\n \"NeedsReboot\": post_info[item][\"NeedsReboot\"],\n \"KBs\": post_info[item][\"KBs\"],\n }\n }\n\n if ret[\"changes\"].get(\"failed\", False):\n ret[\"comment\"] = \"Updates failed\"\n else:\n ret[\"comment\"] = \"Updates installed successfully\"\n\n return ret\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 699, "n_words": 215, "vocab_size": 114, "complexity": 15, "nloc": 59, "token_counts": 441, "n_ast_nodes": 772, "n_identifiers": 37, "random_cut": "def installed(name, updates=None):\n \n if isinstance(updates, str):\n updates = [updates]\n\n if not updates:\n updates = name\n\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n wua = salt.utils.win_update.WindowsUpdateAgent()\n\n # Search for updates\n install_list = wua.search(updates)\n\n # No updates found\n if install_list.count() == 0:\n ret[\"comment\"] = \"No updates found\"\n return ret\n\n # List of updates to download\n download = salt.utils.win_update.Updates()\n for item in install_list.updates:\n if not salt.utils.data.is_true(item.IsDownloaded):\n download.updates.Add(item)\n\n # List of updates to install", "d_id": 54320, "documentation": { "docstring": "\n Ensure Microsoft Updates are installed. Updates will be downloaded if\n needed.\n\n Args:\n\n name (str):\n The identifier of a single update to install.\n\n updates (list):\n A list of identifiers for updates to be installed. Overrides\n ``name``. Default is None.\n\n .. note:: Identifiers can be the GUID, the KB number, or any part of the\n Title of the Microsoft update. GUIDs and KBs are the preferred method\n to ensure you're installing the correct update.\n\n .. warning:: Using a partial KB number or a partial Title could result in\n more than one update being installed.\n\n Returns:\n dict: A dictionary containing the results of the update\n\n CLI Example:\n\n .. 
code-block:: yaml\n\n # using a GUID\n install_update:\n wua.installed:\n - name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211\n\n # using a KB\n install_update:\n wua.installed:\n - name: KB3194343\n\n # using the full Title\n install_update:\n wua.installed:\n - name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343)\n\n # Install multiple updates\n install_updates:\n wua.installed:\n - updates:\n - KB3194343\n - 28cf1b09-2b1a-458c-9bd1-971d1b26b211\n ", "n_words": 161, "vocab_size": 101, "n_whitespaces": 423, "language": "en" } }, { "id": 178349, "commit_id": "6ed4d787519d7075d7ff492bc40a291bc12f088c", "repo": "Nuitka", "path": "nuitka/plugins/standard/KivyPlugin.py", "file_name": "KivyPlugin.py", "fun_name": "_getKivyInformation", "commit_message": "Plugins: Add DLL folders needed on Windows for Kivy plugin\n\n* Make DLL reporting code part of plugin base class.\n\n* Added new method to scan for DLLs in folders.", "code": "def _getKivyInformation(self):\n setup_codes = r\n info = self.queryRuntimeInformationMultiple(\n info_name=\"kivy_info\",\n setup_codes=setup_codes,\n values=(\n (\"libs_loaded\", \"kivy.core.image.libs_loaded\"),\n (\"window_impl\", \"kivy.core.window.window_impl\"),\n (\"label_libs\", \"kivy.core.text.label_libs\"),\n (\"sdl2_dep_bins\", \"sdl2_dep_bins\"),\n (\"glew_dep_bins\", \"glew_dep_bins\"),\n ),\n )\n\n if info is None:\n self.sysexit(\"Error, it seems Kivy is not installed.\")\n\n return info\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 200, "n_words": 36, "vocab_size": 32, "complexity": 2, "nloc": 32, "token_counts": 72, "n_ast_nodes": 125, "n_identifiers": 8, "random_cut": "def _getKivyInformation(self):\n setup_codes = r\n info = self.queryRuntimeInformationMultiple(\n info_name=\"kivy_info\",\n setup_codes=setup_codes,\n values=(\n (\"libs_loaded\", \"kivy.core.image.libs_loaded\"),\n (\"window_impl\", \"kivy.core.window.window_impl\"),\n (\"label_libs\", \"kivy.core.text.label_libs\"),\n (", "d_id": 42673, "documentation": { "docstring": "\nimport kivy.core.image\nimport kivy.core.text\n# Prevent Window from being created at compile time.\nkivy.core.core_select_lib=(lambda *args, **kwargs: None)\nimport kivy.core.window\n\n# Kivy has packages designed to provide these on Windows\ntry:\n from kivy_deps.sdl2 import dep_bins as sdl2_dep_bins\nexcept ImportError:\n sdl2_dep_bins = []\ntry:\n from kivy_deps.glew import dep_bins as glew_dep_bins\nexcept ImportError:\n glew_dep_bins = []\n", "n_words": 53, "vocab_size": 37, "n_whitespaces": 55, "language": "en" } }, { "id": 169019, "commit_id": "54347fe684e0f7844bf407b1fb958a5269646825", "repo": "pandas", "path": "pandas/core/generic.py", "file_name": "generic.py", "fun_name": "__iter__", "commit_message": "TYP: Autotyping (#48191)\n\n* annotate-magics\r\n\r\n* annotate-imprecise-magics\r\n\r\n* none-return\r\n\r\n* scalar-return\r\n\r\n* pyi files\r\n\r\n* ignore vendored file\r\n\r\n* manual changes\r\n\r\n* ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments)\r\n\r\n* run autotyping in pre-commit\r\n\r\n* remove final and expand safe (and add annotate-imprecise-magics)", "code": "def __iter__(self) -> Iterator:\n \n return iter(self._info_axis)\n\n # can we get a better explanation of this?", "url": 
"https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 10, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 5, "random_cut": "def __iter__(self) -> Iterator:\n \n return ", "d_id": 40375, "documentation": { "docstring": "\n Iterate over info axis.\n\n Returns\n -------\n iterator\n Info axis as iterator.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 58, "language": "en" } }, { "id": 104397, "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "cast", "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu 
<59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", "code": "def cast(self, target_schema, *args, **kwargs):\n \n table = table_cast(self.table, target_schema, *args, **kwargs)\n blocks = []\n for subtables in self.blocks:\n new_tables = []\n fields = list(target_schema)\n for subtable in subtables:\n subfields = []\n for name in subtable.column_names:\n subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))\n subschema = pa.schema(subfields)\n new_tables.append(subtable.cast(subschema, *args, **kwargs))\n blocks.append(new_tables)\n return ConcatenationTable(table, blocks)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 213, "n_words": 55, "vocab_size": 39, "complexity": 6, "nloc": 14, "token_counts": 134, "n_ast_nodes": 208, "n_identifiers": 26, "random_cut": "def cast(self, target_schema, *args, **kwargs):\n \n table = table_cast(self.table, target_schema, *args, **kwargs)\n blocks = []\n for subtables in self.blocks:\n new_tables = []\n fields = list(target_schema)\n for subtable in subtables:\n subfields = []\n for name in subtable.column_names:\n subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))\n ", "d_id": 21833, "documentation": { "docstring": "\n Cast table values to another schema\n\n Args:\n target_schema (:obj:`Schema`):\n Schema to cast to, the names and order of fields must match\n safe (:obj:`bool`, defaults to :obj:`True`):\n Check for overflows or other unsafe conversions\n\n Returns:\n :class:`datasets.table.Table`:\n ", "n_words": 35, "vocab_size": 33, "n_whitespaces": 127, "language": "en" } }, { "id": 173537, "commit_id": "e833848c6dda95dbcf17e84d935dcdb8cff6f47d", "repo": "magenta", "path": "magenta/models/coconet/lib_util.py", "file_name": "lib_util.py", "fun_name": "softmax", "commit_message": "Work around tensor2tensor/gym issues, fix pylint errors.\n\nPiperOrigin-RevId: 433019701", "code": "def softmax(p, axis=None, temperature=1):\n \n if axis is None:\n axis = p.ndim - 1\n if temperature == 0.:\n # NOTE: in case of multiple equal maxima, returns uniform distribution.\n p = p == np.max(p, axis=axis, keepdims=True)\n else:\n # oldp = p\n logp = np.log(p)\n logp /= temperature\n logp -= logp.max(axis=axis, keepdims=True)\n p = np.exp(logp)\n p /= p.sum(axis=axis, keepdims=True)\n if np.isnan(p).any():\n pdb.set_trace() # pylint: disable=forgotten-debug-statement\n return p\n\n", "url": "https://github.com/magenta/magenta.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 100, "n_words": 65, "vocab_size": 44, "complexity": 4, "nloc": 14, "token_counts": 120, "n_ast_nodes": 188, "n_identifiers": 16, "random_cut": "def softmax(p, axis=None, temperature=1):\n \n if axis is None:\n axis = p.ndim - 1\n if temperature == 0.:\n # NOTE: in case o", "d_id": 40850, "documentation": { "docstring": "Apply the softmax transform to an array of categorical distributions.\n\n Args:\n p: an array of categorical probability vectors, possibly unnormalized.\n axis: the axis that spans the categories (default: -1).\n temperature: if not 1, transform the distribution by dividing the log\n probabilities and renormalizing. Values greater than 1 increase entropy,\n values less than 1 decrease entropy. 
A value of 0 yields a deterministic\n distribution that chooses the mode.\n\n Returns:\n An array of categorical probability vectors, like `p` but tempered and\n normalized.\n ", "n_words": 80, "vocab_size": 59, "n_whitespaces": 119, "language": "en" } }, { "id": 8001, "commit_id": "c50997c2b27e7f7f59a96c0158f3737e22419ed8", "repo": "ludwig", "path": "ludwig/benchmarking/profiler.py", "file_name": "profiler.py", "fun_name": "_populate_static_information", "commit_message": "More precise resource usage tracking (#2363)\n\n* added `torch.profiler.record_function` decorator\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* export torch profiler metric draft/pseudocode\r\n\r\n* exporting cpu and cuda memory usage\r\n\r\n* exporting CPU and CUDA execution time\r\n\r\n* formatting\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* adding basic comments\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* removed `_str` entries in the exported JSONs\r\n\r\n* attempting to speed up result collection from kineto and function event lists\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* style improvements\r\n\r\n* speed improvements while calculating averages/summaries\r\n\r\n* style improvements\r\n\r\n* using variable defined in torch instead of hard coding it\r\n\r\n* specifying torch in the tracked metrics\r\n\r\n* added torch.profiler to ResrouceUsageTracker\r\n\r\n* combining torch.profiler metrics with ResourceUsageMetrics\r\n\r\n* handling multiple context mangers with exitstack\r\n\r\n* making it a decorator\r\n\r\n* flattening output dict\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* using logging instead of print\r\n\r\n* flake8 formatting\r\n\r\n* removed intermediary write/read to/from disk\r\n\r\n* cleaning after last change\r\n\r\n* adjusted number of args\r\n\r\n* replaced tag with code_block_tag\r\n\r\n* changed total_duration to total_execution_time\r\n\r\n* support nested use of the context manager and the decorator\r\n\r\n* remove torch.record_function decorator\r\n\r\n* adding LUDWIG_TAG to label torch profiler main events\r\n\r\n* using logging instead of print\r\n\r\n* style changes\r\n\r\n* preventing cases of empty list when code block execution is too quick\r\n\r\n* removing experimental code\r\n\r\n* fixed gpu tracking\r\n\r\n* style improvements\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* remove experimental code from trainer.py\r\n\r\n* style improvements\r\n\r\n* more accurate torch op cpu and cuda execution time.\r\n\r\n* flake8 fixes\r\n\r\n* rename to LudwigProfiler\r\n\r\n* updated docstrings\r\n\r\n* dont collect torch metrics when use_torch_profiler=False\r\n\r\n* test for LudwigProfiler\r\n\r\n* formatting improvements\r\n\r\n* update test to remove repetitive asserts\r\n\r\n* make the tag->os directory relationship more obvious\r\n\r\n* explaining what LUDWIG_TAG is used for\r\n\r\n* removing unncessary asserts\r\n\r\n* added explanation for `LUDWIG_TAG`\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* formatting fixes\r\n\r\n* 
removing `get_python_packages_and_versions`\r\n\r\nCo-authored-by: Joppe Geluykens \r\n\r\n* dataclasses for base profiler\r\n\r\n* dataclasses for torch profiler\r\n\r\n* adding OOM event tracking\r\n\r\n* formatting\r\n\r\n* normalizing cpu_utilization\r\n\r\n* pull out flattening dataclass function\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* added total CPU memory size and CPU memory available\r\n\r\n* adding system-wide CPU utilization\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Joppe Geluykens ", "code": "def _populate_static_information(self) -> None:\n \n self.info[\"ludwig_version\"] = LUDWIG_VERSION\n self.info[\"start_disk_usage\"] = shutil.disk_usage(os.path.expanduser(\"~\")).used\n\n # CPU information\n cpu_info = get_my_cpu_info()\n self.info[\"cpu_architecture\"] = cpu_info[\"arch\"]\n self.info[\"num_cpu\"] = psutil.cpu_count()\n self.info[\"cpu_name\"] = cpu_info[\"brand_raw\"]\n self.info[\"total_cpu_memory_size\"] = psutil.virtual_memory().total\n\n # GPU information\n if self.cuda_is_available:\n gpu_infos = get_gpu_info()\n for i, gpu_info in enumerate(gpu_infos):\n gpu_key = f\"cuda_{i}\"\n self.info[f\"{gpu_key}_memory_used\"] = []\n self.info[f\"{gpu_key}_name\"] = gpu_info[\"name\"]\n self.info[f\"{gpu_key}_total_memory\"] = gpu_info[\"total_memory\"]\n self.info[f\"{gpu_key}_driver_version\"] = gpu_info[\"driver_version\"]\n self.info[f\"{gpu_key}_cuda_version\"] = gpu_info[\"cuda_version\"]\n\n # recording in microseconds to be in line with torch profiler time recording.\n self.info[\"start_time\"] = time.perf_counter_ns() / 1000\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 280, "n_words": 77, "vocab_size": 58, "complexity": 3, "nloc": 19, "token_counts": 187, "n_ast_nodes": 348, "n_identifiers": 25, "random_cut": "def _populate_static_information(self) -> None:\n \n self.info[\"ludwig_version\"] = LUDWIG_VERSION\n self.info[\"start_disk_usage\"] = shutil.disk_usage(os.path.expanduser(\"~\")).used\n\n # CPU information\n cpu_info = get_my_cpu_info()\n self.info[\"cpu_architecture\"] = cpu_info[\"arch\"]\n self.info[\"num_cpu\"] = psutil.cpu_count()\n self.info[\"cpu_name\"] = cpu_info[\"brand_raw\"]\n self.info[\"total_cpu_memory_size\"] = psutil.virtual_memory().total\n\n # GPU information\n if self.cuda_is_ava", "d_id": 1308, "documentation": { "docstring": "Populate the report with static software and hardware information.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 76116, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/tests/utils/page_tests.py", "file_name": "page_tests.py", "fun_name": "assertCanNotCreateAt", "commit_message": "Reformat with black", "code": "def assertCanNotCreateAt(self, parent_model, child_model, msg=None):\n \n if self._testCanCreateAt(parent_model, child_model):\n msg = self._formatMessage(\n msg,\n \"Can create a %s.%s under a %s.%s\"\n % (\n child_model._meta.app_label,\n child_model._meta.model_name,\n parent_model._meta.app_label,\n parent_model._meta.model_name,\n ),\n )\n raise self.failureException(msg)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 212, "n_words": 29, "vocab_size": 28, 
"complexity": 2, "nloc": 13, "token_counts": 69, "n_ast_nodes": 103, "n_identifiers": 11, "random_cut": "def assertCanNotCreateAt(self, parent_model, child_model, msg=None):\n \n if self._testCanCreateAt(parent_model, child_model):\n msg = self._formatMessage(\n msg,\n \"Can create a %s.%s unde", "d_id": 16457, "documentation": { "docstring": "\n Assert a particular child Page type can not be created under a parent\n Page type. ``parent_model`` and ``child_model`` should be the Page\n classes being tested.\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 54, "language": "en" } }, { "id": 91818, "commit_id": "f64e58203b6c6d5121a4f6685dace0a4627d49b0", "repo": "sentry", "path": "src/sentry/features/manager.py", "file_name": "manager.py", "fun_name": "get_feature_objects", "commit_message": "feat(notifications): add a feature flag to make slack the default for new users (#35652)\n\nWe want to have some users automatically get notifications on Slack and email instead of just Slack. But we don't want to impact existing users so instead I introduce the concept of a UserFeature which isn't dependent on the user. I'm adding a flag called users:notification-slack-automatic which is controlled by the age of the user (see getsentry/getsentry#7644). If there is no value for a particular notification setting and provider, it will fall back to being enabled. This is different than the current behavior which defaults to being off for Slack for issue and workflow notifications. This PR is based off a previous PR which had a similar feature flag: #28190", "code": "def get_feature_objects(self) -> Mapping[Project, Feature]:\n \n\n cls = self._manager._get_feature_class(self.feature_name)\n return {obj: cls(self.feature_name, obj) for obj in self.objects}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 37, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 9, "token_counts": 44, "n_ast_nodes": 68, "n_identifiers": 11, "random_cut": "def get_feature_objects(self) -> Mapping[Project, Feature]:\n \n\n cls = self._manager._get_feature_class(self.feature_name)\n return {obj: cls(self.feature_name, obj) for obj in self.objects}\n", "d_id": 18815, "documentation": { "docstring": "\n Iterate over individual Feature objects.\n\n This is a fallback mode for applying a FeatureHandler that doesn't\n support checking the entire batch at once.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 52, "language": "en" } }, { "id": 34796, "commit_id": "623d8cb475804f2b0f85a47b04b8b2e522db06ef", "repo": "transformers", "path": "tests/test_pipelines_automatic_speech_recognition.py", "file_name": "test_pipelines_automatic_speech_recognition.py", "fun_name": "require_ffmpeg", "commit_message": "Adding support for `microphone` streaming within pipeline. (#15046)\n\n* Adding support for `microphone` streaming within pipeline.\r\n\r\n- Uses `ffmpeg` to get microphone data.\r\n- Makes sure alignment is made to `size_of_sample`.\r\n- Works by sending `{\"raw\": ..data.., \"stride\": (n, left, right),\r\n\"partial\": bool}`\r\ndirectly to the pipeline enabling to stream partial results and still\r\nget inference.\r\n- Let's `partial` information flow through the pipeline to enable caller\r\n to get it back and choose to display text or not.\r\n\r\n- The striding reconstitution is bound to have errors since CTC does not\r\nkeep previous state. 
Currently most of the errors are we don't know if\r\nthere's a space or not between two chunks.\r\nSince we have some left striding info, we could use that during decoding\r\nto choose what to do with those spaces and even extra letters maybe (if\r\nthe stride is long enough, it's bound to cover at least a few symbols)\r\n\r\nFixing tests.\r\n\r\nProtecting with `require_torch`.\r\n\r\n`raw_ctc` support for nicer demo.\r\n\r\nPost rebase fixes.\r\n\r\nRevamp to split raw_mic_data from it's live chunking.\r\n\r\n- Requires a refactor to make everything a bit cleaner.\r\n\r\nAutomatic resampling.\r\n\r\nSmall fix.\r\n\r\nSmall fix.\r\n\r\n* Post rebase fix (need to let super handle more logic, reorder args.)\r\n\r\n* Update docstrings\r\n\r\n* Docstring format.\r\n\r\n* Remove print.\r\n\r\n* Prevent flow of `input_values`.\r\n\r\n* Fixing `stride` too.\r\n\r\n* Fixing the PR by removing `raw_ctc`.\r\n\r\n* Better docstrings.\r\n\r\n* Fixing init.\r\n\r\n* Update src/transformers/pipelines/audio_utils.py\r\n\r\nCo-authored-by: Anton Lozhkov \r\n\r\n* Update tests/test_pipelines_automatic_speech_recognition.py\r\n\r\nCo-authored-by: Anton Lozhkov \r\n\r\n* Quality.\r\n\r\nCo-authored-by: Anton Lozhkov ", "code": "def require_ffmpeg(test_case):\n \n import subprocess\n\n try:\n subprocess.check_output([\"ffmpeg\", \"-h\"], stderr=subprocess.DEVNULL)\n return test_case\n except Exception:\n return unittest.skip(\"test requires ffmpeg\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 49, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 7, "token_counts": 41, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def require_ffmpeg(test_case):\n \n import subprocess\n\n try:\n s", "d_id": 6335, "documentation": { "docstring": "\n Decorator marking a test that requires FFmpeg.\n\n These tests are skipped when FFmpeg isn't installed.\n\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 48117, "commit_id": "3977e1798d8294ba628b5f330f43702c1a5c79fc", "repo": "airflow", "path": "tests/system/providers/google/tasks/example_queue.py", "file_name": "example_queue.py", "fun_name": "generate_random_string", "commit_message": "CloudTasks assets & system tests migration (AIP-47) (#23282)", "code": "def generate_random_string():\n \n import random\n import string\n\n return \"\".join(random.choices(string.ascii_uppercase + string.digits, k=8))\n\n random_string = generate_random_string()\n\n # [START create_queue]\n create_queue = CloudTasksQueueCreateOperator(\n location=LOCATION,\n task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=0.5)),\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n retry=Retry(maximum=10.0),\n timeout=5,\n task_id=\"create_queue\",\n )\n # [END create_queue]\n\n # [START delete_queue]\n delete_queue = CloudTasksQueueDeleteOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"delete_queue\",\n )\n # [END delete_queue]\n delete_queue.trigger_rule = TriggerRule.ALL_DONE\n\n # [START resume_queue]\n resume_queue = CloudTasksQueueResumeOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"resume_queue\",\n )\n # [END resume_queue]\n\n # [START pause_queue]\n pause_queue = CloudTasksQueuePauseOperator(\n location=LOCATION,\n 
queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"pause_queue\",\n )\n # [END pause_queue]\n\n # [START purge_queue]\n purge_queue = CloudTasksQueuePurgeOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"purge_queue\",\n )\n # [END purge_queue]\n\n # [START get_queue]\n get_queue = CloudTasksQueueGetOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"get_queue\",\n )\n\n get_queue_result = BashOperator(\n task_id=\"get_queue_result\",\n bash_command=f\"echo {get_queue.output}\",\n )\n # [END get_queue]\n\n # [START update_queue]\n update_queue = CloudTasksQueueUpdateOperator(\n task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=1)),\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n update_mask=FieldMask(paths=[\"stackdriver_logging_config.sampling_ratio\"]),\n task_id=\"update_queue\",\n )\n # [END update_queue]\n\n # [START list_queue]\n list_queue = CloudTasksQueuesListOperator(location=LOCATION, task_id=\"list_queue\")\n # [END list_queue]\n\n chain(\n random_string,\n create_queue,\n update_queue,\n pause_queue,\n resume_queue,\n purge_queue,\n get_queue,\n get_queue_result,\n list_queue,\n delete_queue,\n )\n\n from tests.system.utils.watcher import watcher\n\n # This test needs watcher in order to properly mark success/failure\n # when \"tearDown\" task with trigger rule is part of the DAG\n list(dag.tasks) >> watcher()\n\n\nfrom tests.system.utils import get_test_run # noqa: E402\n\n# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)\ntest_run = get_test_run(dag)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 636, "n_words": 221, "vocab_size": 115, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 517, "n_identifiers": 59, "random_cut": "def generate_random_string():\n \n import random\n import string\n\n return \"\".join(random.choices(string.ascii_uppercase + string.digits, k=8))\n\n random_string = generate_random_string()\n\n # [START create_queue]\n create_queue = CloudTasksQueueCreateOperator(\n location=LOCATION,\n task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=0.5)),\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n retry=Retry(maximum=10.0),\n timeout=5,\n task_id=\"create_queue\",\n )\n # [END create_queue]\n\n # [START delete_queue]\n delete_queue = CloudTasksQueueDeleteOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"delete_queue\",\n )\n # [END delete_queue]\n delete_queue.trigger_rule = TriggerRule.ALL_DONE\n\n # [START resume_queue]\n resume_queue = CloudTasksQueueResumeOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"resume_queue\",\n )\n # [END resume_queue]\n\n # [START pause_queue]\n pause_queue = CloudTasksQueuePauseOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n ", "d_id": 9364, "documentation": { "docstring": "\n Generate random string for queue and task names.\n Queue name cannot be repeated in preceding 7 days and\n task name in the last 1 hour.\n ", "n_words": 25, 
"vocab_size": 21, "n_whitespaces": 54, "language": "en" } }, { "id": 275619, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/utils.py", "file_name": "utils.py", "fun_name": "make_gradient_clipvalue_fn", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def make_gradient_clipvalue_fn(clipvalue):\n \n if clipvalue is None:\n return lambda grads_and_vars: grads_and_vars\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 20, "n_ast_nodes": 29, "n_identifiers": 3, "random_cut": "def make_gradient_clipvalue_fn(clipvalue):\n \n if clipvalue is None:\n ", "d_id": 81432, "documentation": { "docstring": "Creates a gradient transformation function for clipping by value.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 119312, "commit_id": "e085370ec4137cf0f73c5163cb664bc4e1c46082", "repo": "jax", "path": "jax/_src/scipy/signal.py", "file_name": "signal.py", "fun_name": "odd_ext", "commit_message": "Add some functions for spectral analysis.\n\nThis commit adds \"stft\", \"csd\", and \"welch\" functions in scipy.signal.", "code": "def odd_ext(x, n, axis=-1):\n \n if n < 1:\n return x\n if n > x.shape[axis] - 1:\n raise ValueError(\n f\"The extension length n ({n}) is too big. \"\n f\"It must not exceed x.shape[axis]-1, which is {x.shape[axis] - 1}.\")\n left_end = lax.slice_in_dim(x, 0, 1, axis=axis)\n left_ext = jnp.flip(lax.slice_in_dim(x, 1, n + 1, axis=axis), axis=axis)\n right_end = lax.slice_in_dim(x, -1, None, axis=axis)\n right_ext = jnp.flip(lax.slice_in_dim(x, -(n + 1), -1, axis=axis), axis=axis)\n ext = jnp.concatenate((2 * left_end - left_ext,\n x,\n 2 * right_end - right_ext),\n axis=axis)\n return ext\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 184, "n_words": 83, "vocab_size": 54, "complexity": 3, "nloc": 16, "token_counts": 159, "n_ast_nodes": 252, "n_identifiers": 16, "random_cut": "def odd_ext(x, n, axis=-1):\n \n if n < 1:\n return x\n if n > x.shape[axis] - 1:\n raise ValueError(\n f\"The extension length n ({n}) is too big. 
\"\n f\"It must not exceed x.shape[axis]-1, which is {x.shape[axis] - 1}.\")\n left_end = lax.slice_in_di", "d_id": 26583, "documentation": { "docstring": "Extends `x` along with `axis` by odd-extension.\n\n This function was previously a part of \"scipy.signal.signaltools\" but is no\n longer exposed.\n\n Args:\n x : input array\n n : the number of points to be added to the both end\n axis: the axis to be extended\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 57, "language": "en" } }, { "id": 268868, "commit_id": "a449efe29b092e658a29cd847e0494979a47d252", "repo": "keras", "path": "keras/tests/keras_doctest.py", "file_name": "keras_doctest.py", "fun_name": "filter_on_submodules", "commit_message": "Add a keras doctest modeled on tensorflow doctest\n\nPiperOrigin-RevId: 424672415", "code": "def filter_on_submodules(all_modules, submodule):\n \n\n filtered_modules = [\n mod for mod in all_modules if PACKAGE + submodule in mod.__name__\n ]\n return filtered_modules\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 20, "vocab_size": 17, "complexity": 3, "nloc": 5, "token_counts": 27, "n_ast_nodes": 43, "n_identifiers": 7, "random_cut": "def filter_on_submodules(all_modules, submodule):\n \n\n filtered_modules = [\n mod for mod in all_modules if PACKAGE + submodule in mod.__name__\n ]\n return filtered_modules\n\n", "d_id": 79737, "documentation": { "docstring": "Filters all the modules based on the module flag.\n\n The module flag has to be relative to the core package imported.\n For example, if `submodule=keras.layers` then, this function will return\n all the modules in the submodule.\n\n Args:\n all_modules: All the modules in the core package.\n submodule: Submodule to filter from all the modules.\n\n Returns:\n All the modules in the submodule.\n ", "n_words": 60, "vocab_size": 38, "n_whitespaces": 75, "language": "en" } }, { "id": 69776, "commit_id": "408c89df030998fe36df135570c9edd90a522996", "repo": "erpnext", "path": "erpnext/accounts/doctype/bank_reconciliation_tool/bank_reconciliation_tool.py", "file_name": "bank_reconciliation_tool.py", "fun_name": "get_pe_matching_query", "commit_message": "Feat:Filter on Payment Entries and Journal Entries\n\nApplying filters on Payement entries and Journal Entries as per reference date and posting date", "code": "def get_pe_matching_query(amount_condition, account_from_to, transaction):\n\t# get matching payment entries query\n\tfrom_date = frappe.db.get_single_value(\"Bank Reconciliation Tool\", \"bank_statement_from_date\")\n\tto_date = frappe.db.get_single_value(\"Bank Reconciliation Tool\", \"bank_statement_to_date\")\n\tfrom_reference_date = frappe.db.get_single_value(\n\t\t\"Bank Reconciliation Tool\", \"from_reference_date\"\n\t)\n\tto_reference_date = frappe.db.get_single_value(\"Bank Reconciliation Tool\", \"to_reference_date\")\n\tfiltered_by_reference_date = frappe.db.get_single_value(\n\t\t\"Bank Reconciliation Tool\", \"filtered_by_reference_date\"\n\t)\n\tif transaction.deposit > 0:\n\t\tcurrency_field = \"paid_to_account_currency as currency\"\n\telse:\n\t\tcurrency_field = \"paid_from_account_currency as currency\"\n\tcond_filtered_from_ref_date = \"\"\n\tcond_filtered_to_ref_date = \"\"\n\tcond_filtered_from_posting_date = \"\"\n\tcond_filtered_to_posting_date = \"\"\n\tfrom_ref_date =\"\"\n\tto_ref_date =\"\"\n\tfrom_post_date = \"\"\n\tto_post_date = 
\"\"\n\tif(filtered_by_reference_date):\n\t\tcond_filtered_from_ref_date = \" AND reference_date >=\"\n\t\tcond_filtered_to_ref_date = \" AND reference_date <=\"\n\t\tfrom_ref_date = from_reference_date\n\t\tto_ref_date = to_reference_date\n\telif(not filtered_by_reference_date):\n\t\tcond_filtered_from_posting_date = \" AND posting_date >=\"\n\t\tcond_filtered_to_posting_date = \" AND posting_date <=\"\n\t\tfrom_post_date = from_date\n\t\tto_post_date = to_date\n\t\t\n\tpe_data= f\t\n\treturn pe_data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 91, "n_words": 124, "vocab_size": 60, "complexity": 4, "nloc": 61, "token_counts": 149, "n_ast_nodes": 336, "n_identifiers": 24, "random_cut": "def get_pe_matching_query(amount_condition, account_from_to, transaction):\n\t# get matching payment entries query\n\tfrom_date = frappe.db.get_single_value(\"Bank Reconciliation Tool\", \"bank_statement_from_date\")\n\tto_date = frappe.db.get_single_value(\"Bank Reconciliation Tool\", \"bank_statement_to_date\")\n\tfrom_reference_date = frappe.db.get_single_value(\n\t\t\"Bank Reconciliation Tool\", \"from_reference_date\"\n\t)\n\tto_reference_date = frappe.db.get_single_value(\"Bank Reconciliation Tool\", \"to_reference_date\")\n\tfiltered_by_reference_date = frappe.db.get_single_value(\n\t\t\"Bank Reconcil", "d_id": 15095, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\t(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END\n\t\t\t+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END\n\t\t\t+ 1 ) AS rank,\n\t\t\t'Payment Entry' as doctype,\n\t\t\tname,\n\t\t\tpaid_amount,\n\t\t\treference_no,\n\t\t\treference_date,\n\t\t\tparty,\n\t\t\tparty_type,\n\t\t\tposting_date,\n\t\t\t{currency_field}\n\t\tFROM\n\t\t\t`tabPayment Entry`\n\t\tWHERE\n\t\t\tpaid_amount {amount_condition} %(amount)s\n\t\t\tAND docstatus = 1\n\t\t\tAND payment_type IN (%(payment_type)s, 'Internal Transfer')\n\t\t\tAND ifnull(clearance_date, '') = \"\"\n\t\t\tAND {account_from_to} = %(bank_account)s\n\t\t\tAND reference_no = '{transaction.reference_number}'\n\t\t\t{cond_filtered_from_ref_date} \"{from_ref_date}\"\n\t\t\t{cond_filtered_to_ref_date} \"{to_ref_date}\"\n\t\t\t{cond_filtered_from_posting_date} \"{from_post_date}\"\n\t\t\t{cond_filtered_to_posting_date} \"{to_post_date}\"\n\t\t", "n_words": 80, "vocab_size": 60, "n_whitespaces": 55, "language": "en" } }, { "id": 168207, "commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", "repo": "pandas", "path": "pandas/core/arrays/interval.py", "file_name": "interval.py", "fun_name": "closed", "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", "code": "def closed(self) -> IntervalInclusiveType:\n \n warnings.warn(\n \"Attribute `closed` is deprecated in favor of `inclusive`.\",\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n return self.dtype.inclusive\n\n _interval_shared_docs[\"set_closed\"] = textwrap.dedent(\n \n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 97, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 34, "n_ast_nodes": 79, "n_identifiers": 15, "random_cut": "def closed(self) -> IntervalInclusiveType:\n \n warnings.warn(\n \"Attribute `closed` is deprecated in favor of `inclusive`.\",\n FutureWarning,\n 
stacklevel=find_stack_level(inspect.currentframe()),\n )\n return self.dtype.inclusive\n\n _interval_shared_docs[\"set_closed\"] = textwrap.dedent(\n \n )", "d_id": 40231, "documentation": { "docstring": "\n String describing the inclusive side the intervals.\n\n Either ``left``, ``right``, ``both`` or ``neither`.\n \n Return an identical %(klass)s closed on the specified side.\n\n .. deprecated:: 1.5.0\n\n Parameters\n ----------\n closed : {'left', 'right', 'both', 'neither'}\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Returns\n -------\n new_index : %(klass)s\n\n %(examples)s\\\n ", "n_words": 51, "vocab_size": 41, "n_whitespaces": 166, "language": "en" } }, { "id": 196245, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/functions/elementary/exponential.py", "file_name": "exponential.py", "fun_name": "as_real_imag", "commit_message": "Updated import locations", "code": "def as_real_imag(self, deep=True, **hints):\n \n from sympy.functions.elementary.trigonometric import cos, sin\n re, im = self.args[0].as_real_imag()\n if deep:\n re = re.expand(deep, **hints)\n im = im.expand(deep, **hints)\n cos, sin = cos(im), sin(im)\n return (exp(re)*cos, exp(re)*sin)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 95, "n_words": 31, "vocab_size": 24, "complexity": 2, "nloc": 8, "token_counts": 93, "n_ast_nodes": 144, "n_identifiers": 15, "random_cut": "def as_real_imag(self, deep=True, **hints):\n \n from sympy.functions.elementary.trigonometric import cos, sin\n re, im = self.a", "d_id": 47745, "documentation": { "docstring": "\n Returns this function as a 2-tuple representing a complex number.\n\n Examples\n ========\n\n >>> from sympy import I, exp\n >>> from sympy.abc import x\n >>> exp(x).as_real_imag()\n (exp(re(x))*cos(im(x)), exp(re(x))*sin(im(x)))\n >>> exp(1).as_real_imag()\n (E, 0)\n >>> exp(I).as_real_imag()\n (cos(1), sin(1))\n >>> exp(1+I).as_real_imag()\n (E*cos(1), E*sin(1))\n\n See Also\n ========\n\n sympy.functions.elementary.complexes.re\n sympy.functions.elementary.complexes.im\n ", "n_words": 44, "vocab_size": 35, "n_whitespaces": 171, "language": "en" } }, { "id": 159579, "commit_id": "ca316fc80cb490ecf1e2e7261fb7fcef22fccc4a", "repo": "rasa", "path": "rasa/core/exporter.py", "file_name": "exporter.py", "fun_name": "_get_conversation_ids_to_process", "commit_message": "Async Tracker Store Support (#10696)\n\nMake `TrackerStore` interface methods asynchronous and supply an `AwaitableTrackerstore` wrapper for custom tracker stores which do not implement the methods as asynchronous.\r\n\r\n\r\nSquashed commits:\r\n\r\n* refactor tracker store and tests to be async\r\n\r\n* update core modules with async tracker store calls\r\n\r\n* update server with async tracker store calls\r\n\r\n* await tracker store call in twilio voice\r\n\r\n* add await in test rasa export\r\n\r\n* add awaits to test_agent for tracker store\r\n\r\n* add awaits to tracker store functions in processor tests\r\n\r\n* refactor exporter tests for async tracker store\r\n\r\n* use asyncmock from unittest instead of custom\r\n\r\n* add async in test_rasa_export\r\n\r\n* fixture update for async tracker store\r\n\r\n* update marker logic to handle async tracker store\r\n\r\n* fix mark tracker loader tests for async tracker store\r\n\r\n* add awaits to server and server tests\r\n\r\n* add await to dialogue test with tracker store\r\n\r\n* 
add await to tracker test\r\n\r\n* formatting in tracker store\r\n\r\n* more formatting fixes\r\n\r\n* more formatting fixes\r\n\r\n* address formatting changes\r\n\r\n* change return type and remove awaitable tracker store wrapper in create\r\n\r\n* make stream_events async\r\n\r\n* address comments (remove redundant methods in awaitable tracker store + raise exception)\r\n\r\n* make _run_markers and _run_markers_cli sync to ensure CLI can be run\r\n\r\n* add warning and test for creating async tracker store from endpoint config\r\n\r\n* add changelog entry\r\n\r\n* use TrackerStore instead of \"TrackerStore\" in typehint\r\n\r\nCo-authored-by: Joe Juzl \r\n\r\n* use TrackerStore instead of \"TrackerStore\" in typehint\r\n\r\nCo-authored-by: Joe Juzl \r\n\r\n* change user warning to deprecation warning\r\n\r\n* fix typo in comment\r\n\r\n* have fallback_tracker_store return in memory tracker store without awaitable wrapper\r\n\r\n* import async mock from conftest instead of unittest to suport Python 3.7\r\n\r\n* delete unused imports in marker_tracker_loader\r\n\r\n* apply black to modules which failed ci linter\r\n\r\n* resolve line length linting in tracker_store.py\r\n\r\n* refer to request.app.ctx.agent object instead of request.app.agent\r\n\r\n* resolve ci failures from not adding async/await\r\n\r\n* applied black to reformat three modules failing code quality\r\n\r\n* correct most docstring linting errors\r\n\r\n* fix docstring linting errors\r\n\r\n* fix flake8 line length errors\r\n\r\n* fix mypy type checking errors\r\n\r\n* linting corrections after adding type ignores to methods\r\n\r\n* delete extra periods in docstring\r\n\r\nCo-authored-by: Joe Juzl ", "code": "async def _get_conversation_ids_to_process(self) -> Set[Text]:\n \n conversation_ids_in_tracker_store = (\n await self._get_conversation_ids_in_tracker()\n )\n\n if not self.requested_conversation_ids:\n return conversation_ids_in_tracker_store\n\n self._validate_all_requested_ids_exist(conversation_ids_in_tracker_store)\n\n conversation_ids_to_process = conversation_ids_in_tracker_store & set(\n self.requested_conversation_ids\n )\n\n if not conversation_ids_to_process:\n raise NoEventsToMigrateError(\n \"Could not find an overlap between the requested \"\n \"conversation IDs and those found in the tracker store. Exiting.\"\n )\n\n return conversation_ids_to_process\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 199, "n_words": 51, "vocab_size": 40, "complexity": 3, "nloc": 27, "token_counts": 57, "n_ast_nodes": 101, "n_identifiers": 11, "random_cut": "async def _get_conversation_ids_to_process(self) -> Set[Text]:\n \n conversation_ids_in_tracker_store = (\n await self._get_conversation_ids_in_tracker()\n )\n\n if not self.requested_conversation_ids:\n return conversation_ids_in_tracker_store\n\n self._validate_all_requested_ids_exist(conversation_ids_in_tracker_store)\n\n conversation_ids_to_process = conversation_ids_in_tracker_store & set(\n self.request", "d_id": 38346, "documentation": { "docstring": "Get conversation IDs that are good for processing.\n\n Finds the intersection of events that are contained in the tracker store with\n those events requested as a command-line argument.\n\n Returns:\n Conversation IDs that are both requested and contained in the tracker\n store. 
If no conversation IDs are requested, all conversation IDs in the\n tracker store are returned.\n\n ", "n_words": 56, "vocab_size": 34, "n_whitespaces": 117, "language": "en" } }, { "id": 271577, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "call", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def call(self, inputs, training=None, mask=None):\n \n raise NotImplementedError(\n \"Unimplemented `tf.keras.Model.call()`: if you \"\n \"intend to create a `Model` with the Functional \"\n \"API, please provide `inputs` and `outputs` \"\n \"arguments. Otherwise, subclass `Model` with an \"\n \"overridden `call()` method.\"\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 115, "n_words": 39, "vocab_size": 34, "complexity": 1, "nloc": 8, "token_counts": 25, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "def call(self, inputs, training=None, mask=None):\n \n raise NotImplementedError(\n \"Unimplemented `tf.keras.Model.call()`: if you \"\n \"intend to create a `Model` with the Functional \"\n \"API, please provide `inputs` and ", "d_id": 80808, "documentation": { "docstring": "Calls the model on new inputs and returns the outputs as tensors.\n\n In this case `call()` just reapplies\n all ops in the graph to the new inputs\n (e.g. build a new computational graph from the provided inputs).\n\n Note: This method should not be called directly. It is only meant to be\n overridden when subclassing `tf.keras.Model`.\n To call a model on an input, always use the `__call__()` method,\n i.e. `model(inputs)`, which relies on the underlying `call()` method.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n training: Boolean or boolean scalar tensor, indicating whether to run\n the `Network` in training mode or inference mode.\n mask: A mask or list of masks. A mask can be either a boolean tensor or\n None (no mask). 
For more details, check the guide\n [here](https://www.tensorflow.org/guide/keras/masking_and_padding).\n\n Returns:\n A tensor if there is a single output, or\n a list of tensors if there are more than one outputs.\n ", "n_words": 150, "vocab_size": 106, "n_whitespaces": 316, "language": "en" } }, { "id": 153549, "commit_id": "97769988a6f19e4b76f34238c97bf159ee7626a5", "repo": "modin", "path": "modin/core/io/text/json_dispatcher.py", "file_name": "json_dispatcher.py", "fun_name": "_read", "commit_message": "REFACTOR-#3853: interacting with Dask interface through 'DaskWrapper' class (#3854)\n\nCo-authored-by: Devin Petersohn \r\nCo-authored-by: Dmitry Chigarev \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Anatoly Myachev ", "code": "def _read(cls, path_or_buf, **kwargs):\n \n path_or_buf = cls.get_path_or_buffer(path_or_buf)\n if isinstance(path_or_buf, str):\n if not cls.file_exists(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n path_or_buf = cls.get_path(path_or_buf)\n elif not cls.pathlib_or_pypath(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n if not kwargs.get(\"lines\", False):\n return cls.single_worker_read(path_or_buf, **kwargs)\n with OpenFile(path_or_buf, \"rb\") as f:\n columns = pandas.read_json(BytesIO(b\"\" + f.readline()), lines=True).columns\n kwargs[\"columns\"] = columns\n empty_pd_df = pandas.DataFrame(columns=columns)\n\n with OpenFile(path_or_buf, \"rb\", kwargs.get(\"compression\", \"infer\")) as f:\n partition_ids = []\n index_ids = []\n dtypes_ids = []\n\n column_widths, num_splits = cls._define_metadata(empty_pd_df, columns)\n\n args = {\"fname\": path_or_buf, \"num_splits\": num_splits, **kwargs}\n\n splits = cls.partitioned_file(\n f,\n num_partitions=NPartitions.get(),\n )\n for start, end in splits:\n args.update({\"start\": start, \"end\": end})\n partition_id = cls.deploy(cls.parse, num_returns=num_splits + 3, **args)\n partition_ids.append(partition_id[:-3])\n index_ids.append(partition_id[-3])\n dtypes_ids.append(partition_id[-2])\n\n # partition_id[-1] contains the columns for each partition, which will be useful\n # for implementing when `lines=False`.\n row_lengths = cls.materialize(index_ids)\n new_index = pandas.RangeIndex(sum(row_lengths))\n\n dtypes = cls.get_dtypes(dtypes_ids)\n partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)\n\n if isinstance(dtypes, pandas.Series):\n dtypes.index = columns\n else:\n dtypes = pandas.Series(dtypes, index=columns)\n\n new_frame = cls.frame_cls(\n np.array(partition_ids),\n new_index,\n columns,\n row_lengths,\n column_widths,\n dtypes=dtypes,\n )\n new_frame.synchronize_labels(axis=0)\n return cls.query_compiler_cls(new_frame)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 655, "n_words": 157, "vocab_size": 106, "complexity": 7, "nloc": 48, "token_counts": 398, "n_ast_nodes": 641, "n_identifiers": 58, "random_cut": "def _read(cls, path_or_buf, **kwargs):\n \n path_or_buf = cls.get_path_or_buffer(path_or_buf)\n if isinstance(path_or_buf, str):\n if not cls.file_exists(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n path_or_buf = cls.get_path(path_or_buf)\n elif not cls.pathlib_or_pypath(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n if not kwargs.get(\"lines\", False):\n return cls.single_worker_read(path_or_buf, **kwargs)\n with OpenFile(path_or_buf, \"rb\") as f:\n columns = 
pandas.read_json(BytesIO(b\"\" + f.readline()), lines=True).columns\n kwargs[\"columns\"] = columns\n empty_pd_df = pandas.DataFrame(columns=columns)\n\n with OpenFile(path_or_buf, \"rb\", kwargs.get(\"compression\", \"in", "d_id": 35438, "documentation": { "docstring": "\n Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters.\n\n Parameters\n ----------\n path_or_buf : str, path object or file-like object\n `path_or_buf` parameter of `read_json` function.\n **kwargs : dict\n Parameters of `read_json` function.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n ", "n_words": 44, "vocab_size": 35, "n_whitespaces": 141, "language": "en" } }, { "id": 190124, "commit_id": "bd844f46d804c8cad50d06ad20ab5bebaee9987b", "repo": "manim", "path": "manim/scene/three_d_scene.py", "file_name": "three_d_scene.py", "fun_name": "stop_ambient_camera_rotation", "commit_message": "Replaced renderer strings with :class:`.RendererType` enum entries (#3017)\n\n* remove unused constants\r\n\r\n* remove deprecated --use_opengl_renderer flag\r\n\r\n* remove unnecessary workaround with class initialization\r\n\r\n* add OpenGLMobject.name to get rid of one renderer check\r\n\r\n* add VMobject.n_points_per_curve property to get rid of more renderer checks\r\n\r\n* replace renderer string checks with enum check\r\n\r\n* added mobject.utils module with renderer-dependent class getters\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* ensure that capitalization of passed renderer type is irrelevant\r\n\r\n* remove unused entries from mobject.utils.__all__\r\n\r\n* fixed isort ignore in manim.__init__\r\n\r\n* fixed lower-case casting of passed renderer\r\n\r\n* fixed doctests\r\n\r\n* more documentation + doctests for mobject.utils\r\n\r\n* removed incorrect paragraph about ConverToOpenGL metaclass\r\n\r\n* added docstring for RendererType enum\r\n\r\n* renderer compatibility section in plugin dev documentation\r\n\r\n* added mobject.utils to reference manual\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Remove actual doctest (it ran the compatibility code)\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Naveen M K ", "code": "def stop_ambient_camera_rotation(self, about=\"theta\"):\n \n about: str = about.lower()\n try:\n if config.renderer == RendererType.CAIRO:\n trackers = {\n \"theta\": self.camera.theta_tracker,\n \"phi\": self.camera.phi_tracker,\n \"gamma\": self.camera.gamma_tracker,\n }\n x: ValueTracker = trackers[about]\n x.clear_updaters()\n self.remove(x)\n elif config.renderer == RendererType.OPENGL:\n self.camera.clear_updaters()\n except Exception:\n raise ValueError(\"Invalid ambient rotation angle.\")\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 248, "n_words": 40, "vocab_size": 36, "complexity": 4, "nloc": 16, "token_counts": 101, "n_ast_nodes": 171, "n_identifiers": 21, "random_cut": "def stop_ambient_camera_rotation(self, about=\"theta\"):\n \n about: str = about.lower()\n try:\n if config.renderer == RendererType.CAIRO:\n trackers = {\n \"theta\": self.camera.theta_tracker,\n \"phi\": self.camera.phi_tracker,\n \"gamma\": self.camera.gamma_tracker,\n }\n x: ValueTracker = trackers[about]\n 
x.clear_updaters()\n self.remove(x)\n elif config.renderer == RendererType.OPENGL:\n self.camera.clear_updaters()\n except Exception:\n raise ValueError(\"Invalid ambient rotation angl", "d_id": 46339, "documentation": { "docstring": "\n This method stops all ambient camera rotation.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 247689, "commit_id": "1da0f79d5455b594f2aa989106a672786f5b990f", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "_get_bundled_aggregations", "commit_message": "Refactor relations tests (#12232)\n\n* Moves the relation pagination tests to a separate class.\r\n* Move the assertion of the response code into the `_send_relation` helper.\r\n* Moves some helpers into the base-class.", "code": "def _get_bundled_aggregations(self) -> JsonDict:\n \n # Fetch the bundled aggregations of the event.\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n return channel.json_body[\"unsigned\"].get(\"m.relations\", {})\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 100, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 11, "token_counts": 55, "n_ast_nodes": 105, "n_identifiers": 13, "random_cut": "def _get_bundled_aggregations(self) -> JsonDict:\n \n # Fetch the bundled aggregations of the event.\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n return channel.json_body[\"unsigned\"].get(\"m.relations\", {})\n", "d_id": 71842, "documentation": { "docstring": "\n Requests /event on the parent ID and returns the m.relations field (from unsigned), if it exists.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 102634, "commit_id": "89f15f591cc3cc3e8ae40e95ffc802f7f2561ece", "repo": "chia-blockchain", "path": "chia/types/spend_bundle.py", "file_name": "spend_bundle.py", "fun_name": "get_memos", "commit_message": "Merge standalone wallet into main (#9793)\n\n* wallet changes from pac\r\n\r\n* cat changes\r\n\r\n* pool tests\r\n\r\n* pooling tests passing\r\n\r\n* offers\r\n\r\n* lint\r\n\r\n* mempool_mode\r\n\r\n* black\r\n\r\n* linting\r\n\r\n* workflow files\r\n\r\n* flake8\r\n\r\n* more cleanup\r\n\r\n* renamed\r\n\r\n* remove obsolete test, don't cast announcement\r\n\r\n* memos are not only bytes32\r\n\r\n* trade renames\r\n\r\n* fix rpcs, block_record\r\n\r\n* wallet rpc, recompile settlement clvm\r\n\r\n* key derivation\r\n\r\n* clvm tests\r\n\r\n* lgtm issues and wallet peers\r\n\r\n* stash\r\n\r\n* rename\r\n\r\n* mypy linting\r\n\r\n* flake8\r\n\r\n* bad initializer\r\n\r\n* flaky tests\r\n\r\n* Make CAT wallets only create on verified hints (#9651)\r\n\r\n* fix clvm tests\r\n\r\n* return to log lvl warn\r\n\r\n* check puzzle unhardened\r\n\r\n* public key, not bytes. 
api caching change\r\n\r\n* precommit changes\r\n\r\n* remove unused import\r\n\r\n* mypy ci file, tests\r\n\r\n* ensure balance before creating a tx\r\n\r\n* Remove CAT logic from full node test (#9741)\r\n\r\n* Add confirmations and sleeps for wallet (#9742)\r\n\r\n* use pool executor\r\n\r\n* rever merge mistakes/cleanup\r\n\r\n* Fix trade test flakiness (#9751)\r\n\r\n* remove precommit\r\n\r\n* older version of black\r\n\r\n* lint only in super linter\r\n\r\n* Make announcements in RPC be objects instead of bytes (#9752)\r\n\r\n* Make announcements in RPC be objects instead of bytes\r\n\r\n* Lint\r\n\r\n* misc hint'ish cleanup (#9753)\r\n\r\n* misc hint'ish cleanup\r\n\r\n* unremove some ci bits\r\n\r\n* Use main cached_bls.py\r\n\r\n* Fix bad merge in main_pac (#9774)\r\n\r\n* Fix bad merge at 71da0487b9cd5564453ec24b76f1ac773c272b75\r\n\r\n* Remove unused ignores\r\n\r\n* more unused ignores\r\n\r\n* Fix bad merge at 3b143e705057d6c14e2fb3e00078aceff0552d7e\r\n\r\n* One more byte32.from_hexstr\r\n\r\n* Remove obsolete test\r\n\r\n* remove commented out\r\n\r\n* remove duplicate payment object\r\n\r\n* remove long sync\r\n\r\n* remove unused test, noise\r\n\r\n* memos type\r\n\r\n* bytes32\r\n\r\n* make it clear it's a single state at a time\r\n\r\n* copy over asset ids from pacr\r\n\r\n* file endl linter\r\n\r\n* Update chia/server/ws_connection.py\r\n\r\nCo-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>\r\n\r\nCo-authored-by: Matt Hauff \r\nCo-authored-by: Kyle Altendorf \r\nCo-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>", "code": "def get_memos(self) -> Dict[bytes32, List[bytes]]:\n \n memos: Dict[bytes32, List[bytes]] = {}\n for coin_spend in self.coin_spends:\n result = Program.from_bytes(bytes(coin_spend.puzzle_reveal)).run(\n Program.from_bytes(bytes(coin_spend.solution))\n )\n for condition in result.as_python():\n if condition[0] == ConditionOpcode.CREATE_COIN and len(condition) >= 4:\n # If only 3 elements (opcode + 2 args), there is no memo, this is ph, amount\n coin_added = Coin(coin_spend.coin.name(), bytes32(condition[1]), int_from_bytes(condition[2]))\n if type(condition[3]) != list:\n # If it's not a list, it's not the correct format\n continue\n memos[coin_added.name()] = condition[3]\n return memos\n\n # Note that `coin_spends` used to have the bad name `coin_solutions`.\n # Some API still expects this name. For now, we accept both names.\n #\n # TODO: continue this deprecation. Eventually, all code below here should be removed.\n # 1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary)\n # 2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary)\n # 3. remove all references to `include_legacy_keys=True`\n # 4. 
remove all code below this point\n", "url": "https://github.com/Chia-Network/chia-blockchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 394, "n_words": 153, "vocab_size": 109, "complexity": 6, "nloc": 18, "token_counts": 146, "n_ast_nodes": 235, "n_identifiers": 27, "random_cut": "def get_memos(self) -> Dict[bytes32, List[bytes]]:\n \n memos: Dict[bytes32, List[bytes]] = {}\n for coin_spend in self.coin_spends:\n result = Program.from_bytes(bytes(coin_spend.puzzle_reveal)).run(\n Program.from_bytes(bytes(coin_spend.solution))\n )\n for condition in result.as_python():\n if condition[0] == ConditionOpcode.CREATE_COIN and len(condition) >= 4:\n #", "d_id": 21558, "documentation": { "docstring": "\n Retrieves the memos for additions in this spend_bundle, which are formatted as a list in the 3rd parameter of\n CREATE_COIN. If there are no memos, the addition coin_id is not included. If they are not formatted as a list\n of bytes, they are not included. This is expensive to call, it should not be used in full node code.\n ", "n_words": 59, "vocab_size": 40, "n_whitespaces": 88, "language": "en" } }, { "id": 60712, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/collector.py", "file_name": "collector.py", "fun_name": "_clean_url_path_part", "commit_message": "upd; format", "code": "def _clean_url_path_part(part):\n # type: (str) -> str\n \n # We unquote prior to quoting to make sure nothing is double quoted.\n return urllib.parse.quote(urllib.parse.unquote(part))\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def _clean_url_path_part(part):\n # type: (str) -> str\n \n # We unquote prior ", "d_id": 12256, "documentation": { "docstring": "\n Clean a \"part\" of a URL path (i.e. after splitting on \"@\" characters).\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 20, "language": "en" } }, { "id": 60427, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/scripts/cpp_lint.py", "file_name": "cpp_lint.py", "fun_name": "CheckAltTokens", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def CheckAltTokens(filename, clean_lines, linenum, error):\n \n line = clean_lines.elided[linenum]\n\n # Avoid preprocessor lines\n if Match(r'^\\s*#', line):\n return\n\n # Last ditch effort to avoid multi-line comments. This will not help\n # if the comment started before the current line or ended after the\n # current line, but it catches most of the false positives. 
At least,\n # it provides a way to workaround this warning for people who use\n # multi-line comments in preprocessor macros.\n #\n # TODO(unknown): remove this once cpplint has better support for\n # multi-line comments.\n if line.find('/*') >= 0 or line.find('*/') >= 0:\n return\n\n for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):\n error(filename, linenum, 'readability/alt_tokens', 2,\n 'Use operator %s instead of %s' % (\n _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 161, "n_words": 114, "vocab_size": 84, "complexity": 5, "nloc": 10, "token_counts": 91, "n_ast_nodes": 154, "n_identifiers": 14, "random_cut": "def CheckAltTokens(filename, clean_lines, linenum, error):\n \n line = clean_lines.elided[linenum]\n\n # Avoid preprocessor lines\n if Match(r'^\\s*#', line):\n return\n\n # Last ditch effort to avoid multi-line comments. This will not help\n # if the comment started before the current line or ended after the\n # current line, but it catches most of the false positives. At least,\n # it provides a way to workaround this warning for people who use\n # multi-line comments in preprocessor macros.\n #\n # TODO(unknown): remove this once cpplint has better support for\n # multi-line comments.\n if line.find('/*') >= 0 or line.find('*/') >= 0:\n return\n\n for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(lin", "d_id": 12155, "documentation": { "docstring": "Check alternative keywords being used in boolean expressions.\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to check.\n error: The function to call with any errors found.\n ", "n_words": 40, "vocab_size": 33, "n_whitespaces": 54, "language": "en" } }, { "id": 133165, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/joblib/__init__.py", "file_name": "__init__.py", "fun_name": "register_ray", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def register_ray():\n \n try:\n from ray.util.joblib.ray_backend import RayBackend\n\n register_parallel_backend(\"ray\", RayBackend)\n except ImportError:\n msg = (\n \"To use the ray backend you must install ray.\"\n \"Try running 'pip install ray'.\"\n \"See https://docs.ray.io/en/master/installation.html\"\n \"for more information.\"\n )\n raise ImportError(msg)\n\n\n__all__ = [\"register_ray\"]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 126, "n_words": 39, "vocab_size": 37, "complexity": 2, "nloc": 12, "token_counts": 39, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def register_ray():\n \n try:\n from ray.util.joblib.ray_backend import RayBackend\n\n register_parallel_backend(\"ray\", RayBackend)\n except ImportError:\n msg = (\n \"T", "d_id": 29950, "documentation": { "docstring": "Register Ray Backend to be called with parallel_backend(\"ray\").", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 203603, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/auth/backends.py", "file_name": "backends.py", "fun_name": "get_group_permissions", "commit_message": "Refs #33476 -- Reformatted 
code with Black.", "code": "def get_group_permissions(self, user_obj, obj=None):\n \n return self._get_permissions(user_obj, obj, \"group\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def get_group_permissions(self, user_obj, obj=None):\n \n return self._get_permissi", "d_id": 50470, "documentation": { "docstring": "\n Return a set of permission strings the user `user_obj` has from the\n groups they belong.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 257048, "commit_id": "a273c3a51dd432bd125e5b35df4be94260a2cdb7", "repo": "haystack", "path": "haystack/document_stores/deepsetcloud.py", "file_name": "deepsetcloud.py", "fun_name": "get_evaluation_sets", "commit_message": "EvaluationSetClient for deepset cloud to fetch evaluation sets and la… (#2345)\n\n* EvaluationSetClient for deepset cloud to fetch evaluation sets and labels for one specific evaluation set\r\n\r\n* make DeepsetCloudDocumentStore able to fetch uploaded evaluation set names\r\n\r\n* fix missing renaming of get_evaluation_set_names in DeepsetCloudDocumentStore\r\n\r\n* update documentation for evaluation set functionality in deepset cloud document store\r\n\r\n* DeepsetCloudDocumentStore tests for evaluation set functionality\r\n\r\n* rename index to evaluation_set_name for DeepsetCloudDocumentStore evaluation set functionality\r\n\r\n* raise DeepsetCloudError when no labels were found for evaluation set\r\n\r\n* make use of .get_with_auto_paging in EvaluationSetClient\r\n\r\n* Return result of get_with_auto_paging() as it parses the response already\r\n\r\n* Make schema import source more specific\r\n\r\n* fetch all evaluation sets for a workspace in deepset Cloud\r\n\r\n* Rename evaluation_set_name to label_index\r\n\r\n* make use of generator functionality for fetching labels\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Adjust function input for DeepsetCloudDocumentStore.get_all_labels, adjust tests for it, fix typos, make linter happy\r\n\r\n* Match error message with pytest.raises\r\n\r\n* Update Documentation & Code Style\r\n\r\n* DeepsetCloudDocumentStore.get_labels_count raises DeepsetCloudError when no evaluation set was found to count labels on\r\n\r\n* remove unneeded import in tests\r\n\r\n* DeepsetCloudDocumentStore tests, make reponse bodies a string through json.dumps\r\n\r\n* DeepsetcloudDocumentStore.get_label_count - move raise to return\r\n\r\n* stringify uuid before json.dump as uuid is not serilizable\r\n\r\n* DeepsetcloudDocumentStore - adjust response mocking in tests\r\n\r\n* DeepsetcloudDocumentStore - json dump response body in test\r\n\r\n* DeepsetCloudDocumentStore introduce label_index, EvaluationSetClient rename label_index to evaluation_set\r\n\r\n* Update Documentation & Code Style\r\n\r\n* DeepsetCloudDocumentStore rename evaluation_set to evaluation_set_response as there is a name clash with the input variable\r\n\r\n* DeepsetCloudDocumentStore - rename missed variable in test\r\n\r\n* DeepsetCloudDocumentStore - rename missed label_index to index in doc string, rename label_index to evaluation_set in EvaluationSetClient\r\n\r\n* Update Documentation & Code Style\r\n\r\n* DeepsetCloudDocumentStore - update docstrings for EvaluationSetClient\r\n\r\n* DeepsetCloudDocumentStore - fix typo in doc 
string\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def get_evaluation_sets(self) -> List[dict]:\n \n return self.evaluation_set_client.get_evaluation_sets()\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 8, "token_counts": 19, "n_ast_nodes": 33, "n_identifiers": 5, "random_cut": "def get_evaluation_sets(self) -> List[dict]:\n \n return self.evaluation_set_client.get", "d_id": 74993, "documentation": { "docstring": "\n Returns a list of uploaded evaluation sets to deepset cloud.\n\n :return: list of evaluation sets as dicts\n These contain (\"name\", \"evaluation_set_id\", \"created_at\", \"matched_labels\", \"total_labels\") as fields.\n ", "n_words": 26, "vocab_size": 21, "n_whitespaces": 64, "language": "en" } }, { "id": 177986, "commit_id": "53f6308186aa131946e196b0409f3d732ec9e007", "repo": "label-studio", "path": "label_studio/data_import/uploader.py", "file_name": "uploader.py", "fun_name": "allowlist_svg", "commit_message": "fix: DEV-2236: Stored XSS via SVG file (#2273)\n\n* user uploaded content rendered as plain text or known image only\r\n\r\n* allow list for svg in progress\r\n\r\n* allow list for svg basic pass\r\n\r\n* add error handling\r\n\r\n* add to file processing re: code review\r\n\r\n* rm uneeded code\r\n\r\n* add env var to disable svg cleaning\r\n\r\n* add test\r\n\r\n* update env setting\r\n\r\n* rm lxml string methods\r\n\r\n* Update uploader.py\r\n\r\n* Update base.py\r\n\r\nCo-authored-by: Max Tkachenko ", "code": "def allowlist_svg(dirty_xml):\n \n from lxml.html import clean\n\n allow_tags = [\n 'xml',\n 'svg',\n 'circle',\n 'ellipse',\n 'line',\n 'path',\n 'polygon',\n 'polyline',\n 'rect'\n ]\n\n cleaner = clean.Cleaner(\n allow_tags=allow_tags,\n style=True,\n links=True,\n add_nofollow=False,\n page_structure=True,\n safe_attrs_only=False,\n remove_unknown_tags=False)\n\n clean_xml = cleaner.clean_html(dirty_xml)\n return clean_xml\n\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 231, "n_words": 34, "vocab_size": 31, "complexity": 1, "nloc": 23, "token_counts": 77, "n_ast_nodes": 126, "n_identifiers": 16, "random_cut": "def allowlist_svg(dirty_xml):\n \n from lxml.html import clean\n\n allow_tags = [\n 'xml',\n 'svg',\n 'circle',\n 'ellipse',\n 'line',\n 'path',\n 'polygon',\n 'polyline',\n 'rect'\n ]\n\n cleaner = clean.Cleaner(\n allow_tags=allow_tags,\n ", "d_id": 42555, "documentation": { "docstring": "Filter out malicious/harmful content from SVG files\n by defining allowed tags\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 17, "language": "en" } }, { "id": 101613, "commit_id": "98d01760e469fd2108eed8d0b0a1ba6297c3177c", "repo": "faceswap", "path": "tools/sort/sort_methods.py", "file_name": "sort_methods.py", "fun_name": "_sort_filelist", "commit_message": "Overhaul sort:\n - Standardize image data reading and writing\n - Optimize loading (just one pass required)\n - Make all sort groups binnable (to greater or lesser results)\n - Add sort by pitch\n - Deprecate multiple options\n - linting, docs + locales", "code": "def _sort_filelist(self) -> None:\n \n for filename, image, alignments in self._iterator():\n self.score_image(filename, image, alignments)\n\n self.sort()\n logger.debug(\"sorted list: %s\",\n 
[r[0] if isinstance(r, (tuple, list)) else r for r in self._result])\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 87, "n_words": 28, "vocab_size": 24, "complexity": 4, "nloc": 16, "token_counts": 68, "n_ast_nodes": 104, "n_identifiers": 15, "random_cut": "def _sort_filelist(self) -> None:\n \n for filename, image, alignments in self._iterator():\n self.score_image(filename, image, alignments)\n\n self.sort()\n logger.debug(\"sorted list: %s\",\n [r[0] if isinsta", "d_id": 21021, "documentation": { "docstring": " Call the sort method's logic to populate the :attr:`_results` attribute.\n\n Put logic for scoring an individual frame in in :attr:`score_image` of the child\n\n Returns\n -------\n list\n The sorted file. A list of tuples with the filename in the first position and score in\n the second position\n ", "n_words": 46, "vocab_size": 34, "n_whitespaces": 104, "language": "en" } }, { "id": 275284, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/optimizer.py", "file_name": "optimizer.py", "fun_name": "finalize_variable_values", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def finalize_variable_values(self, var_list):\n \n if self.use_ema:\n # If the optimizer uses EMA, then when finalizing, we replace the model\n # variable value with its moving average stored inside optimizer.\n self._overwrite_model_variables_with_average_value(var_list)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 76, "n_words": 29, "vocab_size": 27, "complexity": 2, "nloc": 3, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def finalize_variable_values(self, var_list):\n \n if self.use_ema:\n # If the optimizer uses EMA, then when finalizing, we replace the model\n # variable value with its moving average stored inside optimizer.\n self._overwrite_model_variable", "d_id": 81369, "documentation": { "docstring": "Set the final value of model's trainable variables.\n\n Sometimes there are some extra steps before ending the variable updates,\n such as overriding the model variables with its average value.\n\n Args:\n var_list: list of model variables.\n ", "n_words": 35, "vocab_size": 30, "n_whitespaces": 72, "language": "en" } }, { "id": 12761, "commit_id": "1b3edacf531e4e8d29eac4ea73785f8d201255d6", "repo": "jina", "path": "jina/serve/stream/__init__.py", "file_name": "__init__.py", "fun_name": "wait_floating_requests_end", "commit_message": "feat: wait for floating Executor tasks (#5004)", "code": "async def wait_floating_requests_end(self):\n \n while self.total_num_floating_tasks_alive > 0:\n await asyncio.sleep(0)\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 3, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "async def wait_floating_requests_end(self):\n \n while self.total_num_floating_tasks_alive > 0:\n ", "d_id": 2402, "documentation": { "docstring": "\n Await this coroutine to make sure that all the floating tasks that the request handler may bring are properly consumed\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 102266, 
"commit_id": "a35b4b49d2e2a215a64e455101c779ae623b3321", "repo": "pytorch", "path": "torch/functional.py", "file_name": "functional.py", "fun_name": "_lu_impl", "commit_message": "Add linalg.lu_factor (#66933)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/66933\n\nThis PR exposes `torch.lu` as `torch.linalg.lu_factor` and\n`torch.linalg.lu_factor_ex`.\n\nThis PR also adds support for matrices with zero elements both in\nthe size of the matrix and the batch. Note that this function simply\nreturns empty tensors of the correct size in this case.\n\nWe add a test and an OpInfo for the new function.\n\nThis PR also adds documentation for this new function in line of\nthe documentation in the rest of `torch.linalg`.\n\nFixes https://github.com/pytorch/pytorch/issues/56590\nFixes https://github.com/pytorch/pytorch/issues/64014\n\ncc jianyuh nikitaved pearu mruberry walterddr IvanYashchuk xwang233 Lezcano\n\nTest Plan: Imported from OSS\n\nReviewed By: gchanan\n\nDifferential Revision: D32834069\n\nPulled By: mruberry\n\nfbshipit-source-id: 51ef12535fa91d292f419acf83b800b86ee9c7eb", "code": "def _lu_impl(A, pivot=True, get_infos=False, out=None):\n # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor]\n r\n # If get_infos is True, then we don't need to check for errors and vice versa\n return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos))\n\nif TYPE_CHECKING:\n _ListOrSeq = Sequence[Tensor]\nelse:\n _ListOrSeq = List[Tensor]\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 46, "vocab_size": 42, "complexity": 1, "nloc": 77, "token_counts": 37, "n_ast_nodes": 83, "n_identifiers": 13, "random_cut": "def _lu_impl(A, pivot=True, get_infos=False, out=None):\n # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor]\n r\n # If get_infos is True, then we don't need to ch", "d_id": 21504, "documentation": { "docstring": "Computes the LU factorization of a matrix or batches of matrices\n :attr:`A`. Returns a tuple containing the LU factorization and\n pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to\n ``True``.\n\n .. note::\n * The returned permutation matrix for every matrix in the batch is\n represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``.\n ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm,\n the ``i``-th row was permuted with the ``j-1``-th row.\n * LU factorization with :attr:`pivot` = ``False`` is not available\n for CPU, and attempting to do so will throw an error. However,\n LU factorization with :attr:`pivot` = ``False`` is available for\n CUDA.\n * This function does not check if the factorization was successful\n or not if :attr:`get_infos` is ``True`` since the status of the\n factorization is present in the third element of the return tuple.\n * In the case of batches of square matrices with size less or equal\n to 32 on a CUDA device, the LU factorization is repeated for\n singular matrices due to the bug in the MAGMA library\n (see magma issue 13).\n * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`.\n\n .. 
warning::\n The gradients of this function will only be finite when :attr:`A` is full rank.\n This is because the LU decomposition is just differentiable at full rank matrices.\n Furthermore, if :attr:`A` is close to not being full rank,\n the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`.\n\n Args:\n A (Tensor): the tensor to factor of size :math:`(*, m, n)`\n pivot (bool, optional): controls whether pivoting is done. Default: ``True``\n get_infos (bool, optional): if set to ``True``, returns an info IntTensor.\n Default: ``False``\n out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``,\n then the elements in the tuple are Tensor, IntTensor,\n and IntTensor. If :attr:`get_infos` is ``False``, then the\n elements in the tuple are Tensor, IntTensor. Default: ``None``\n\n Returns:\n (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing\n\n - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)`\n\n - **pivots** (*IntTensor*): the pivots of size :math:`(*, \\text{min}(m, n))`.\n ``pivots`` stores all the intermediate transpositions of rows.\n The final permutation ``perm`` could be reconstructed by\n applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``,\n where ``perm`` is initially the identity permutation of :math:`m` elements\n (essentially this is what :func:`torch.lu_unpack` is doing).\n\n - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of\n size :math:`(*)` where non-zero values indicate whether factorization for the matrix or\n each minibatch has succeeded or failed\n\n Example::\n\n >>> A = torch.randn(2, 3, 3)\n >>> A_LU, pivots = torch.lu(A)\n >>> A_LU\n tensor([[[ 1.3506, 2.5558, -0.0816],\n [ 0.1684, 1.1551, 0.1940],\n [ 0.1193, 0.6189, -0.5497]],\n\n [[ 0.4526, 1.2526, -0.3285],\n [-0.7988, 0.7175, -0.9701],\n [ 0.2634, -0.9255, -0.3459]]])\n >>> pivots\n tensor([[ 3, 3, 3],\n [ 3, 3, 3]], dtype=torch.int32)\n >>> A_LU, pivots, info = torch.lu(A, get_infos=True)\n >>> if info.nonzero().size(0) == 0:\n ... print('LU factorization succeeded for all samples!')\n LU factorization succeeded for all samples!\n ", "n_words": 497, "vocab_size": 265, "n_whitespaces": 1147, "language": "en" } }, { "id": 248205, "commit_id": "c2d50e9f6c5f7b01cbd8bf1dca36cb8c0e7b007f", "repo": "synapse", "path": "tests/config/test_workers.py", "file_name": "test_workers.py", "fun_name": "test_new_configs_appservice_worker", "commit_message": "Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. 
(#12452)", "code": "def test_new_configs_appservice_worker(self) -> None:\n \n appservice_worker_config = self._make_worker_config(\n worker_app=\"synapse.app.generic_worker\", worker_name=\"worker1\"\n )\n\n self.assertTrue(\n appservice_worker_config._should_this_worker_perform_duty(\n {\n \"notify_appservices_from_worker\": \"worker1\",\n },\n \"notify_appservices\",\n \"synapse.app.appservice\",\n \"notify_appservices_from_worker\",\n )\n )\n\n self.assertFalse(\n appservice_worker_config._should_this_worker_perform_duty(\n {\n \"notify_appservices_from_worker\": \"worker2\",\n },\n \"notify_appservices\",\n \"synapse.app.appservice\",\n \"notify_appservices_from_worker\",\n )\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 324, "n_words": 32, "vocab_size": 21, "complexity": 1, "nloc": 27, "token_counts": 68, "n_ast_nodes": 125, "n_identifiers": 9, "random_cut": "def test_new_configs_appservice_worker(self) -> None:\n \n appservice_worker_config = self._make_worker_config(\n worker_app=\"synapse.app.generic_worker\", worker_name=\"worker1\"\n )\n\n self.assertTrue(\n appservice_worker_config._should_this_worker_perform_duty(\n {\n \"notify_appservices_from_worker\": \"worker1\",\n },\n \"notify_appservices\",\n \"synapse.app.appservice\",\n \"notify_appservices_from_worker\",\n )\n )\n\n self.assertFalse(\n appservice_worker_config._should_this_worker_perform_duty(\n {\n \"notify_appservices_from_worker\"", "d_id": 72153, "documentation": { "docstring": "\n Tests new config options. This is for the worker's config.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 197334, "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", "repo": "sympy", "path": "sympy/physics/hydrogen.py", "file_name": "hydrogen.py", "fun_name": "E_nl", "commit_message": "Remove abbreviations in documentation", "code": "def E_nl(n, Z=1):\n \n n, Z = S(n), S(Z)\n if n.is_integer and (n < 1):\n raise ValueError(\"'n' must be positive integer\")\n return -Z**2/(2*n**2)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 22, "vocab_size": 22, "complexity": 3, "nloc": 5, "token_counts": 52, "n_ast_nodes": 86, "n_identifiers": 6, "random_cut": "def E_nl(n, Z=1):\n \n n", "d_id": 48477, "documentation": { "docstring": "\n Returns the energy of the state (n, l) in Hartree atomic units.\n\n The energy does not depend on \"l\".\n\n Parameters\n ==========\n\n n : integer\n Principal Quantum Number which is\n an integer with possible values as 1, 2, 3, 4,...\n Z :\n Atomic number (1 for Hydrogen, 2 for Helium, ...)\n\n Examples\n ========\n\n >>> from sympy.physics.hydrogen import E_nl\n >>> from sympy.abc import n, Z\n >>> E_nl(n, Z)\n -Z**2/(2*n**2)\n >>> E_nl(1)\n -1/2\n >>> E_nl(2)\n -1/8\n >>> E_nl(3)\n -1/18\n >>> E_nl(3, 47)\n -2209/18\n\n ", "n_words": 80, "vocab_size": 66, "n_whitespaces": 165, "language": "en" } }, { "id": 241760, "commit_id": "d2d284fd6e3e8f53e9a44ab233771850af1e4dab", "repo": "lightning", "path": "tests/checkpointing/test_model_checkpoint.py", "file_name": "test_model_checkpoint.py", "fun_name": "test_model_checkpoint_no_extraneous_invocations", "commit_message": "Update `tests/checkpointing/*.py` to use `devices` instead of `gpus` or `ipus` (#11408)\n\nCo-authored-by: Carlos Mocholí ", "code": "def 
test_model_checkpoint_no_extraneous_invocations(tmpdir):\n \n model = LogInTwoMethods()\n num_epochs = 4\n model_checkpoint = ModelCheckpointTestInvocations(monitor=\"early_stop_on\", expected_count=num_epochs, save_top_k=-1)\n trainer = Trainer(\n strategy=\"ddp_spawn\",\n accelerator=\"cpu\",\n devices=2,\n default_root_dir=tmpdir,\n callbacks=[model_checkpoint],\n max_epochs=num_epochs,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 96, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 14, "token_counts": 77, "n_ast_nodes": 130, "n_identifiers": 21, "random_cut": "def test_model_checkpoint_no_extraneous_invocations(tmpdir):\n \n model = LogInTwoMethods()\n num_epochs = 4\n model_checkpoint = ModelCheckpointTestInvocations(monitor=\"early_stop_on\", expected_count=num_epochs, save_top_k=-1)\n trainer = Trainer(\n strategy=\"ddp_spawn\",\n accelerator=\"cpu\",\n devices=2,\n default_root_dir=tmpdir,\n callbacks=[model_checkpoint],\n max_epochs=num_epochs,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n", "d_id": 69687, "documentation": { "docstring": "Test to ensure that the model callback saves the checkpoints only once in distributed mode.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 218207, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/abc.py", "file_name": "abc.py", "fun_name": "find_module", "commit_message": "add python 3.10.4 for windows", "code": "def find_module(self, fullname, path):\n \n warnings.warn(\"MetaPathFinder.find_module() is deprecated since Python \"\n \"3.4 in favor of MetaPathFinder.find_spec() and is \"\n \"slated for removal in Python 3.12\",\n DeprecationWarning,\n stacklevel=2)\n if not hasattr(self, 'find_spec'):\n return None\n found = self.find_spec(fullname, path)\n return found.loader if found is not None else None\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 175, "n_words": 45, "vocab_size": 34, "complexity": 3, "nloc": 10, "token_counts": 56, "n_ast_nodes": 92, "n_identifiers": 12, "random_cut": "def find_module(self, fullname, path):\n \n warnings.warn(\"MetaPathFinder.find_module() is deprecated since Python \"\n \"3.4 in favor of MetaPathFinder.find_spec() and is \"\n ", "d_id": 55204, "documentation": { "docstring": "Return a loader for the module.\n\n If no module is found, return None. The fullname is a str and\n the path is a list of strings or None.\n\n This method is deprecated since Python 3.4 in favor of\n finder.find_spec(). 
If find_spec() exists then backwards-compatible\n functionality is provided for this method.\n\n ", "n_words": 50, "vocab_size": 39, "n_whitespaces": 93, "language": "en" } }, { "id": 111992, "commit_id": "de6662a4a0fbfc557614b6c022edaf8117de7a5a", "repo": "nni", "path": "nni/algorithms/hpo/evolution_tuner.py", "file_name": "evolution_tuner.py", "fun_name": "_generate_individual", "commit_message": "[WIP] add doc for evolution (#4575)", "code": "def _generate_individual(self, parameter_id):\n \n pos = -1\n\n for i in range(len(self.population)):\n if self.population[i].result is None:\n pos = i\n break\n\n if pos != -1:\n indiv = copy.deepcopy(self.population[pos])\n self.population.pop(pos)\n else:\n random.shuffle(self.population)\n # avoid only 1 individual has result\n if len(self.population) > 1 and self.population[0].result < self.population[1].result:\n self.population[0] = self.population[1]\n\n # mutation on the worse individual\n space = json2space(self.searchspace_json,\n self.population[0].config)\n is_rand = dict()\n mutation_pos = space[random.randint(0, len(space)-1)]\n\n for i in range(len(self.space)):\n is_rand[self.space[i]] = (self.space[i] == mutation_pos)\n config = json2parameter(\n self.searchspace_json, is_rand, self.random_state, self.population[0].config)\n\n if len(self.population) > 1:\n self.population.pop(1)\n\n indiv = Individual(config=config)\n\n # remove \"_index\" from config and save params-id\n self.running_trials[parameter_id] = indiv\n config = split_index(indiv.config)\n return config\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 440, "n_words": 103, "vocab_size": 70, "complexity": 8, "nloc": 27, "token_counts": 259, "n_ast_nodes": 403, "n_identifiers": 28, "random_cut": "def _generate_individual(self, parameter_id):\n \n pos = -1\n\n for i in range(len(self.population)):\n if self.population[i].result is None:\n pos = i\n break\n\n if pos != -1:\n indiv = copy.deepcopy(self.population[pos])\n self.population.pop(pos)\n else:\n random.shuffle(self.population)\n # avoid only 1 individual has result\n if len(self.population) > 1 and self.population[0].result < self.population[1].result:\n self.population[0] = self.population[1]\n\n # mutation on the worse individual\n space = json2space(self.searchspace_json,\n self.population[0].config)\n is_rand = dict()\n mutation_pos = space[random.randint(0, len(space)-1)]\n\n ", "d_id": 24533, "documentation": { "docstring": "\n This function will generate the config for a trial.\n If at the first generation, randomly generates individuals to satisfy self.population_size.\n Otherwise, random choose a pair of individuals and compare their fitnesses.\n The worst of the pair will be removed. 
Copy the best of the pair and mutate it to generate a new individual.\n\n Parameters\n ----------\n\n parameter_id : int\n\n Returns\n -------\n dict\n A group of candidate parameters that evolution tuner generated.\n ", "n_words": 70, "vocab_size": 54, "n_whitespaces": 159, "language": "en" } }, { "id": 30232, "commit_id": "448bd75fe5de981995446a536963c5bd11e491ec", "repo": "spotify-downloader", "path": "spotdl/console/web.py", "file_name": "web.py", "fun_name": "connect", "commit_message": "fixed docstrings", "code": "async def connect(self):\n \n\n connection = {\"client_id\": self.client_id, \"websocket\": self.websocket}\n logging.info(f\"Connecting WebSocket: {connection}\")\n await self.websocket.accept()\n WSProgressHandler.instances.append(self)\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 50, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 44, "n_ast_nodes": 83, "n_identifiers": 11, "random_cut": "async def connect(self):\n \n\n connection = {\"client_id\": self.client_id, \"websocket\": self.websocket}\n logging.info(f\"Connect", "d_id": 5417, "documentation": { "docstring": "\n Called when a new client connects to the websocket.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 243990, "commit_id": "1516986a616fee8bb741d0ab2be40683045efccd", "repo": "mmdetection", "path": "mmdet/datasets/custom.py", "file_name": "custom.py", "fun_name": "prepare_test_img", "commit_message": "[Feature] Support OpenImages Dataset (#6331)\n\n* [Feature] support openimage group of eval\r\n\r\n* [Feature] support openimage group of eval\r\n\r\n* support openimage dataset\r\n\r\n* support openimage challenge dataset\r\n\r\n* fully support OpenImages-V6 and OpenImages Challenge 2019\r\n\r\n* Fix some logic error\r\n\r\n* update config file\r\n\r\n* fix get data_infos error\r\n\r\n* fully support OpenImages evaluation\r\n\r\n* update OpenImages config files\r\n\r\n* [Feature] support OpenImages datasets\r\n\r\n* fix bug\r\n\r\n* support load image metas from pipeline\r\n\r\n* fix bug\r\n\r\n* fix get classes logic error\r\n\r\n* update code\r\n\r\n* support get image metas\r\n\r\n* support openimags\r\n\r\n* support collect image metas\r\n\r\n* support Open Images\r\n\r\n* fix openimages logic\r\n\r\n* minor fix\r\n\r\n* add a new function to compute openimages tpfp\r\n\r\n* minor fix\r\n\r\n* fix ci error\r\n\r\n* minor fix\r\n\r\n* fix indication\r\n\r\n* minor fix\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* minor fix\r\n\r\n* update readme\r\n\r\n* support loading image level labels and fix some logic\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* add class names\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* add openimages test unit\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* fix test unit\r\n\r\n* minor fix\r\n\r\n* fix logic error\r\n\r\n* minor fix\r\n\r\n* fully support openimages\r\n\r\n* minor fix\r\n\r\n* fix docstring\r\n\r\n* fix docstrings in readthedocs\r\n\r\n* update get image metas script\r\n\r\n* label_description_file -> label_file\r\n\r\n* update openimages readme\r\n\r\n* fix test unit\r\n\r\n* fix test unit\r\n\r\n* minor fix\r\n\r\n* update readme file\r\n\r\n* Update get_image_metas.py", "code": "def prepare_test_img(self, idx):\n \n\n img_info = self.data_infos[idx]\n results = dict(img_info=img_info)\n if 
self.proposals is not None:\n results['proposals'] = self.proposals[idx]\n self.pre_pipeline(results)\n return self.pipeline(results)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 56, "n_ast_nodes": 92, "n_identifiers": 10, "random_cut": "def prepare_test_img(self, idx):\n \n\n img_info = self.data_infos[idx]\n results = dict(img_info=img_in", "d_id": 70180, "documentation": { "docstring": "Get testing data after pipeline.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Testing data after pipeline with new keys introduced by \\\n pipeline.\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 82, "language": "en" } }, { "id": 67253, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/provident_fund_deductions/provident_fund_deductions.py", "file_name": "provident_fund_deductions.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data(filters):\n\tdata = []\n\n\tconditions = get_conditions(filters)\n\n\tsalary_slips = frappe.db.sql(\n\t\t\n\t\t% (conditions),\n\t\tas_dict=1,\n\t)\n\n\tcomponent_type_dict = frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\t)\n\n\tif not len(component_type_dict):\n\t\treturn []\n\n\tentry = frappe.db.sql(\n\t\t\n\t\t% (conditions, \", \".join([\"%s\"] * len(component_type_dict))),\n\t\ttuple(component_type_dict.keys()),\n\t\tas_dict=1,\n\t)\n\n\tdata_list = prepare_data(entry, component_type_dict)\n\n\tfor d in salary_slips:\n\t\ttotal = 0\n\t\tif data_list.get(d.name):\n\t\t\temployee = {\n\t\t\t\t\"employee\": data_list.get(d.name).get(\"employee\"),\n\t\t\t\t\"employee_name\": data_list.get(d.name).get(\"employee_name\"),\n\t\t\t\t\"pf_account\": data_list.get(d.name).get(\"pf_account\"),\n\t\t\t}\n\n\t\t\tif data_list.get(d.name).get(\"Provident Fund\"):\n\t\t\t\temployee[\"pf_amount\"] = data_list.get(d.name).get(\"Provident Fund\")\n\t\t\t\ttotal += data_list.get(d.name).get(\"Provident Fund\")\n\n\t\t\tif data_list.get(d.name).get(\"Additional Provident Fund\"):\n\t\t\t\temployee[\"additional_pf\"] = data_list.get(d.name).get(\"Additional Provident Fund\")\n\t\t\t\ttotal += data_list.get(d.name).get(\"Additional Provident Fund\")\n\n\t\t\tif data_list.get(d.name).get(\"Provident Fund Loan\"):\n\t\t\t\temployee[\"pf_loan\"] = data_list.get(d.name).get(\"Provident Fund Loan\")\n\t\t\t\ttotal += data_list.get(d.name).get(\"Provident Fund Loan\")\n\n\t\t\temployee[\"total\"] = total\n\n\t\t\tdata.append(employee)\n\n\treturn data\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 67, "n_words": 107, "vocab_size": 60, "complexity": 7, "nloc": 52, "token_counts": 337, "n_ast_nodes": 586, "n_identifiers": 26, "random_cut": "def get_data(filters):\n\tdata = []\n\n\tconditions = get_conditions(filters)\n\n\tsalary_slips = frappe.db.sql(\n\t\t\n\t\t% (conditions),\n\t\tas_dict=1,\n\t)\n\n\tcomponent_type_dict = frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\t)\n\n\tif not len(component_type_dict):\n\t\treturn []\n\n\tentry = frappe.db.sql(\n\t\t\n\t\t% (conditions, \", \".join([\"%s\"] * len(component_type_dict))),\n\t\ttuple(component_type_dict.keys()),\n\t\tas_dict=1,\n\t)\n\n\tdata_list = prepare_data(entry, 
component_type_dict)\n\n\tfor d in salary_slips:\n\t\ttotal = 0\n\t\tif data_list.get(d.name):\n\t\t\temployee = {\n\t\t\t\t\"employee\": data_list.get(d.name).get(\"employee\"),\n\t\t\t\t\"employee_name\": data_list.get(d.name).get(\"employee_name\"),\n\t\t\t\t\"pf_account\": data_list.get(d.name).get(\"pf_account\"),\n\t\t\t}\n\n\t\t\tif data_list.get(d.name).get(\"Provident Fund\"):\n\t\t\t\temployee[\"pf_amount\"] = data_list.get(d.name).get(\"Provident Fund\")\n\t\t\t\ttotal += data_list.get(", "d_id": 14456, "documentation": { "docstring": " select sal.name from `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t\t select name, component_type from `tabSalary Component`\n\t\twhere component_type in ('Provident Fund', 'Additional Provident Fund', 'Provident Fund Loan') select sal.name, sal.employee, sal.employee_name, ded.salary_component, ded.amount\n\t\tfrom `tabSalary Slip` sal, `tabSalary Detail` ded\n\t\twhere sal.name = ded.parent\n\t\tand ded.parentfield = 'deductions'\n\t\tand ded.parenttype = 'Salary Slip'\n\t\tand sal.docstatus = 1 %s\n\t\tand ded.salary_component in (%s)\n\t", "n_words": 63, "vocab_size": 40, "n_whitespaces": 55, "language": "en" } }, { "id": 307681, "commit_id": "219cee2ca9f6cd9eb7e0abcbda6d9540240e20d3", "repo": "core", "path": "homeassistant/components/trace/models.py", "file_name": "models.py", "fun_name": "as_dict", "commit_message": "Move Trace classes to separate module (#78433)", "code": "def as_dict(self) -> dict[str, Any]:\n \n return {\n \"extended_dict\": self.as_extended_dict(),\n \"short_dict\": self.as_short_dict(),\n }\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 32, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def as_dict(self) -> dict[str, Any]:\n \n return {\n \"extended_dict\": self.a", "d_id": 106449, "documentation": { "docstring": "Return an dictionary version of this ActionTrace for saving.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 280502, "commit_id": "5a105aadbdc6fde2c2529280c4789864adbb81c7", "repo": "keras", "path": "keras/optimizers/__init__.py", "file_name": "__init__.py", "fun_name": "deserialize", "commit_message": "Move new optimizer out of optimizer_experimental/ directory.\n\nPiperOrigin-RevId: 488998585", "code": "def deserialize(config, custom_objects=None, **kwargs):\n \n # loss_scale_optimizer has a direct dependency of optimizer, import here\n # rather than top to avoid the cyclic dependency.\n from keras.mixed_precision import (\n loss_scale_optimizer,\n )\n\n use_legacy_optimizer = kwargs.pop(\"use_legacy_optimizer\", False)\n if len(config[\"config\"]) > 0:\n # If the optimizer config is not empty, then we use the value of\n # `is_legacy_optimizer` to override `use_legacy_optimizer`. 
If\n # `is_legacy_optimizer` does not exist in config, it means we are\n # using the legacy optimzier.\n use_legacy_optimizer = config[\"config\"].get(\"is_legacy_optimizer\", True)\n if (\n tf.__internal__.tf2.enabled()\n and tf.executing_eagerly()\n and not use_legacy_optimizer\n ):\n all_classes = {\n \"adadelta\": adadelta_experimental.Adadelta,\n \"adagrad\": adagrad_experimental.Adagrad,\n \"adam\": adam_experimental.Adam,\n \"adamax\": adamax_experimental.Adamax,\n \"experimentaladadelta\": adadelta_experimental.Adadelta,\n \"experimentaladagrad\": adagrad_experimental.Adagrad,\n \"experimentaladam\": adam_experimental.Adam,\n \"experimentalsgd\": sgd_experimental.SGD,\n \"nadam\": nadam_experimental.Nadam,\n \"rmsprop\": rmsprop_experimental.RMSprop,\n \"sgd\": sgd_experimental.SGD,\n \"ftrl\": ftrl_experimental.Ftrl,\n \"lossscaleoptimizer\": loss_scale_optimizer.LossScaleOptimizerV3,\n \"lossscaleoptimizerv3\": loss_scale_optimizer.LossScaleOptimizerV3,\n # LossScaleOptimizerV1 was an old version of LSO that was removed.\n # Deserializing it turns it into a LossScaleOptimizer\n \"lossscaleoptimizerv1\": loss_scale_optimizer.LossScaleOptimizer,\n }\n else:\n all_classes = {\n \"adadelta\": adadelta_v2.Adadelta,\n \"adagrad\": adagrad_v2.Adagrad,\n \"adam\": adam_v2.Adam,\n \"adamax\": adamax_v2.Adamax,\n \"experimentaladadelta\": adadelta_experimental.Adadelta,\n \"experimentaladagrad\": adagrad_experimental.Adagrad,\n \"experimentaladam\": adam_experimental.Adam,\n \"experimentalsgd\": sgd_experimental.SGD,\n \"nadam\": nadam_v2.Nadam,\n \"rmsprop\": rmsprop_v2.RMSprop,\n \"sgd\": gradient_descent_v2.SGD,\n \"ftrl\": ftrl_v2.Ftrl,\n \"lossscaleoptimizer\": loss_scale_optimizer.LossScaleOptimizer,\n \"lossscaleoptimizerv3\": loss_scale_optimizer.LossScaleOptimizerV3,\n # LossScaleOptimizerV1 was an old version of LSO that was removed.\n # Deserializing it turns it into a LossScaleOptimizer\n \"lossscaleoptimizerv1\": loss_scale_optimizer.LossScaleOptimizer,\n }\n\n # Make deserialization case-insensitive for built-in optimizers.\n if config[\"class_name\"].lower() in all_classes:\n config[\"class_name\"] = config[\"class_name\"].lower()\n return deserialize_keras_object(\n config,\n module_objects=all_classes,\n custom_objects=custom_objects,\n printable_module_name=\"optimizer\",\n )\n\n\n@keras_export(\n \"keras.__internal__.optimizers.convert_to_legacy_optimizer\", v1=[]\n)", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\n \"keras.__internal__.optimizers.convert_to_legacy_optimizer\", v1=[]\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 761, "n_words": 218, "vocab_size": 130, "complexity": 6, "nloc": 55, "token_counts": 311, "n_ast_nodes": 547, "n_identifiers": 49, "random_cut": "def deserialize(config, custom_objects=None, **kwargs):\n \n # loss_scale_optimizer has a direct dependency of optimizer, import here\n # rather than top to avoid the cyclic ", "d_id": 83359, "documentation": { "docstring": "Inverse of the `serialize` function.\n\n Args:\n config: Optimizer configuration dictionary.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during\n deserialization.\n\n Returns:\n A Keras Optimizer instance.\n ", "n_words": 32, "vocab_size": 30, "n_whitespaces": 84, "language": "en" } }, { "id": 219661, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": 
"_pydecimal.py", "fun_name": "multiply", "commit_message": "add python 3.10.4 for windows", "code": "def multiply(self, a, b):\n \n a = _convert_other(a, raiseit=True)\n r = a.__mul__(b, context=self)\n if r is NotImplemented:\n raise TypeError(\"Unable to convert %s to Decimal\" % b)\n else:\n return r\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 85, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 7, "token_counts": 48, "n_ast_nodes": 78, "n_identifiers": 11, "random_cut": "def multiply(self, a, b):\n \n a = _convert_other(a, raiseit=True)\n r = a.__mul__(b, context=self)\n if r is NotImplemented:\n raise TypeError(\"Unable to convert %s to Decimal\" %", "d_id": 55689, "documentation": { "docstring": "multiply multiplies two operands.\n\n If either operand is a special value then the general rules apply.\n Otherwise, the operands are multiplied together\n ('long multiplication'), resulting in a number which may be as long as\n the sum of the lengths of the two operands.\n\n >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))\n Decimal('3.60')\n >>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))\n Decimal('21')\n >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))\n Decimal('0.72')\n >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))\n Decimal('-0.0')\n >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))\n Decimal('4.28135971E+11')\n >>> ExtendedContext.multiply(7, 7)\n Decimal('49')\n >>> ExtendedContext.multiply(Decimal(7), 7)\n Decimal('49')\n >>> ExtendedContext.multiply(7, Decimal(7))\n Decimal('49')\n ", "n_words": 75, "vocab_size": 53, "n_whitespaces": 222, "language": "en" } }, { "id": 43210, "commit_id": "95bd6b71cc9f5da377e272707f7b68000d980939", "repo": "airflow", "path": "tests/utils/test_db_cleanup.py", "file_name": "test_db_cleanup.py", "fun_name": "test_run_cleanup_skip_archive", "commit_message": "Don't rely on current ORM structure for db clean command (#23574)\n\nFor command DB clean, by not relying on the ORM models, we will be able to use the command even when the metadatabase is not yet upgraded to the version of Airflow you have installed.\r\n\r\nAdditionally we archive all rows before deletion.", "code": "def test_run_cleanup_skip_archive(self, cleanup_table_mock, kwargs, should_skip):\n \n run_cleanup(\n clean_before_timestamp=None,\n table_names=['log'],\n dry_run=None,\n verbose=None,\n confirm=False,\n **kwargs,\n )\n assert cleanup_table_mock.call_args[1]['skip_archive'] is should_skip\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 111, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 10, "token_counts": 52, "n_ast_nodes": 78, "n_identifiers": 12, "random_cut": "def test_run_cleanup_skip_archive(self, cleanup_table_mock, kwargs, should_skip):\n \n run_cleanup(\n clean_before_timestamp=None,\n table_", "d_id": 7871, "documentation": { "docstring": "test that delete confirmation input is called when appropriate", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 6409, "commit_id": "d0bcbb2a6e2ab82501fd34ef583329ff2ac22a15", "repo": "ludwig", "path": "ludwig/datasets/base_dataset.py", "file_name": "base_dataset.py", "fun_name": "process", "commit_message": "Add and expand docstrings in base_dataset.py (#1819)", "code": "def process(self) -> None:\n \n if not 
self.is_downloaded():\n self.download()\n self.process_downloaded_dataset()\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 48, "n_identifiers": 5, "random_cut": "def process(self) -> None:\n \n if not self.is_downloaded():\n self.download()\n ", "d_id": 976, "documentation": { "docstring": "Process the dataset into a dataframe and save it at self.processed_dataset_path.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 136008, "commit_id": "e707ce4fb3717e3c05118c57f503dfbd03552ca9", "repo": "ray", "path": "rllib/utils/tests/test_actor_manager.py", "file_name": "test_actor_manager.py", "fun_name": "test_healthy_only_works_for_list_of_functions", "commit_message": "[RLlib] Refactor `WorkerSet` on top of `FaultTolerantActorManager`. (#29938)\n\nSigned-off-by: Jun Gong ", "code": "def test_healthy_only_works_for_list_of_functions(self):\n \n actors = [Actor.remote(i) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n # Mark first and second actor as unhealthy.\n manager.set_actor_state(1, False)\n manager.set_actor_state(2, False)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 66, "n_words": 24, "vocab_size": 22, "complexity": 4, "nloc": 11, "token_counts": 115, "n_ast_nodes": 77, "n_identifiers": 10, "random_cut": "def test_healthy_only_works_for_list_of_functions(self):\n \n act", "d_id": 30799, "documentation": { "docstring": "Test healthy only mode works when a list of funcs are provided.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 99587, "commit_id": "1730c481f1a8a71446326fa1ff72e10663016385", "repo": "sentry", "path": "tests/sentry/integrations/slack/notifications/test_unassigned.py", "file_name": "test_unassigned.py", "fun_name": "test_unassignment", "commit_message": "fix(notifications): Use `metrics_key` (#34572)", "code": "def test_unassignment(self, mock_func):\n \n notification = UnassignedActivityNotification(\n Activity(\n project=self.project,\n group=self.group,\n user=self.user,\n type=ActivityType.ASSIGNED,\n data={\"assignee\": \"\"},\n )\n )\n with self.tasks():\n notification.send()\n\n attachment, text = get_attachment()\n assert text == f\"Issue unassigned by {self.name}\"\n assert attachment[\"title\"] == self.group.title\n assert (\n attachment[\"footer\"]\n == f\"{self.project.slug} | \"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 235, "n_words": 42, "vocab_size": 34, "complexity": 1, "nloc": 19, "token_counts": 93, "n_ast_nodes": 171, "n_identifiers": 21, "random_cut": "def test_unassignment(self, mock_func):\n \n notification = UnassignedActivityNotification(\n ", "d_id": 19665, "documentation": { "docstring": "\n Test that a Slack message is sent with the expected payload when an issue is unassigned\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 281546, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/insider/insider_controller.py", "file_name": "insider_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for 
how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n has_ticker_start = \"[unvl]\" if not self.ticker else \"\"\n has_ticker_end = \"[/unvl]\" if not self.ticker else \"\"\n\n help_text = f\n console.print(text=help_text, menu=\"Stocks - Insider Trading\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 61, "n_words": 26, "vocab_size": 19, "complexity": 3, "nloc": 45, "token_counts": 42, "n_ast_nodes": 100, "n_identifiers": 11, "random_cut": "def print_help(self):\n \n has_ticker_start = \"", "d_id": 83843, "documentation": { "docstring": "Print help[cmds]\n view view available presets\n set set one of the available presets[/cmds]\n\n[param]PRESET: [/param]{self.preset}[cmds]\n\n filter filter insiders based on preset [src][Open Insider][/src]\n\n\n load load a specific stock ticker for analysis[/cmds]\n{has_ticker_start}\n[param]Ticker: [/param]{self.ticker}\n\n stats insider stats of the company [src][Open Insider][/src]\n act insider activity over time [src][Business Insider][/src]\n lins last insider trading of the company 
[src][Finviz][/src]\n{has_ticker_end}\n\n[info]Latest Insiders[/info] [src][Open Insider][/src][cmds]\n lcb latest cluster boys\n lpsb latest penny stock buys\n lit latest insider trading (all filings)\n lip latest insider purchases\n blip big latest insider purchases ($25k+)\n blop big latest officer purchases ($25k+)\n blcp big latest CEO/CFO purchases ($25k+)\n lis latest insider sales\n blis big latest insider sales ($100k+)\n blos big latest officer sales ($100k+)\n blcs big latest CEO/CFO sales ($100k+)\n[info]Top Insiders [src][Open Insider][/src][/info]\n topt top officer purchases today\n toppw top officer purchases past week\n toppm top officer purchases past month\n tipt top insider purchases today\n tippw top insider purchases past week\n tippm top insider purchases past month\n tist top insider sales today\n tispw top insider sales past week\n tispm top insider sales past month[/cmds]\n", "n_words": 176, "vocab_size": 88, "n_whitespaces": 490, "language": "en" } }, { "id": 87078, "commit_id": "e0e2c4ff4248042abda3cc93024930dada416af8", "repo": "sentry", "path": "tests/sentry/relay/test_config.py", "file_name": "test_config.py", "fun_name": "test_project_config_dynamic_sampling_is_none", "commit_message": "feat(dynamic-sampling): Handles updating ProjectConfig with uniform DS rule for v2 [TET-465] (#40268)\n\nThis PR forces your uniform rule by your plan\r\nor respect old logic. If both feature flags are enabled\r\ndynamic-sampling-basic flag takes the highest precedence.\r\n\r\nOriginal PR https://github.com/getsentry/sentry/pull/40180 was reverted\r\nvia https://github.com/getsentry/sentry/pull/40266 due to issue of\r\nremoving incorrect line.\r\n\r\nCo-authored-by: Joris Bayer ", "code": "def test_project_config_dynamic_sampling_is_none(default_project):\n \n default_project.update_option(\"sentry:dynamic_sampling\", None)\n\n with Feature({\"organizations:server-side-sampling\": True}):\n cfg = get_project_config(default_project)\n\n cfg = cfg.to_dict()\n dynamic_sampling = get_path(cfg, \"config\", \"dynamicSampling\")\n\n assert dynamic_sampling is None\n\n\n@pytest.mark.django_db", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.mark.django_db", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 47, "n_words": 23, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 103, "n_identifiers": 12, "random_cut": "def test_project_config_dynamic_sampling_is_none(default_project):\n \n default_project.update_option(\"sentry:dynamic_sampling\", None)\n\n with Feature({\"organizations:server-side-sampling\": True}):\n cfg = get_project_config(default_project)\n\n cfg = cfg.to_dict()\n dynamic_sampling = get_path(cfg, \"config\", \"dynamicSampling\")\n\n assert dynamic_sampling is None", "d_id": 18216, "documentation": { "docstring": "\n Tests test check inc-237 that dynamic sampling is None,\n so it's pass when we have fix and fails when we dont\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 31, "language": "en" } }, { "id": 221198, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bz2.py", "file_name": "bz2.py", "fun_name": "seek", "commit_message": "add python 3.10.4 for windows", "code": "def seek(self, offset, whence=io.SEEK_SET):\n \n self._check_can_seek()\n return self._buffer.seek(offset, whence)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 
29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 48, "n_identifiers": 8, "random_cut": "def seek(self, offset, whence=io.SEEK_SET):\n ", "d_id": 56268, "documentation": { "docstring": "Change the file position.\n\n The new position is specified by offset, relative to the\n position indicated by whence. Values for whence are:\n\n 0: start of stream (default); offset must not be negative\n 1: current stream position\n 2: end of stream; offset must not be positive\n\n Returns the new file position.\n\n Note that seeking is emulated, so depending on the parameters,\n this operation may be extremely slow.\n ", "n_words": 66, "vocab_size": 49, "n_whitespaces": 141, "language": "en" } }, { "id": 204818, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/base.py", "file_name": "base.py", "fun_name": "savepoint", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def savepoint(self):\n \n if not self._savepoint_allowed():\n return\n\n thread_ident = _thread.get_ident()\n tid = str(thread_ident).replace(\"-\", \"\")\n\n self.savepoint_state += 1\n sid = \"s%s_x%d\" % (tid, self.savepoint_state)\n\n self.validate_thread_sharing()\n self._savepoint(sid)\n\n return sid\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 100, "n_words": 26, "vocab_size": 22, "complexity": 2, "nloc": 10, "token_counts": 64, "n_ast_nodes": 113, "n_identifiers": 13, "random_cut": "def savepoint(self):\n \n if not self._savepoint_allowed():\n return\n\n thread_ident = _thread.get_ident()\n tid = str(thread_ident).replace(\"-\", \"\")\n\n self.savepoint_state += 1\n sid = \"s%s_x%d\" % (tid, self.savepoint_state)\n\n self.validate_thread_sharing()\n self._savepoint(sid)\n\n return sid\n", "d_id": 50903, "documentation": { "docstring": "\n Create a savepoint inside the current transaction. Return an\n identifier for the savepoint that will be used for the subsequent\n rollback or commit. 
Do nothing if savepoints are not supported.\n ", "n_words": 30, "vocab_size": 26, "n_whitespaces": 59, "language": "en" } }, { "id": 65388, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/unpaid_expense_claim/unpaid_expense_claim.py", "file_name": "unpaid_expense_claim.py", "fun_name": "get_unclaimed_expese_claims", "commit_message": "style: format code with black", "code": "def get_unclaimed_expese_claims(filters):\n\tcond = \"1=1\"\n\tif filters.get(\"employee\"):\n\t\tcond = \"ec.employee = %(employee)s\"\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tcond=cond\n\t\t),\n\t\tfilters,\n\t\tas_list=1,\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 9, "n_words": 20, "vocab_size": 17, "complexity": 2, "nloc": 22, "token_counts": 42, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def get_unclaimed_expese_claims(filters):\n\tcond = \"1=1\"\n\tif filters.get(\"employee\"):", "d_id": 13876, "documentation": { "docstring": "\n\t\tselect\n\t\t\tec.employee, ec.employee_name, ec.name, ec.total_sanctioned_amount, ec.total_amount_reimbursed,\n\t\t\tsum(gle.credit_in_account_currency - gle.debit_in_account_currency) as outstanding_amt\n\t\tfrom\n\t\t\t`tabExpense Claim` ec, `tabGL Entry` gle\n\t\twhere\n\t\t\tgle.against_voucher_type = \"Expense Claim\" and gle.against_voucher = ec.name\n\t\t\tand gle.party is not null and ec.docstatus = 1 and ec.is_paid = 0 and {cond} group by ec.name\n\t\thaving\n\t\t\toutstanding_amt > 0\n\t", "n_words": 49, "vocab_size": 39, "n_whitespaces": 39, "language": "en" } }, { "id": 20227, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/unix.py", "file_name": "unix.py", "fun_name": "site_config_dir", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def site_config_dir(self) -> str:\n \n # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False\n path = os.environ.get(\"XDG_CONFIG_DIRS\", \"\")\n if not path.strip():\n path = \"/etc/xdg\"\n return self._with_multi_path(path)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 73, "n_words": 27, "vocab_size": 24, "complexity": 2, "nloc": 10, "token_counts": 38, "n_ast_nodes": 71, "n_identifiers": 9, "random_cut": "def site_config_dir(self) -> str:\n \n # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False\n path = os.environ.get", "d_id": 3279, "documentation": { "docstring": "\n :return: config directories shared by users (if `multipath `\n is enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS\n path separator), e.g. 
``/etc/xdg/$appname/$version``\n ", "n_words": 34, "vocab_size": 25, "n_whitespaces": 65, "language": "en" } }, { "id": 260360, "commit_id": "4cc347d4d0cbbfdcbd353f08842e0668fed78c9f", "repo": "scikit-learn", "path": "sklearn/decomposition/_fastica.py", "file_name": "_fastica.py", "fun_name": "fit_transform", "commit_message": "MAINT Use _validate_params in FastICA (#23711)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: jeremiedbb ", "code": "def fit_transform(self, X, y=None):\n \n self._validate_params()\n\n return self._fit_transform(X, compute_sources=True)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def fit_transform(self, X, y=None):\n \n self._validate_params()\n\n return self._fit_transform(X, compute_sources=", "d_id": 76206, "documentation": { "docstring": "Fit the model and recover the sources from X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Estimated sources obtained by transforming the data with the\n estimated unmixing matrix.\n ", "n_words": 66, "vocab_size": 49, "n_whitespaces": 177, "language": "en" } }, { "id": 86689, "commit_id": "ceee9dfd8d6fed70d34546e7b46ebb7bf1d49745", "repo": "sentry", "path": "src/sentry/api/endpoints/project_dynamic_sampling.py", "file_name": "project_dynamic_sampling.py", "fun_name": "__fetch_randomly_sampled_transactions", "commit_message": "feat(dynamic-sampling): Improve empty transaction breakdown message [TET-338] (#39539)\n\nThis PR add new attribute parentProjectBreakdown to\r\n/api/0/projects///dynamic-sampling/distribution/\r\napi:\r\n```\r\n{\r\n \"projectBreakdown\": null,\r\n \"sampleSize\": 0,\r\n \"startTimestamp\": null,\r\n \"endTimestamp\": null,\r\n \"parentProjectBreakdown\": [\r\n {\r\n \"projectId\": 1,\r\n \"percentage\": 0.9,\r\n \"project\": \"sentry\"\r\n },\r\n {\r\n \"projectId\": 2,\r\n \"percentage\": 0.1,\r\n \"project\": \"javascript\"\r\n }\r\n ]\r\n}\r\n```\r\n\r\nTODO:\r\n- [x] Update src/sentry/snuba/referrer.py\r\nhttps://github.com/getsentry/sentry/blob/0fbbf1626f86399b1ca4a2781d66ef96aac69de7/src/sentry/snuba/referrer.py#L208-L210\r\n- [x] Add missing tests\r\n\r\nCo-authored-by: Andrii Soldatenko \r\nCo-authored-by: ahmedetefy ", "code": "def __fetch_randomly_sampled_transactions(self, project, query, sample_size, query_time_range):\n \n sampling_factor = self.__generate_transactions_sampling_factor(\n project=project,\n query=query,\n sample_size=sample_size,\n query_time_range=query_time_range,\n )\n builder = QueryBuilder(\n Dataset.Discover,\n params={\n \"start\": query_time_range.start_time,\n \"end\": query_time_range.end_time,\n \"project_id\": [project.id],\n \"organization_id\": project.organization.id,\n },\n query=f\"{query} event.type:transaction\",\n selected_columns=[\n \"id\",\n \"trace\",\n \"random_number() as rand_num\",\n f\"modulo(rand_num, {sampling_factor}) as modulo_num\",\n ],\n equations=[],\n orderby=None,\n auto_fields=True,\n auto_aggregations=True,\n use_aggregate_conditions=True,\n functions_acl=[\"random_number\", \"modulo\"],\n 
limit=sample_size,\n offset=0,\n equation_config={\"auto_add\": False},\n )\n builder.add_conditions([Condition(lhs=Column(\"modulo_num\"), op=Op.EQ, rhs=0)])\n snuba_query = builder.get_snql_query().query\n\n snuba_query = snuba_query.set_select(\n snuba_query.select\n + [\n Function(\n \"not\",\n [Function(\"has\", [Column(\"contexts.key\"), TRACE_PARENT_SPAN_CONTEXT])],\n alias=\"is_root\",\n )\n ]\n )\n snuba_query = snuba_query.set_groupby(\n snuba_query.groupby + [Column(\"modulo_num\"), Column(\"contexts.key\")]\n )\n\n data = raw_snql_query(\n SnubaRequest(dataset=Dataset.Discover.value, app_id=\"default\", query=snuba_query),\n referrer=Referrer.DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_TRANSACTIONS.value,\n )[\"data\"]\n return data\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 672, "n_words": 92, "vocab_size": 78, "complexity": 1, "nloc": 52, "token_counts": 275, "n_ast_nodes": 436, "n_identifiers": 53, "random_cut": "def __fetch_randomly_sampled_transactions(self, project, query, sample_size, query_time_range):\n \n sampling_factor = self.__generate_transactions_sampling_factor(\n project=project,\n query=query,\n sample_size=sample_size,\n query_time_range=query_time_range,\n )\n builder = QueryBuilder(\n Dataset.Discover,\n params={\n \"start\": query_time_range.start_time,\n \"end\": query_time_range.end_time,\n \"project_id\": [project.id],\n \"organization_id\": project.organization.id,\n },\n query=f\"{query} event.type:transaction\",\n selected_columns=[\n \"id\",\n \"trace\",\n \"random_number() as rand_num\",\n f\"modulo(rand_num, {sampling_factor}) as modulo_num\",\n ],\n equations=[],\n orderby=None,\n auto_fields=True,\n auto_aggregations=True,\n use_aggregate_conditions=True,\n functions_acl=[\"random_number\", \"modulo\"],\n limit=sample_size,\n offset=0,\n equation_config={\"auto_add\": False},\n )\n builder.add_conditions([Condition(lhs=Column(\"modulo_num\"), op=Op.EQ, rhs=0)])\n snuba_query = builder.get_snql_query().query\n\n snuba_query = snuba_query.set_select(\n snuba_query.select\n + [\n Function(\n \"not\",\n [Function(\"has\", [Column(\"contexts.key\"), TRACE_PARENT_SPAN_CONTEXT", "d_id": 18148, "documentation": { "docstring": "\n Fetches a random sample of transactions of size `sample_size` in the last period\n defined by `stats_period`. The random sample is fetched by generating a random number by\n for every row, and then doing a modulo operation on it, and if that number is divisible\n by the sampling factor then its kept, otherwise is discarded. This is an alternative to\n sampling the query before applying the conditions. 
The goal here is to fetch the\n transaction ids, their sample rates and their trace ids.\n ", "n_words": 82, "vocab_size": 56, "n_whitespaces": 132, "language": "en" } }, { "id": 64270, "commit_id": "41a95e56241ff8f3dceac7285f0bc6b9a43d7a06", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "item_query", "commit_message": "fix: ignore empty customer/supplier in item query (#29610)\n\n* fix: dont try to filter by customer/supplier if None\r\n\r\n* test: item query with emtpy supplier", "code": "def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False):\n\tconditions = []\n\n\tif isinstance(filters, str):\n\t\tfilters = json.loads(filters)\n\n\t#Get searchfields from meta and use in Item Link field query\n\tmeta = frappe.get_meta(\"Item\", cached=True)\n\tsearchfields = meta.get_search_fields()\n\n\t# these are handled separately\n\tignored_search_fields = (\"item_name\", \"description\")\n\tfor ignored_field in ignored_search_fields:\n\t\tif ignored_field in searchfields:\n\t\t\tsearchfields.remove(ignored_field)\n\n\tcolumns = ''\n\textra_searchfields = [field for field in searchfields\n\t\tif not field in [\"name\", \"item_group\", \"description\", \"item_name\"]]\n\n\tif extra_searchfields:\n\t\tcolumns = \", \" + \", \".join(extra_searchfields)\n\n\tsearchfields = searchfields + [field for field in[searchfield or \"name\", \"item_code\", \"item_group\", \"item_name\"]\n\t\tif not field in searchfields]\n\tsearchfields = \" or \".join([field + \" like %(txt)s\" for field in searchfields])\n\n\tif filters and isinstance(filters, dict):\n\t\tif filters.get('customer') or filters.get('supplier'):\n\t\t\tparty = filters.get('customer') or filters.get('supplier')\n\t\t\titem_rules_list = frappe.get_all('Party Specific Item',\n\t\t\t\tfilters = {'party': party}, fields = ['restrict_based_on', 'based_on_value'])\n\n\t\t\tfilters_dict = {}\n\t\t\tfor rule in item_rules_list:\n\t\t\t\tif rule['restrict_based_on'] == 'Item':\n\t\t\t\t\trule['restrict_based_on'] = 'name'\n\t\t\t\tfilters_dict[rule.restrict_based_on] = []\n\n\t\t\tfor rule in item_rules_list:\n\t\t\t\tfilters_dict[rule.restrict_based_on].append(rule.based_on_value)\n\n\t\t\tfor filter in filters_dict:\n\t\t\t\tfilters[scrub(filter)] = ['in', filters_dict[filter]]\n\n\t\t\tif filters.get('customer'):\n\t\t\t\tdel filters['customer']\n\t\t\telse:\n\t\t\t\tdel filters['supplier']\n\t\telse:\n\t\t\tfilters.pop('customer', None)\n\t\t\tfilters.pop('supplier', None)\n\n\n\tdescription_cond = ''\n\tif frappe.db.count('Item', cache=True) < 50000:\n\t\t# scan description only if items are less than 50000\n\t\tdescription_cond = 'or tabItem.description LIKE %(txt)s'\n\treturn frappe.db.sql(.format(\n\t\t\tcolumns=columns,\n\t\t\tscond=searchfields,\n\t\t\tfcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'),\n\t\t\tmcond=get_match_cond(doctype).replace('%', '%%'),\n\t\t\tdescription_cond = description_cond),\n\t\t\t{\n\t\t\t\t\"today\": nowdate(),\n\t\t\t\t\"txt\": \"%%%s%%\" % txt,\n\t\t\t\t\"_txt\": txt.replace(\"%\", \"\"),\n\t\t\t\t\"start\": start,\n\t\t\t\t\"page_len\": page_len\n\t\t\t}, as_dict=as_dict)\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 175, "n_words": 235, "vocab_size": 145, "complexity": 22, "nloc": 
73, "token_counts": 449, "n_ast_nodes": 783, "n_identifiers": 55, "random_cut": "def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False):\n\tconditions = []\n\n\tif isinstance(filters, str):\n\t\tfilters = json.loads(filters)\n\n\t#Get searchfields from meta and use in Item Link field query\n\tmeta = frappe.get_meta(\"Item\", cached=True)\n\tsearchfields = meta.get_search_fields()\n\n\t# these are handled separately\n\tignored_search_fields = (\"item_name\", \"description\")\n\tfor ignored_field in ignored_search_fields:\n\t\tif ignored_field in searchfields:\n\t\t\tsearchfields.remove(ignored_field)\n\n\tcolumns = ''\n\textra_searchfields = [field for field in searchfields\n\t\tif not field in [\"name\", \"item_group\", \"description\", \"item_name\"]]\n\n\tif extra_searchfields:\n\t\tcolumns = \", \" + \", \".join(extra_searchfields)\n\n\tsearchfields = searchfields + [field for field in[searchfield or \"name\", \"item_code\", \"item_group\", \"item_name\"]\n\t\tif not field in searchfields]\n\tsearchfields = \" or \".join([field + \" like %(txt)s\" for field in searchfields])\n\n\tif filters and isinstance(filters, dict):\n\t\tif filters.get('customer') or filters.get('supplier'):\n\t\t\tparty = filters.get('customer') or filters.get('supplier')\n\t\t\titem_rules_list = frappe.get_all('Party Specific Item',\n\t\t\t\tfilters = {'party': party}, fields = ['restrict_based_on', 'based_on_value'])\n\n\t\t\tfilters_dict = {}\n\t\t\tfor rule in item_rules_list:\n\t\t\t\tif rule['restrict_based_on'] == 'Item':\n\t\t\t\t\trule['restrict_based_on'] = 'name'\n\t\t\t\tfilters_dict[rule.restrict_based_on] = []\n\n\t\t\tfor rule in item_rules_list:\n\t\t\t\tfilters_dict[rule.restrict_based_on].append(rule.based_on_value)\n\n\t\t\tfor filter in filters_dict:\n\t\t\t\tfilters[scrub(filter)] = ['in', filters_dict[filter]]\n\n\t\t\tif filters.get('customer'):\n\t\t\t\tdel filters['customer']\n\t\t\telse:\n\t\t\t\tdel filters['supplier']\n\t\telse:\n\t\t\tfilters.pop('customer', None)\n\t\t\tfilters.pop('supplier', None)\n\n\n\tdescription_cond = ''\n\tif frappe.db.count('Item', cache=True) < 50000:\n\t\t# scan description only if items are less than 50000\n\t\tdescription_cond = 'or tabItem.description LIKE %(txt)s'\n\treturn frappe.db.sql(.format(\n\t\t\t", "d_id": 13591, "documentation": { "docstring": "select\n\t\t\ttabItem.name, tabItem.item_name, tabItem.item_group,\n\t\tif(length(tabItem.description) > 40, \\\n\t\t\tconcat(substr(tabItem.description, 1, 40), \"...\"), description) as description\n\t\t{columns}\n\t\tfrom tabItem\n\t\twhere tabItem.docstatus < 2\n\t\t\tand tabItem.disabled=0\n\t\t\tand tabItem.has_variants=0\n\t\t\tand (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00')\n\t\t\tand ({scond} or tabItem.item_code IN (select parent from `tabItem Barcode` where barcode LIKE %(txt)s)\n\t\t\t\t{description_cond})\n\t\t\t{fcond} {mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, item_name\n\t\tlimit %(start)s, %(page_len)s ", "n_words": 69, "vocab_size": 57, "n_whitespaces": 51, "language": "en" } }, { "id": 156567, "commit_id": "2b90415b02d3ad1b08362889e0818590ca3133f4", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "apply_and_enforce", "commit_message": "Add kwarg ``enforce_ndim`` to ``dask.array.map_blocks()`` (#8865)", "code": "def 
apply_and_enforce(*args, **kwargs):\n \n func = kwargs.pop(\"_func\")\n expected_ndim = kwargs.pop(\"expected_ndim\")\n out = func(*args, **kwargs)\n if getattr(out, \"ndim\", 0) != expected_ndim:\n out_ndim = getattr(out, \"ndim\", 0)\n raise ValueError(\n f\"Dimension mismatch: expected output of {func} \"\n f\"to have dims = {expected_ndim}. Got {out_ndim} instead.\"\n )\n return out\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 106, "n_words": 44, "vocab_size": 36, "complexity": 2, "nloc": 11, "token_counts": 68, "n_ast_nodes": 129, "n_identifiers": 10, "random_cut": "def apply_and_enforce(*args, **kwargs):\n \n func = kwargs.pop(\"_func\")\n expected_ndim = kwargs.pop(\"expected_ndim\")\n out = func(*args, **kwargs)\n if getattr(out, \"ndim\", 0) != expected_ndim:\n out_ndim = getattr(out, \"ndim\", 0)\n raise ValueError(\n f\"Dimensio", "d_id": 36680, "documentation": { "docstring": "Apply a function, and enforce the output.ndim to match expected_ndim\n\n Ensures the output has the expected dimensionality.", "n_words": 17, "vocab_size": 15, "n_whitespaces": 19, "language": "en" } }, { "id": 170534, "commit_id": "6b4fa02e10480c4ddae0714e36b7fe765fa42eac", "repo": "pandas", "path": "pandas/core/construction.py", "file_name": "construction.py", "fun_name": "_sanitize_non_ordered", "commit_message": "REF: simplify sanitize_array (#49347)\n\nREF: simpify sanitize_array", "code": "def _sanitize_non_ordered(data) -> None:\n \n if isinstance(data, (set, frozenset)):\n raise TypeError(f\"'{type(data).__name__}' type is unordered\")\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 26, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 26, "n_ast_nodes": 55, "n_identifiers": 8, "random_cut": "def _sanitize_non_ordered(data) -> None:\n \n if isinstance(data, (set, frozenset)):\n raise TypeError(f\"'{type(data).__name__}' type is unordered\")\n\n", "d_id": 40570, "documentation": { "docstring": "\n Raise only for unordered sets, e.g., not for dict_keys\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 16, "language": "en" } }, { "id": 222529, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/dis.py", "file_name": "dis.py", "fun_name": "dis", "commit_message": "add python 3.10.4 for windows", "code": "def dis(x=None, *, file=None, depth=None):\n \n if x is None:\n distb(file=file)\n return\n # Extract functions from methods.\n if hasattr(x, '__func__'):\n x = x.__func__\n # Extract compiled code objects from...\n if hasattr(x, '__code__'): # ...a function, or\n x = x.__code__\n elif hasattr(x, 'gi_code'): #...a generator object, or\n x = x.gi_code\n elif hasattr(x, 'ag_code'): #...an asynchronous generator object, or\n x = x.ag_code\n elif hasattr(x, 'cr_code'): #...a coroutine.\n x = x.cr_code\n # Perform the disassembly.\n if hasattr(x, '__dict__'): # Class or module\n items = sorted(x.__dict__.items())\n for name, x1 in items:\n if isinstance(x1, _have_code):\n print(\"Disassembly of %s:\" % name, file=file)\n try:\n dis(x1, file=file, depth=depth)\n except TypeError as msg:\n print(\"Sorry:\", msg, file=file)\n print(file=file)\n elif hasattr(x, 'co_code'): # Code object\n _disassemble_recursive(x, file=file, depth=depth)\n elif isinstance(x, (bytes, bytearray)): # Raw bytecode\n _disassemble_bytes(x, file=file)\n elif 
isinstance(x, str): # Source code\n _disassemble_str(x, file=file, depth=depth)\n else:\n raise TypeError(\"don't know how to disassemble %s objects\" %\n type(x).__name__)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 421, "n_words": 145, "vocab_size": 96, "complexity": 14, "nloc": 33, "token_counts": 249, "n_ast_nodes": 413, "n_identifiers": 29, "random_cut": "def dis(x=None, *, file=None, depth=None):\n \n if x is Non", "d_id": 56619, "documentation": { "docstring": "Disassemble classes, methods, functions, and other compiled objects.\n\n With no argument, disassemble the last traceback.\n\n Compiled objects currently include generator objects, async generator\n objects, and coroutine objects, all of which store their code object\n in a special attribute.\n ", "n_words": 38, "vocab_size": 34, "n_whitespaces": 53, "language": "en" } }, { "id": 199680, "commit_id": "d1d46df73ebaad94089847558d00a8b7269f554d", "repo": "sympy", "path": "sympy/polys/appellseqs.py", "file_name": "appellseqs.py", "fun_name": "bernoulli_poly", "commit_message": "Run orthopolys and appellseqs through a common interface\n\nIncluding unifying the two Chebyshev generators into one function.\nThere are also two kinds of Hermite polynomials, and they too share the\nsame recurrence, but the second type He_n(x) (aka the probabilist,\nreduced or small polynomials) will not be added here.", "code": "def bernoulli_poly(n, x=None, polys=False):\n r\n return named_poly(n, dup_bernoulli, QQ, \"Bernoulli polynomial\", (x,), polys)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 54, "token_counts": 33, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def bernoulli_poly(n, x=None, polys=False):\n ", "d_id": 49336, "documentation": { "docstring": "Generates the Bernoulli polynomial `\\operatorname{B}_n(x)`.\n\n `\\operatorname{B}_n(x)` is the unique polynomial satisfying\n\n .. math :: \\int_{x}^{x+1} \\operatorname{B}_n(t) \\,dt = x^n.\n\n Based on this, we have for nonnegative integer `s` and integer\n `a` and `b`\n\n .. math :: \\sum_{k=a}^{b} k^s = \\frac{\\operatorname{B}_{s+1}(b+1) -\n \\operatorname{B}_{s+1}(a)}{s+1}\n\n which is related to Jakob Bernoulli's original motivation for introducing\n the Bernoulli numbers, the values of these polynomials at `x = 1`.\n\n Examples\n ========\n\n >>> from sympy import summation\n >>> from sympy.abc import x\n >>> from sympy.polys import bernoulli_poly\n >>> bernoulli_poly(5, x)\n x**5 - 5*x**4/2 + 5*x**3/3 - x/6\n\n >>> def psum(p, a, b):\n ... return (bernoulli_poly(p+1,b+1) - bernoulli_poly(p+1,a)) / (p+1)\n >>> psum(4, -6, 27)\n 3144337\n >>> summation(x**4, (x, -6, 27))\n 3144337\n\n >>> psum(1, 1, x).factor()\n x*(x + 1)/2\n >>> psum(2, 1, x).factor()\n x*(x + 1)*(2*x + 1)/6\n >>> psum(3, 1, x).factor()\n x**2*(x + 1)**2/4\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n\n References\n ==========\n\n .. 
[1] https://en.wikipedia.org/wiki/Bernoulli_polynomials\n ", "n_words": 168, "vocab_size": 117, "n_whitespaces": 302, "language": "en" } }, { "id": 63813, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/tenacity/after.py", "file_name": "after.py", "fun_name": "after_log", "commit_message": "upd; format", "code": "def after_log(logger, log_level, sec_format=\"%0.3f\"):\n \n log_tpl = (\n \"Finished call to '%s' after \" + str(sec_format) + \"(s), \"\n \"this was the %s time calling it.\"\n )\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 26, "vocab_size": 24, "complexity": 1, "nloc": 7, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 6, "random_cut": "def after_log(logger, log_level, sec_format=\"%0.3f\"):\n \n log_tpl = (\n \"Finished call to '%s' af", "d_id": 13510, "documentation": { "docstring": "After call strategy that logs to some logger the finished attempt.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 110568, "commit_id": "de2192589f8ea50c9dc90be87b649399ff623feb", "repo": "matplotlib", "path": "lib/matplotlib/offsetbox.py", "file_name": "offsetbox.py", "fun_name": "_compat_get_offset", "commit_message": "Reparametrize offsetbox calculations in terms of bboxes.\n\nPassing a single bbox instead of (xdescent, ydescent, width, height)\nseparately is easier to follow (see e.g. the changes in VPacker and\nHPacker, which no longer have to repeatedly pack/unpack whd_list), and\navoids having to figure out e.g. the sign of the descents and whether\nwidth/height includes the descents, for example.\n\nCurrently get_offset keeps a back compatible signature (we *could*\nconsider killing the old signature but let's not do that for now), and\n_get_bbox_and_child_offsets is private because I *may* want to later\nalso change the convention to make offsets relative to the bbox (0, 0)\npoint rather than the bbox lower-left corner.", "code": "def _compat_get_offset(meth):\n \n sigs = [lambda self, width, height, xdescent, ydescent, renderer: locals(),\n lambda self, bbox, renderer: locals()]\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 48, "n_ast_nodes": 55, "n_identifiers": 11, "random_cut": "def _compat_get_offset(meth):\n \n sigs = [lambda self, ", "d_id": 24214, "documentation": { "docstring": "\n Decorator for the get_offset method of OffsetBox and subclasses, that\n allows supporting both the new signature (self, bbox, renderer) and the old\n signature (self, width, height, xdescent, ydescent, renderer).\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 42, "language": "en" } }, { "id": 218549, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "sixtofour", "commit_message": "add python 3.10.4 for windows", "code": "def sixtofour(self):\n \n if (self._ip >> 112) != 0x2002:\n return None\n return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 48, "n_words": 16, "vocab_size": 14, "complexity": 2, 
"nloc": 4, "token_counts": 34, "n_ast_nodes": 53, "n_identifiers": 4, "random_cut": "def sixtofour(self):\n ", "d_id": 55381, "documentation": { "docstring": "Return the IPv4 6to4 embedded address.\n\n Returns:\n The IPv4 6to4-embedded address if present or None if the\n address doesn't appear to contain a 6to4 embedded address.\n\n ", "n_words": 26, "vocab_size": 19, "n_whitespaces": 62, "language": "en" } }, { "id": 148805, "commit_id": "42e36f44f8a91a79a8ffa14698542f38df39cb50", "repo": "freqtrade", "path": "freqtrade/exchange/exchange.py", "file_name": "exchange.py", "fun_name": "reload_markets", "commit_message": "replaced \"leverage\" with \"tiers\"", "code": "def reload_markets(self) -> None:\n \n # Check whether markets have to be reloaded\n if (self._last_markets_refresh > 0) and (\n self._last_markets_refresh + self.markets_refresh_interval\n > arrow.utcnow().int_timestamp):\n return None\n logger.debug(\"Performing scheduled market reload..\")\n try:\n self._markets = self._api.load_markets(reload=True)\n # Also reload async markets to avoid issues with newly listed pairs\n self._load_async_markets(reload=True)\n self._last_markets_refresh = arrow.utcnow().int_timestamp\n self.fill_leverage_tiers()\n except ccxt.BaseError:\n logger.exception(\"Could not reload markets.\")\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 205, "n_words": 56, "vocab_size": 49, "complexity": 4, "nloc": 14, "token_counts": 94, "n_ast_nodes": 161, "n_identifiers": 18, "random_cut": "def reload_markets(self) -> None:\n \n # Check whether markets have to be reloaded\n if (self._last_markets_refresh > 0) and (\n self._last_markets_refresh + self.markets_refresh_interval\n > arrow.utcnow().int_timestamp):\n return None\n logger.debug(\"Performing scheduled market reload..\")\n try:\n self._markets = self._api.load_markets(reload=True)\n # Also reload async markets to avoid issues with newly listed pairs\n self._load_async_markets(reload=True)\n self._last_markets_refresh = arrow.utcnow().int_timestamp\n self.f", "d_id": 34338, "documentation": { "docstring": "Reload markets both sync and async if refresh interval has passed ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 196352, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/common.py", "file_name": "common.py", "fun_name": "is_strongly_diagonally_dominant", "commit_message": "Moved imports to higher level", "code": "def is_strongly_diagonally_dominant(self):\n r\n if not self.is_square:\n return False\n\n rows, cols = self.shape\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 43, "n_words": 12, "vocab_size": 12, "complexity": 3, "nloc": 40, "token_counts": 39, "n_ast_nodes": 37, "n_identifiers": 6, "random_cut": "def is_strongly_diagonally_dominant(self):\n r\n", "d_id": 47852, "documentation": { "docstring": "Tests if the matrix is row strongly diagonally dominant.\n\n Explanation\n ===========\n\n A $n, n$ matrix $A$ is row strongly diagonally dominant if\n\n .. 
math::\n \\left|A_{i, i}\\right| > \\sum_{j = 0, j \\neq i}^{n-1}\n \\left|A_{i, j}\\right| \\quad {\\text{for all }}\n i \\in \\{ 0, ..., n-1 \\}\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]])\n >>> A.is_strongly_diagonally_dominant\n False\n\n >>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]])\n >>> A.is_strongly_diagonally_dominant\n False\n\n >>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]])\n >>> A.is_strongly_diagonally_dominant\n True\n\n Notes\n =====\n\n If you want to test whether a matrix is column diagonally\n dominant, you can apply the test after transposing the matrix.\n ", "n_words": 121, "vocab_size": 79, "n_whitespaces": 301, "language": "en" } }, { "id": 7060, "commit_id": "e65f74e87e8e29922f4e9f9d839978ffb2c5b029", "repo": "ludwig", "path": "ludwig/features/binary_feature.py", "file_name": "binary_feature.py", "fun_name": "create_calibration_module", "commit_message": "Adds mechanism for calibrating probabilities for category and binary features (#1949)\n\n* Started adding files for calibration implementation.\r\n\r\n* Adds option to return logits and labels in predictor.\r\n\r\n* Pre-commit fixes\r\n\r\n* First pass temperature scaling working.\r\n\r\n* Fixes calibration for categorical feature.\r\n\r\n* Separate calibrated logits from logits.\r\n\r\n* Adds option to revert temperature scaling.\r\n\r\n* Refactoring, move binary prediction logic into calibration class.\r\n\r\n* Reverted accidental commit to simple_model_training.py\r\n\r\n* Adds checks and comments.\r\n\r\n* Fixes matrix scaling, convert pandas series to numpy arrays.\r\n\r\n* Fixes number of classes for categorical features.\r\n\r\n* Adds structured calibration result, unit tests.\r\n\r\n* Make create_calibration_module not abstract, default implementation returns None.\r\n\r\n* Relax precision requirement for calibration test.\r\n\r\n* Save weights after calibration, so calibration results are included in save file.\r\n\r\n* Implemented dirichlet scaling with l2 off-diagonal regularization.\r\n\r\n* Adds masked_select off_diagonal method.\r\n\r\n* Change back to matrix scaling.\r\n\r\n* Updates test expectations to reflect learning rate settings.\r\n\r\n* Tuned default regularization weight.\r\n\r\n* Comments.\r\n\r\n* Set random seed, testing to see if that makes a difference.\r\n\r\n* Remove checks for exact NLL, ECE values post calibration.\r\n\r\n* Restored LOGITS to EXCLUDE_PRED_SET, added another option to return logits in batch_predict.\r\n\r\n* Factor calibration method out of Trainer into Calibrator\r\n\r\n* Removed horovod argument from calibrator.\r\n\r\n* Return batch_size if eval_batch_size not specified.\r\n\r\n* Fix calibration_module docstring.\r\n\r\n* Updates comment, adds fallback method of calibrating on training set if no validation set available.\r\n\r\n* Adds calibration registry, replaces if statements for instantiating calibration.\r\n\r\n* Raise ValueError if unsupported calibration method specified.\r\n\r\n* Remove calibrate method from Trainer\r\n\r\n* f string\r\n\r\n* Use backend to create predictor for calibration.\r\n\r\n* Moves saving out of calibrator\r\n\r\n* Fix comment.\r\n\r\n* Adds ray test of calibration.\r\n\r\n* Implements collect_logits in ray predictor.\r\n\r\n* First pass implementation of collect_labels.\r\n\r\n* Implements collect_logits and collect_labels in ray backend.\r\n\r\n* Merge predictions and labels in ray backend\r\n\r\n* Reverts collect_labels, get labels from dataset in 
calibrate.\r\n\r\n* Allow overriding EXCLUDE_PRED_SET when getting preds.\r\n\r\n* Changes 'calibration' config option to binary.\r\n\r\n* Test both binary and category output features in ray test.\r\n\r\n* Comments/\r\n\r\n* Adds type hints.\r\n\r\nCo-authored-by: Daniel Treiman ", "code": "def create_calibration_module(self, feature) -> torch.nn.Module:\n \n if feature.get(\"calibration\"):\n calibration_cls = calibration.get_calibration_cls(BINARY, \"temperature_scaling\")\n return calibration_cls(binary=True)\n return None\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 10, "token_counts": 41, "n_ast_nodes": 70, "n_identifiers": 12, "random_cut": "def create_calibration_module(self, feature) -> torch.nn.Module:\n \n if feature.get(\"calibration\"):\n calibration_cls = calibration.get_calibration_cls(BINARY, \"temperature_scaling\")\n return calibration_cls(binary=True)\n return None\n", "d_id": 1111, "documentation": { "docstring": "Creates the appropriate calibration module based on the feature config.\n\n Today, only one type of calibration (\"temperature_scaling\") is available, but more options may be supported in\n the future.\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 49, "language": "en" } }, { "id": 210002, "commit_id": "1dcec15b6600df750d680e97e89117fcb8eb84a0", "repo": "PaddleDetection", "path": "ppdet/utils/download.py", "file_name": "download.py", "fun_name": "_download", "commit_message": "fix download.py (#5159)", "code": "def _download(url, path, md5sum=None):\n \n if not osp.exists(path):\n os.makedirs(path)\n\n fname = osp.split(url)[-1]\n fullname = osp.join(path, fname)\n retry_cnt = 0\n\n while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum,\n url)):\n if retry_cnt < DOWNLOAD_RETRY_LIMIT:\n retry_cnt += 1\n else:\n raise RuntimeError(\"Download from {} failed. 
\"\n \"Retry limit reached\".format(url))\n\n logger.info(\"Downloading {} from {}\".format(fname, url))\n\n # NOTE: windows path join may incur \\, which is invalid in url\n if sys.platform == \"win32\":\n url = url.replace('\\\\', '/')\n\n req = requests.get(url, stream=True)\n if req.status_code != 200:\n raise RuntimeError(\"Downloading from {} failed with code \"\n \"{}!\".format(url, req.status_code))\n\n # For protecting download interupted, download to\n # tmp_fullname firstly, move tmp_fullname to fullname\n # after download finished\n tmp_fullname = fullname + \"_tmp\"\n total_size = req.headers.get('content-length')\n with open(tmp_fullname, 'wb') as f:\n if total_size:\n for chunk in tqdm.tqdm(\n req.iter_content(chunk_size=1024),\n total=(int(total_size) + 1023) // 1024,\n unit='KB'):\n f.write(chunk)\n else:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n shutil.move(tmp_fullname, fullname)\n return fullname\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 616, "n_words": 143, "vocab_size": 103, "complexity": 11, "nloc": 35, "token_counts": 256, "n_ast_nodes": 435, "n_identifiers": 42, "random_cut": "def _download(url, path, md5sum=None):\n \n if not osp.exists(path):\n os.makedir", "d_id": 52846, "documentation": { "docstring": "\n Download from url, save to path.\n\n url (str): download url\n path (str): download to given path\n ", "n_words": 16, "vocab_size": 11, "n_whitespaces": 29, "language": "en" } }, { "id": 205715, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/options.py", "file_name": "options.py", "fun_name": "related_objects", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def related_objects(self):\n \n all_related_fields = self._get_fields(\n forward=False, reverse=True, include_hidden=True\n )\n return make_immutable_fields_list(\n \"related_objects\",\n (\n obj\n for obj in all_related_fields\n if not obj.hidden or obj.field.many_to_many\n ),\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 149, "n_words": 25, "vocab_size": 22, "complexity": 4, "nloc": 12, "token_counts": 49, "n_ast_nodes": 77, "n_identifiers": 12, "random_cut": "def related_objects(self):\n \n all_related_fields = self._get_fields(\n forward=False, reverse=True, include_hidden=True\n )\n return make_immutable_fields_list(\n \"related_objects\",\n (\n obj\n for obj in", "d_id": 51172, "documentation": { "docstring": "\n Return all related objects pointing to the current model. 
The related\n objects can come from a one-to-one, one-to-many, or many-to-many field\n relation type.\n\n Private API intended only to be used by Django itself; get_fields()\n combined with filtering of field properties is the public API for\n obtaining this field list.\n ", "n_words": 49, "vocab_size": 42, "n_whitespaces": 99, "language": "en" } }, { "id": 202419, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_https_good_referer", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_https_good_referer(self):\n \n req = self._get_POST_request_with_token()\n req._is_secure_override = True\n req.META[\"HTTP_HOST\"] = \"www.example.com\"\n req.META[\"HTTP_REFERER\"] = \"https://www.example.com/somepage\"\n mw = CsrfViewMiddleware(post_form_view)\n mw.process_request(req)\n resp = mw.process_view(req, post_form_view, (), {})\n self.assertIsNone(resp)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 88, "n_words": 25, "vocab_size": 20, "complexity": 1, "nloc": 9, "token_counts": 68, "n_ast_nodes": 118, "n_identifiers": 13, "random_cut": "def test_https_good_referer(self):\n \n req = self._get_POST_request_with_token()\n ", "d_id": 50121, "documentation": { "docstring": "\n A POST HTTPS request with a good referer is accepted.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 247057, "commit_id": "1901cb1d4a8b7d9af64493fbd336e9aa2561c20c", "repo": "synapse", "path": "tests/rest/client/test_retention.py", "file_name": "test_retention.py", "fun_name": "test_state_policy", "commit_message": "Add type hints to `tests/rest/client` (#12084)", "code": "def test_state_policy(self) -> None:\n \n room_id = self.helper.create_room_as(self.user_id, tok=self.token)\n\n # Set the maximum lifetime to 35 days so that the first event gets expired but not\n # the second one.\n self.helper.send_state(\n room_id=room_id,\n event_type=EventTypes.Retention,\n body={\"max_lifetime\": one_day_ms * 35},\n tok=self.token,\n )\n\n self._test_retention(room_id, expected_code_for_first_event=404)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 133, "n_words": 40, "vocab_size": 37, "complexity": 1, "nloc": 12, "token_counts": 69, "n_ast_nodes": 109, "n_identifiers": 16, "random_cut": "def test_state_policy(self) -> None:\n \n room_id = self.helper.create_room_as(self.user_id, tok=self.token)\n\n # Set the maximum lifetime to 35 days so that the first event gets expired but not\n # the second one.\n self.helper.send_state(\n room_id=room_id,\n event_type=EventTypes.Rete", "d_id": 71467, "documentation": { "docstring": "Tests that an event gets correctly expired if there is no default retention\n policy but there's a policy specific to the room.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 36, "language": "en" } }, { "id": 22638, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "insertion_sort.py", "file_name": "insertion_sort.py", "fun_name": "insertion_sort", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def insertion_sort(list, n):\n \n for i in range(0, n):\n key = list[i]\n j = i - 1\n # Swap elements witth key iff they are\n # greater than key\n while j >= 0 and list[j] > key:\n 
list[j + 1] = list[j]\n j = j - 1\n list[j + 1] = key\n return list\n\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 126, "n_words": 53, "vocab_size": 34, "complexity": 4, "nloc": 9, "token_counts": 67, "n_ast_nodes": 105, "n_identifiers": 7, "random_cut": "def insertion_sort(list, n):\n \n for i in range(0, n):\n key = list[i]\n j = i - 1\n # Swap elements witth key iff they are\n # greater than", "d_id": 4382, "documentation": { "docstring": "\n sort list in assending order\n\n INPUT:\n list=list of values to be sorted\n n=size of list that contains values to be sorted\n\n OUTPUT:\n list of sorted values in assending order\n ", "n_words": 29, "vocab_size": 16, "n_whitespaces": 63, "language": "en" } }, { "id": 179232, "commit_id": "cc0cff893f9d7d472788adc2510c123967b384fe", "repo": "gradio", "path": "gradio/component.py", "file_name": "component.py", "fun_name": "get_template_context", "commit_message": "Format The Codebase\n- black formatting\n- isort formatting", "code": "def get_template_context(self):\n \n return {\"name\": self.__class__.__name__.lower(), \"label\": self.label}\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 26, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def get_template_context(self):\n \n return {\"name\": self.__class__.__name__.lower(), ", "d_id": 42922, "documentation": { "docstring": "\n :return: a dictionary with context variables for the javascript file associated with the context\n ", "n_words": 14, "vocab_size": 11, "n_whitespaces": 29, "language": "en" } }, { "id": 116751, "commit_id": "47c5e0ac2d89807f8ff7239d423a3d346bd39a1e", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/teradata_handler/teradata_handler.py", "file_name": "teradata_handler.py", "fun_name": "connect", "commit_message": "feat: add teradata integration", "code": "def connect(self):\n \n\n if self.is_connected is True:\n return self.connection\n\n connection = teradatasql.connect(\n **self.connection_data\n )\n\n self.is_connected = True\n self.connection = connection\n return self.connection\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 92, "n_words": 21, "vocab_size": 14, "complexity": 2, "nloc": 9, "token_counts": 42, "n_ast_nodes": 70, "n_identifiers": 6, "random_cut": "def connect(self):\n \n\n if self.is_connected is True:\n r", "d_id": 25824, "documentation": { "docstring": "\n Handles the connection to a Teradata database insance.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 194429, "commit_id": "1830123ba3edf7290b7c6cb1c6f406ccf1d0e5d4", "repo": "kivy", "path": "kivy/core/window/__init__.py", "file_name": "__init__.py", "fun_name": "unregister_event_manager", "commit_message": "Feature: EventManagerBase (#7658)\n\n* Added EventManagerBase class and event_managers attribute to WindowBase class.\r\n* Added on_motion event to Widget class.\r\n* Updated post_dispatch_input in EventLoopBase to skip non-touch events.\r\n* Using type ids in MouseMotionEventProvider.\r\n* Added on_motion method to Widget subclasses.\r\n* Updated Widget.on_motion method to dispatch to filtered widgets if 'pos' is not in me.profile.\r\n* Changed motion_filter 
property in Widget to store key to list values.\r\n* Updated Widget.on_motion to not dispatch event to children if widget is disabled.\r\n* Widget: Using flags to control dispatching in on_motion method.\r\n* Widget: Don't dispatch on_motion to children if only self is registered.\r\n* Widget: Removed collision on disabled check from on_motion method.\r\n* Widget: Added docstrings for motion_filter and related methods.\r\n* EventManager: Moved motion event flags to eventmanager/__init__.py module.\r\n* ScreenManager: Overrode the on_motion method.\r\n* WindowBase: Using attributes event_managers and event_managers_dict.\r\n* WindowBase: Added doc for register_event_manager and unregister_event_manager methods.\r\n* Widget: Improved default dispatch to stop after the last registered widgets.\r\n* EventManagerBase: Added initial docs class and module.\r\n* Widget: Added experimental warnings to motion_filter property and to on_motion and (un)register_for_motion_event methods.\r\n* WindowBase: Added docs for event_managers and event_managers_dict attributes.\r\n* MotionEvent: Added type_id and flags to push_attrs list.\r\n* EventManagerBase: Added versionadded tag on all flags.\r\n* EventManagerBase: Use dispatch modes instead of flags.", "code": "def unregister_event_manager(self, manager):\n \n self.event_managers.remove(manager)\n for type_id in manager.type_ids:\n self.event_managers_dict[type_id].remove(manager)\n manager.stop()\n manager.window = None\n", "url": "https://github.com/kivy/kivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 44, "n_ast_nodes": 72, "n_identifiers": 10, "random_cut": "def unregister_event_manager(self, manager):\n \n self.event_managers.remove(manager)\n for type_id in manager.type_ids:\n self.event_managers_dict[type_id].remove(manager)\n m", "d_id": 46963, "documentation": { "docstring": "Unregister and stop an event manager previously registered with\n :meth:`register_event_manager`.\n\n .. versionadded:: 2.1.0\n\n .. 
warning::\n This is an experimental method and it remains so until this warning\n is present as it can be changed or removed in the next versions of\n Kivy.\n ", "n_words": 42, "vocab_size": 37, "n_whitespaces": 103, "language": "en" } }, { "id": 217481, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/functools.py", "file_name": "functools.py", "fun_name": "_c3_mro", "commit_message": "add python 3.10.4 for windows", "code": "def _c3_mro(cls, abcs=None):\n \n for i, base in enumerate(reversed(cls.__bases__)):\n if hasattr(base, '__abstractmethods__'):\n boundary = len(cls.__bases__) - i\n break # Bases up to the last explicit ABC are considered first.\n else:\n boundary = 0\n abcs = list(abcs) if abcs else []\n explicit_bases = list(cls.__bases__[:boundary])\n abstract_bases = []\n other_bases = list(cls.__bases__[boundary:])\n for base in abcs:\n if issubclass(cls, base) and not any(\n issubclass(b, base) for b in cls.__bases__\n ):\n # If *cls* is the class that introduces behaviour described by\n # an ABC *base*, insert said ABC to its MRO.\n abstract_bases.append(base)\n for base in abstract_bases:\n abcs.remove(base)\n explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]\n abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]\n other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]\n return _c3_merge(\n [[cls]] +\n explicit_c3_mros + abstract_c3_mros + other_c3_mros +\n [explicit_bases] + [abstract_bases] + [other_bases]\n )\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 306, "n_words": 132, "vocab_size": 83, "complexity": 12, "nloc": 26, "token_counts": 210, "n_ast_nodes": 328, "n_identifiers": 24, "random_cut": "def _c3_mro(cls, abcs=None):\n \n for i, base in enumerate(reversed(cls.__bases__)):\n if hasattr(base, '__abstractmethods__'):\n boundary = len(cls.__bases__) - i\n break # Bases up to the last explicit ABC are considered first.\n else:\n boundary = 0\n abcs = list(abcs) if abcs else []\n explicit_bases = list(cls.__bases__[:boundary])\n abstract_bases = []\n other_bases = list(cls.__bases__[boundary:])\n for base in abcs:\n if issubclass(cls, base) and not any(\n issubclass(b, base) for b in cls.__bases__\n ):\n # If *cls* is the class that introduces behaviour described by\n # an ABC *base*, insert said ABC to its MRO.\n abstract_bases.append(base)\n for base in abstract_bases:\n abcs.remove(base)\n explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]\n abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]\n other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]\n return _c3_merge", "d_id": 54816, "documentation": { "docstring": "Computes the method resolution order using extended C3 linearization.\n\n If no *abcs* are given, the algorithm works exactly like the built-in C3\n linearization used for method resolution.\n\n If given, *abcs* is a list of abstract base classes that should be inserted\n into the resulting MRO. Unrelated ABCs are ignored and don't end up in the\n result. The algorithm inserts ABCs where their functionality is introduced,\n i.e. issubclass(cls, abc) returns True for the class itself but returns\n False for all its direct base classes. 
Implicit ABCs for a given class\n (either registered or inferred from the presence of a special method like\n __len__) are inserted directly after the last ABC explicitly listed in the\n MRO of said class. If two implicit ABCs end up next to each other in the\n resulting MRO, their ordering depends on the order of types in *abcs*.\n\n ", "n_words": 141, "vocab_size": 96, "n_whitespaces": 177, "language": "en" } }, { "id": 308815, "commit_id": "10027b20904b678d8baecbc6e72c5bcc3f4f24b2", "repo": "core", "path": "homeassistant/components/nissan_leaf/__init__.py", "file_name": "__init__.py", "fun_name": "async_start_charging", "commit_message": "Add button to start leaf charge (#62948)\n\nCo-authored-by: Bruce Duncan ", "code": "async def async_start_charging(self) -> None:\n \n await self.hass.async_add_executor_job(self.leaf.start_charging)\n self.schedule_update()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 26, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "async def async_start_charging(self) -> None:\n \n awai", "d_id": 107548, "documentation": { "docstring": "Request to start charging the car. Used by the button platform.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 251831, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/http/test_http.py", "file_name": "test_http.py", "fun_name": "test_multiple_server_connections", "commit_message": "make it black!", "code": "def test_multiple_server_connections(tctx):\n \n server1 = Placeholder(Server)\n server2 = Placeholder(Server)\n playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 25, "n_words": 13, "vocab_size": 10, "complexity": 1, "nloc": 35, "token_counts": 219, "n_ast_nodes": 61, "n_identifiers": 13, "random_cut": "def test_multiple_server_connections(tctx):\n \n server1 = Placeholder(Server)\n server2 = Placehold", "d_id": 73835, "documentation": { "docstring": "Test multiple requests being rewritten to different targets.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 255404, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/compose_test.py", "file_name": "compose_test.py", "fun_name": "test_overlapping_output_names", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary 
Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def test_overlapping_output_names(self) -> None:\n \n self._test_overlapping_names(\n outputs0=['o0', 'o1'], outputs1=['o1', 'o2'])\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 28, "n_ast_nodes": 52, "n_identifiers": 5, "random_cut": "def test_overlapping_output_names(self) -> None:\n \n self._test_overlapping_names(\n outputs0=['o0', 'o1'], outputs1=['o1', 'o2'", "d_id": 74744, "documentation": { "docstring": "\n Tests error checking when the name of the output overlaps\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 54559, "commit_id": "1dd7561062328e96594bbf60a6d15f49163c9d87", "repo": "prefect", "path": "tests/test_settings.py", "file_name": "test_settings.py", "fun_name": "test_write_profiles_does_not_include_default", "commit_message": "Tests passing", "code": "def test_write_profiles_does_not_include_default(self, temporary_profiles_path):\n \n write_profiles({})\n assert \"profiles.default\" not in temporary_profiles_path.read_text()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 40, "n_identifiers": 5, "random_cut": "def test_write_profiles_does_not_include_default(self, temporary_profiles_path):\n \n write_profiles({})\n assert \"profiles.default\" not in temporary_profiles_path.read_text()\n", "d_id": 11096, "documentation": { "docstring": "\n Including the default has a tendency to bake in settings the user may not want, and\n can prevent them from gaining new defaults.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 45, "language": "en" } }, { "id": 199970, "commit_id": "ae2baaa0bbcd42792bb2e7887ca61b97abc40463", "repo": "sympy", "path": "sympy/physics/optics/polarization.py", "file_name": "polarization.py", "fun_name": "phase_retarder", "commit_message": "removed backticks around variable names in docs according to PR review", "code": "def phase_retarder(theta=0, delta=0):\n \n R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2,\n (1-exp(I*delta))*cos(theta)*sin(theta)],\n [(1-exp(I*delta))*cos(theta)*sin(theta),\n sin(theta)**2 + exp(I*delta)*cos(theta)**2]])\n return R*exp(-I*delta/2)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 69, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 118, "n_ast_nodes": 185, "n_identifiers": 9, "random_cut": "def phase_retarder(theta=0, delta=0):\n \n R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2,\n (1-exp(I*delta))*cos(theta)*sin(theta)],\n [(1-exp(I*delta))*cos(theta)*sin(theta),\n sin(theta)**2 + exp(I*delta)*cos(theta)**2]])\n return R*exp(-I*delta/2)\n\n", "d_id": 49459, "documentation": { "docstring": "A phase retarder Jones matrix with retardance `delta` at angle `theta`.\n\n Parameters\n ==========\n\n theta : numeric type or SymPy Symbol\n The angle of the fast axis relative to the horizontal plane.\n delta : numeric type or SymPy Symbol\n The phase difference between the 
fast and slow axes of the\n transmitted light.\n\n Returns\n =======\n\n SymPy Matrix :\n A Jones matrix representing the retarder.\n\n Examples\n ========\n\n A generic retarder.\n\n >>> from sympy import pprint, symbols\n >>> from sympy.physics.optics.polarization import phase_retarder\n >>> theta, delta = symbols(\"theta, delta\", real=True)\n >>> R = phase_retarder(theta, delta)\n >>> pprint(R, use_unicode=True)\n ⎡ -ⅈ⋅δ -ⅈ⋅δ ⎤\n ⎢ ───── ───── ⎥\n ⎢⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎛ ⅈ⋅δ⎞ 2 ⎥\n ⎢⎝ℯ ⋅sin (θ) + cos (θ)⎠⋅ℯ ⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ)⎥\n ⎢ ⎥\n ⎢ -ⅈ⋅δ -ⅈ⋅δ ⎥\n ⎢ ───── ─────⎥\n ⎢⎛ ⅈ⋅δ⎞ 2 ⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎥\n ⎣⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ) ⎝ℯ ⋅cos (θ) + sin (θ)⎠⋅ℯ ⎦\n\n ", "n_words": 153, "vocab_size": 93, "n_whitespaces": 637, "language": "en" } }, { "id": 178724, "commit_id": "98badaaafd4e56529378947358acae489035fa1e", "repo": "Nuitka", "path": "nuitka/utils/Execution.py", "file_name": "Execution.py", "fun_name": "wrapCommandForDebuggerForExec", "commit_message": "Windows: Make running in debugger work with cmd files as well", "code": "def wrapCommandForDebuggerForExec(*args):\n \n\n gdb_path = getExecutablePath(\"gdb\")\n\n # Windows extra ball, attempt the downloaded one.\n if isWin32Windows() and gdb_path is None:\n from nuitka.Options import assumeYesForDownloads\n\n mingw64_gcc_path = getCachedDownloadedMinGW64(\n target_arch=getArchitecture(),\n assume_yes_for_downloads=assumeYesForDownloads(),\n )\n\n with withEnvironmentPathAdded(\"PATH\", os.path.dirname(mingw64_gcc_path)):\n lldb_path = getExecutablePath(\"lldb\")\n\n if gdb_path is None and lldb_path is None:\n lldb_path = getExecutablePath(\"lldb\")\n\n if lldb_path is None:\n general.sysexit(\"Error, no 'gdb' or 'lldb' binary found in path.\")\n\n if gdb_path is not None:\n args = (gdb_path, \"gdb\", \"-ex=run\", \"-ex=where\", \"-ex=quit\", \"--args\") + args\n else:\n args = (lldb_path, \"lldb\", \"-o\", \"run\", \"-o\", \"bt\", \"-o\", \"quit\", \"--\") + args\n\n return args\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 214, "n_words": 90, "vocab_size": 60, "complexity": 7, "nloc": 19, "token_counts": 142, "n_ast_nodes": 254, "n_identifiers": 20, "random_cut": "def wrapCommandForDebuggerForExec(*args):\n \n\n gdb_path = getExecutablePath(\"gdb\")\n\n # Windows extra ball, attempt the downloaded one.\n if isWin32Windows() and gdb_path is None:\n from nuitka.Options import assumeYesForDownloads\n\n mingw64_gcc_path = getCachedDownloadedMinGW64(\n target_arch=getArchitecture(),\n assume_yes_for_downloads=assumeYesForDownloads(),\n )\n\n with withEnvironmentPathAdded(\"PATH\", os.path.dirname(mingw64_gcc_path)):\n lldb_path = getExecutablePath(\"lldb\")\n\n if gdb_path is None and lldb_path is None:\n lldb_path = getExecutablePath(\"lldb\")\n\n if lldb_path is None:\n general.sysexit(\"Error, no 'gdb' or 'lldb' binary found in path.\")\n\n if gdb_path is not None:\n args = (gdb_path, \"gdb\", \"-ex=run\"", "d_id": 42804, "documentation": { "docstring": "Wrap a command for system debugger to call exec\n\n Args:\n args: (list of str) args for call to be debugged\n Returns:\n args tuple with debugger command inserted\n\n Notes:\n Currently only gdb and lldb are supported, but adding more\n debuggers would be very welcome.\n ", "n_words": 43, "vocab_size": 36, "n_whitespaces": 83, "language": "en" } }, { "id": 207377, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_scripts/tests.py", "file_name": 
"tests.py", "fun_name": "test_commands_with_invalid_settings", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_commands_with_invalid_settings(self):\n \n args = [\"startproject\"]\n out, err = self.run_django_admin(args, settings_file=\"bad_settings\")\n self.assertNoOutput(out)\n self.assertOutput(err, \"You must provide a project name\", regex=True)\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 43, "n_ast_nodes": 75, "n_identifiers": 10, "random_cut": "def test_commands_with_invalid_settings(self):\n \n args = [\"startproject\"]\n out, err = self.run_django_admin(args, settings_file=\"bad_settings\")\n self.assertNoOutput(out)\n ", "d_id": 51944, "documentation": { "docstring": "\n Commands that don't require settings succeed if the settings file\n doesn't exist.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 34, "language": "en" } }, { "id": 221321, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/chunk.py", "file_name": "chunk.py", "fun_name": "read", "commit_message": "add python 3.10.4 for windows", "code": "def read(self, size=-1):\n \n\n if self.closed:\n raise ValueError(\"I/O operation on closed file\")\n if self.size_read >= self.chunksize:\n return b''\n if size < 0:\n size = self.chunksize - self.size_read\n if size > self.chunksize - self.size_read:\n size = self.chunksize - self.size_read\n data = self.file.read(size)\n self.size_read = self.size_read + len(data)\n if self.size_read == self.chunksize and \\\n self.align and \\\n (self.chunksize & 1):\n dummy = self.file.read(1)\n self.size_read = self.size_read + len(dummy)\n return data\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 216, "n_words": 67, "vocab_size": 38, "complexity": 8, "nloc": 17, "token_counts": 136, "n_ast_nodes": 215, "n_identifiers": 12, "random_cut": "def read(self, size=-1):\n \n\n if self.closed:\n raise ValueError(\"I/O operation on closed file\")\n if self.size_read >= self.chunksize:\n ", "d_id": 56344, "documentation": { "docstring": "Read at most size bytes from the chunk.\n If size is omitted or negative, read until the end\n of the chunk.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 156064, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/array/slicing.py", "file_name": "slicing.py", "fun_name": "slicing_plan", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def slicing_plan(chunks, index):\n \n from dask.array.utils import asarray_safe\n\n if not is_arraylike(index):\n index = np.asanyarray(index)\n cum_chunks = cached_cumsum(chunks)\n\n cum_chunks = asarray_safe(cum_chunks, like=index)\n # this dispactches to the array library\n chunk_locations = np.searchsorted(cum_chunks, index, side=\"right\")\n\n # but we need chunk_locations as python ints for getitem calls downstream\n chunk_locations = chunk_locations.tolist()\n where = np.where(np.diff(chunk_locations))[0] + 1\n\n extra = asarray_safe([0], like=where)\n c_loc = asarray_safe([len(chunk_locations)], like=where)\n where = np.concatenate([extra, where, c_loc])\n\n out = []\n for i in 
range(len(where) - 1):\n sub_index = index[where[i] : where[i + 1]]\n chunk = chunk_locations[where[i]]\n if chunk > 0:\n sub_index = sub_index - cum_chunks[chunk - 1]\n out.append((chunk, sub_index))\n\n return out\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 193, "n_words": 99, "vocab_size": 72, "complexity": 4, "nloc": 20, "token_counts": 196, "n_ast_nodes": 305, "n_identifiers": 29, "random_cut": "def slicing_plan(chunks, index):\n \n from dask.array.utils import asarray_safe\n\n if not is_arraylike(index):\n index = np.asanyarray(index)\n cum_chunks = cached_cumsum(chunks)\n\n cum_chunks = asarray_safe(cum_chunks, like=index)\n # this dispactches to the array library\n chunk_locations = np.searchsorted(cum_chunks, index, side=\"right\")\n\n # bu", "d_id": 36529, "documentation": { "docstring": "Construct a plan to slice chunks with the given index\n\n Parameters\n ----------\n chunks : Tuple[int]\n One dimensions worth of chunking information\n index : np.ndarray[int]\n The index passed to slice on that dimension\n\n Returns\n -------\n out : List[Tuple[int, np.ndarray]]\n A list of chunk/sub-index pairs corresponding to each output chunk\n ", "n_words": 48, "vocab_size": 39, "n_whitespaces": 93, "language": "en" } }, { "id": 245112, "commit_id": "cd4e9ed8269b0c767e129169b7268b0ced7e60c9", "repo": "mmdetection", "path": "mmdet/models/roi_heads/bbox_heads/double_bbox_head.py", "file_name": "double_bbox_head.py", "fun_name": "_add_conv_branch", "commit_message": "Refactor Double Head, MS, Dynamic, Trident.", "code": "def _add_conv_branch(self) -> None:\n \n branch_convs = ModuleList()\n for i in range(self.num_convs):\n branch_convs.append(\n Bottleneck(\n inplanes=self.conv_out_channels,\n planes=self.conv_out_channels // 4,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n return branch_convs\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 151, "n_words": 21, "vocab_size": 20, "complexity": 2, "nloc": 11, "token_counts": 56, "n_ast_nodes": 91, "n_identifiers": 14, "random_cut": "def _add_conv_branch(self) -> None:\n \n branch_convs = ModuleList()\n for i in range(self.num_convs):\n branch_convs.append(\n Bottleneck(\n inplanes=self.conv_out_channels,\n planes=self.conv_out_channels // 4,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n", "d_id": 70662, "documentation": { "docstring": "Add the fc branch which consists of a sequential of conv layers.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 148287, "commit_id": "0e6c042e29cbbe429d81c9c1af3c75c261f00980", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/util.py", "file_name": "util.py", "fun_name": "match_files", "commit_message": "[Bugfix] fix invalid excluding of Black (#24042)\n\n- We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options\r\n- Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.", "code": "def match_files(patterns, files):\n\t\n\tall_files = files if isinstance(files, Collection) else list(files)\n\treturn_files = set()\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tresult_files = 
pattern.match(all_files)\n\t\t\tif pattern.include:\n\t\t\t\treturn_files.update(result_files)\n\t\t\telse:\n\t\t\t\treturn_files.difference_update(result_files)\n\treturn return_files\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 22, "n_words": 33, "vocab_size": 28, "complexity": 5, "nloc": 11, "token_counts": 70, "n_ast_nodes": 113, "n_identifiers": 15, "random_cut": "def match_files(patterns, files):\n\t\n\tall_files = files if isinstance(files, Collection) else list(files)\n\treturn_files = set()\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tresult_files = pattern.match(all_files)\n\t\t\tif pattern.include:\n\t\t\t\treturn", "d_id": 34223, "documentation": { "docstring": "\n\tMatches the files to the patterns.\n\n\t*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)\n\tcontains the patterns to use.\n\n\t*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains\n\tthe normalized file paths to be matched against *patterns*.\n\n\tReturns the matched files (:class:`set` of :class:`str`).\n\t", "n_words": 36, "vocab_size": 24, "n_whitespaces": 30, "language": "en" } }, { "id": 166205, "commit_id": "90140f055892a46f473bd26affab88a7f171e394", "repo": "pandas", "path": "pandas/core/exchange/column.py", "file_name": "column.py", "fun_name": "_get_offsets_buffer", "commit_message": "ENH: Implement DataFrame interchange protocol (#46141)", "code": "def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]:\n \n if self.dtype[0] == DtypeKind.STRING:\n # For each string, we need to manually determine the next offset\n values = self._col.to_numpy()\n ptr = 0\n offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)\n for i, v in enumerate(values):\n # For missing values (in this case, `np.nan` values)\n # we don't increment the pointer\n if isinstance(v, str):\n b = v.encode(encoding=\"utf-8\")\n ptr += len(b)\n\n offsets[i + 1] = ptr\n\n # Convert the offsets to a Pandas \"buffer\" using\n # the NumPy array as the backing store\n buffer = PandasBuffer(offsets)\n\n # Assemble the buffer dtype info\n dtype = (\n DtypeKind.INT,\n 64,\n ArrowCTypes.INT64,\n Endianness.NATIVE,\n ) # note: currently only support native endianness\n else:\n raise NoBufferPresent(\n \"This column has a fixed-length dtype so \"\n \"it does not have an offsets buffer\"\n )\n\n return buffer, dtype\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 490, "n_words": 130, "vocab_size": 97, "complexity": 4, "nloc": 29, "token_counts": 139, "n_ast_nodes": 228, "n_identifiers": 33, "random_cut": "def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]:\n \n if self.dtype[0] == DtypeKind.STRING:\n # For each string, we need to manually determine the next offset\n values = self._col.to_numpy()\n ptr = 0\n offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)\n for i, v in enumerate(values):\n # For missing values (in this case, `np.nan` values)\n # we don't increment the pointer\n if isinstance(v, str):\n b = v.encode(encoding=\"utf-8\")\n ptr += len(b)\n\n offsets[i + 1] = ptr\n\n # Convert the offsets to a Pandas \"", "d_id": 39770, "documentation": { "docstring": "\n Return the buffer containing the offset values for variable-size binary\n data (e.g., variable-length strings) and the buffer's associated dtype.\n Raises NoBufferPresent if the data 
buffer does not have an associated\n offsets buffer.\n ", "n_words": 32, "vocab_size": 26, "n_whitespaces": 68, "language": "en" } }, { "id": 303211, "commit_id": "8910d265d6cf15fed4e6e98b4344031019c1016d", "repo": "core", "path": "homeassistant/helpers/update_coordinator.py", "file_name": "update_coordinator.py", "fun_name": "_unschedule_refresh", "commit_message": "Keep track of a context for each listener (#72702)\n\n* Remove async_remove_listener\r\n\r\nThis avoids the ambuigity as to what happens if same callback is added multiple times.\r\n\r\n* Keep track of a context for each listener\r\n\r\nThis allow a update coordinator to adapt what data to request on update from the backing service based on which entities are enabled.\r\n\r\n* Clone list before calling callbacks\r\n\r\nThe callbacks can end up unregistering and modifying the dict while iterating.\r\n\r\n* Only yield actual values\r\n\r\n* Add a test for update context\r\n\r\n* Factor out iteration of _listeners to helper\r\n\r\n* Verify context is passed to coordinator\r\n\r\n* Switch to Any as type instead of object\r\n\r\n* Remove function which use was dropped earliers\r\n\r\nThe use was removed in 8bee25c938a123f0da7569b4e2753598d478b900", "code": "def _unschedule_refresh(self) -> None:\n \n if self._unsub_refresh:\n self._unsub_refresh()\n self._unsub_refresh = None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 3, "random_cut": "def _unschedule_refresh(self) -> None:\n \n if self._unsub_refresh:\n self._unsub_refresh()\n self._unsub_refresh = None\n", "d_id": 102039, "documentation": { "docstring": "Unschedule any pending refresh since there is no longer any listeners.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 201749, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/postgresql/tests.py", "file_name": "tests.py", "fun_name": "test_connect_and_rollback", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_connect_and_rollback(self):\n \n new_connection = connection.copy()\n try:\n # Ensure the database default time zone is different than\n # the time zone in new_connection.settings_dict. 
We can\n # get the default time zone by reset & show.\n with new_connection.cursor() as cursor:\n cursor.execute(\"RESET TIMEZONE\")\n cursor.execute(\"SHOW TIMEZONE\")\n db_default_tz = cursor.fetchone()[0]\n new_tz = \"Europe/Paris\" if db_default_tz == \"UTC\" else \"UTC\"\n new_connection.close()\n\n # Invalidate timezone name cache, because the setting_changed\n # handler cannot know about new_connection.\n del new_connection.timezone_name\n\n # Fetch a new connection with the new_tz as default\n # time zone, run a query and rollback.\n with self.settings(TIME_ZONE=new_tz):\n new_connection.set_autocommit(False)\n new_connection.rollback()\n\n # Now let's see if the rollback rolled back the SET TIME ZONE.\n with new_connection.cursor() as cursor:\n cursor.execute(\"SHOW TIMEZONE\")\n tz = cursor.fetchone()[0]\n self.assertEqual(new_tz, tz)\n\n finally:\n new_connection.close()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 448, "n_words": 119, "vocab_size": 79, "complexity": 3, "nloc": 19, "token_counts": 125, "n_ast_nodes": 237, "n_identifiers": 18, "random_cut": "def test_connect_and_rollback(self):\n \n new_connection = connection.copy()\n try:\n # Ensure the database default time zone is different than\n # the time zone in new_connection.settings_dict. We can\n # get the default time zone by reset & show.\n with new_connection.cursor() as cursor:\n cursor.execute(\"RESET TIMEZONE\")\n cursor.execute(\"SHOW TIMEZONE\")\n db_default_tz = cursor.fetchone()[0]\n new_tz = \"Europe/Paris\" if db_default_tz == \"UTC\" else \"UTC\"\n new_connection.close()\n\n # Invalidate timezone name cache, because the setting_changed\n # handler cannot know about", "d_id": 49987, "documentation": { "docstring": "\n PostgreSQL shouldn't roll back SET TIME ZONE, even if the first\n transaction is rolled back (#17062).\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 135585, "commit_id": "9fab504fe776f96fecf85e12ea006264cbe92f4a", "repo": "ray", "path": "python/ray/data/tests/test_dataset_tfrecords.py", "file_name": "test_dataset_tfrecords.py", "fun_name": "test_readback_tfrecords", "commit_message": "[Datasets] Add writer for TFRecords. (#29448)\n\nThis PR enables users to write TFRecords from datasets.\r\n\r\nIn particular, the master branch already includes an API for reading TFRecords from datasets. 
Users have requested the ability to write these datasets back to TFRecords.", "code": "def test_readback_tfrecords(ray_start_regular_shared, tmp_path):\n \n\n # The dataset we will write to a .tfrecords file.\n ds = ray.data.from_items(\n [\n # Row one.\n {\n \"int_item\": 1,\n \"int_list\": [2, 2, 3],\n \"float_item\": 1.0,\n \"float_list\": [2.0, 3.0, 4.0],\n \"bytes_item\": b\"abc\",\n \"bytes_list\": [b\"abc\", b\"1234\"],\n },\n # Row two.\n {\n \"int_item\": 2,\n \"int_list\": [3, 3, 4],\n \"float_item\": 2.0,\n \"float_list\": [2.0, 2.0, 3.0],\n \"bytes_item\": b\"def\",\n \"bytes_list\": [b\"def\", b\"1234\"],\n },\n ]\n )\n\n # Write the TFRecords.\n ds.write_tfrecords(tmp_path)\n\n # Read the TFRecords.\n readback_ds = ray.data.read_tfrecords(tmp_path)\n assert ds.take() == readback_ds.take()\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 366, "n_words": 79, "vocab_size": 59, "complexity": 1, "nloc": 24, "token_counts": 155, "n_ast_nodes": 226, "n_identifiers": 11, "random_cut": "def test_readback_tfrecords(ray_start_regular_shared, tmp_path):\n \n\n # The dataset we will write to a .tfrecords file.\n ds = ray.data.from_items(\n [\n # Row one.\n {\n \"int_item\": 1,\n \"int_list\": [2, 2, 3],\n \"float_item\": 1.0,\n \"float_list\": [2.0, 3.0, 4.0],\n \"bytes_item\": b\"abc\",\n \"bytes_list\": [b\"abc\", b\"1234\"],\n },\n # Row two.\n {\n \"int_item\": 2,\n \"int_list\": [3, 3, 4],\n \"float_item\": 2.0,\n \"float_list\": [2.0, 2.0, 3.0],\n \"bytes_item\": b\"def\",\n \"bytes_list\": [b\"def\", b\"1234\"],\n },\n ]\n )\n\n # Write the TFRecords.\n ds.write_tfrecords(tmp_path)\n\n # Read the TFRecords.\n readback_ds = ray.data.read_tfrecords(tmp_path)\n assert ds.take() == readback_ds.take()\n\n", "d_id": 30664, "documentation": { "docstring": "\n Test reading back TFRecords written using datasets.\n The dataset we read back should be the same that we wrote.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 29, "language": "en" } }, { "id": 279493, "commit_id": "be73ac1a1e25d9abd4d793cba9707098d7adf231", "repo": "keras", "path": "keras/layers/rnn/legacy_cells.py", "file_name": "legacy_cells.py", "fun_name": "call", "commit_message": "Add f-string format and lint with flynt on the whole codebase", "code": "def call(self, inputs, state):\n \n cur_state_pos = 0\n cur_inp = inputs\n new_states = []\n for i, cell in enumerate(self._cells):\n with tf.compat.v1.variable_scope(\"cell_%d\" % i):\n if self._state_is_tuple:\n if not tf.nest.is_nested(state):\n raise ValueError(\n \"Expected state to be a tuple of length \"\n f\"{len(self.state_size)}\"\n f\", but received: {state}\"\n )\n cur_state = state[i]\n else:\n cur_state = tf.slice(\n state, [0, cur_state_pos], [-1, cell.state_size]\n )\n cur_state_pos += cell.state_size\n cur_inp, new_state = cell(cur_inp, cur_state)\n new_states.append(new_state)\n\n new_states = (\n tuple(new_states)\n if self._state_is_tuple\n else tf.concat(new_states, 1)\n )\n\n return cur_inp, new_states\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 484, "n_words": 79, "vocab_size": 64, "complexity": 5, "nloc": 27, "token_counts": 148, "n_ast_nodes": 252, "n_identifiers": 27, "random_cut": "def call(self, inputs, state):\n \n cur_state_pos = 0\n cur_inp = inputs\n new_states = []\n for i, cell in enumerate(self._cells):\n with 
tf.compat.v1.variable_scope(\"cell_%d\" % i):\n if self._state_is_tuple:\n if not tf.nest.is_nested(state):\n raise ValueError(\n \"Expected state to be a tuple of length \"\n f\"{len(self.state_size)}\"\n f\", but received: {state}\"\n )\n cur_state = state[i]\n else:\n cur_state = tf.slice(\n state, [0, cur_state_pos], [-1, cell.state_size]\n )\n ", "d_id": 83014, "documentation": { "docstring": "Run this multi-layer cell on inputs, starting from state.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 216279, "commit_id": "3c7e1ec1f08abd7cd1ba78ad7880acb6ba6fdce7", "repo": "salt", "path": "tests/pytests/functional/transport/server/test_req_channel.py", "file_name": "test_req_channel.py", "fun_name": "test_basic", "commit_message": "Fix minion unit tests, specifically .../tests/pytests/test_minion.py", "code": "def test_basic(push_channel):\n \n msgs = [\n {\"foo\": \"bar\"},\n {\"bar\": \"baz\"},\n {\"baz\": \"qux\", \"list\": [1, 2, 3]},\n ]\n for msg in msgs:\n ret = push_channel.send(msg, timeout=5, tries=1)\n assert ret[\"load\"] == msg\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 29, "vocab_size": 27, "complexity": 2, "nloc": 9, "token_counts": 66, "n_ast_nodes": 112, "n_identifiers": 8, "random_cut": "def test_basic(push_channel):\n \n msgs = [\n {\"foo\": \"bar\"},\n {\"bar\": \"baz\"},\n {\"baz\": \"qux\", \"list\": [1, 2, 3]},\n", "d_id": 54497, "documentation": { "docstring": "\n Test a variety of messages, make sure we get the expected responses\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 247975, "commit_id": "85ca963c1add5ca12f59238a50dfc63df4846bb7", "repo": "synapse", "path": "tests/module_api/test_account_data_manager.py", "file_name": "test_account_data_manager.py", "fun_name": "test_put_global", "commit_message": "Add Module API for reading and writing global account data. 
(#12391)", "code": "def test_put_global(self) -> None:\n \n\n self.get_success(\n self._module_api.account_data_manager.put_global(\n self.user_id, \"test.data\", {\"wombat\": True}\n )\n )\n\n # Request that account data from the normal store; check it's as we expect.\n self.assertEqual(\n self.get_success(\n self._store.get_global_account_data_by_type_for_user(\n self.user_id, \"test.data\"\n )\n ),\n {\"wombat\": True},\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 197, "n_words": 36, "vocab_size": 30, "complexity": 1, "nloc": 17, "token_counts": 62, "n_ast_nodes": 105, "n_identifiers": 10, "random_cut": "def test_put_global(self) -> None:\n \n\n self.get_success(\n self._module_api.account_data_manager.put_global(\n self.user_id, \"test.data\", {\"wombat\": True}\n )\n )\n\n # Request that account data from the normal store; check it's as we expect.\n self.assertEqual(\n self.get_success(\n self._store.get_globa", "d_id": 72034, "documentation": { "docstring": "\n Tests that written account data using `put_global` can be read out again later.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 259914, "commit_id": "f862129f36786acbae3d9f2d161bbb72d77b87ec", "repo": "scikit-learn", "path": "build_tools/azure/update_environments_and_lock_files.py", "file_name": "update_environments_and_lock_files.py", "fun_name": "get_conda_environment_content", "commit_message": "CI: move Linux and MacOS Azure builds to conda lock files (#22448)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. Fan ", "code": "def get_conda_environment_content(build_metadata):\n template = environment.from_string(\n .strip()\n )\n return template.render(build_metadata=build_metadata)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 24, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 21, "token_counts": 26, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def get_conda_environment_content(build_metadata):\n template = environment.from_string(\n ", "d_id": 75990, "documentation": { "docstring": "\n# DO NOT EDIT: this file is generated from the specification found in the\n# following script to centralize the configuration for all Azure CI builds:\n# build_tools/azure/update_environments_and_lock_files.py\nchannels:\n - {{ build_metadata['channel'] }}\ndependencies:\n {% for conda_dep in build_metadata['conda_dependencies'] %}\n - {{ conda_dep | get_package_with_constraint(build_metadata) }}\n {% endfor %}\n {% if build_metadata['pip_dependencies'] %}\n - pip\n - pip:\n {% for pip_dep in build_metadata.get('pip_dependencies', []) %}\n - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }}\n {% endfor %}\n {% endif %}", "n_words": 77, "vocab_size": 47, "n_whitespaces": 85, "language": "en" } }, { "id": 27990, "commit_id": "5d1a36b9aaf408016957db04f86397b2e53c2500", "repo": "saleor", "path": "saleor/thumbnail/utils.py", "file_name": "utils.py", "fun_name": "preprocess", "commit_message": "Better media thumbnails including WebP support (#9988)\n\n* Add thumbnail app\r\n\r\n* Update get_thumbnail_size method and add tests\r\n\r\n* Add logic for creating thumbnails\r\n\r\n* Update logic for getting thumbnail\r\n\r\n* Allow defining format for tumbnail generation\r\n\r\n* Clear handle_thumbnail views\r\n\r\n* Add 
prepare_image_proxy_url method\r\n\r\n* Use ImageField for user avatar\r\n\r\n* Allow defining thumbnail format when querying user avatar\r\n\r\n* Use ImageField for category backgound_image\r\n\r\n* Use ImageField for Collection backgound_image\r\n\r\n* Use ImageField for ProductMedia image\r\n\r\n* Ensure that thumbnails are deleted when category background_image is changed or deleted\r\n\r\n* Ensure that thumbnails are deleted when collection background_image is changed or deleted\r\n\r\n* Update product media deleteion task and failing tests\r\n\r\n* Delete thumbnail from storage when thumbnail objects is deleted\r\n\r\n* Fix import in product test_bulk_delete\r\n\r\n* Drop create_thumbnails command\r\n\r\n* Update Product.thumbnail resolver\r\n\r\n* Update OrderLine thumbnail resolver\r\n\r\n* Add missing ADDED_IN_35 and PREVIEW_FEATURE labels\r\n\r\n* Update account and product signals - ensure the image is deleted from storage\r\n\r\n* Refactor product_images methods\r\n\r\n* Add signal for product media image delete\r\n\r\n* Drop create_thumbnails method and not longer valid settings fields\r\n\r\n* Clean the ProcessedImage class\r\n\r\n* Drop versatileimagefield from INSTALLED_APPS\r\n\r\n* Update changelog\r\n\r\n* Drop comments from ThumbnailFormat\r\n\r\n* Add get_image_or_proxy_url method\r\n\r\n* Apply reiew suggestions - add ThumbnailField and use get_image_or_proxy_ur when it's possible\r\n\r\n* Update changelog\r\n\r\n* Replace ADDED_IN_35 with ADDED_IN_36 label\r\n\r\n* Update changelog\r\n\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", "code": "def preprocess(self, image, image_format):\n \n format = self.format or image_format\n save_kwargs = {\"format\": format}\n\n # Ensuring image is properly rotated\n if hasattr(image, \"_getexif\"):\n exif_datadict = image._getexif() # returns None if no EXIF data\n if exif_datadict is not None:\n exif = dict(exif_datadict.items())\n orientation = exif.get(self.EXIF_ORIENTATION_KEY, None)\n if orientation == 3:\n image = image.transpose(Image.ROTATE_180)\n elif orientation == 6:\n image = image.transpose(Image.ROTATE_270)\n elif orientation == 8:\n image = image.transpose(Image.ROTATE_90)\n\n # Ensure any embedded ICC profile is preserved\n save_kwargs[\"icc_profile\"] = image.info.get(\"icc_profile\")\n\n if hasattr(self, \"preprocess_%s\" % format):\n image, addl_save_kwargs = getattr(self, \"preprocess_%s\" % format)(\n image=image\n )\n save_kwargs.update(addl_save_kwargs)\n\n return image, save_kwargs\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 360, "n_words": 94, "vocab_size": 62, "complexity": 8, "nloc": 21, "token_counts": 162, "n_ast_nodes": 271, "n_identifiers": 24, "random_cut": "def preprocess(self, image, image_format):\n \n format = self.format or image_format\n save_kwargs = {\"format\": format}\n\n # Ensuring image is properly rotated\n if hasattr(image, \"_getexif\"):\n exif_datadict = image._getexif() # returns None if no EXIF data\n if exif_datadict is not None:\n exif = dict(exif_datadict.items())\n orientation = exif.get(self.EXIF_ORIENTATION_KEY, None)\n if orientation == 3:\n image = image.transpose(Image.ROTATE_180)\n ", "d_id": 5147, "documentation": { "docstring": "Preprocess an image.\n\n An API hook for image pre-processing. Calls any image format specific\n pre-processors (if defined). I.E. 
If `image_format` is 'JPEG', this\n method will look for a method named `preprocess_JPEG`, if found\n `image` will be passed to it.\n\n Arguments:\n image: a PIL Image instance\n image_format: str, a valid PIL format (i.e. 'JPEG' or 'WEBP')\n\n Subclasses should return a 2-tuple:\n * [0]: A PIL Image instance.\n * [1]: A dictionary of additional keyword arguments to be used\n when the instance is saved. If no additional keyword\n arguments, return an empty dict ({}).\n\n ", "n_words": 92, "vocab_size": 70, "n_whitespaces": 223, "language": "en" } }, { "id": 262880, "commit_id": "e232aaf089d150b085502b97ce0fcf699b45e1b2", "repo": "pyinstaller", "path": "PyInstaller/utils/hooks/__init__.py", "file_name": "__init__.py", "fun_name": "get_package_paths", "commit_message": "hookutils: support multiple package paths in collect_* helpers\n\nSplit the functionality of ``get_package_paths`` into two new helpers,\n``get_all_package_paths`` and ``package_base_path``. The former obtains\nall package paths, while the latter simplifies removal of\npackage-specific sub-path from the full package-path.\n\nImplement the old, backwards-compatible ``get_package_paths`` using\nthese helpers; the function now supports namespace packages, but\nalways returns a single package path and its base path.\n\nHave ``collect_submodules``, ``collect_dynamic_libs``, and\n``collect_data_files`` helpers use the new ``get_all_package_paths``\nand extend them to process all returned package paths. This enables\nproper support for PEP420 namespace packages with multiple package\npaths.", "code": "def get_package_paths(package):\n \n pkg_paths = get_all_package_paths(package)\n if not pkg_paths:\n raise ValueError(f\"Package '{package}' does not exist or is not a package!\")\n\n if len(pkg_paths) > 1:\n logger.warning(\n \"get_package_paths - package %s has multiple paths (%r); returning only first one!\", package, pkg_paths\n )\n\n pkg_dir = pkg_paths[0]\n pkg_base = package_base_path(pkg_dir, package)\n\n return pkg_base, pkg_dir\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 102, "n_words": 49, "vocab_size": 42, "complexity": 3, "nloc": 11, "token_counts": 58, "n_ast_nodes": 100, "n_identifiers": 11, "random_cut": "def get_package_paths(package):\n \n pkg_paths = get_all_package_paths(package)\n if not pkg_paths:\n raise ValueError(f\"Package '{package}' does not exist or is not a package!\")\n\n if len(pkg_paths) > 1:\n logger.warning(\n \"get_package_paths - packa", "d_id": 77424, "documentation": { "docstring": "\n Given a package, return the path to packages stored on this machine and also returns the path to this particular\n package. For example, if pkg.subpkg lives in /abs/path/to/python/libs, then this function returns\n ``(/abs/path/to/python/libs, /abs/path/to/python/libs/pkg/subpkg)``.\n\n NOTE: due to backwards compatibility, this function returns only one package path along with its base directory.\n In case of PEP 420 namespace package with multiple location, only first location is returned. 
To obtain all\n package paths, use the ``get_all_package_paths`` function and obtain corresponding base directories using the\n ``package_base_path`` helper.\n ", "n_words": 84, "vocab_size": 63, "n_whitespaces": 109, "language": "en" } }, { "id": 65820, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/e_commerce/shopping_cart/cart.py", "file_name": "cart.py", "fun_name": "get_address_territory", "commit_message": "style: format code with black", "code": "def get_address_territory(address_name):\n\t\n\tterritory = None\n\n\tif address_name:\n\t\taddress_fields = frappe.db.get_value(\"Address\", address_name, [\"city\", \"state\", \"country\"])\n\t\tfor value in address_fields:\n\t\t\tterritory = frappe.db.get_value(\"Territory\", value)\n\t\t\tif territory:\n\t\t\t\tbreak\n\n\treturn territory\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 18, "n_words": 27, "vocab_size": 22, "complexity": 4, "nloc": 9, "token_counts": 55, "n_ast_nodes": 95, "n_identifiers": 8, "random_cut": "def get_address_territory(address_name):\n\t\n\tterritory = None\n\n\tif address_name:\n\t\taddress_fields = frappe.db.get_value(\"Address\", address_name, [\"city\", \"state\", \"country\"])\n\t\tfor value in address_fields:\n\t\t\tterritory = frappe.db.get_value(\"Territory\", value)\n\t\t\tif territory:\n\t\t\t\tbreak\n\n\treturn territory\n\n", "d_id": 14022, "documentation": { "docstring": "Tries to match city, state and country of address to existing territory", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 265105, "commit_id": "0c915f7de9612c7485da3713cc6d63f368698a5d", "repo": "netbox", "path": "netbox/dcim/svg.py", "file_name": "svg.py", "fun_name": "_get_device_coords", "commit_message": "Clean up rack elevation rendering", "code": "def _get_device_coords(self, position, height):\n \n x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH\n y = RACK_ELEVATION_BORDER_WIDTH\n if self.rack.desc_units:\n y += int((position - 1) * self.unit_height)\n else:\n y += int((self.rack.u_height - position + 1) * self.unit_height) - int(height * self.unit_height)\n\n return x, y\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 102, "n_words": 38, "vocab_size": 24, "complexity": 2, "nloc": 8, "token_counts": 76, "n_ast_nodes": 121, "n_identifiers": 13, "random_cut": "def _get_device_coords(self, position, height):\n \n x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH\n y = RACK_ELEVATION_BORDER_WIDTH\n if self.rack.desc_units:\n y += int((position - 1) * self.unit_height)\n ", "d_id": 77991, "documentation": { "docstring": "\n Return the X, Y coordinates of the top left corner for a device in the specified rack unit.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 33, "language": "en" } }, { "id": 195852, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/core/numbers.py", "file_name": "numbers.py", "fun_name": "igcd", "commit_message": "Improved documentation formatting", "code": "def igcd(*args):\n \n if len(args) < 2:\n raise TypeError(\n 'igcd() takes at least 2 arguments (%s given)' % len(args))\n args_temp = [abs(as_int(i)) for i in args]\n if 1 in args_temp:\n return 1\n a = args_temp.pop()\n if HAS_GMPY: # Using gmpy if present to speed up.\n for b in args_temp:\n a = gmpy.gcd(a, b) if b 
else a\n return as_int(a)\n for b in args_temp:\n a = math.gcd(a, b)\n return a\n\n\nigcd2 = math.gcd\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 151, "n_words": 71, "vocab_size": 46, "complexity": 8, "nloc": 15, "token_counts": 98, "n_ast_nodes": 166, "n_identifiers": 16, "random_cut": "def igcd(*args):\n \n if len(args) < 2:\n raise TypeError(\n 'igcd() takes at least 2 arguments (%s given)' % len(args))\n args_temp = [abs(as_int(i)) for i in args]\n if 1 in args_temp:\n return 1\n ", "d_id": 47439, "documentation": { "docstring": "Computes nonnegative integer greatest common divisor.\n\n Explanation\n ===========\n\n The algorithm is based on the well known Euclid's algorithm [1]_. To\n improve speed, ``igcd()`` has its own caching mechanism.\n\n Examples\n ========\n\n >>> from sympy import igcd\n >>> igcd(2, 4)\n 2\n >>> igcd(5, 10, 15)\n 5\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Euclidean_algorithm\n\n ", "n_words": 49, "vocab_size": 46, "n_whitespaces": 94, "language": "en" } }, { "id": 320887, "commit_id": "e15bda307e42c288b926f578e7bf8c610e4767af", "repo": "qutebrowser", "path": "qutebrowser/browser/webengine/webenginetab.py", "file_name": "webenginetab.py", "fun_name": "_prev_next_cb", "commit_message": "search: Split navigation/search callbacks\n\nThis way, we can move more logic (checking wrapping, etc.) into the API,\nthus making the commands much more simple and stateless.", "code": "def _prev_next_cb(self, found, *, going_up, callback):\n \n if found:\n result = browsertab.SearchNavigationResult.found\n # Check if the match count change is opposite to the search direction\n if self._old_match.current > 0:\n if not going_up and self._old_match.current > self.match.current:\n result = browsertab.SearchNavigationResult.wrapped_bottom\n elif going_up and self._old_match.current < self.match.current:\n result = browsertab.SearchNavigationResult.wrapped_top\n else:\n result = browsertab.SearchNavigationResult.not_found\n\n callback(result)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 192, "n_words": 52, "vocab_size": 36, "complexity": 7, "nloc": 11, "token_counts": 91, "n_ast_nodes": 145, "n_identifiers": 14, "random_cut": "def _prev_next_cb(self, found, *, going_up, callback):\n \n if found:\n result = browsertab.SearchNavigationResult.found\n # Check if the match count change is opposite to the search direction\n if self._old_match.current > 0:\n if not going_up and self._old_match.current > self.match.current:\n result = browsertab.SearchNavigationResult.wrapped_bottom\n elif going_up and self._old_match.current < self.m", "d_id": 117417, "documentation": { "docstring": "Call the prev/next callback based on the search result.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 178524, "commit_id": "ab7014c6457b2b65010aea41512ca75d93847c9a", "repo": "Nuitka", "path": "nuitka/plugins/standard/TensorflowPlugin.py", "file_name": "TensorflowPlugin.py", "fun_name": "onModuleSourceCode", "commit_message": "Plugins: Slight more helpful error message in case tensorflow works", "code": "def onModuleSourceCode(self, module_name, source_code):\n \n if module_name != \"tensorflow\":\n return source_code\n\n source_lines = source_code.splitlines()\n found_insert = False\n for i, l in enumerate(source_lines):\n if 
l.startswith(\"def \") and \"_running_from_pip_package():\" in l:\n source_lines.insert(i, \"_site_packages_dirs = []\")\n source_lines.insert(i, \"from tensorflow.python import keras\")\n found_insert = True\n break\n\n if found_insert is True:\n self.info(\"Patched 'running-from-pip' path magic.\")\n else:\n self.sysexit(\"Did not find 'running-from-pip' path magic code.\")\n\n return \"\\n\".join(source_lines)\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 219, "n_words": 59, "vocab_size": 47, "complexity": 6, "nloc": 16, "token_counts": 95, "n_ast_nodes": 170, "n_identifiers": 15, "random_cut": "def onModuleSourceCode(self, module_name, source_code):\n \n if module_name != \"tensorflow\":\n return source_code\n\n source_lines = source_code.splitlines()\n found_insert = False\n for i, l in enumerate(source_lines):\n if l.startswith(\"def \") and \"_running_from_pip_package():\" in l:\n source_lines.insert(i, \"_site_packages_dirs = []\")\n source_lines.insert(i, \"from tensorflow.python import keras\")\n found_insert = True\n break\n\n if found_insert is True:\n self.info(\"Patched 'running-from-pip' path magic.\")\n else:\n self.sysexit(\"Did not find 'running-from-pip' path magic code.\")\n", "d_id": 42730, "documentation": { "docstring": "Neutralize some path magic in tensorflow.\n\n Notes:\n Make sure tensorflow understands, we are not running as a PIP\n installed application.\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 56, "language": "en" } }, { "id": 170734, "commit_id": "9820edc174730e11cb423d7869650c13100eb314", "repo": "pandas", "path": "pandas/core/common.py", "file_name": "common.py", "fun_name": "cast_scalar_indexer", "commit_message": "DEPR: indexing (#49412)\n\n* DEPR: disallow Series.__getitem__ with a single-element list containing slice\r\n\r\n* DEPR: disallow slicing with positional slicer and .loc\r\n\r\n* DEPR: disallow positional indexing with float key\r\n\r\n* move whatsnew\r\n\r\n* DEPR: disallow multi-dimensional indexing\r\n\r\n* fix matplotlib tests\r\n\r\n* update install.rst", "code": "def cast_scalar_indexer(val):\n \n # assumes lib.is_scalar(val)\n if lib.is_float(val) and val.is_integer():\n raise IndexError(\n # GH#34193\n \"Indexing with a float is no longer supported. Manually convert \"\n \"to an integer key instead.\"\n )\n return val\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 91, "n_words": 32, "vocab_size": 31, "complexity": 3, "nloc": 7, "token_counts": 28, "n_ast_nodes": 55, "n_identifiers": 6, "random_cut": "def cast_scalar_indexer(val):\n \n # assumes lib.is_scalar(val)\n if lib.is_float(val) and val.is_integer():\n raise IndexError(\n # GH#34193\n \"Indexing with a float is no lon", "d_id": 40604, "documentation": { "docstring": "\n Disallow indexing with a float key, even if that key is a round number.\n\n Parameters\n ----------\n val : scalar\n\n Returns\n -------\n outval : scalar\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 49, "language": "en" } }, { "id": 268085, "commit_id": "b993b5cd49662f715774c333ce98e2845227ab66", "repo": "ansible", "path": "test/lib/ansible_test/_internal/util.py", "file_name": "util.py", "fun_name": "load_plugins", "commit_message": "ansible-test - Convert more type hints. 
(#78449)\n\n* Simple regex replace of multi-line function arg annotations.\r\n\r\n* Simple regex replace of multi-line function arg annotations with default values.\r\n\r\n* Simple regex replace of multi-line function arg return annotations.\r\n\r\n* Simple regex replace of assignment annotations.", "code": "def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None\n \n plugins: t.Dict[str, t.Type[C]] = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type))\n\n for plugin in plugins:\n database[plugin] = plugins[plugin]\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 45, "n_words": 28, "vocab_size": 23, "complexity": 3, "nloc": 4, "token_counts": 65, "n_ast_nodes": 99, "n_identifiers": 15, "random_cut": "def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None\n \n plugins: t.", "d_id": 79351, "documentation": { "docstring": "\n Load plugins of the specified type and track them in the specified database.\n Only plugins which have already been imported will be loaded.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 33, "language": "en" } }, { "id": 200437, "commit_id": "24f1e7730119fe958cc8e28411f790c9a5ec04eb", "repo": "sympy", "path": "sympy/solvers/ode/nonhomogeneous.py", "file_name": "nonhomogeneous.py", "fun_name": "_undetermined_coefficients_match", "commit_message": "Fix various typos\n\nFound via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`", "code": "def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero):\n r\n a = Wild('a', exclude=[x])\n b = Wild('b', exclude=[x])\n expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1)\n retdict = {}\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 29, "vocab_size": 23, "complexity": 7, "nloc": 54, "token_counts": 151, "n_ast_nodes": 93, "n_identifiers": 14, "random_cut": "def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero):\n r\n a = Wild('a', exclude=[x])\n b = W", "d_id": 49648, "documentation": { "docstring": "\n Returns a trial function match if undetermined coefficients can be applied\n to ``expr``, and ``None`` otherwise.\n\n A trial expression can be found for an expression for use with the method\n of undetermined coefficients if the expression is an\n additive/multiplicative combination of constants, polynomials in `x` (the\n independent variable of expr), `\\sin(a x + b)`, `\\cos(a x + b)`, and\n `e^{a x}` terms (in other words, it has a finite number of linearly\n independent derivatives).\n\n Note that you may still need to multiply each term returned here by\n sufficient `x` to make it linearly independent with the solutions to the\n homogeneous equation.\n\n This is intended for internal use by ``undetermined_coefficients`` hints.\n\n SymPy currently has no way to convert `\\sin^n(x) \\cos^m(y)` into a sum of\n only `\\sin(a x)` and `\\cos(b x)` terms, so these are not implemented. 
So,\n for example, you will need to manually convert `\\sin^2(x)` into `[1 +\n \\cos(2 x)]/2` to properly apply the method of undetermined coefficients on\n it.\n\n Examples\n ========\n\n >>> from sympy import log, exp\n >>> from sympy.solvers.ode.nonhomogeneous import _undetermined_coefficients_match\n >>> from sympy.abc import x\n >>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x)\n {'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}}\n >>> _undetermined_coefficients_match(log(x), x)\n {'test': False}\n\n ", "n_words": 194, "vocab_size": 127, "n_whitespaces": 277, "language": "en" } }, { "id": 100698, "commit_id": "afec52309326304f4323029039e49bfcf928ef43", "repo": "faceswap", "path": "lib/gui/analysis/stats.py", "file_name": "stats.py", "fun_name": "_remove_raw", "commit_message": "Bugfixes:\n - Stats graph - Handle NaNs in data\n - logger - de-elevate matplotlib font messages", "code": "def _remove_raw(self) -> None:\n \n if \"raw\" in self._selections:\n return\n logger.debug(\"Removing Raw Data from output\")\n for key in list(self._stats.keys()):\n if key.startswith(\"raw\"):\n del self._stats[key]\n logger.debug(\"Removed Raw Data from output\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 99, "n_words": 27, "vocab_size": 21, "complexity": 4, "nloc": 9, "token_counts": 57, "n_ast_nodes": 102, "n_identifiers": 10, "random_cut": "def _remove_raw(self) -> None:\n \n if \"raw\" in self._selections:\n return\n logger.debug(\"Removing Raw Data from output\")\n for key in list(self._stats.keys()):\n if key.startswith(\"raw\"):\n ", "d_id": 20154, "documentation": { "docstring": " Remove raw values from :attr:`stats` if they are not requested. 
", "n_words": 10, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 269374, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/applications/efficientnet_weight_update_util.py", "file_name": "efficientnet_weight_update_util.py", "fun_name": "get_keras_blocks", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_keras_blocks(keras_weight_names):\n \n # example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a'\n keras_blocks = {x.split(\"_\")[0] for x in keras_weight_names if \"block\" in x}\n return sorted(keras_blocks)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 20, "vocab_size": 19, "complexity": 3, "nloc": 3, "token_counts": 32, "n_ast_nodes": 57, "n_identifiers": 6, "random_cut": "def get_keras_blocks(keras_weight_names):\n \n # example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a'\n keras_blocks = {x.split(\"_\")[0] for x in keras_weight_names if \"block\" in x}\n return sorted(keras_blocks)\n\n", "d_id": 80044, "documentation": { "docstring": "Extract the block names from list of full weight names.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 65128, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/party.py", "file_name": "party.py", "fun_name": "get_dashboard_info", "commit_message": "style: format code with black", "code": "def get_dashboard_info(party_type, party, loyalty_program=None):\n\tcurrent_fiscal_year = get_fiscal_year(nowdate(), as_dict=True)\n\n\tdoctype = \"Sales Invoice\" if party_type == \"Customer\" else \"Purchase Invoice\"\n\n\tcompanies = frappe.get_all(\n\t\tdoctype, filters={\"docstatus\": 1, party_type.lower(): party}, distinct=1, fields=[\"company\"]\n\t)\n\n\tcompany_wise_info = []\n\n\tcompany_wise_grand_total = frappe.get_all(\n\t\tdoctype,\n\t\tfilters={\n\t\t\t\"docstatus\": 1,\n\t\t\tparty_type.lower(): party,\n\t\t\t\"posting_date\": (\n\t\t\t\t\"between\",\n\t\t\t\t[current_fiscal_year.year_start_date, current_fiscal_year.year_end_date],\n\t\t\t),\n\t\t},\n\t\tgroup_by=\"company\",\n\t\tfields=[\n\t\t\t\"company\",\n\t\t\t\"sum(grand_total) as grand_total\",\n\t\t\t\"sum(base_grand_total) as base_grand_total\",\n\t\t],\n\t)\n\n\tloyalty_point_details = []\n\n\tif party_type == \"Customer\":\n\t\tloyalty_point_details = frappe._dict(\n\t\t\tfrappe.get_all(\n\t\t\t\t\"Loyalty Point Entry\",\n\t\t\t\tfilters={\n\t\t\t\t\t\"customer\": party,\n\t\t\t\t\t\"expiry_date\": (\">=\", getdate()),\n\t\t\t\t},\n\t\t\t\tgroup_by=\"company\",\n\t\t\t\tfields=[\"company\", \"sum(loyalty_points) as loyalty_points\"],\n\t\t\t\tas_list=1,\n\t\t\t)\n\t\t)\n\n\tcompany_wise_billing_this_year = frappe._dict()\n\n\tfor d in company_wise_grand_total:\n\t\tcompany_wise_billing_this_year.setdefault(\n\t\t\td.company, {\"grand_total\": d.grand_total, \"base_grand_total\": d.base_grand_total}\n\t\t)\n\n\tcompany_wise_total_unpaid = frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\t(party_type, party),\n\t\t)\n\t)\n\n\tfor d in companies:\n\t\tcompany_default_currency = frappe.db.get_value(\"Company\", d.company, \"default_currency\")\n\t\tparty_account_currency = get_party_account_currency(party_type, party, d.company)\n\n\t\tif party_account_currency == company_default_currency:\n\t\t\tbilling_this_year = 
flt(\n\t\t\t\tcompany_wise_billing_this_year.get(d.company, {}).get(\"base_grand_total\")\n\t\t\t)\n\t\telse:\n\t\t\tbilling_this_year = flt(company_wise_billing_this_year.get(d.company, {}).get(\"grand_total\"))\n\n\t\ttotal_unpaid = flt(company_wise_total_unpaid.get(d.company))\n\n\t\tif loyalty_point_details:\n\t\t\tloyalty_points = loyalty_point_details.get(d.company)\n\n\t\tinfo = {}\n\t\tinfo[\"billing_this_year\"] = flt(billing_this_year) if billing_this_year else 0\n\t\tinfo[\"currency\"] = party_account_currency\n\t\tinfo[\"total_unpaid\"] = flt(total_unpaid) if total_unpaid else 0\n\t\tinfo[\"company\"] = d.company\n\n\t\tif party_type == \"Customer\" and loyalty_point_details:\n\t\t\tinfo[\"loyalty_points\"] = loyalty_points\n\n\t\tif party_type == \"Supplier\":\n\t\t\tinfo[\"total_unpaid\"] = -1 * info[\"total_unpaid\"]\n\n\t\tcompany_wise_info.append(info)\n\n\treturn company_wise_info\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 121, "n_words": 193, "vocab_size": 116, "complexity": 12, "nloc": 77, "token_counts": 432, "n_ast_nodes": 712, "n_identifiers": 45, "random_cut": "def get_dashboard_info(party_type, party, loyalty_program=None):\n\tcurrent_fiscal_year = get_fiscal_year(nowdate(), as_dict=True)\n\n\tdoctype = \"Sales Invoice\" if party_type == \"Customer\" else \"Purchase Invoice\"\n\n\tcompanies = frappe.get_all(\n\t\tdoctype, filters={\"docstatus\": 1, party_type.lower(): party}, distinct=1, fields=[\"company\"]\n\t)\n\n\tcompany_wise_info = []\n\n\tcompany_wise_grand_total = frappe.get_all(\n\t\tdoctype,\n\t\tfilters={\n\t\t\t\"docstatus\": 1,\n\t\t\tparty_type.lower(): party,\n\t\t\t\"posting_date\": (\n\t\t\t\t\"between\",\n\t\t\t\t[current_fiscal_year.year_start_date, current_fiscal_year.year_end_date],\n\t\t\t),\n\t\t},\n\t\tgroup_by=\"company\",\n\t\tfields=[\n\t\t\t\"company\",\n\t\t\t\"sum(grand_total) as grand_total\",\n\t\t\t\"sum(base_grand_total) as base_grand_total\",\n\t\t],\n\t)\n\n\tloyalty_point_details = []\n\n\tif party_type == \"Customer\":\n\t\tloyalty_point_details = frappe._dict(\n\t\t\tfrappe.get_all(\n\t\t\t\t\"Loyalty Point Entry\",\n\t\t\t\tfilters={\n\t\t\t\t\t\"customer\": party,\n\t\t\t\t\t\"expiry_d", "d_id": 13800, "documentation": { "docstring": "\n\t\tselect company, sum(debit_in_account_currency) - sum(credit_in_account_currency)\n\t\tfrom `tabGL Entry`\n\t\twhere party_type = %s and party=%s\n\t\tand is_cancelled = 0\n\t\tgroup by company", "n_words": 21, "vocab_size": 19, "n_whitespaces": 16, "language": "en" } }, { "id": 281509, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/portfolio/brokers/robinhood/robinhood_controller.py", "file_name": "robinhood_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use 
rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n help_text = \n console.print(text=help_text, menu=\"Portfolio - Brokers - Robinhood\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 8, "token_counts": 21, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def print_help(self):\n \n help_text = \n console.print(text=", "d_id": 83815, "documentation": { "docstring": "Print help[cmds]\n login login to robinhood\n\n holdings show account holdings in stocks\n history show equity history of your account\n[/cmds]", "n_words": 20, "vocab_size": 15, "n_whitespaces": 40, "language": "en" } }, { "id": 71984, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_edit_handlers.py", "file_name": "test_edit_handlers.py", "fun_name": "test_page_with_inline_model_with_tabbed_panel_only", "commit_message": "Reformat with black", "code": "def test_page_with_inline_model_with_tabbed_panel_only(self):\n \n\n EventPageSpeaker.settings_panels = [\n FieldPanel(\"first_name\"),\n FieldPanel(\"last_name\"),\n ]\n\n warning = checks.Warning(\n \"EventPageSpeaker.settings_panels will have no effect on InlinePanel model editing\",\n hint=,\n obj=EventPageSpeaker,\n id=self.warning_id,\n )\n\n checks_results = self.get_checks_result()\n\n self.assertIn(warning, checks_results)\n\n delattr(EventPageSpeaker, \"settings_panels\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 153, "n_words": 31, "vocab_size": 29, "complexity": 1, "nloc": 15, 
"token_counts": 66, "n_ast_nodes": 112, "n_identifiers": 16, "random_cut": "def test_page_with_inline_model_with_tabbed_panel_only(self):\n \n\n EventPageSpeaker.settings_panels = [\n FieldPanel(\"first_name\"),\n FieldPanel(\"last_name\"),\n ]\n\n warning = checks.Warning(\n \"EventPageSpeaker.settings_panels will have no effect on InlinePanel model editing\",\n hint=,\n obj=EventPageSpeaker,\n id=self.warning_id,\n )\n\n checks_results = self.get_chec", "d_id": 15811, "documentation": { "docstring": "Test that checks will warn against setting single tabbed panel on InlinePanel modelEnsure that EventPageSpeaker uses `panels` instead of `settings_panels`.\nThere are no tabs on non-Page model editing within InlinePanels.", "n_words": 30, "vocab_size": 28, "n_whitespaces": 28, "language": "en" } }, { "id": 337478, "commit_id": "02e2ed567be0e6d54b65884265a14873c3a30b2a", "repo": "accelerate", "path": "src/accelerate/utils/dataclasses.py", "file_name": "dataclasses.py", "fun_name": "to_kwargs", "commit_message": "Refactor utils into its own module (#340)\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def to_kwargs(self):\n \n default_dict = self.__class__().to_dict()\n this_dict = self.to_dict()\n return {k: v for k, v in this_dict.items() if default_dict[k] != v}\n\n\n@dataclass", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "@dataclass", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 48, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 4, "token_counts": 47, "n_ast_nodes": 82, "n_identifiers": 10, "random_cut": "def to_kwargs(self):\n \n default_dict = self.__class__().to_dict()\n this_dict = self.to_dict()\n", "d_id": 121067, "documentation": { "docstring": "\n Returns a dictionary containing the attributes with values different from the default of this class.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 30, "language": "en" } }, { "id": 319942, "commit_id": "ab761e837c4be4974f699c8c97560a4291a8d298", "repo": "paperless-ngx", "path": "src/documents/tasks.py", "file_name": "tasks.py", "fun_name": "update_document_archive_file", "commit_message": "Implements a better re-do of OCR by making the document archiver function common. 
Actually creates updated file now", "code": "def update_document_archive_file(document_id):\n \n document = Document.objects.get(id=document_id)\n\n mime_type = document.mime_type\n\n parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type)\n\n if not parser_class:\n logger.error(\n f\"No parser found for mime type {mime_type}, cannot \"\n f\"archive document {document} (ID: {document_id})\",\n )\n return\n\n parser: DocumentParser = parser_class(logging_group=uuid.uuid4())\n\n try:\n parser.parse(document.source_path, mime_type, document.get_public_filename())\n\n thumbnail = parser.get_thumbnail(\n document.source_path,\n mime_type,\n document.get_public_filename(),\n )\n\n if parser.get_archive_path():\n with transaction.atomic():\n with open(parser.get_archive_path(), \"rb\") as f:\n checksum = hashlib.md5(f.read()).hexdigest()\n # I'm going to save first so that in case the file move\n # fails, the database is rolled back.\n # We also don't use save() since that triggers the filehandling\n # logic, and we don't want that yet (file not yet in place)\n document.archive_filename = generate_unique_filename(\n document,\n archive_filename=True,\n )\n Document.objects.filter(pk=document.pk).update(\n archive_checksum=checksum,\n content=parser.get_text(),\n archive_filename=document.archive_filename,\n )\n with FileLock(settings.MEDIA_LOCK):\n create_source_path_directory(document.archive_path)\n shutil.move(parser.get_archive_path(), document.archive_path)\n shutil.move(thumbnail, document.thumbnail_path)\n\n with index.open_index_writer() as writer:\n index.update_document(writer, document)\n\n except Exception:\n logger.exception(\n f\"Error while parsing document {document} \" f\"(ID: {document_id})\",\n )\n finally:\n parser.cleanup()\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 662, "n_words": 141, "vocab_size": 108, "complexity": 5, "nloc": 43, "token_counts": 266, "n_ast_nodes": 463, "n_identifiers": 56, "random_cut": "def update_document_archive_file(document_id):\n \n document = Document.objects.get(id=document_id)\n\n mime_type = document.mime_type\n\n parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type)\n\n if not parser_class:\n logger.error(\n f\"No parser found for mime type {mime_type}, cannot \"\n f\"archive document {document} (ID: {document_id})\",\n )\n return\n\n parser: DocumentParser = parser_class(logging_group=uuid.uuid4())\n\n try:\n parser.parse(document.source_path, mime_type, document.get_public_filename())\n\n thumbnail = parser.get_thumbnail(\n document.source_path,\n mime_type,\n document.get_public_filename(),\n )\n\n if parser.get_archive_path():\n with transaction.atomic():\n with open(parser.get_archive_path(), \"rb\") as f:\n checksum = hashlib.md5(f.read()).hexdigest()\n # I'm going to save first so that in case the file move\n # fails, the database is rolled back.\n # We also don't use save() since that triggers the filehandling\n # logic, and we don't want that yet (file not yet in place)\n document.archive_filename = generate_unique_filename(\n ", "d_id": 117024, "documentation": { "docstring": "\n Re-creates the archive file of a document, including new OCR content and thumbnail\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 20, "language": "en" } }, { "id": 66630, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": 
"erpnext/patches/v12_0/move_credit_limit_to_customer_credit_limit.py", "file_name": "move_credit_limit_to_customer_credit_limit.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\t\n\tfrappe.reload_doc(\"Selling\", \"doctype\", \"Customer Credit Limit\")\n\tfrappe.reload_doc(\"Selling\", \"doctype\", \"Customer\")\n\tfrappe.reload_doc(\"Setup\", \"doctype\", \"Customer Group\")\n\n\tif frappe.db.a_row_exists(\"Customer Credit Limit\"):\n\t\treturn\n\n\tmove_credit_limit_to_child_table()\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 20, "vocab_size": 15, "complexity": 2, "nloc": 7, "token_counts": 49, "n_ast_nodes": 98, "n_identifiers": 6, "random_cut": "def execute():\n\t\n\tfrappe.reload_doc(\"Selling\", \"doctype\", \"Customer Credit Limit\")\n\tfrappe.reload_doc(\"Selling\", \"doctype\", \"Customer\")\n\tfrappe.reload_doc(\"Setup\", \"doctype\", \"Customer Group", "d_id": 14258, "documentation": { "docstring": "Move credit limit and bypass credit limit to the child table of customer credit limit", "n_words": 15, "vocab_size": 11, "n_whitespaces": 14, "language": "en" } }, { "id": 150327, "commit_id": "82aecc81f393e98b86115e9bdfa46dac1a143fad", "repo": "freqtrade", "path": "scripts/rest_client.py", "file_name": "rest_client.py", "fun_name": "forceexit", "commit_message": "Accept parameters to forceexit", "code": "def forceexit(self, tradeid, ordertype=None, amount=None):\n \n\n return self._post(\"forceexit\", data={\n \"tradeid\": tradeid,\n \"ordertype\": ordertype,\n \"amount\": amount,\n })\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 73, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 40, "n_ast_nodes": 66, "n_identifiers": 7, "random_cut": "def forceexit(self, tradeid, ordertype=None, amount=None):\n \n\n return self._post(\"forceexit\", data={\n \"tradeid\": tradeid,\n \"ordertype\": ordertype,\n \"amount\": amount,\n ", "d_id": 34708, "documentation": { "docstring": "Force-exit a trade.\n\n :param tradeid: Id of the trade (can be received via status command)\n :param ordertype: Order type to use (must be market or limit)\n :param amount: Amount to sell. 
Full sell if not given\n :return: json object\n ", "n_words": 39, "vocab_size": 35, "n_whitespaces": 74, "language": "en" } }, { "id": 62924, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/packaging/tags.py", "file_name": "tags.py", "fun_name": "_abi3_applies", "commit_message": "upd; format", "code": "def _abi3_applies(python_version):\n # type: (PythonVersion) -> bool\n \n return len(python_version) > 1 and tuple(python_version) >= (3, 2)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 2, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 4, "random_cut": "def _abi3_applies(python_version):\n # type: (PythonVersion) -> bool\n \n return len(python_version) > 1 and tu", "d_id": 13070, "documentation": { "docstring": "\n Determine if the Python version supports abi3.\n\n PEP 384 was first implemented in Python 3.2.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 25, "language": "en" } }, { "id": 269521, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "variable", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def variable(value, dtype=None, name=None, constraint=None):\n \n if dtype is None:\n dtype = floatx()\n if hasattr(value, \"tocoo\"):\n sparse_coo = value.tocoo()\n indices = np.concatenate(\n (\n np.expand_dims(sparse_coo.row, 1),\n np.expand_dims(sparse_coo.col, 1),\n ),\n 1,\n )\n v = tf.SparseTensor(\n indices=indices,\n values=sparse_coo.data,\n dense_shape=sparse_coo.shape,\n )\n v._keras_shape = sparse_coo.shape\n return v\n v = tf.Variable(\n value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint\n )\n if isinstance(value, np.ndarray):\n v._keras_shape = value.shape\n elif hasattr(value, \"shape\"):\n v._keras_shape = int_shape(value)\n track_variable(v)\n return v\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 264, "n_words": 64, "vocab_size": 44, "complexity": 5, "nloc": 28, "token_counts": 173, "n_ast_nodes": 265, "n_identifiers": 29, "random_cut": "def variable(value, dtype=None, name=None, constraint=None):\n \n if dtype is None:\n dtype = floatx()\n if hasattr(value, \"tocoo\"):\n sparse_coo = value.tocoo()\n indices = np.concatenate(\n (\n np.expand_dims(sparse_coo.row, 1),\n np.expand_dims(sparse_coo.col, 1),\n ),\n 1,\n )\n v = tf.SparseTensor(\n indices=indices,\n values=sparse_coo.data,\n dense_shape=sparse_coo.shape,\n )\n v._keras_shape = sparse_coo.shape\n return v\n v = tf.Variable(\n value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint\n )\n if isinstance(value, np.ndarray):\n ", "d_id": 80150, "documentation": { "docstring": "Instantiates a variable and returns it.\n\n Args:\n value: Numpy array, initial value of the tensor.\n dtype: Tensor type.\n name: Optional name string for the tensor.\n constraint: Optional projection function to be\n applied to the variable after an optimizer update.\n\n Returns:\n A variable instance (with Keras metadata included).\n\n Examples:\n\n >>> val = np.array([[1, 2], [3, 4]])\n >>> kvar = tf.keras.backend.variable(value=val, dtype='float64',\n ... 
name='example_var')\n >>> tf.keras.backend.dtype(kvar)\n 'float64'\n >>> print(kvar)\n \n\n ", "n_words": 77, "vocab_size": 66, "n_whitespaces": 206, "language": "en" } }, { "id": 284080, "commit_id": "73187d9e17a4838fc6ec583bcfcab593e06508cf", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/dark_pool_shorts/ibkr_model.py", "file_name": "ibkr_model.py", "fun_name": "get_cost_to_borrow", "commit_message": "Add cost to borrow of stocks. Data from IBKR (#1663)\n\n* add ctb to dps\r\n\r\n* add test for ctb\r\n\r\n* reformat using black\r\n\r\n* fix tests for ctb\r\n\r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: jmaslek ", "code": "def get_cost_to_borrow() -> pd.DataFrame:\n \n ftp = ftplib.FTP(\"ftp3.interactivebrokers.com\", \"shortstock\")\n\n flo = BytesIO()\n ftp.retrbinary(\"RETR usa.txt\", flo.write)\n flo.seek(0)\n\n data = pd.read_csv(flo, sep=\"|\", skiprows=1)\n data = data[[\"#SYM\", \"FEERATE\", \"AVAILABLE\"]]\n data[\"AVAILABLE\"] = data[\"AVAILABLE\"].replace(\">10000000\", 10000000)\n data.fillna(0, inplace=True)\n data[\"AVAILABLE\"] = data[\"AVAILABLE\"].astype(int)\n data.sort_values(by=[\"FEERATE\"], ascending=False, inplace=True)\n data[\"FEERATE\"] = data[\"FEERATE\"].apply(lambda x: str(x) + \"%\")\n data.columns = [\"Symbol\", \"Fees\", \"Available\"]\n return data\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 93, "n_words": 51, "vocab_size": 40, "complexity": 1, "nloc": 21, "token_counts": 161, "n_ast_nodes": 281, "n_identifiers": 27, "random_cut": "def get_cost_to_borrow() -> pd.DataFrame:\n \n ftp = ftplib.FTP(\"ftp3.interactivebrokers.com\", \"shortstock\")\n\n flo = BytesIO()\n ftp.retrbinary(\"RETR usa.txt\", flo.write)\n flo.seek(0)\n\n data = pd.read_csv(flo, sep=\"|\", skiprows=1)\n data = data[[\"#SYM\", \"FEERATE\", \"AVAILABLE\"]]\n data[\"AVAILABLE\"] = data[\"AVAILABLE\"].replace(\">10000000\", 10000000)\n data.fillna(0, inplace=True)\n data[\"AVAILABLE\"] = data[\"AVAILABLE\"].astype(int)\n data.sort_values(by=[\"FEERATE\"], ascending=False, inplace=True)\n data[\"FEERATE\"] = data[\"FEERATE\"].apply(lambda x: str(x) + \"%\")\n data.columns = [\"Symbol\", \"Fees\", \"Available\"]\n return dat", "d_id": 84633, "documentation": { "docstring": "Get stocks with highest cost to borrow [Source: Interactive Broker]\n\n Returns\n -------\n pd.DataFrame\n Cost to borrow\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 35, "language": "en" } }, { "id": 43885, "commit_id": "dba00ce6a32b7f50153887c6974f62985ca8023f", "repo": "airflow", "path": "airflow/jobs/local_task_job.py", "file_name": "local_task_job.py", "fun_name": "_enable_task_listeners", "commit_message": "Add Listener Plugin API that tracks TaskInstance state changes (#20443)\n\nThis adds new Plugin API - \"listeners\". It enables plugin authors to write\r\n[pluggy hook implementation][1] that will be called on certain formalized extension\r\npoints. 
To differentiate between current Airflow extension points, like\r\nplugins, and current Airflow hooks, implementations of those hooks are called\r\nlisteners.\r\n\r\nThe API is ment to be called across all dags, and all operators - in contrast\r\nto current on_success_callback, pre_execute and related family which are meant\r\nto provide callbacks for particular dag authors, or operator creators.\r\n\r\npluggy mechanism enables us to execute multiple, or none, listeners that\r\nimplement particular extension point, so that users can use multiple listeners\r\nseamlessly.\r\n\r\nIn this PR, three such extension points are added. When TaskInstance's state is\r\nchanged to RUNNING, on_task_instance_running hook is called. On change\r\ntoSUCCESS on_task_instance_success is called, similarly on FAILED\r\non_task_instance_failed is called.\r\n\r\nActual notification mechanism is be implemented using [SQLAlchemy’s events\r\nmechanism][2]. This ensures that plugins will get every change of state,\r\nregardless of where in the codebase it happened, and not require manual\r\nannotation of TI state changes across the codebase.\r\n\r\nTo make sure that this change is not affecting performance, running this\r\nmechanism on scheduler is disabled by default. The SQLAlchemy event mechanism\r\nis also not affected by default - the event listener is only added if we have\r\nany plugin which actually provides any listener.\r\n\r\n[1]: https://pluggy.readthedocs.io/en/stable/\r\n[2]: https://docs.sqlalchemy.org/en/13/orm/session_events.html#after-flush\r\n\r\nSigned-off-by: Maciej Obuchowski ", "code": "def _enable_task_listeners():\n \n if get_listener_manager().has_listeners:\n register_task_instance_state_events()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 5, "vocab_size": 5, "complexity": 2, "nloc": 3, "token_counts": 15, "n_ast_nodes": 30, "n_identifiers": 4, "random_cut": "def _enable_task_listeners():\n \n if get_listener_manager()", "d_id": 8086, "documentation": { "docstring": "\n Check if we have any registered listeners, then register sqlalchemy hooks for\n TI state change if we do.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 138495, "commit_id": "9ee24530abf1b5e3239869b5257dd7b678337b90", "repo": "ray", "path": "python/ray/data/impl/plan.py", "file_name": "plan.py", "fun_name": "has_computed_output", "commit_message": "[Datasets] [Out-of-Band Serialization: 2/3] Refactor `ExecutionPlan` to maintain complete lineage and eagerly unlink block references. (#23931)\n\nThis PR refactors ExecutionPlan to maintain complete stage lineage, even for eagerly computed datasets, while ensuring that block references are unlinked as early as possible in order to more eagerly release block memory. 
This PR is the final precursor to adding the actual out-of-band serialization APIs (PR 3/3).\r\n\r\nThe fully lineage has to be maintained, even for eagerly computed datasets, since the lineage is needed for out-of-band serialization of datasets.", "code": "def has_computed_output(self) -> bool:\n \n return self._snapshot_blocks is not None and not self._stages_after_snapshot\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def has_computed_output(self) -> bool:\n \n return self._snapshot_blocks is not N", "d_id": 31438, "documentation": { "docstring": "Whether this plan has a computed snapshot for the final stage, i.e. for the\n output of this plan.\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 32, "language": "en" } }, { "id": 68747, "commit_id": "c3219ebad1cac35afc04cc051c9e215c70cd1e9b", "repo": "erpnext", "path": "erpnext/accounts/report/sales_register/sales_register.py", "file_name": "sales_register.py", "fun_name": "get_conditions", "commit_message": "fix(Sales Register): incorrect query with dimensions\n\nIf accounting dimension is also part of the default filters then same\nquery is repeated with incorrect syntax.\n\ne.g. `item_group = (child1, child2)` instead of `in` query.\n\nfix: don't add default filter if they are part of dimensions to be\nadded.", "code": "def get_conditions(filters):\n\tconditions = \"\"\n\n\taccounting_dimensions = get_accounting_dimensions(as_list=False) or []\n\taccounting_dimensions_list = [d.fieldname for d in accounting_dimensions]\n\n\tif filters.get(\"company\"):\n\t\tconditions += \" and company=%(company)s\"\n\n\tif filters.get(\"customer\") and \"customer\" not in accounting_dimensions_list:\n\t\tconditions += \" and customer = %(customer)s\"\n\n\tif filters.get(\"from_date\"):\n\t\tconditions += \" and posting_date >= %(from_date)s\"\n\tif filters.get(\"to_date\"):\n\t\tconditions += \" and posting_date <= %(to_date)s\"\n\n\tif filters.get(\"owner\"):\n\t\tconditions += \" and owner = %(owner)s\"\n\n\tdef get_sales_invoice_item_field_condition(field, table=\"Sales Invoice Item\") -> str:\n\t\tif not filters.get(field) or field in accounting_dimensions_list:\n\t\t\treturn \"\"\n\t\treturn f\n\n\tconditions += get_sales_invoice_item_field_condition(\"mode_of_payments\", \"Sales Invoice Payment\")\n\tconditions += get_sales_invoice_item_field_condition(\"cost_center\")\n\tconditions += get_sales_invoice_item_field_condition(\"warehouse\")\n\tconditions += get_sales_invoice_item_field_condition(\"brand\")\n\tconditions += get_sales_invoice_item_field_condition(\"item_group\")\n\n\tif accounting_dimensions:\n\t\tcommon_condition = \n\t\tfor dimension in accounting_dimensions:\n\t\t\tif filters.get(dimension.fieldname):\n\t\t\t\tif frappe.get_cached_value(\"DocType\", dimension.document_type, \"is_tree\"):\n\t\t\t\t\tfilters[dimension.fieldname] = get_dimension_with_children(\n\t\t\t\t\t\tdimension.document_type, filters.get(dimension.fieldname)\n\t\t\t\t\t)\n\n\t\t\t\t\tconditions += (\n\t\t\t\t\t\tcommon_condition\n\t\t\t\t\t\t+ \"and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)\".format(dimension.fieldname)\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tconditions += (\n\t\t\t\t\t\tcommon_condition\n\t\t\t\t\t\t+ \"and ifnull(`tabSales Invoice Item`.{0}, '') in 
%({0})s)\".format(dimension.fieldname)\n\t\t\t\t\t)\n\n\treturn conditions\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 110, "n_words": 150, "vocab_size": 73, "complexity": 13, "nloc": 41, "token_counts": 213, "n_ast_nodes": 446, "n_identifiers": 21, "random_cut": "def get_conditions(filters):\n\tconditions = \"\"\n\n\taccounting_dimensions = get_accounting_dimensions(as_list=False) or []\n\taccounting_dimensions_list = [d.fieldname for d in accounting_dimensions]\n\n\tif filters.get(\"company\"):\n\t\tconditions += \" and company=%(company)s\"\n\n\tif filters.get(\"customer\") and \"customer\" not in accounting_dimensions_list:\n\t\tconditions += \" and customer = %(customer)s\"\n\n\tif filters.get(\"from_date\"):\n\t\tconditions += \" and posting_date >= %(from_date)s\"\n\tif filters.get(\"to_date\"):\n\t\tconditions += \" and posting_date <= %(to_date)s\"\n\n\tif filters.get(\"owner\"):\n\t\tconditions += \" and owner = %(owner)s\"\n\n\tdef get_sales_invoice_item_field_condition(field, table=\"Sales Invoice Item\") -> str:\n\t\tif not filters.get(field) or field in accounting_dimensions_list:\n\t\t\treturn \"\"\n\t\treturn f\n\n\tconditions += get_sales_invoice_item_field_condition(\"mode_of_payments\", \"Sales Invoice Payment\")\n\tconditions += get_sales_invoice_item_field_condition(\"cost_center\")\n\tconditions += get_sales_invoice_item_field_condition(\"warehouse\")\n\tconditions += get_sales_invoice_item_field_condition(\"brand\")\n\tconditions += get_sales_invoice_item_field_condition(\"item_group\")\n\n\tif accounting_dimensions:\n\t\tcommon_condition = \n\t\tfor dimension in accounting_dimensions:\n\t\t\tif filters.get(dimension.fieldname):\n\t\t\t\tif frappe.get_cached_value(\"DocType\", dimension.document_type, \"is_tree\"):\n\t\t\t\t\tfilters[dimension.fieldname] = get_dimension_with_children(\n\t\t\t\t\t\tdimension.document_type, filters.get(dimension.fieldname)\n\t\t\t\t\t)\n\n\t\t\t\t\tconditions += (\n\t\t\t\t\t\tcommon_condition\n\t\t\t\t\t\t+ \"and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)\".format(dimension.fieldname)\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\tconditions += (\n\t\t\t\t\t\tcommon_condition\n\t\t\t\t\t\t+ \"and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)\".format(dim", "d_id": 14851, "documentation": { "docstring": " and exists(select name from `tab{table}`\n\t\t\t where parent=`tabSales Invoice`.name\n\t\t\t \tand ifnull(`tab{table}`.{field}, '') = %({field})s)\n\t\t\tand exists(select name from `tabSales Invoice Item`\n\t\t\t\twhere parent=`tabSales Invoice`.name\n\t\t\t", "n_words": 23, "vocab_size": 15, "n_whitespaces": 21, "language": "en" } }, { "id": 19198, "commit_id": "847eb6b22d03f0cffef945996cf835272870435a", "repo": "mlflow", "path": "mlflow/sklearn/utils.py", "file_name": "utils.py", "fun_name": "_get_classifier_artifacts", "commit_message": "Improve confusion matrix plot (#5273)\n\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):\n \n import sklearn\n\n if not _is_plotting_supported():\n return []\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 13, "vocab_size": 13, "complexity": 3, "nloc": 48, "token_counts": 187, "n_ast_nodes": 41, 
"n_identifiers": 8, "random_cut": "def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):\n \n import sklearn\n\n if", "d_id": 2909, "documentation": { "docstring": "\n Draw and record various common artifacts for classifier\n\n For all classifiers, we always log:\n (1) confusion matrix:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html\n\n For only binary classifiers, we will log:\n (2) precision recall curve:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html\n (3) roc curve:\n https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n\n Steps:\n 1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets.\n 2. If the sample_weight argument exists in fit_func (accuracy_score by default\n has sample_weight), extract it from fit_args or fit_kwargs as\n (y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput)\n 3. return a list of artifacts path to be logged\n\n :param fitted_estimator: The already fitted regressor\n :param fit_args: Positional arguments given to fit_func.\n :param fit_kwargs: Keyword arguments given to fit_func.\n :return: List of artifacts to be logged\n ", "n_words": 117, "vocab_size": 91, "n_whitespaces": 178, "language": "en" } }, { "id": 176941, "commit_id": "7d910e7184abd385c929f789b0c935ab143fc932", "repo": "networkx", "path": "networkx/algorithms/swap.py", "file_name": "swap.py", "fun_name": "double_edge_swap", "commit_message": "Implement directed edge swap (#5663)\n\n* Add tests for directed edge swap\r\n\r\n* Add directed edge swap algorithm\r\n\r\n* Allow more swaps in directed tests\r\n\r\n* Fix errors in swap.py to meet test criteria\r\n\r\n* Remove TODOs\r\n\r\n* Update documentation for directed_edge_swap and run black\r\n\r\n* Fix incosistent spacing\r\n\r\n* Add references\r\n\r\n* Added PR to release docs\r\n\r\n* Fix reference formatting\r\n\r\n* Improve documentation\r\n\r\n* An -> A\r\n\r\n* Update networkx/algorithms/swap.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Add 'Raises' section to documentation\r\n\r\n* Update tests to use keyword arguments for directed_edge_swap\r\n\r\n* Fix references to 'triple-edge' swap\r\n\r\n* Use not_implemented_for decorator for directed_edge_swap\r\n\r\n* Rename n to tries and remove unnecessary updates\r\n\r\n* Rename e to msg\r\n\r\n* Use 'succ' instead of 'out_edges' for getting successors\r\n\r\n* Update networkx/algorithms/swap.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/tests/test_swap.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/tests/test_swap.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/tests/test_swap.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/swap.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Convert gnp graphs to path graphs for clarity\r\n\r\n* Use seed when testing directed edge swap\r\n\r\n* Check node equality sooner\r\n\r\n* Add directed_edge_swap to documentation\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult ", "code": "def double_edge_swap(G, nswap=1, max_tries=100, seed=None):\n \n if G.is_directed():\n raise nx.NetworkXError(\n \"double_edge_swap() not defined for directed graphs. 
Use directed_edge_swap instead.\"\n )\n if nswap > max_tries:\n raise nx.NetworkXError(\"Number of swaps > number of tries allowed.\")\n if len(G) < 4:\n raise nx.NetworkXError(\"Graph has less than four nodes.\")\n # Instead of choosing uniformly at random from a generated edge list,\n # this algorithm chooses nonuniformly from the set of nodes with\n # probability weighted by degree.\n n = 0\n swapcount = 0\n keys, degrees = zip(*G.degree()) # keys, degree\n cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree\n discrete_sequence = nx.utils.discrete_sequence\n while swapcount < nswap:\n # if random.random() < 0.5: continue # trick to avoid periodicities?\n # pick two random edges without creating edge list\n # choose source node indices from discrete distribution\n (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)\n if ui == xi:\n continue # same source, skip\n u = keys[ui] # convert index to label\n x = keys[xi]\n # choose target uniformly from neighbors\n v = seed.choice(list(G[u]))\n y = seed.choice(list(G[x]))\n if v == y:\n continue # same target, skip\n if (x not in G[u]) and (y not in G[v]): # don't create parallel edges\n G.add_edge(u, x)\n G.add_edge(v, y)\n G.remove_edge(u, v)\n G.remove_edge(x, y)\n swapcount += 1\n if n >= max_tries:\n e = (\n f\"Maximum number of swap attempts ({n}) exceeded \"\n f\"before desired swaps achieved ({nswap}).\"\n )\n raise nx.NetworkXAlgorithmError(e)\n n += 1\n return G\n\n\n@py_random_state(3)", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@py_random_state(3)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 559, "n_words": 228, "vocab_size": 154, "complexity": 10, "nloc": 38, "token_counts": 251, "n_ast_nodes": 428, "n_identifiers": 33, "random_cut": "def double_edge_swap(G, nswap=1, max_tries=100, seed=None):\n \n if G.is_directed():\n raise nx.NetworkXError(\n \"double_edge_swap() not defined for directed graphs. 
Use direc", "d_id": 42180, "documentation": { "docstring": "Swap two edges in the graph while keeping the node degrees fixed.\n\n A double-edge swap removes two randomly chosen edges u-v and x-y\n and creates the new edges u-x and v-y::\n\n u--v u v\n becomes | |\n x--y x y\n\n If either the edge u-x or v-y already exist no swap is performed\n and another attempt is made to find a suitable edge pair.\n\n Parameters\n ----------\n G : graph\n An undirected graph\n\n nswap : integer (optional, default=1)\n Number of double-edge swaps to perform\n\n max_tries : integer (optional)\n Maximum number of attempts to swap edges\n\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness`.\n\n Returns\n -------\n G : graph\n The graph after double edge swaps.\n\n Notes\n -----\n Does not enforce any connectivity constraints.\n\n The graph G is modified in place.\n ", "n_words": 135, "vocab_size": 96, "n_whitespaces": 272, "language": "en" } }, { "id": 124997, "commit_id": "569fe0109629048d08e1d9e023f7769f10bd2244", "repo": "ray", "path": "rllib/offline/tests/test_dataset_reader.py", "file_name": "test_dataset_reader.py", "fun_name": "test_dataset_shard_with_task_parallelization", "commit_message": "[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)", "code": "def test_dataset_shard_with_task_parallelization(self):\n \n config = {\n \"input\": \"dataset\",\n \"input_config\": {\n \"format\": \"json\",\n \"paths\": self.dset_path,\n \"parallelism\": 10,\n },\n }\n NUM_WORKERS = 4\n\n _, shards = get_dataset_and_shards(config, num_workers=NUM_WORKERS)\n\n assert len(shards) == NUM_WORKERS + 1\n assert shards[0] is None\n assert all(\n isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:]\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 196, "n_words": 44, "vocab_size": 38, "complexity": 2, "nloc": 16, "token_counts": 86, "n_ast_nodes": 143, "n_identifiers": 16, "random_cut": "def test_dataset_shard_with_task_parallelization(self):\n \n config = {\n \"input\": \"dataset\",\n \"input_config\": {\n \"format\": \"json\",\n \"paths\": self.dset_path,\n \"parallelism\": 10,\n },", "d_id": 27737, "documentation": { "docstring": "Tests whether the dataset_shard function works correctly with parallelism\n for reading the dataset.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 108410, "commit_id": "f3edc8771b7c292c5539e0e6444746b6ccefec04", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_compare_images.py", "file_name": "test_compare_images.py", "fun_name": "test_image_comparison_expect_rms", "commit_message": "Add uuid in im1 name", "code": "def test_image_comparison_expect_rms(im1, im2, tol, expect_rms):\n \n baseline_dir, result_dir = map(Path, _image_directories(lambda: \"dummy\"))\n # Copy both \"baseline\" and \"test\" image to result_dir, so that 1)\n # compare_images writes the diff to result_dir, rather than to the source\n # tree and 2) the baseline image doesn't appear missing to triage_tests.py.\n uid = str(uuid.uuid4())\n result_im1 = make_test_filename(result_dir / (uid + im1), \"expected\")\n shutil.copyfile(baseline_dir / im1, result_im1)\n result_im2 = result_dir / im1\n shutil.copyfile(baseline_dir / im2, result_im2)\n results = compare_images(\n result_im1, result_im2, tol=tol, in_decorator=True)\n\n if expect_rms is None:\n assert results is 
None\n else:\n assert results is not None\n assert results['rms'] == approx(expect_rms, abs=1e-4)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 164, "n_words": 97, "vocab_size": 70, "complexity": 2, "nloc": 14, "token_counts": 124, "n_ast_nodes": 194, "n_identifiers": 24, "random_cut": "def test_image_comparison_expect_rms(im1, im2, tol, expect_rms):\n \n baseline_dir, result_dir = map(Path, _image_directories(lambda: \"dummy\"))\n # Copy both \"baseline\" a", "d_id": 23178, "documentation": { "docstring": "\n Compare two images, expecting a particular RMS error.\n\n im1 and im2 are filenames relative to the baseline_dir directory.\n\n tol is the tolerance to pass to compare_images.\n\n expect_rms is the expected RMS value, or None. If None, the test will\n succeed if compare_images succeeds. Otherwise, the test will succeed if\n compare_images fails and returns an RMS error almost equal to this value.\n ", "n_words": 61, "vocab_size": 45, "n_whitespaces": 83, "language": "en" } }, { "id": 102147, "commit_id": "0ece9a49d7d705b1a0cd4406d4f1c526d720e1f3", "repo": "pytorch", "path": "test/jit/test_save_load.py", "file_name": "test_save_load.py", "fun_name": "test_versioned_symbols_reserialization", "commit_message": "Revert D33198155: Bump version number to 7 and compile old operators with old schema\n\nTest Plan: revert-hammer\n\nDifferential Revision:\nD33198155 (https://github.com/pytorch/pytorch/commit/d35fc409ad84c1a837e7e07ffe3f4e4942538e50)\n\nOriginal commit changeset: 38a1185f9ecb\n\nOriginal Phabricator Diff: D33198155 (https://github.com/pytorch/pytorch/commit/d35fc409ad84c1a837e7e07ffe3f4e4942538e50)\n\nfbshipit-source-id: 411aaeb4e047aad9202db50d4d0f2ff35bc51f9d", "code": "def test_versioned_symbols_reserialization(self):\n \n module_v2 = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/_test_serialization_subcmul_v2.pt\")\n buffer = io.BytesIO()\n torch.jit.save(module_v2, buffer)\n buffer.seek(0)\n module_reserialized = torch.jit.load(buffer)\n\n subcmul_nodes = sum(\"subcmul\" in n.kind() for\n n in module_reserialized.graph.nodes())\n self.assertEqual(subcmul_nodes, 0)\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 81, "n_ast_nodes": 136, "n_identifiers": 20, "random_cut": "def test_versioned_symbols_reserialization(self):\n \n module_v2 = torch.jit.load(py", "d_id": 21474, "documentation": { "docstring": "\n Tests that loading and saving serialized Torchscript with a versioned\n symbol won't persist the original function and will inline the\n versioned builtin.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 51, "language": "en" } }, { "id": 183837, "commit_id": "4dd0d9fae43583638f34257f97d5749ca4f2c00c", "repo": "textual", "path": "tests/css/test_stylesheet.py", "file_name": "test_stylesheet.py", "fun_name": "test_stylesheet_apply_takes_final_rule_in_specificity_clash", "commit_message": "Add various additional tests around CSS specificity", "code": "def test_stylesheet_apply_takes_final_rule_in_specificity_clash():\n \n css = \".a {background: red; color: lime;} .b {background: blue;}\"\n stylesheet = _make_stylesheet(css)\n node = DOMNode(classes=\"a b\", id=\"c\")\n stylesheet.apply(node)\n\n assert node.styles.color == Color(0, 255, 0) # color: lime\n assert 
node.styles.background == Color(0, 0, 255) # background: blue\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 62, "n_words": 39, "vocab_size": 31, "complexity": 1, "nloc": 7, "token_counts": 62, "n_ast_nodes": 105, "n_identifiers": 13, "random_cut": "def test_stylesheet_apply_takes_final_rule_in_specificity_clash():\n \n css = \".a {background: red; color: lime;} .b {background: blue;}\"\n stylesheet = _make_stylesheet(css)\n node = DOMNode(classes=\"a b\", id=\"c\")\n stylesheet.apply(node)\n\n assert node.styles.color == Color(0, 255, 0) # color: lime\n assert node.styles.backg", "d_id": 44359, "documentation": { "docstring": ".a and .b both contain background and have same specificity, so .b wins\n since it was declared last - the background should be blue.", "n_words": 24, "vocab_size": 21, "n_whitespaces": 26, "language": "en" } }, { "id": 46541, "commit_id": "2f5a567977e1219cab16c2548825a1b9eba07ab3", "repo": "airflow", "path": "airflow/migrations/versions/0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py", "file_name": "0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py", "fun_name": "upgrade", "commit_message": "Use Airflow.Base.metadata in FAB models (#22353)\n\nSince FAB models are now in airflow, it makes sense to monitor changes\r\nin them. Therefore we use Airflow.models.base.Base.metadata for FAB models", "code": "def upgrade():\n \n conn = op.get_bind()\n if conn.dialect.name == 'sqlite':\n op.execute('PRAGMA foreign_keys=OFF')\n with op.batch_alter_table('ab_view_menu', schema=None) as batch_op:\n batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name'])\n op.execute('PRAGMA foreign_keys=ON')\n elif conn.dialect.name == 'mysql':\n with op.batch_alter_table('ab_register_user', schema=None) as batch_op:\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=False)\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=False)\n with op.batch_alter_table('ab_user', schema=None) as batch_op:\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=False)\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=False)\n elif conn.dialect.name == 'mssql':\n with op.batch_alter_table('ab_register_user') as batch_op:\n # Drop the unique constraint on username and email\n constraints = get_mssql_table_constraints(conn, 'ab_register_user')\n for k, _ in constraints.get('UNIQUE').items():\n batch_op.drop_constraint(k, type_='unique')\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=False)\n batch_op.create_unique_constraint(None, ['username'])\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=False)\n with op.batch_alter_table('ab_user') as batch_op:\n # Drop the unique constraint on username and email\n constraints = get_mssql_table_constraints(conn, 'ab_user')\n for k, _ in constraints.get('UNIQUE').items():\n batch_op.drop_constraint(k, type_='unique')\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=False)\n batch_op.create_unique_constraint(None, ['username'])\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=False)\n batch_op.create_unique_constraint(None, ['email'])\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 408, "n_words": 116, 
"vocab_size": 53, "complexity": 6, "nloc": 30, "token_counts": 378, "n_ast_nodes": 652, "n_identifiers": 25, "random_cut": "def upgrade():\n \n conn = op.get_bind()\n if conn.dialect.name == 'sqlite':\n op.execute('PRAGMA foreign_keys=OFF')\n with op.batch_alter_table('ab_view_menu', schema=None) as batch_op:\n batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name'])\n op.execute('PRAGMA foreign_keys=ON')\n elif conn.dialect.name == 'mysql':\n with op.batch_alter_table('ab_register_user', schema=None) as batch_op:\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=False)\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=False)\n with op.batch_alter_table('ab_user', schema=None) as batch_op:\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=False)\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=False)\n elif conn.dialect.name == 'mssql':\n with op.batch_alter_table('ab_register_user') as batch_op:\n # Drop the unique constraint on username and email\n constraints = get_mssql_table", "d_id": 8919, "documentation": { "docstring": "Apply Update migration for FAB tables to add missing constraints", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 114082, "commit_id": "2a39e0ab3c81f09a227c50c98a3fb7ee57ec8fac", "repo": "mindsdb", "path": "mindsdb/migrations/versions/2022-02-09_27c5aca9e47e_test.py", "file_name": "2022-02-09_27c5aca9e47e_test.py", "fun_name": "upgrade", "commit_message": "migration", "code": "def upgrade():\n op.drop_table('ai_table')\n\n conn = op.get_bind()\n\n # views was created with unnamed fk. Therefore need recreate it\n op.create_table(\n 'view_tmp',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('company_id', sa.Integer(), nullable=True),\n sa.Column('query', sa.String(), nullable=False),\n sa.Column('integration_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['integration_id'], ['integration.id'], name='fk_integration_id'),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id')\n )\n conn.execute(text())\n op.drop_table('view')\n op.rename_table('view_tmp', 'view')\n\n op.create_table(\n 'analysis',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n\n with op.batch_alter_table('datasource', schema=None) as batch_op:\n batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True))\n batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id'])\n batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True))\n\n session = sa.orm.Session(bind=conn)\n dsatasources = conn.execute('select id, analysis from datasource').fetchall()\n for row in dsatasources:\n if row['analysis'] is not None:\n # NOTE 'returning' is relatively new in sqlite, so better will be use select after insert.\n conn.execute(\n text(), {\n 'id': row['id']\n }\n )\n analysis_id = conn.execute(text()).fetchall()\n conn.execute(\n text(), {\n 'analysis_id': analysis_id[0][0],\n 'id': row['id']\n }\n )\n\n with op.batch_alter_table('datasource', schema=None) as batch_op:\n batch_op.drop_column('analysis')\n\n op.create_table(\n 'file',\n sa.Column('id', 
sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('company_id', sa.Integer(), nullable=True),\n sa.Column('source_file_path', sa.String(), nullable=False),\n sa.Column('file_path', sa.String(), nullable=False),\n sa.Column('row_count', sa.Integer(), nullable=False),\n sa.Column('columns', mindsdb.interfaces.storage.db.Json(), nullable=False),\n # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ?????\n # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now()\n # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now), # ?????\n # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now, server_onupdate=datetime.datetime.now), # ????? erver_default=func.now()\n sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ?????\n sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now()\n sa.Column('analysis_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['analysis_id'], ['analysis.id'], name='fk_analysis_id'),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id')\n )\n\n # delete ds where data is none\n dsatasources = conn.execute(text('select * from datasource')).fetchall()\n for ds in dsatasources:\n if ds['data'] is None:\n conn.execute(text('delete from datasource where id = :id'), {'id': ds['id']})\n continue\n ds_data = json.loads(ds['data'])\n creation_info = json.loads(ds['creation_info'])\n datasource_name = ds_data.get('source_type')\n if datasource_name == 'file':\n created_at = None\n if isinstance(ds['created_at'], str):\n created_at = datetime.datetime.fromisoformat(ds['created_at'])\n elif isinstance(ds['created_at'], [float, int]):\n created_at = datetime.fromtimestamp(ds['created_at'])\n\n updated_at = None\n if isinstance(ds['updated_at'], str):\n updated_at = datetime.datetime.fromisoformat(ds['updated_at'])\n elif isinstance(ds['updated_at'], [float, int]):\n updated_at = datetime.fromtimestamp(ds['updated_at'])\n\n file = mindsdb.interfaces.storage.db.File(\n name=ds['name'],\n company_id=ds['company_id'],\n source_file_path=ds_data['source'],\n file_path=creation_info['args'][0],\n row_count=ds_data['row_count'],\n columns=ds_data['columns'],\n created_at=created_at,\n updated_at=updated_at,\n analysis_id=ds['analysis_id']\n )\n session.add(file)\n\n conn.execute(\n text(), {\n 'datasource_name': datasource_name,\n 'company_id': ds['company_id'],\n 'ds_class': creation_info['class'],\n 'id': ds['id']\n }\n )\n\n session.commit()\n\n op.rename_table('datasource', 'dataset')\n\n with op.batch_alter_table('dataset', schema=None) as batch_op:\n batch_op.create_foreign_key('fk_integration_id', 'integration', ['integration_id'], ['id'])\n\n # NOTE two different 'batch' is necessary, in other way FK is not creating\n with op.batch_alter_table('predictor', schema=None) as batch_op:\n batch_op.alter_column('datasource_id', new_column_name='dataset_id')\n with op.batch_alter_table('predictor', schema=None) as batch_op:\n batch_op.create_foreign_key('fk_dataset_id', 'dataset', ['dataset_id'], ['id'])\n with op.batch_alter_table('predictor', schema=None) as batch_op:\n 
batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id'])\n\n with op.batch_alter_table('integration', schema=None) as batch_op:\n batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id'])\n\n with op.batch_alter_table('dataset', schema=None) as batch_op:\n batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id'])\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1416, "n_words": 386, "vocab_size": 197, "complexity": 10, "nloc": 130, "token_counts": 1172, "n_ast_nodes": 1989, "n_identifiers": 70, "random_cut": "def upgrade():\n op.drop_table('ai_table')\n\n conn = op.get_bind()\n\n # views was created with unnamed fk. Therefore need recreate it\n op.create_table(\n 'view_tmp',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('company_id', sa.Integer(), nullable=True),\n sa.Column('query', sa.String(), nullable=False),\n sa.Column('integration_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['integration_id'], ['integration.id'], name='fk_integration_id'),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id')\n )\n conn.execute(text())\n op.drop_table('view')\n op.rename_table('view_tmp', 'view')\n\n op.create_table(\n 'analysis',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n\n with op.batch_alter_table('datasource', schema=None) as batch_op:\n batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True))\n batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id'])\n batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True))\n\n session = sa.orm.Session(bind=conn)\n dsatasources = conn.execute('select id, analysis from datasource').fetchall()\n for row in dsatasources:\n if row['analysis'] is not None:\n # NOTE 'returning' is relatively new in sqlite, so better wi", "d_id": 25090, "documentation": { "docstring": "\n insert into view_tmp (id, name, company_id, query, integration_id)\n select id, name, company_id, query, datasource_id from view;\n \n insert into analysis (analysis) select analysis from datasource where id = :id;\n \n select id from analysis order by id desc limit 1;\n \n update datasource set analysis_id = :analysis_id where id = :id\n \n update datasource\n set integration_id = (select id from integration where name = :datasource_name and company_id = :company_id),\n ds_class = :ds_class\n where id = :id\n ", "n_words": 72, "vocab_size": 40, "n_whitespaces": 263, "language": "en" } }, { "id": 260810, "commit_id": "60f16feaadaca28f9a1cc68d2f406201860d27e8", "repo": "scikit-learn", "path": "sklearn/cluster/_bisect_k_means.py", "file_name": "_bisect_k_means.py", "fun_name": "_predict_recursive", "commit_message": "MAINT Remove `x_squared_norms` arg from `k_means_lloyd` signature (#24264)\n\nCo-authored-by: Thomas J. Fan ", "code": "def _predict_recursive(self, X, sample_weight, cluster_node):\n \n if cluster_node.left is None:\n # This cluster has no subcluster. 
Labels are just the label of the cluster.\n return np.full(X.shape[0], cluster_node.label, dtype=np.int32)\n\n # Determine if data points belong to the left or right subcluster\n centers = np.vstack((cluster_node.left.center, cluster_node.right.center))\n if hasattr(self, \"_X_mean\"):\n centers += self._X_mean\n\n cluster_labels = _labels_inertia_threadpool_limit(\n X,\n sample_weight,\n centers,\n self._n_threads,\n return_inertia=False,\n )\n mask = cluster_labels == 0\n\n # Compute the labels for each subset of the data points.\n labels = np.full(X.shape[0], -1, dtype=np.int32)\n\n labels[mask] = self._predict_recursive(\n X[mask], sample_weight[mask], cluster_node.left\n )\n\n labels[~mask] = self._predict_recursive(\n X[~mask], sample_weight[~mask], cluster_node.right\n )\n\n return labels\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 310, "n_words": 95, "vocab_size": 67, "complexity": 3, "nloc": 22, "token_counts": 171, "n_ast_nodes": 254, "n_identifiers": 24, "random_cut": "def _predict_recursive(self, X, sample_weight, cluster_node):\n \n if cluster_node.left is None:\n # This cluster has no subcluster. Labels are just the label of the cluster.\n return np.full(X.shape[0], cluster_node.label, dtype=np.int32)\n\n # Determine if data points belong to the left or right subc", "d_id": 76511, "documentation": { "docstring": "Predict recursively by going down the hierarchical tree.\n\n Parameters\n ----------\n X : {ndarray, csr_matrix} of shape (n_samples, n_features)\n The data points, currently assigned to `cluster_node`, to predict between\n the subclusters of this node.\n\n sample_weight : ndarray of shape (n_samples,)\n The weights for each observation in X.\n\n cluster_node : _BisectingTree node object\n The cluster node of the hierarchical tree.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Index of the cluster each sample belongs to.\n ", "n_words": 74, "vocab_size": 51, "n_whitespaces": 192, "language": "en" } }, { "id": 262331, "commit_id": "dbe9da7f15544b83043f481a99e5bcb23e002dc9", "repo": "TTS", "path": "TTS/tts/models/vits.py", "file_name": "vits.py", "fun_name": "inference_voice_conversion", "commit_message": "Add Voice conversion inference support (#1337)\n\n* Add support for voice conversion inference\r\n\r\n* Cache d_vectors_by_speaker for fast inference using a bigger speakers.json\r\n\r\n* Rebase bug fix\r\n\r\n* Use the average d-vector for inference", "code": "def inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None):\n \n # compute spectrograms\n y = wav_to_spec(reference_wav, self.config.audio.fft_size, self.config.audio.hop_length, self.config.audio.win_length, center=False).transpose(1, 2)\n y_lengths = torch.tensor([y.size(-1)]).to(y.device)\n speaker_cond_src = reference_speaker_id if reference_speaker_id is not None else reference_d_vector\n speaker_cond_tgt = speaker_id if speaker_id is not None else d_vector\n # print(y.shape, y_lengths.shape)\n wav, _, _ = self.voice_conversion(y, y_lengths, speaker_cond_src, speaker_cond_tgt)\n return wav\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 117, "n_words": 54, "vocab_size": 42, "complexity": 3, "nloc": 7, "token_counts": 128, "n_ast_nodes": 187, "n_identifiers": 27, "random_cut": "def 
inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None):\n \n # com", "d_id": 77188, "documentation": { "docstring": "Inference for voice conversion\n\n Args:\n reference_wav (Tensor): Reference wavform. Tensor of shape [B, T]\n speaker_id (Tensor): speaker_id of the target speaker. Tensor of shape [B]\n d_vector (Tensor): d_vector embedding of target speaker. Tensor of shape `[B, C]`\n reference_speaker_id (Tensor): speaker_id of the reference_wav speaker. Tensor of shape [B]\n reference_d_vector (Tensor): d_vector embedding of the reference_wav speaker. Tensor of shape `[B, C]`\n ", "n_words": 61, "vocab_size": 25, "n_whitespaces": 130, "language": "en" } }, { "id": 48736, "commit_id": "48a21aa0eb3a95d32456c2a927eff9552a04231e", "repo": "django-rest-framework", "path": "tests/test_routers.py", "file_name": "test_routers.py", "fun_name": "test_nonconflicting_specified_basename", "commit_message": "raise ImproperlyConfigured exception if `basename` is not unique (#8438)\n\n* raise ImproperlyConfigured if basename already exists\r\n\r\n* rename already_registered function; return True/False\r\n\r\n* additional basename tests\r\n\r\n* additional basename tests\r\n\r\n* Update rest_framework/routers.py\r\n\r\nCo-authored-by: David Graves \r\nCo-authored-by: Asif Saif Uddin ", "code": "def test_nonconflicting_specified_basename(self):\n \n self.router.register(r'notes', NoteViewSet, basename='notes')\n self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='notes_kwduplicate')\n self.router.register(r'notes_duplicate', NoteViewSet, basename='notes_duplicate')\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 39, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 51, "n_ast_nodes": 85, "n_identifiers": 7, "random_cut": "def test_nonconflicting_specified_basename(self):\n \n self.router.register(r'notes', NoteViewSet, basename='notes')\n self.router.register(r'notes_kwduplicate', KWa", "d_id": 9587, "documentation": { "docstring": "\n Ensure 2 routers with the same model, and a distinct basename specified\n on each does not throw an exception\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 314340, "commit_id": "9b8c3e37bbee3dbaa949705c7ae7b29f521988e7", "repo": "core", "path": "tests/components/group/test_cover.py", "file_name": "test_cover.py", "fun_name": "test_state", "commit_message": "Improve group tests (#73630)", "code": "async def test_state(hass, setup_comp):\n \n state = hass.states.get(COVER_GROUP)\n # No entity has a valid state -> group state unknown\n assert state.state == STATE_UNKNOWN\n assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME\n assert state.attributes[ATTR_ENTITY_ID] == [\n DEMO_COVER,\n DEMO_COVER_POS,\n DEMO_COVER_TILT,\n DEMO_TILT,\n ]\n assert ATTR_ASSUMED_STATE not in state.attributes\n assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0\n assert ATTR_CURRENT_POSITION not in state.attributes\n assert ATTR_CURRENT_TILT_POSITION not in state.attributes\n\n # The group state is unknown if all group members are unknown or unavailable.\n for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n 
hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, STATE_UNAVAILABLE, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_UNKNOWN\n\n for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, STATE_UNKNOWN, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_UNKNOWN\n\n # At least one member opening -> group opening\n for state_1 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_OPENING,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n for state_2 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_OPENING,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n for state_3 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_OPENING,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, STATE_OPENING, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_OPENING\n\n # At least one member closing -> group closing\n for state_1 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n for state_2 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n for state_3 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, STATE_CLOSING, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_CLOSING\n\n # At least one member open -> group open\n for state_1 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_2 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_3 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, STATE_OPEN, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_OPEN\n\n # At least one member closed -> group closed\n for state_1 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_2 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_3 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, STATE_CLOSED, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_CLOSED\n\n # All group members removed from the state machine -> unknown\n hass.states.async_remove(DEMO_COVER)\n hass.states.async_remove(DEMO_COVER_POS)\n hass.states.async_remove(DEMO_COVER_TILT)\n 
hass.states.async_remove(DEMO_TILT)\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_UNKNOWN\n\n\n@pytest.mark.parametrize(\"config_count\", [(CONFIG_ATTRIBUTES, 1)])", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"config_count\", [(CONFIG_ATTRIBUTES, 1)])", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 1649, "n_words": 389, "vocab_size": 94, "complexity": 19, "nloc": 120, "token_counts": 807, "n_ast_nodes": 1196, "n_identifiers": 35, "random_cut": "async def test_state(hass, setup_comp):\n \n state = hass.states.get(COVER_GROUP)\n # No entity has a valid state -> group state unknown\n assert state.state == STATE_UNKNOWN\n assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME\n assert state.attributes[ATTR_ENTITY_ID] == [\n DEMO_COVER,\n DEMO_COVER_POS,\n DEMO_COVER_TILT,\n DEMO_TILT,\n ]\n assert ATTR_ASSUMED_STATE not in state.attributes\n assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0\n assert ATTR_CURRENT_POSITION not in state.attributes\n assert ATTR_CURRENT_TILT_POSITION not in state.attributes\n\n # The group state is unknown if all group members are unknown or unavailable.\n for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, STATE_UNAVAILABLE, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_UNKNOWN\n\n for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, STATE_UNKNOWN, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_UNKNOWN\n\n # At least one member opening -> group opening\n for state_1 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_OPENING,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n for state_2 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_OPENING,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n for state_3 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_OPENING,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, STATE_OPENING, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_OPENING\n\n # At least one member closing -> group closing\n for state_1 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n for state_2 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n for state_3 in (\n STATE_CLOSED,\n STATE_CLOSING,\n STATE_OPEN,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n ):\n hass.states.async_set(DEMO_COVER, state_1, {})\n hass.states.async_set(DEMO_COVER_POS, state_2, {})\n hass.states.async_set(DEMO_COVER_TILT, state_3, {})\n hass.states.async_set(DEMO_TILT, 
STATE_CLOSING, {})\n await hass.async_block_till_done()\n state = hass.states.get(COVER_GROUP)\n assert state.state == STATE_CLOSING\n\n # At least one member open -> group open\n for state_1 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_2 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN):\n for state_3 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAIL", "d_id": 112947, "documentation": { "docstring": "Test handling of state.\n\n The group state is unknown if all group members are unknown or unavailable.\n Otherwise, the group state is opening if at least one group member is opening.\n Otherwise, the group state is closing if at least one group member is closing.\n Otherwise, the group state is open if at least one group member is open.\n Otherwise, the group state is closed.\n ", "n_words": 65, "vocab_size": 28, "n_whitespaces": 83, "language": "en" } }, { "id": 109877, "commit_id": "e199c3b819f66a56f49657de0a9b3fb60c745b94", "repo": "matplotlib", "path": "lib/matplotlib/cbook/__init__.py", "file_name": "__init__.py", "fun_name": "connect", "commit_message": "Remove miscellaneous deprecations from 3.5", "code": "def connect(self, signal, func):\n \n if self._signals is not None:\n _api.check_in_list(self._signals, signal=signal)\n self._func_cid_map.setdefault(signal, {})\n proxy = _weak_or_strong_ref(func, self._remove_proxy)\n if proxy in self._func_cid_map[signal]:\n return self._func_cid_map[signal][proxy]\n cid = next(self._cid_gen)\n self._func_cid_map[signal][proxy] = cid\n self.callbacks.setdefault(signal, {})\n self.callbacks[signal][cid] = proxy\n return cid\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 128, "n_words": 36, "vocab_size": 25, "complexity": 4, "nloc": 15, "token_counts": 137, "n_ast_nodes": 173, "n_identifiers": 16, "random_cut": "def connect(self, signal, func):\n \n if self._signals is not None:\n _api.check_in_list(self._signals, signal=signal)\n self._func_cid_map.setdefault(signal, {})\n proxy = _weak_or_strong_ref(func, self._remove_proxy)\n ", "d_id": 23787, "documentation": { "docstring": "Register *func* to be called when signal *signal* is generated.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 67548, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/setup/setup_wizard/operations/taxes_setup.py", "file_name": "taxes_setup.py", "fun_name": "get_or_create_account", "commit_message": "style: format code with black", "code": "def get_or_create_account(company_name, account):\n\t\n\tdefault_root_type = \"Liability\"\n\troot_type = account.get(\"root_type\", default_root_type)\n\n\texisting_accounts = frappe.get_all(\n\t\t\"Account\",\n\t\tfilters={\"company\": company_name, \"root_type\": root_type},\n\t\tor_filters={\n\t\t\t\"account_name\": account.get(\"account_name\"),\n\t\t\t\"account_number\": account.get(\"account_number\"),\n\t\t},\n\t)\n\n\tif existing_accounts:\n\t\treturn frappe.get_doc(\"Account\", existing_accounts[0].name)\n\n\ttax_group = get_or_create_tax_group(company_name, root_type)\n\n\taccount[\"doctype\"] = \"Account\"\n\taccount[\"company\"] = company_name\n\taccount[\"parent_account\"] = tax_group\n\taccount[\"report_type\"] = \"Balance Sheet\"\n\taccount[\"account_type\"] = \"Tax\"\n\taccount[\"root_type\"] = root_type\n\taccount[\"is_group\"] = 0\n\n\tdoc = frappe.get_doc(account)\n\tdoc.flags.ignore_links = 
True\n\tdoc.flags.ignore_validate = True\n\tdoc.insert(ignore_permissions=True, ignore_mandatory=True)\n\treturn doc\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 43, "n_words": 69, "vocab_size": 51, "complexity": 2, "nloc": 26, "token_counts": 168, "n_ast_nodes": 294, "n_identifiers": 22, "random_cut": "def get_or_create_account(company_name, account):\n\t\n\tdefault_root_type = \"Liability\"\n\troot_type = account.get(\"root_type\", default_root_type)\n\n\texisting_accounts = frappe.get_all(\n\t\t\"Account\",\n\t\tfilters={\"company\": company_name, \"root_type\": root_type},\n\t\tor_filters={\n\t\t\t\"account_name\": account.get(\"account_name\"),\n\t\t", "d_id": 14551, "documentation": { "docstring": "\n\tCheck if account already exists. If not, create it.\n\tReturn a tax account or None.\n\t", "n_words": 15, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 101515, "commit_id": "dc18c74eea0c7837a820d27628cb12b0824fa30e", "repo": "faceswap", "path": "lib/gui/utils.py", "file_name": "utils.py", "fun_name": "set_default_options", "commit_message": "Bugfix: Preview for extract in batch mode", "code": "def set_default_options(self) -> None:\n \n default = self.cli_opts.get_option_values()\n logger.debug(default)\n self._gui_objects.default_options = default\n self.project.set_default_options()\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 47, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 12, "token_counts": 37, "n_ast_nodes": 64, "n_identifiers": 10, "random_cut": "def set_default_options(self) -> None:\n \n default = self.cli_opts.get_option_values()\n logger.debug(default)\n self.", "d_id": 20926, "documentation": { "docstring": " Set the default options for :mod:`lib.gui.projects`\n\n The Default GUI options are stored on Faceswap startup.\n\n Exposed as the :attr:`_default_opts` for a project cannot be set until after the main\n Command Tabs have been loaded.\n ", "n_words": 34, "vocab_size": 30, "n_whitespaces": 63, "language": "en" } }, { "id": 20326, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/formatters/html.py", "file_name": "html.py", "fun_name": "_format_lines", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _format_lines(self, tokensource):\n \n nocls = self.noclasses\n lsep = self.lineseparator\n tagsfile = self.tagsfile\n\n lspan = ''\n line = []\n for ttype, value in tokensource:\n try:\n cspan = self.span_element_openers[ttype]\n except KeyError:\n title = ' title=\"%s\"' % '.'.join(ttype) if self.debug_token_types else ''\n if nocls:\n css_style = self._get_css_inline_styles(ttype)\n if css_style:\n css_style = self.class2style[css_style][0]\n cspan = '' % (css_style, title)\n else:\n cspan = 
''\n else:\n css_class = self._get_css_classes(ttype)\n if css_class:\n cspan = '' % (css_class, title)\n else:\n cspan = ''\n self.span_element_openers[ttype] = cspan\n\n parts = self._translate_parts(value)\n\n if tagsfile and ttype in Token.Name:\n filename, linenumber = self._lookup_ctag(value)\n if linenumber:\n base, filename = os.path.split(filename)\n if base:\n base += '/'\n filename, extension = os.path.splitext(filename)\n url = self.tagurlformat % {'path': base, 'fname': filename,\n 'fext': extension}\n parts[0] = \"
    %s\" % \\\n (url, self.lineanchors, linenumber, parts[0])\n parts[-1] = parts[-1] + \"\"\n\n # for all but the last line\n for part in parts[:-1]:\n if line:\n if lspan != cspan:\n line.extend(((lspan and ''), cspan, part,\n (cspan and ''), lsep))\n else: # both are the same\n line.extend((part, (lspan and ''), lsep))\n yield 1, ''.join(line)\n line = []\n elif part:\n yield 1, ''.join((cspan, part, (cspan and ''), lsep))\n else:\n yield 1, lsep\n # for the last line\n if line and parts[-1]:\n if lspan != cspan:\n line.extend(((lspan and ''), cspan, parts[-1]))\n lspan = cspan\n else:\n line.append(parts[-1])\n elif parts[-1]:\n line = [cspan, parts[-1]]\n lspan = cspan\n # else we neither have to open a new span nor set lspan\n\n if line:\n line.extend(((lspan and ''), lsep))\n yield 1, ''.join(line)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1331, "n_words": 244, "vocab_size": 124, "complexity": 26, "nloc": 63, "token_counts": 453, "n_ast_nodes": 751, "n_identifiers": 42, "random_cut": "def _format_lines(self, tokensource):", "d_id": 3324, "documentation": { "docstring": "\n Just format the tokens, without any wrapping tags.\n Yield individual lines.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 4250, "commit_id": "56bf982cb96f831fe04f5e44a92ee4a669b9e16a", "repo": "airbyte", "path": "octavia-cli/octavia_cli/apply/resources.py", "file_name": "resources.py", "fun_name": "_get_remote_resource", "commit_message": "🐙 octavia-cli: `apply` connections (#10881)", "code": "def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]:\n \n search_results = self._search().get(f\"{self.resource_type}s\", [])\n if len(search_results) > 1:\n raise DuplicateResourceError(\"Two or more ressources exist with the same name.\")\n if len(search_results) == 1:\n return search_results[0]\n else:\n return None\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 101, "n_words": 33, "vocab_size": 29, "complexity": 3, "nloc": 16, "token_counts": 64, "n_ast_nodes": 111, "n_identifiers": 13, "random_cut": "def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]:\n \n search_results = self._search().get(f\"{self.resource_type}s\", [])\n if len(search_results) > 1:\n raise DuplicateResourceError(\"Two or more ressources exist with the same name.\")\n if len(search_results) == 1:\n return search_results[0]\n else:\n return None\n", "d_id": 643, "documentation": { "docstring": "Find the remote resource on the Airbyte instance associated with the current resource.\n\n Raises:\n DuplicateResourceError: raised if the search results return multiple resources.\n\n Returns:\n Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: The remote resource found.\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 74, "language": "en" } }, { "id": 271592, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "_get_compile_args", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _get_compile_args(self, user_metrics=True):\n \n self._assert_compile_was_called()\n # pylint: disable=protected-access\n\n saved_metrics = self.compiled_metrics._user_metrics\n 
saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics\n\n if not user_metrics:\n if saved_metrics is not None:\n saved_metrics = self.compiled_metrics._metrics\n if saved_weighted_metrics is not None:\n saved_weighted_metrics = self.compiled_metrics._weighted_metrics\n\n compile_args = {\n \"optimizer\": self.optimizer,\n \"loss\": self.compiled_loss._user_losses,\n \"metrics\": saved_metrics,\n \"weighted_metrics\": saved_weighted_metrics,\n \"loss_weights\": self.compiled_loss._user_loss_weights,\n }\n # pylint: enable=protected-access\n return compile_args\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 228, "n_words": 51, "vocab_size": 34, "complexity": 4, "nloc": 17, "token_counts": 95, "n_ast_nodes": 158, "n_identifiers": 16, "random_cut": "def _get_compile_args(self, user_metrics=True):\n \n self._assert_compile_was_called()\n # pylint: disable=protected-a", "d_id": 80817, "documentation": { "docstring": "Used for saving or cloning a Model.\n\n Args:\n user_metrics: Whether to return user-supplied metrics or `Metric` objects.\n Defaults to returning the user-supplied metrics.\n\n Returns:\n Dictionary of arguments that were used when compiling the model.\n ", "n_words": 34, "vocab_size": 30, "n_whitespaces": 84, "language": "en" } }, { "id": 160170, "commit_id": "729ad4f92420231e2a7009b3223c6c7620b8b808", "repo": "numpy", "path": "numpy/f2py/tests/test_f2py2e.py", "file_name": "test_f2py2e.py", "fun_name": "test_norestexdoc", "commit_message": "TST: Initialize f2py2e tests of the F2PY CLI (#20668)\n\nIncreases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.\r\n\r\nMore importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.", "code": "def test_norestexdoc(capfd, hello_world_f90, monkeypatch):\n \n ipath = Path(hello_world_f90)\n mname = \"blah\"\n monkeypatch.setattr(sys, \"argv\",\n f'f2py -m {mname} {ipath} --no-rest-doc'.split())\n\n with util.switchdir(ipath.parent):\n f2pycli()\n out, _ = capfd.readouterr()\n assert \"ReST Documentation is saved to file\" not in out\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 93, "n_words": 34, "vocab_size": 32, "complexity": 1, "nloc": 9, "token_counts": 61, "n_ast_nodes": 115, "n_identifiers": 17, "random_cut": "def test_norestexdoc(capfd, hello_world_f90, monkeypatch):\n \n ipath = Path(hello_world_f90)\n mname = \"blah\"\n monkeypatch.setattr(sys, \"argv\",\n ", "d_id": 38542, "documentation": { "docstring": "Ensures that TeX documentation is written out\n\n CLI :: --no-rest-doc\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 275342, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v1.py", "file_name": "optimizer_v1.py", "fun_name": "set_weights", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def set_weights(self, weights):\n \n params = self.weights\n if len(params) != len(weights):\n raise ValueError(\n \"Length of the specified weight list (\"\n + str(len(weights))\n + \") does not match the number of weights \"\n \"of the optimizer (\" + str(len(params)) + \")\"\n )\n weight_value_tuples = []\n param_values = 
backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError(\n \"Optimizer weight shape \"\n + str(pv.shape)\n + \" not compatible with \"\n \"provided weight shape \" + str(w.shape)\n )\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 341, "n_words": 82, "vocab_size": 56, "complexity": 4, "nloc": 21, "token_counts": 125, "n_ast_nodes": 212, "n_identifiers": 18, "random_cut": "def set_weights(self, weights):\n \n params = self.weights\n if len(params) != len(weights):\n raise ValueError(\n \"Length of the specified weight list (\"\n + str(len(weights))\n + \") does not match the number of weights \"\n \"of the optimizer (\" + str(len(params)) + \")\"\n )\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError(\n \"Optimizer weight shape \"\n + ", "d_id": 81383, "documentation": { "docstring": "Sets the weights of the optimizer, from Numpy arrays.\n\n Should only be called after computing the gradients\n (otherwise the optimizer has no weights).\n\n Args:\n weights: a list of Numpy arrays. The number of arrays and their shape\n must match number of the dimensions of the weights of the optimizer\n (i.e. it should match the output of `get_weights`).\n\n Raises:\n ValueError: in case of incompatible weight shapes.\n ", "n_words": 65, "vocab_size": 45, "n_whitespaces": 148, "language": "en" } }, { "id": 271840, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_utils_v1.py", "file_name": "training_utils_v1.py", "fun_name": "extract_tensors_from_dataset", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def extract_tensors_from_dataset(dataset):\n \n iterator = get_iterator(dataset)\n inputs, targets, sample_weight = unpack_iterator_input(iterator)\n return inputs, targets, sample_weight\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 14, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 28, "n_ast_nodes": 46, "n_identifiers": 8, "random_cut": "def extract_tensors_from_dataset(dataset):\n \n iterator = get_iterator(dataset)\n inputs, targets, sample_weight = unpack_iterator_input(iterator)\n return inputs, targets, sample_weight\n\n", "d_id": 80860, "documentation": { "docstring": "Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.\n\n Args:\n dataset: Dataset instance.\n\n Returns:\n Tuple of tensors `x, y, weights`. 
`y` and `weights` entry may be None.\n ", "n_words": 29, "vocab_size": 26, "n_whitespaces": 48, "language": "en" } }, { "id": 204808, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/base.py", "file_name": "base.py", "fun_name": "_set_autocommit", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _set_autocommit(self, autocommit):\n \n raise NotImplementedError(\n \"subclasses of BaseDatabaseWrapper may require a _set_autocommit() method\"\n )\n\n # ##### Generic transaction management methods #####\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 56, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 4, "token_counts": 13, "n_ast_nodes": 26, "n_identifiers": 4, "random_cut": "def _set_autocommit(self, autocommit):\n \n raise NotImplementedError(\n \"subclasses of BaseDatabaseWrapper ", "d_id": 50895, "documentation": { "docstring": "\n Backend-specific implementation to enable or disable autocommit.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 72413, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/generic/multiple_upload.py", "file_name": "multiple_upload.py", "fun_name": "get_edit_upload_form_context_data", "commit_message": "Reformat with black", "code": "def get_edit_upload_form_context_data(self):\n \n edit_form_class = self.get_edit_form_class()\n return {\n self.context_upload_name: self.upload_object,\n \"edit_action\": reverse(\n self.edit_upload_url_name, args=(self.upload_object.id,)\n ),\n \"delete_action\": reverse(\n self.delete_upload_url_name, args=(self.upload_object.id,)\n ),\n \"form\": edit_form_class(\n instance=self.object,\n prefix=\"%s-%d\" % (self.edit_upload_form_prefix, self.upload_object.id),\n user=self.request.user,\n ),\n }\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 209, "n_words": 29, "vocab_size": 25, "complexity": 1, "nloc": 16, "token_counts": 100, "n_ast_nodes": 155, "n_identifiers": 17, "random_cut": "def get_edit_upload_form_context_data(self):\n \n edit_form_class = self.get_edit_form_class()\n return {\n self.context_upload_name: self.upload_object,\n \"edit_action\": reverse(\n self.edit_upload_url", "d_id": 15886, "documentation": { "docstring": "\n Return the context data necessary for rendering the HTML form for supplying the\n metadata to turn an upload object into a final object\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 45, "language": "en" } }, { "id": 207841, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_overriding_has_module_permission", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_overriding_has_module_permission(self):\n \n articles = Article._meta.verbose_name_plural.title()\n sections = Section._meta.verbose_name_plural.title()\n index_url = reverse(\"admin7:index\")\n\n self.client.force_login(self.superuser)\n response = self.client.get(index_url)\n self.assertContains(response, sections)\n self.assertNotContains(response, articles)\n self.client.logout()\n\n self.client.force_login(self.viewuser)\n response = self.client.get(index_url)\n self.assertNotContains(response, \"admin_views\")\n 
self.assertNotContains(response, articles)\n self.client.logout()\n\n self.client.force_login(self.adduser)\n response = self.client.get(index_url)\n self.assertNotContains(response, \"admin_views\")\n self.assertNotContains(response, articles)\n self.client.logout()\n\n self.client.force_login(self.changeuser)\n response = self.client.get(index_url)\n self.assertNotContains(response, \"admin_views\")\n self.assertNotContains(response, articles)\n self.client.logout()\n\n self.client.force_login(self.deleteuser)\n response = self.client.get(index_url)\n self.assertNotContains(response, articles)\n\n # The app list displays Sections but not Articles as the latter has\n # ModelAdmin.has_module_permission() = False.\n self.client.force_login(self.superuser)\n response = self.client.get(reverse(\"admin7:app_list\", args=(\"admin_views\",)))\n self.assertContains(response, sections)\n self.assertNotContains(response, articles)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 310, "n_words": 79, "vocab_size": 39, "complexity": 1, "nloc": 31, "token_counts": 280, "n_ast_nodes": 459, "n_identifiers": 24, "random_cut": "def test_overriding_has_module_permission(self):\n \n articles = Article._meta.verbose_name_plural.title()\n sections = Section._meta.verbose_name_plural.title()\n index_url = reverse(\"admin7:index\")\n\n self.client.force_login(self.superuser)\n response = self.client.get(index_url)\n self.assertContains(response, sections)\n self.assertNotContains(response, articles", "d_id": 52128, "documentation": { "docstring": "\n If has_module_permission() always returns False, the module shouldn't\n be displayed on the admin index page for any users.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 205960, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/forms/forms.py", "file_name": "forms.py", "fun_name": "non_field_errors", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def non_field_errors(self):\n \n return self.errors.get(\n NON_FIELD_ERRORS,\n self.error_class(error_class=\"nonfield\", renderer=self.renderer),\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 51, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 31, "n_ast_nodes": 51, "n_identifiers": 7, "random_cut": "def non_field_errors(self):\n \n return self.errors.get(\n NON_FIELD_ERRORS,\n self.error_class(error_class", "d_id": 51299, "documentation": { "docstring": "\n Return an ErrorList of errors that aren't associated with a particular\n field -- i.e., from Form.clean(). 
Return an empty ErrorList if there\n are none.\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 53, "language": "en" } }, { "id": 36152, "commit_id": "0a057201a96565df29984d716f660fd8d634329a", "repo": "transformers", "path": "src/transformers/models/van/modeling_van.py", "file_name": "modeling_van.py", "fun_name": "_set_gradient_checkpointing", "commit_message": "Visual Attention Network (VAN) (#16027)\n\n* encoder works\r\n\r\n* addded files\r\n\r\n* norm in stage\r\n\r\n* convertion script\r\n\r\n* tests\r\n\r\n* fix copies\r\n\r\n* make fix-copies\r\n\r\n* fixed __init__\r\n\r\n* make fix-copies\r\n\r\n* fix\r\n\r\n* shapiro test needed\r\n\r\n* make fix-copie\r\n\r\n* minor changes\r\n\r\n* make style + quality\r\n\r\n* minor refactor conversion script\r\n\r\n* rebase + tests\r\n\r\n* removed unused variables\r\n\r\n* updated doc\r\n\r\n* toctree\r\n\r\n* CI\r\n\r\n* doc\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* resolved conversations\r\n\r\n* make fixup\r\n\r\n* config passed to modules\r\n\r\n* config passed to modules\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* conversations\r\n\r\n* conversations\r\n\r\n* copyrights\r\n\r\n* normal test\r\n\r\n* tests\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>", "code": "def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, VanModel):\n module.gradient_checkpointing = value\n\n\nVAN_START_DOCSTRING = r\n\nVAN_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer.\",\n VAN_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer.\",\n VAN_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 59, "n_words": 40, "vocab_size": 36, "complexity": 2, "nloc": 3, "token_counts": 24, "n_ast_nodes": 64, "n_identifiers": 10, "random_cut": "def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, V", "d_id": 6577, "documentation": { "docstring": "\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`VanConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See\n [`AutoFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all stages. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "n_words": 128, "vocab_size": 88, "n_whitespaces": 248, "language": "en" } }, { "id": 61396, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py", "file_name": "versioncontrol.py", "fun_name": "update", "commit_message": "upd; format", "code": "def update(self, dest, url, rev_options):\n # type: (str, HiddenText, RevOptions) -> None\n \n raise NotImplementedError\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 35, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 23, "n_identifiers": 6, "random_cut": "def update(self, dest, url, rev_options):\n # type: (str, HiddenText, RevOptions) -> None\n \n raise NotImplementedError\n", "d_id": 12544, "documentation": { "docstring": "\n Update an already-existing repo to the given ``rev_options``.\n\n Args:\n rev_options: a RevOptions object.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 44, "language": "en" } }, { "id": 177721, "commit_id": "b2aa62dc675036f7695c0b09dd509617ba9df90d", "repo": "label-studio", "path": "label_studio/webhooks/utils.py", "file_name": "utils.py", "fun_name": "get_nested_field", "commit_message": "fix: DEV-1725: Add ANNOTATIONS_CREATED webhook action to predictions to annotations action (#2052)\n\n* fix: DEV-1725: Add ANNOTATIONS_CREATED webhook action to predictions to annotations action\r\n\r\n* Update predictions_to_annotations.py\r\n\r\nCo-authored-by: Max Tkachenko ", "code": "def get_nested_field(value, field):\n \n if field == '__self__':\n return value\n fields = field.split('__')\n for fld in fields:\n if isinstance(value, list):\n value = [getattr(v, fld) for v in value]\n else:\n value = getattr(value, fld)\n return value", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 92, "n_words": 34, "vocab_size": 24, "complexity": 5, "nloc": 10, "token_counts": 62, "n_ast_nodes": 101, "n_identifiers": 10, "random_cut": "def get_nested_field(value, field):\n \n if field == '__self__':\n return value\n fields =", "d_id": 42482, "documentation": { "docstring": "\n Get nested field from list of objects or single instance\n :param value: Single instance or list to look up field\n :param field: Field to lookup\n :return: List or single instance of looked up field\n ", "n_words": 34, "vocab_size": 22, "n_whitespaces": 50, "language": "en" } }, { "id": 183841, "commit_id": "4dd0d9fae43583638f34257f97d5749ca4f2c00c", "repo": "textual", "path": "tests/css/test_stylesheet.py", "file_name": "test_stylesheet.py", "fun_name": "test_stylesheet_many_classes_dont_overrule_id", "commit_message": "Add various additional tests around CSS specificity", "code": "def test_stylesheet_many_classes_dont_overrule_id():\n \n css = \"#id {color: red;} .a.b.c.d {color: blue;}\"\n stylesheet = _make_stylesheet(css)\n node = DOMNode(classes=\"a b c d\", id=\"id\")\n stylesheet.apply(node)\n\n assert node.styles.color == Color(255, 0, 0)\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 
27, "vocab_size": 24, "complexity": 1, "nloc": 6, "token_counts": 47, "n_ast_nodes": 82, "n_identifiers": 12, "random_cut": "def test_stylesheet_many_classes_dont_overrule_id():\n \n css = \"#id {color: red;} .a.b.c.d {color: blue;}\"\n stylesheet = _make_stylesheet(css)\n node = DOMNode(classes=\"a b c d\", id=\"id\")\n style", "d_id": 44362, "documentation": { "docstring": "#id is further to the left in the specificity tuple than class, and\n a selector containing multiple classes cannot take priority over even a\n single class.", "n_words": 26, "vocab_size": 24, "n_whitespaces": 31, "language": "en" } }, { "id": 20220, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/macos.py", "file_name": "macos.py", "fun_name": "site_data_dir", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def site_data_dir(self) -> str:\n \n return self._append_app_name_and_version(\"/Library/Application Support\")\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 15, "n_ast_nodes": 29, "n_identifiers": 4, "random_cut": "def site_data_dir(self) -> str:\n \n return self._append_", "d_id": 3272, "documentation": { "docstring": ":return: data directory shared by users, e.g. 
``/Library/Application Support/$appname/$version``", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 181764, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/tpot_tests.py", "file_name": "tpot_tests.py", "fun_name": "test_warm_start", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_warm_start():\n \n tpot_obj = TPOTClassifier(\n random_state=42,\n population_size=1,\n offspring_size=2,\n generations=1,\n verbosity=0,\n config_dict='TPOT light',\n warm_start=True)\n tpot_obj.fit(pretest_X, pretest_y)\n\n assert tpot_obj._pop is not None\n assert tpot_obj._pareto_front is not None\n\n first_pop = tpot_obj._pop\n tpot_obj.random_state = 21\n tpot_obj.fit(pretest_X, pretest_y)\n\n assert tpot_obj._pop == first_pop\n\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 113, "n_words": 37, "vocab_size": 25, "complexity": 1, "nloc": 16, "token_counts": 83, "n_ast_nodes": 126, "n_identifiers": 16, "random_cut": "def test_warm_start():\n \n tpot_obj = TPOTClassifier(\n random_state=42,\n population_size=1,\n offspring_size=2,\n generations=1,\n verbosity=0,\n config_dict='TPOT light',\n warm_start=True)\n tpot_obj.fit(pretest_X, pretest_y)\n\n assert tpot_obj._pop is not None\n assert tpot_obj._pareto_front is not None\n\n first_pop = tpot_obj._pop\n tpot_obj.random_state = 21\n tpot_obj.fit(pretest_X, pret", "d_id": 43550, "documentation": { "docstring": "Assert that the TPOT warm_start flag stores the pop and pareto_front from the first run.", "n_words": 15, "vocab_size": 13, "n_whitespaces": 14, "language": "en" } }, { "id": 158212, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "load_data_wiki", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def load_data_wiki(batch_size, max_len):\n \n num_workers = d2l.get_dataloader_workers()\n data_dir = d2l.download_extract('wikitext-2', 'wikitext-2')\n paragraphs = _read_wiki(data_dir)\n train_set = _WikiTextDataset(paragraphs, max_len)\n train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True,\n num_workers=num_workers)\n return train_iter, train_set.vocab\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 85, "n_words": 26, "vocab_size": 22, "complexity": 1, "nloc": 8, "token_counts": 65, "n_ast_nodes": 105, "n_identifiers": 18, "random_cut": "def load_data_wiki(batch_size, max_len):\n \n num_workers = d2l.get_dataloader_workers()\n data_dir = d2l.download_extract('wikitext-2', 'wikitext-2')\n paragraphs = _read_wiki(data_dir)\n train_set = _WikiTextDataset(paragraphs, max_len)\n train_iter = gluon.data.DataLoader(train_set, batch_size, s", "d_id": 37381, 
"documentation": { "docstring": "Load the WikiText-2 dataset.\n\n Defined in :numref:`subsec_prepare_mlm_data`", "n_words": 7, "vocab_size": 7, "n_whitespaces": 9, "language": "en" } }, { "id": 42087, "commit_id": "762db897b52d16ab2f164d5103df4cc26c1d0503", "repo": "seaborn", "path": "seaborn/_core/plot.py", "file_name": "plot.py", "fun_name": "save", "commit_message": "Add rudimentary themeing support (#2929)\n\n* WIP Plot.theme\r\n\r\n* Add default values for theme to match set_theme()\r\n\r\n* Depend on matplotib style defaults and update rcParams more selectively\r\n\r\n* Fix lines test\r\n\r\n* Improve test coverage", "code": "def save(self, loc, **kwargs) -> Plot:\n \n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 16, "token_counts": 38, "n_ast_nodes": 66, "n_identifiers": 8, "random_cut": "def save(self, loc, **kwargs) -> Plot:\n \n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n", "d_id": 7483, "documentation": { "docstring": "\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n ", "n_words": 43, "vocab_size": 32, "n_whitespaces": 119, "language": "en" } }, { "id": 85028, "commit_id": "582d5b0aa31ac79a5ee1af95b2e71c4bfc53d5aa", "repo": "zulip", "path": "zerver/tests/test_signup.py", "file_name": "test_signup.py", "fun_name": "test_create_realm_no_creation_key", "commit_message": "realm_creation: Rework error pages.\n\nThe previous error page was inadequate for serving the two different\nscenarios where we show errors in realm_creations, in particular\ncontaining a misleading sentence about realm creation being disabled\n(even in the case where it was actually enabled and the user simply had\nan expired link).", "code": "def test_create_realm_no_creation_key(self) -> None:\n \n email = \"user1@test.com\"\n\n with self.settings(OPEN_REALM_CREATION=False):\n # Create new realm with the email, but no creation key.\n result = self.client_post(\"/new/\", {\"email\": email})\n self.assertEqual(result.status_code, 200)\n self.assert_in_response(\"Organization creation link required\", result)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 97, "n_words": 32, "vocab_size": 29, "complexity": 1, "nloc": 10, "token_counts": 53, "n_ast_nodes": 96, "n_identifiers": 10, "random_cut": "def test_create_realm_no_creation_key(self) -> None:\n \n email = \"user1@test.com\"\n\n with self.settings(OPEN_REALM_CREATION=False):\n # Create new realm with the email, but no creation key.\n ", "d_id": 17908, "documentation": { "docstring": "\n Trying to create a realm without a creation_key should fail when\n OPEN_REALM_CREATION is false.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 100574, "commit_id": "bdbbad4d310fb606b6f412aa81e9f57ccd994e97", "repo": "faceswap", "path": "lib/gpu_stats/nvidia.py", "file_name": 
"nvidia.py", "fun_name": "_get_device_names", "commit_message": "Refactor lib.gpu_stats (#1218)\n\n* inital gpu_stats refactor\r\n\r\n* Add dummy CPU Backend\r\n\r\n* Update Sphinx documentation", "code": "def _get_device_names(self) -> List[str]:\n \n names = [pynvml.nvmlDeviceGetName(handle).decode(\"utf-8\")\n for handle in self._handles]\n self._log(\"debug\", f\"GPU Devices: {names}\")\n return names\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 61, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 12, "token_counts": 43, "n_ast_nodes": 77, "n_identifiers": 11, "random_cut": "def _get_device_names(self) -> List[str]:\n \n names = [pynvml.nvmlDeviceGetName(handle).d", "d_id": 20038, "documentation": { "docstring": " Obtain the list of names of connected Nvidia GPUs as identified in :attr:`_handles`.\n\n Returns\n -------\n list\n The list of connected Nvidia GPU names\n ", "n_words": 23, "vocab_size": 16, "n_whitespaces": 63, "language": "en" } }, { "id": 249592, "commit_id": "285b9e9b6c3558718e7d4f513062e277948ac35d", "repo": "synapse", "path": "tests/push/test_push_rule_evaluator.py", "file_name": "test_push_rule_evaluator.py", "fun_name": "test_delayed_message", "commit_message": "Speed up calculating push actions in large rooms (#13973)\n\nWe move the expensive check of visibility to after calculating push actions, avoiding the expensive check for users who won't get pushed anyway.\r\n\r\nI think this should have a big impact on rooms with large numbers of local users that have pushed disabled.", "code": "def test_delayed_message(self) -> None:\n \n user1 = UserID.from_string(self.user_id1)\n\n # Send a message before user2 joins\n event_id1 = self.create_and_send_event(self.room_id, user1)\n\n # Have user2 join the room\n self.helper.join(self.room_id, self.user_id2, tok=self.tok2)\n\n # They start off with no notifications\n self.assertEqual(self.get_notif_count(self.user_id2), 0)\n\n # Send another message that references the event before the join to\n # simulate a \"delayed\" event\n self.create_and_send_event(self.room_id, user1, prev_event_ids=[event_id1])\n\n # user2 should not be notified about it, because they can't see it.\n self.assertEqual(self.get_notif_count(self.user_id2), 0)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 162, "n_words": 71, "vocab_size": 52, "complexity": 1, "nloc": 10, "token_counts": 96, "n_ast_nodes": 154, "n_identifiers": 17, "random_cut": "def test_delayed_message(self) -> None:\n \n user1 = UserID.from_string(self.user_id1)\n\n # Send a message before user2 joins\n event_id1 = self.create_and_send_event(self.room_id, user1)\n\n # Have user2 join the room\n self.helper.join(self.room_id, self.user_id2, tok=self.tok2)\n\n # They start off with no notifications\n self.assertEqual(self.get_notif_count(self.user_id2), 0)\n\n # Send another message tha", "d_id": 73013, "documentation": { "docstring": "Test that a delayed message that was from before a user joined\n doesn't cause a notification for the joined user.\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 34, "language": "en" } }, { "id": 260409, "commit_id": "9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/tests/test_glm.py", "file_name": "test_glm.py", "fun_name": "test_glm_regression_unpenalized_hstacked_X", 
"commit_message": "TST tight tests for GLMs (#23619)\n\nCo-authored-by: Olivier Grisel ", "code": "def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):\n \n model, X, y, coef, _, _, _ = glm_dataset\n n_samples, n_features = X.shape\n alpha = 0 # unpenalized\n params = dict(\n alpha=alpha,\n fit_intercept=fit_intercept,\n # solver=solver, # only lbfgs available\n tol=1e-12,\n max_iter=1000,\n )\n\n model = clone(model).set_params(**params)\n if fit_intercept:\n intercept = coef[-1]\n coef = coef[:-1]\n if n_samples > n_features:\n X = X[:, :-1] # remove intercept\n X = 0.5 * np.concatenate((X, X), axis=1)\n else:\n # To know the minimum norm solution, we keep one intercept column and do\n # not divide by 2. Later on, we must take special care.\n X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]]\n else:\n intercept = 0\n X = 0.5 * np.concatenate((X, X), axis=1)\n assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)\n\n with warnings.catch_warnings():\n if fit_intercept and n_samples <= n_features:\n # XXX: Investigate if the lack of convergence in this case should be\n # considered a bug or not.\n warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)\n model.fit(X, y)\n\n if fit_intercept and n_samples <= n_features:\n # Here we take special care.\n model_intercept = 2 * model.intercept_\n model_coef = 2 * model.coef_[:-1] # exclude the other intercept term.\n # For minimum norm solution, we would have\n # assert model.intercept_ == pytest.approx(model.coef_[-1])\n else:\n model_intercept = model.intercept_\n model_coef = model.coef_\n\n rtol = 6e-5\n if n_samples > n_features:\n assert model_intercept == pytest.approx(intercept)\n assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4)\n else:\n # As it is an underdetermined problem, prediction = y. The following shows that\n # we get a solution, i.e. a (non-unique) minimum of the objective function ...\n assert_allclose(model.predict(X), y, rtol=1e-6)\n if fit_intercept:\n # Same as in test_glm_regression_unpenalized.\n # But it is not the minimum norm solution. 
Otherwise the norms would be\n # equal.\n norm_solution = np.linalg.norm(\n 0.5 * np.r_[intercept, intercept, coef, coef]\n )\n norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])\n assert norm_model > (1 + 1e-12) * norm_solution\n # For minimum norm solution, we would have\n # assert model.intercept_ == pytest.approx(model.coef_[-1])\n else:\n assert model_intercept == pytest.approx(intercept)\n assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol)\n\n\n@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [True, False])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [True, False])", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 777, "n_words": 314, "vocab_size": 170, "complexity": 9, "nloc": 48, "token_counts": 414, "n_ast_nodes": 664, "n_identifiers": 49, "random_cut": "def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):\n \n model, X, y, coef, _, _, _ = glm_dataset\n n_samples, n_features = X.shape\n alpha = 0 # unpenalized\n params = dict(\n alpha=alpha,\n fit_intercept=fit_intercept,\n # solver=solver, # only lbfgs available\n tol=1e-12,\n max_iter=1000,\n )\n\n model = clone(model).set_params(**params)\n if fit_intercept:\n intercept = coef[-1]\n coef = coef[:-1]\n if n_samples > n_features:\n X = X[:, :-1] # remove intercept\n X = 0.5 * np.concatenate((X, X), axis=1)\n else:\n # To know the minimum norm solution, we keep one intercept column and do\n # not divide by 2. Later on, we must take special care.\n X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]]\n else:\n intercept = 0\n X = 0.5 * np.concatenate((X, X), axis=1)\n assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)\n\n with warnings.catch_warnings():\n if fit_intercept and n_samples <= n_features:\n # XXX: Investigate if the lack of convergence in this case should be\n # considered a bug or not.\n warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)\n model.fit(X, y)\n\n if fit_intercept and n_samples <= n_features:\n # Here we take special care.\n model_intercept = 2 * model.intercept_\n model_coef = 2 * model.coef_[:-1] # exclude the ot", "d_id": 76233, "documentation": { "docstring": "Test that unpenalized GLM converges for all solvers to correct solution.\n\n We work with a simple constructed data set with known solution.\n GLM fit on [X] is the same as fit on [X, X]/2.\n For long X, [X, X] is a singular matrix and we check against the minimum norm\n solution:\n min ||w||_2 subject to w = argmin deviance(X, y, w)\n ", "n_words": 61, "vocab_size": 51, "n_whitespaces": 83, "language": "en" } }, { "id": 249612, "commit_id": "b4ec4f5e71a87d5bdc840a4220dfd9a34c54c847", "repo": "synapse", "path": "tests/storage/test_event_push_actions.py", "file_name": "test_event_push_actions.py", "fun_name": "test_count_aggregation_threads", "commit_message": "Track notification counts per thread (implement MSC3773). 
(#13776)\n\nWhen retrieving counts of notifications segment the results based on the\r\nthread ID, but choose whether to return them as individual threads or as\r\na single summed field by letting the client opt-in via a sync flag.\r\n\r\nThe summarization code is also updated to be per thread, instead of per\r\nroom.", "code": "def test_count_aggregation_threads(self) -> None:\n \n\n user_id, token, _, other_token, room_id = self._create_users_and_room()\n thread_id: str\n\n last_event_id: str\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 43, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 69, "token_counts": 434, "n_ast_nodes": 49, "n_identifiers": 11, "random_cut": "def test_count_aggregation_threads(self) -> None:\n \n\n ", "d_id": 73022, "documentation": { "docstring": "\n This is essentially the same test as test_count_aggregation, but adds\n events to the main timeline and to a thread.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 41, "language": "en" } }, { "id": 248249, "commit_id": "d38d242411b8910dfacde1e61fd3a0ec5cbcaa66", "repo": "synapse", "path": "tests/config/test_cache.py", "file_name": "test_cache.py", "fun_name": "test_global_instantiated_before_config_load", "commit_message": "Reload cache factors from disk on SIGHUP (#12673)", "code": "def test_global_instantiated_before_config_load(self):\n \n cache = LruCache(100)\n add_resizable_cache(\"foo\", cache_resize_callback=cache.set_cache_factor)\n self.assertEqual(cache.max_size, 50)\n\n config = {\"caches\": {\"global_factor\": 4}}\n self.config.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.config.resize_all_caches()\n\n self.assertEqual(cache.max_size, 400)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 76, "n_ast_nodes": 130, "n_identifiers": 14, "random_cut": "def test_global_instantiated_before_config_load(self):\n \n cache = LruCache(100)\n add_resizable_cache(\"foo\", cache_resize_callback=cache.set_cache_factor)\n self.as", "d_id": 72180, "documentation": { "docstring": "\n If a cache is instantiated before the config is read, it will be given\n the default cache size in the interim, and then resized to the new\n default cache size once the config is loaded.\n ", "n_words": 35, "vocab_size": 24, "n_whitespaces": 64, "language": "en" } }, { "id": 269448, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "in_top_k", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def in_top_k(predictions, targets, k):\n \n return tf.compat.v1.math.in_top_k(predictions, targets, k)\n\n\n# CONVOLUTIONS\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 15, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 27, "n_ast_nodes": 42, "n_identifiers": 8, "random_cut": "def in_top_k(predictions, targets, k):\n \n return tf.compat.v1.math.in_top_k(p", "d_id": 80086, "documentation": { "docstring": "Returns whether the `targets` are in the top `k` `predictions`.\n\n Args:\n predictions: A tensor of shape `(batch_size, classes)` and type `float32`.\n targets: A 
1D tensor of length `batch_size` and type `int32` or `int64`.\n k: An `int`, number of top elements to consider.\n\n Returns:\n A 1D tensor of length `batch_size` and type `bool`.\n `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`\n values of `predictions[i]`.\n ", "n_words": 64, "vocab_size": 46, "n_whitespaces": 115, "language": "en" } }, { "id": 3805, "commit_id": "a3aae8017a0a40ff2006e2567f71dccb04c997a5", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job.py", "file_name": "test_async_job.py", "fun_name": "test_update_job", "commit_message": "🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)\n\n* Facebook Marketing performance improvement\r\n\r\n* add comments and little refactoring\r\n\r\n* fix integration tests with the new config\r\n\r\n* improve job status handling, limit concurrency to 10\r\n\r\n* fix campaign jobs, refactor manager\r\n\r\n* big refactoring of async jobs, support random order of slices\r\n\r\n* update source _read_incremental to hook new state logic\r\n\r\n* fix issues with timeout\r\n\r\n* remove debugging and clean up, improve retry logic\r\n\r\n* merge changes from #8234\r\n\r\n* fix call super _read_increment\r\n\r\n* generalize batch execution, add use_batch flag\r\n\r\n* improve coverage, do some refactoring of spec\r\n\r\n* update test, remove overrides of source\r\n\r\n* add split by AdSet\r\n\r\n* add smaller insights\r\n\r\n* fix end_date < start_date case\r\n\r\n* add account_id to PK\r\n\r\n* add notes\r\n\r\n* fix new streams\r\n\r\n* fix reversed incremental stream\r\n\r\n* update spec.json for SAT\r\n\r\n* upgrade CDK and bump version\r\n\r\nCo-authored-by: Dmytro Rezchykov \r\nCo-authored-by: Eugene Kulak ", "code": "def test_update_job(self, parent_job, grouped_jobs, api, batch):\n \n parent_job.update_job()\n\n # assert\n for job in grouped_jobs:\n job.update_job.assert_called_once_with(batch=batch)\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 54, "n_identifiers": 9, "random_cut": "def test_update_job(self, parent_job, grouped_jobs, api, batch):\n \n parent_job.update_job()\n\n # assert\n for job in grouped_jobs:\n j", "d_id": 564, "documentation": { "docstring": "Checks jobs status in advance and restart if some failed.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 216169, "commit_id": "2bd6323ef5f87d871891a59917ee96f44ef55e75", "repo": "salt", "path": "salt/modules/cp.py", "file_name": "cp.py", "fun_name": "list_master_symlinks", "commit_message": "fixes saltstack/salt#61562 cp functions derive saltenv from config", "code": "def list_master_symlinks(saltenv=None, prefix=\"\"):\n \n if not saltenv:\n saltenv = __opts__[\"saltenv\"] or \"base\"\n return _client().symlink_list(saltenv, prefix)\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 30, "n_words": 14, "vocab_size": 14, "complexity": 3, "nloc": 4, "token_counts": 35, "n_ast_nodes": 63, "n_identifiers": 6, "random_cut": "def list_master_symlinks(saltenv=None, prefix=\"\"):\n \n if not saltenv:\n salt", "d_id": 54446, "documentation": { "docstring": "\n .. 
versionchanged:: 3005\n ``saltenv`` will use value from config if not explicitly set\n\n List all of the symlinks stored on the master\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.list_master_symlinks\n ", "n_words": 30, "vocab_size": 28, "n_whitespaces": 60, "language": "en" } }, { "id": 281557, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/options/screener_controller.py", "file_name": "screener_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n has_screen_tickers_start = \"\" if self.screen_tickers else \"[unvl]\"\n has_screen_tickers_end = \"\" if self.screen_tickers else \"[/unvl]\"\n help_text = f\n console.print(text=help_text, menu=\"Stocks - Options - Screener\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 25, "vocab_size": 18, "complexity": 3, "nloc": 16, "token_counts": 40, "n_ast_nodes": 106, "n_identifiers": 12, 
"random_cut": "def print_help(self):\n \n has_screen_tickers_start = \"\" if self.screen_tickers else \"[unvl]\"\n has_screen_tickers_end = \"\" if self.screen_tic", "d_id": 83854, "documentation": { "docstring": "Print help[cmds]\n view view available presets (or one in particular)\n set set one of the available presets\n[/cmds]\n[param]PRESET: [/param]{self.preset}[cmds]\n\n scr screen data from this preset[/cmds]\n{has_screen_tickers_start}\n[param]Last screened tickers: [/param]{', '.join(self.screen_tickers)}[menu]\n> ca take these to comparison analysis menu\n> po take these to portoflio optimization menu{has_screen_tickers_end}\n ", "n_words": 48, "vocab_size": 39, "n_whitespaces": 116, "language": "en" } }, { "id": 200292, "commit_id": "6d2bbf80752549276a968fd4af78231c569d55c5", "repo": "sympy", "path": "sympy/testing/runtests.py", "file_name": "runtests.py", "fun_name": "_find", "commit_message": "runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy", "code": "def _find(self, tests, obj, name, module, source_lines, globs, seen):\n \n if self._verbose:\n print('Finding tests in %s' % name)\n\n # If we've already processed this object, then ignore it.\n if id(obj) in seen:\n return\n seen[id(obj)] = 1\n\n # Make sure we don't run doctests for classes outside of sympy, such\n # as in numpy or scipy.\n if inspect.isclass(obj):\n if obj.__module__.split('.')[0] != 'sympy':\n return\n\n # Find a test for this object, and add it to the list of tests.\n test = self._get_test(obj, name, module, globs, source_lines)\n if test is not None:\n tests.append(test)\n\n if not self._recurse:\n return\n\n # Look for tests in a module's contained objects.\n if inspect.ismodule(obj):\n for rawname, val in obj.__dict__.items():\n # Recurse to functions & classes.\n if inspect.isfunction(val) or inspect.isclass(val):\n # Make sure we don't run doctests functions or classes\n # from different modules\n if val.__module__ != module.__name__:\n continue\n\n assert self._from_module(module, val), \\\n \"%s is not in module %s (rawname %s)\" % (val, module, rawname)\n\n try:\n valname = '%s.%s' % (name, rawname)\n self._find(tests, val, valname, module,\n source_lines, globs, seen)\n except KeyboardInterrupt:\n raise\n\n # Look for tests in a module's __test__ dictionary.\n for valname, val in getattr(obj, '__test__', {}).items():\n if not isinstance(valname, str):\n raise ValueError(\"SymPyDocTestFinder.find: __test__ keys \"\n \"must be strings: %r\" %\n (type(valname),))\n if not (inspect.isfunction(val) or inspect.isclass(val) or\n inspect.ismethod(val) or inspect.ismodule(val) or\n isinstance(val, str)):\n raise ValueError(\"SymPyDocTestFinder.find: __test__ values \"\n \"must be strings, functions, methods, \"\n \"classes, or modules: %r\" %\n (type(val),))\n valname = '%s.__test__.%s' % (name, valname)\n self._find(tests, val, valname, module, source_lines,\n globs, seen)\n\n\n # Look for tests in a class's contained objects.\n if inspect.isclass(obj):\n for valname, val in obj.__dict__.items():\n # Special handling for staticmethod/classmethod.\n if isinstance(val, staticmethod):\n val = getattr(obj, valname)\n if isinstance(val, classmethod):\n val = getattr(obj, valname).__func__\n\n\n # Recurse to methods, properties, and nested classes.\n if ((inspect.isfunction(unwrap(val)) or\n inspect.isclass(val) or\n isinstance(val, property)) and\n self._from_module(module, val)):\n # Make sure we don't run doctests functions or classes\n # from different modules\n if 
isinstance(val, property):\n if hasattr(val.fget, '__module__'):\n if val.fget.__module__ != module.__name__:\n continue\n else:\n if val.__module__ != module.__name__:\n continue\n\n assert self._from_module(module, val), \\\n \"%s is not in module %s (valname %s)\" % (\n val, module, valname)\n\n valname = '%s.%s' % (name, valname)\n self._find(tests, val, valname, module, source_lines,\n globs, seen)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1749, "n_words": 358, "vocab_size": 161, "complexity": 32, "nloc": 65, "token_counts": 512, "n_ast_nodes": 803, "n_identifiers": 43, "random_cut": "def _find(self, tests, obj, name, module, source_lines, globs, seen):\n \n if self._verbose:\n print('Finding tests in %s' % name)\n\n # If we've already processed this object, then ignore it.\n if id(obj) in seen:\n return\n seen[id(obj)] = 1\n\n # Make sure we don't run doctests for classes outside of sympy, such\n # as in numpy or scipy.\n if inspect.isclass(obj):\n if obj.__module__.split('.')[0] != 'sympy':\n return\n", "d_id": 49586, "documentation": { "docstring": "\n Find tests for the given object and any contained objects, and\n add them to ``tests``.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 92382, "commit_id": "c4cc0467974bcfb2b3c95120bd19c337aa977183", "repo": "sentry", "path": "src/sentry/sentry_metrics/indexer/base.py", "file_name": "base.py", "fun_name": "get_mapped_key_strings_to_ints", "commit_message": "feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380] (#36263)\n\n* feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380]\r\n\r\nThe postgres string indexer now is able to rate limit writes using four\r\nsentry options. If that happens, `None` is returned in place of an\r\ninteger, and the FetchType is RATE_LIMITED.\r\n\r\nThe kafka consumer/message processor explicitly checks for those `None`\r\nvalues and throws away every message that references a rate-limited\r\nstring. It logs a Sentry error for every dropped message just because\r\nthat's already what we do for other kinds of dropped messages.\r\n\r\nRate limiting and quota management currently creates a ton of\r\ndataclasses and that probably wastes time. There are a ton of\r\nlow-hanging fruits:\r\n\r\n* the return value of _construct_quotas could be globally cached, as\r\n long as the cache is wiped when the sentry options change.\r\n\r\n* the same Quota object (for global limits) is referenced from multiple\r\n RequestedQuota instances (one for each org).\r\n `sentry.ratelimits.sliding_windows` could check the `id()` of the\r\n quota (if there is no prefix override) to avoid computing and checking\r\n the same quota multiple times.\r\n\r\nAn even lower hanging fruit is that we're fetching the same keys from\r\nRedis multiple times, because multiple organizations (and therefore\r\nmultiple RequestedQuota instances) adhere to the global quota. 
So that's\r\nbeen fixed, but as for the rest let's wait for timings from prod.\r\n\r\n* fix typo\r\n\r\n* fix typing\r\n\r\n* apply review feedback\r\n\r\n* fix typing, add test\r\n\r\n* fix tests\r\n\r\n* apply review feedback about logging too many msgs\r\n\r\n* fix leaking option in test\r\n\r\n* sike, more test failures", "code": "def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]:\n \n cache_key_results: MutableMapping[str, int] = {}\n for org_id, result_dict in self.results.items():\n for string, id in result_dict.items():\n key = f\"{org_id}:{string}\"\n if id is not None:\n cache_key_results[key] = id\n\n return cache_key_results\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 121, "n_words": 33, "vocab_size": 26, "complexity": 4, "nloc": 18, "token_counts": 66, "n_ast_nodes": 111, "n_identifiers": 13, "random_cut": "def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]:\n \n cache_", "d_id": 18905, "documentation": { "docstring": "\n Return the results, but formatted as the following:\n {\n \"1:a\": 10,\n \"1:b\": 11,\n \"1:c\", 12,\n \"2:e\": 13\n }\n This is for when we use indexer_cache.set_many()\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 129, "language": "en" } }, { "id": 223585, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "get_atext", "commit_message": "add python 3.10.4 for windows", "code": "def get_atext(value):\n \n m = _non_atom_end_matcher(value)\n if not m:\n raise errors.HeaderParseError(\n \"expected atext but found '{}'\".format(value))\n atext = m.group()\n value = value[len(atext):]\n atext = ValueTerminal(atext, 'atext')\n _validate_xtext(atext)\n return atext, value\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 71, "n_words": 29, "vocab_size": 23, "complexity": 2, "nloc": 10, "token_counts": 61, "n_ast_nodes": 106, "n_identifiers": 12, "random_cut": "def get_atext(value):\n \n m = _non_atom_end_matcher(value)\n if not m:\n raise errors.HeaderParseError(\n \"expected atext but found '{}'\".format(value))\n atext = m.group()\n value = value[len(atext):]\n atext = ValueTerminal(atext, 'atext')\n _validate_xtext(atext)\n return atext, ", "d_id": 56992, "documentation": { "docstring": "atext = \n\n We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to\n the token's defects list if we find non-atext characters.\n ", "n_words": 24, "vocab_size": 24, "n_whitespaces": 33, "language": "en" } }, { "id": 20336, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/formatters/img.py", "file_name": "img.py", "fun_name": "_get_linenumber_pos", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome 
of this test now in pip 22.0.4", "code": "def _get_linenumber_pos(self, lineno):\n \n return (self.image_pad, self._get_line_y(lineno))\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def _get_linenumber_pos(self, lineno):\n \n retur", "d_id": 3328, "documentation": { "docstring": "\n Get the actual position for the start of a line number.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 71919, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_contentstate.py", "file_name": "test_contentstate.py", "fun_name": "test_image_inside_paragraph", "commit_message": "Reformat with black", "code": "def test_image_inside_paragraph(self):\n # In Draftail's data model, images are block-level elements and therefore\n # split up preceding / following text into their own paragraphs\n converter = ContentstateConverter(features=[\"image\"])\n result = json.loads(\n converter.from_database_format(\n \n )\n )\n self.assertContentStateEqual(\n result,\n {\n \"blocks\": [\n {\n \"key\": \"00000\",\n \"inlineStyleRanges\": [],\n \"entityRanges\": [],\n \"depth\": 0,\n \"text\": \"before\",\n \"type\": \"unstyled\",\n },\n {\n \"key\": \"00000\",\n \"inlineStyleRanges\": [],\n \"entityRanges\": [{\"key\": 0, \"offset\": 0, \"length\": 1}],\n \"depth\": 0,\n \"text\": \" \",\n \"type\": \"atomic\",\n },\n {\n \"key\": \"00000\",\n \"inlineStyleRanges\": [],\n \"entityRanges\": [],\n \"depth\": 0,\n \"text\": \"after\",\n \"type\": \"unstyled\",\n },\n ],\n \"entityMap\": {\n \"0\": {\n \"data\": {\n \"format\": \"left\",\n \"alt\": \"an image\",\n \"id\": \"1\",\n \"src\": \"/media/not-found\",\n },\n \"mutability\": \"IMMUTABLE\",\n \"type\": \"IMAGE\",\n }\n },\n },\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1056, "n_words": 111, "vocab_size": 72, "complexity": 1, "nloc": 52, "token_counts": 181, "n_ast_nodes": 347, "n_identifiers": 10, "random_cut": "def test_image_inside_paragraph(self):\n ", "d_id": 15780, "documentation": { "docstring": "\n
            <p>before <embed embedtype=\"image\" alt=\"an image\" id=\"1\" format=\"left\" /> after</p>
    \n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 32, "language": "en" } }, { "id": 260816, "commit_id": "6d16698dd8ba4407e5c3c588d7b5e6a5257eddc9", "repo": "scikit-learn", "path": "sklearn/svm/_bounds.py", "file_name": "_bounds.py", "fun_name": "l1_min_c", "commit_message": "DOC Ensures that l1_min_c passes numpydoc validation (#24134)", "code": "def l1_min_c(X, y, *, loss=\"squared_hinge\", fit_intercept=True, intercept_scaling=1.0):\n \n if loss not in (\"squared_hinge\", \"log\"):\n raise ValueError('loss type not in (\"squared_hinge\", \"log\")')\n\n X = check_array(X, accept_sparse=\"csc\")\n check_consistent_length(X, y)\n\n Y = LabelBinarizer(neg_label=-1).fit_transform(y).T\n # maximum absolute value over classes and features\n den = np.max(np.abs(safe_sparse_dot(Y, X)))\n if fit_intercept:\n bias = np.full(\n (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype\n )\n den = max(den, abs(np.dot(Y, bias)).max())\n\n if den == 0.0:\n raise ValueError(\n \"Ill-posed l1_min_c calculation: l1 will always \"\n \"select zero coefficients for this data\"\n )\n if loss == \"squared_hinge\":\n return 0.5 / den\n else: # loss == 'log':\n return 2.0 / den\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 216, "n_words": 93, "vocab_size": 70, "complexity": 5, "nloc": 21, "token_counts": 176, "n_ast_nodes": 276, "n_identifiers": 26, "random_cut": "def l1_min_c(X, y, *, loss=\"squared_hinge\", fit_intercept=True, intercept_scaling=1.0):\n \n if loss not in (\"squared_hinge\", \"log\"):\n raise ValueError('loss type not in (\"squared_hinge\", \"log\")')\n\n X = check_array(X, accept_sparse=\"csc\")\n check_consistent_length(X, y)\n\n Y = LabelBinarizer(neg_label=-1).fit_transform(y).T\n # maximum absolute value over classes and features\n den = np.max(np.abs(safe_sparse_dot(Y, X)))\n if fit_intercept:\n bias = np.full(\n (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype\n )\n den = max(den, abs(np.dot(Y, bias)).max())\n\n if den == 0.0:\n raise ValueError(\n \"Ill-posed l1_min_c calculation: l1 will always \"\n \"select zero coefficients for this data\"\n )\n if loss == \"squared_hinge\":\n return 0.5 / den\n else: # loss ==", "d_id": 76515, "documentation": { "docstring": "Return the lowest bound for C.\n\n The lower bound for C is computed such that for C in (l1_min_C, infinity)\n the model is guaranteed not to be empty. This applies to l1 penalized\n classifiers, such as LinearSVC with penalty='l1' and\n linear_model.LogisticRegression with penalty='l1'.\n\n This value is valid if class_weight parameter in fit() is not set.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target vector relative to X.\n\n loss : {'squared_hinge', 'log'}, default='squared_hinge'\n Specifies the loss function.\n With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).\n With 'log' it is the loss of logistic regression models.\n\n fit_intercept : bool, default=True\n Specifies if the intercept should be fitted by the model.\n It must match the fit() method parameter.\n\n intercept_scaling : float, default=1.0\n When fit_intercept is True, instance vector x becomes\n [x, intercept_scaling],\n i.e. 
a \"synthetic\" feature with constant value equals to\n intercept_scaling is appended to the instance vector.\n It must match the fit() method parameter.\n\n Returns\n -------\n l1_min_c : float\n Minimum value for C.\n ", "n_words": 190, "vocab_size": 121, "n_whitespaces": 336, "language": "en" } }, { "id": 43184, "commit_id": "677c42227c08f705142f298ab88915f133cd94e5", "repo": "airflow", "path": "airflow/migrations/versions/0111_2_3_3_add_indexes_for_cascade_deletes.py", "file_name": "0111_2_3_3_add_indexes_for_cascade_deletes.py", "fun_name": "_mysql_tables_where_indexes_already_present", "commit_message": "Add indexes for CASCADE deletes for task_instance (#24488)\n\nWhen we add foreign keys with ON DELETE CASCADE, and we delete rows in the foreign table, the database needs to join back to the referencing table. If there's no suitable index, then it can be slow to perform the deletes.", "code": "def _mysql_tables_where_indexes_already_present(conn):\n \n to_check = [\n ('xcom', 'idx_xcom_task_instance'),\n ('task_reschedule', 'idx_task_reschedule_dag_run'),\n ('task_fail', 'idx_task_fail_task_instance'),\n ]\n tables = set()\n for tbl, idx in to_check:\n if conn.execute(f\"show indexes from {tbl} where Key_name = '{idx}'\").first():\n tables.add(tbl)\n return tables\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 89, "n_words": 32, "vocab_size": 29, "complexity": 3, "nloc": 11, "token_counts": 61, "n_ast_nodes": 115, "n_identifiers": 10, "random_cut": "def _mysql_tables_where_indexes_already_present(conn):\n \n to_check = [\n ('xcom', 'idx_xcom_task_instance'),\n ('task_reschedule', 'idx_task_reschedule_dag_run'),\n ", "d_id": 7856, "documentation": { "docstring": "\n If user downgraded and is upgrading again, we have to check for existing\n indexes on mysql because we can't (and don't) drop them as part of the\n downgrade.\n ", "n_words": 28, "vocab_size": 27, "n_whitespaces": 41, "language": "en" } }, { "id": 246075, "commit_id": "121b9e2475f4d7b3bca50d81732f07db80b2264f", "repo": "synapse", "path": "tests/http/test_webclient.py", "file_name": "test_webclient.py", "fun_name": "test_webclient_resolves_with_client_resource", "commit_message": "Add a regression test for using both webclient and client resources simultaneously (#11765)", "code": "def test_webclient_resolves_with_client_resource(self):\n \n for resource_name_order_list in [\n [\"webclient\", \"client\"],\n [\"client\", \"webclient\"],\n ]:\n # Create a dictionary from path regex -> resource\n resource_dict: Dict[str, Resource] = {}\n\n for resource_name in resource_name_order_list:\n resource_dict.update(\n SynapseHomeServer._configure_named_resource(self.hs, resource_name)\n )\n\n # Create a root resource which ties the above resources together into one\n root_resource = Resource()\n create_resource_tree(resource_dict, root_resource)\n\n # Create a site configured with this resource to make HTTP requests against\n listener_config = ListenerConfig(\n port=8008,\n bind_addresses=[\"127.0.0.1\"],\n type=\"http\",\n http_options=HttpListenerConfig(\n resources=[HttpResourceConfig(names=resource_name_order_list)]\n ),\n )\n test_site = SynapseSite(\n logger_name=\"synapse.access.http.fake\",\n site_tag=self.hs.config.server.server_name,\n config=listener_config,\n resource=root_resource,\n server_version_string=\"1\",\n max_request_body_size=1234,\n reactor=self.reactor,\n )\n\n # Attempt to make requests to endpoints on both 
the webclient and client resources\n # on test_site.\n self._request_client_and_webclient_resources(test_site)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 548, "n_words": 103, "vocab_size": 79, "complexity": 3, "nloc": 30, "token_counts": 150, "n_ast_nodes": 241, "n_identifiers": 36, "random_cut": "def test_webclient_resolves_with_client_resource(self):\n \n for resource_name_order_list in [\n [\"webclient\", \"client\"],\n [\"client\", \"webclient\"],\n ]:\n # Create a dictionary from path regex -> resource\n resource_dict: Dict[str, Resource] = {}\n\n ", "d_id": 70986, "documentation": { "docstring": "\n Tests that both client and webclient resources can be accessed simultaneously.\n\n This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763.\n ", "n_words": 21, "vocab_size": 21, "n_whitespaces": 43, "language": "en" } }, { "id": 147365, "commit_id": "60054995e65304fb14e6d0ab69bdec07aa9389fe", "repo": "ray", "path": "python/ray/cloudpickle/cloudpickle.py", "file_name": "cloudpickle.py", "fun_name": "unregister_pickle_by_value", "commit_message": "[docs] fix doctests and activate CI (#23418)", "code": "def unregister_pickle_by_value(module):\n \n if not isinstance(module, types.ModuleType):\n raise ValueError(f\"Input should be a module object, got {str(module)} instead\")\n if module.__name__ not in _PICKLE_BY_VALUE_MODULES:\n raise ValueError(f\"{module} is not registered for pickle by value\")\n else:\n _PICKLE_BY_VALUE_MODULES.remove(module.__name__)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 65, "n_words": 32, "vocab_size": 28, "complexity": 3, "nloc": 7, "token_counts": 47, "n_ast_nodes": 92, "n_identifiers": 10, "random_cut": "def unregister_pickle_by_value(module):\n \n if not isinstance(module, types.ModuleType):\n raise ValueError(f\"Input should be a module object, got {str(module)} instead\")\n if module.__name__ not in _PICKLE_BY_VALUE_MODULES:\n ", "d_id": 33921, "documentation": { "docstring": "Unregister that the input module should be pickled by value.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 75130, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_get_bad_permissions", "commit_message": "Reformat with black", "code": "def test_get_bad_permissions(self):\n \n # Remove privileges from user\n self.user.is_superuser = False\n self.user.user_permissions.add(\n Permission.objects.get(\n content_type__app_label=\"wagtailadmin\", codename=\"access_admin\"\n )\n )\n self.user.save()\n\n # Get\n response = self.client.get(\n reverse(\"wagtailimages:url_generator\", args=(self.image.id,))\n )\n\n # Check response\n self.assertRedirects(response, reverse(\"wagtailadmin_home\"))\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 155, "n_words": 30, "vocab_size": 24, "complexity": 1, "nloc": 12, "token_counts": 78, "n_ast_nodes": 135, "n_identifiers": 19, "random_cut": "def test_get_bad_permissions(self):\n \n # Remove privileges from user\n self.user.is_superuser = False\n self.user.user_permissions.a", "d_id": 16362, "documentation": { "docstring": "\n This tests 
that the view returns a \"permission denied\" redirect if a user without correct\n permissions attempts to access it\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 42, "language": "en" } }, { "id": 21342, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py", "file_name": "shutil.py", "fun_name": "get_archive_formats", "commit_message": "Vendor in pip 22.1.2", "code": "def get_archive_formats():\n \n formats = [(name, registry[2]) for name, registry in\n _ARCHIVE_FORMATS.items()]\n formats.sort()\n return formats\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 40, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 34, "n_ast_nodes": 56, "n_identifiers": 7, "random_cut": "def get_archive_formats():\n \n formats = [(name, registry[2]) for name, registry in\n _ARCHIVE_FORMATS.items", "d_id": 3772, "documentation": { "docstring": "Returns a list of supported formats for archiving and unarchiving.\n\n Each element of the returned sequence is a tuple (name, description)\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 27, "language": "en" } }, { "id": 189462, "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", "repo": "manim", "path": "manim/mobject/svg/svg_mobject.py", "file_name": "svg_mobject.py", "fun_name": "handle_transforms", "commit_message": "Hide more private methods from the docs. (#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", "code": "def _handle_transforms(self, element, mobject):\n \n\n if element.hasAttribute(\"x\") and element.hasAttribute(\"y\"):\n x = self._attribute_to_float(element.getAttribute(\"x\"))\n # Flip y\n y = -self._attribute_to_float(element.getAttribute(\"y\"))\n mobject.shift(x * RIGHT + y * UP)\n\n transform_attr_value = element.getAttribute(\"transform\")\n\n # parse the various transforms in the attribute value\n transform_names = [\"matrix\", \"translate\", \"scale\", \"rotate\", \"skewX\", \"skewY\"]\n\n # Borrowed/Inspired from:\n # https://github.com/cjlano/svg/blob/3ea3384457c9780fa7d67837c9c5fd4ebc42cb3b/svg/svg.py#L75\n\n # match any SVG transformation with its parameter (until final parenthesis)\n # [^)]* == anything but a closing parenthesis\n # '|'.join == OR-list of SVG transformations\n transform_regex = \"|\".join([x + r\"[^)]*\\)\" for x in transform_names])\n transforms = re.findall(transform_regex, transform_attr_value)\n\n number_regex = r\"[-+]?(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:[eE][-+]?\\d+)?\"\n\n for t in transforms:\n op_name, op_args = t.split(\"(\")\n op_name = op_name.strip()\n op_args = [float(x) for x in re.findall(number_regex, op_args)]\n\n if op_name == \"matrix\":\n transform_args = np.array(op_args).reshape([3, 2])\n x = transform_args[2][0]\n y = -transform_args[2][1]\n matrix = np.identity(self.dim)\n matrix[:2, :2] = transform_args[:2, :]\n 
matrix[1] *= -1\n matrix[:, 1] *= -1\n\n for mob in mobject.family_members_with_points():\n if config[\"renderer\"] == \"opengl\":\n mob.points = np.dot(mob.points, matrix)\n else:\n mob.points = np.dot(mob.points, matrix)\n mobject.shift(x * RIGHT + y * UP)\n\n elif op_name == \"scale\":\n scale_values = op_args\n if len(scale_values) == 2:\n scale_x, scale_y = scale_values\n mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN)\n elif len(scale_values) == 1:\n scale = scale_values[0]\n mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)\n\n elif op_name == \"translate\":\n if len(op_args) == 2:\n x, y = op_args\n else:\n x = op_args\n y = 0\n mobject.shift(x * RIGHT + y * DOWN)\n\n else:\n # TODO: handle rotate, skewX and skewY\n # for now adding a warning message\n logger.warning(\n \"Handling of %s transform is not supported yet!\",\n op_name,\n )\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1007, "n_words": 245, "vocab_size": 143, "complexity": 14, "nloc": 48, "token_counts": 429, "n_ast_nodes": 706, "n_identifiers": 48, "random_cut": "def _handle_transforms(self, element, mobject):\n \n\n if element.hasAttribute(\"x\") and element.hasAttribute(\"y\"):\n x = self._attribute_to_float(element.getAttribute(\"x\"))\n # Flip y\n y = -self._attribute_to_float(element.getAttribute(\"y\"))\n mobject.shift(x * RIGHT + y * UP)\n\n transform_attr_value = element.getAttribute(\"transform\")\n\n # parse the various transforms in the attribute value\n transform_names = [\"matrix\", \"translate\", \"scale\", \"rotate\", \"skewX\", \"skewY\"]\n\n # Borrowed/Inspired from:\n # https://github.com/cjlano/svg/blob/3ea3384457c9780fa7d67837c9c5fd4ebc42cb3b/svg/svg.py#L75\n\n # match any SVG transformation with its parameter (until final parenthesis)\n # [^)]* == anything but a closing parenthesis\n # '|'.join == OR-list of SVG transformations\n transform_regex = \"|\".join([x + r\"[^)]*\\)\" for x in transform_names])\n transforms = re.findall(transform_regex, transform_attr_value)\n\n number_regex = r\"[-+]?(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:[eE][-+]?\\d+)?\"\n\n for t in transforms:\n op", "d_id": 46070, "documentation": { "docstring": "Applies the SVG transform to the specified mobject. 
Transforms include:\n ``matrix``, ``translate``, and ``scale``.\n\n Parameters\n ----------\n element : :class:`minidom.Element`\n The transform command to perform\n\n mobject : :class:`Mobject`\n The Mobject to transform.\n ", "n_words": 31, "vocab_size": 25, "n_whitespaces": 95, "language": "en" } }, { "id": 298288, "commit_id": "34dc47ad1037c6bf569f8cb2199f5933c2a0a079", "repo": "core", "path": "tests/components/airvisual/conftest.py", "file_name": "conftest.py", "fun_name": "pro_data_fixture", "commit_message": "Ensure AirVisual Pro migration includes device and entity customizations (#84798)\n\n* Ensure AirVisual Pro migration includes device and entity customizations\r\n\r\n* Update homeassistant/components/airvisual/__init__.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Code review\r\n\r\n* Fix tests\r\n\r\n* Fix tests FOR REAL\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "def pro_data_fixture():\n \n return json.loads(load_fixture(\"data.json\", \"airvisual_pro\"))\n\n\n@pytest.fixture(name=\"pro\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"pro\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 51, "n_identifiers": 7, "random_cut": "def pro_data_fixture():\n \n return json.loads(load_fixture(\"data.json\", \"airvisual_pro\"))\n\n\n@pytest.fixture(", "d_id": 97233, "documentation": { "docstring": "Define an update coordinator data example for the Pro.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 260671, "commit_id": "fc656c2189d64a43089f514dcdedb0fae70dfe56", "repo": "scikit-learn", "path": "sklearn/datasets/_species_distributions.py", "file_name": "_species_distributions.py", "fun_name": "fetch_species_distributions", "commit_message": "DOC Ensures that fetch_species_distributions passes numpydoc validation (#24162)\n\nCo-authored-by: Franck Charras ", "code": "def fetch_species_distributions(*, data_home=None, download_if_missing=True):\n \n data_home = get_data_home(data_home)\n if not exists(data_home):\n makedirs(data_home)\n\n # Define parameters for the data files. These should not be changed\n # unless the data model changes. 
They will be saved in the npz file\n # with the downloaded data.\n extra_params = dict(\n x_left_lower_corner=-94.8,\n Nx=1212,\n y_left_lower_corner=-56.05,\n Ny=1592,\n grid_size=0.05,\n )\n dtype = np.int16\n\n archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)\n\n if not exists(archive_path):\n if not download_if_missing:\n raise IOError(\"Data not found and `download_if_missing` is False\")\n logger.info(\"Downloading species data from %s to %s\" % (SAMPLES.url, data_home))\n samples_path = _fetch_remote(SAMPLES, dirname=data_home)\n with np.load(samples_path) as X: # samples.zip is a valid npz\n for f in X.files:\n fhandle = BytesIO(X[f])\n if \"train\" in f:\n train = _load_csv(fhandle)\n if \"test\" in f:\n test = _load_csv(fhandle)\n remove(samples_path)\n\n logger.info(\n \"Downloading coverage data from %s to %s\" % (COVERAGES.url, data_home)\n )\n coverages_path = _fetch_remote(COVERAGES, dirname=data_home)\n with np.load(coverages_path) as X: # coverages.zip is a valid npz\n coverages = []\n for f in X.files:\n fhandle = BytesIO(X[f])\n logger.debug(\" - converting {}\".format(f))\n coverages.append(_load_coverage(fhandle))\n coverages = np.asarray(coverages, dtype=dtype)\n remove(coverages_path)\n\n bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params)\n joblib.dump(bunch, archive_path, compress=9)\n else:\n bunch = joblib.load(archive_path)\n\n return bunch\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 549, "n_words": 179, "vocab_size": 115, "complexity": 8, "nloc": 43, "token_counts": 302, "n_ast_nodes": 485, "n_identifiers": 50, "random_cut": "def fetch_species_distributions(*, data_home=None, download_if_missing=True):\n \n data_home = get_data_home(data_home)\n if not exists(data_home):\n makedirs(data_home)\n\n # Define parameters for the data files. These should not be changed\n # unless the data model changes. They will be saved in the npz file\n # with the downloaded data.\n extra_params = dict(\n x_left_lower_corner=-94.8,\n Nx=1212,\n y_left_lower_corner=-56.05,\n Ny=1592,\n grid_size=0.05,\n )\n dtype = np.int16\n\n archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)\n\n if not exists(archive_path):\n if not download_if_missing:\n raise IOError(\"Data not found and `download_if_missing` is False\")\n logger.info(\"Downloading species data from %s to %s\" % (SAMPLES.url, data_home))\n samples_path = _fetch_remote(SAMPLES, dirname=data_home)\n with np.load(samples_path) as X: # samples.zip is a valid npz\n for f in X.files:\n fhandle = BytesIO(X[f])\n if \"train\" in f:\n train = _load_csv(fhandle)\n if \"test\" in f:\n test = _load_csv(fhandle)\n remove(samples_path)\n\n logger.info(\n \"Downloading coverage data from %s to %s\" % (COVERAGES.url, data_home)\n )\n coverages_path = _fetch_remote(COVERAGES, dirname=data_home)\n with np.load(coverages_path) as X: # coverages.zip is a valid npz\n coverages = []\n for f in X.files:\n fhandle = BytesIO(X[f])\n logger.debug(\" - converting {}\".format(f)", "d_id": 76408, "documentation": { "docstring": "Loader for species distribution dataset from Phillips et. al. (2006).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n data_home : str, default=None\n Specify another download and cache folder for the datasets. 
By default\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\n\n download_if_missing : bool, default=True\n If False, raise a IOError if the data is not locally available\n instead of trying to download the data from the source site.\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n coverages : array, shape = [14, 1592, 1212]\n These represent the 14 features measured\n at each point of the map grid.\n The latitude/longitude values for the grid are discussed below.\n Missing data is represented by the value -9999.\n train : record array, shape = (1624,)\n The training points for the data. Each point has three fields:\n\n - train['species'] is the species name\n - train['dd long'] is the longitude, in degrees\n - train['dd lat'] is the latitude, in degrees\n test : record array, shape = (620,)\n The test points for the data. Same format as the training data.\n Nx, Ny : integers\n The number of longitudes (x) and latitudes (y) in the grid\n x_left_lower_corner, y_left_lower_corner : floats\n The (x,y) position of the lower-left corner, in degrees\n grid_size : float\n The spacing between points of the grid, in degrees\n\n Notes\n -----\n\n This dataset represents the geographic distribution of species.\n The dataset is provided by Phillips et. al. (2006).\n\n The two species are:\n\n - `\"Bradypus variegatus\"\n `_ ,\n the Brown-throated Sloth.\n\n - `\"Microryzomys minutus\"\n `_ ,\n also known as the Forest Small Rice Rat, a rodent that lives in Peru,\n Colombia, Ecuador, Peru, and Venezuela.\n\n - For an example of using this dataset with scikit-learn, see\n :ref:`examples/applications/plot_species_distribution_modeling.py\n `.\n\n References\n ----------\n\n * `\"Maximum entropy modeling of species geographic distributions\"\n `_\n S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,\n 190:231-259, 2006.\n ", "n_words": 310, "vocab_size": 197, "n_whitespaces": 631, "language": "en" } }, { "id": 213613, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy/core/device.py", "file_name": "device.py", "fun_name": "set_split_factor", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def set_split_factor(factor, dev=None):\n \n assert 0 <= factor\n global split_factors\n dev = ivy.default(dev, default_device())\n split_factors[dev] = factor\n\n\n# noinspection PyShadowingNames", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 34, "n_ast_nodes": 56, "n_identifiers": 7, "random_cut": "def set_split_factor(factor, dev=None):\n \n assert 0 <= factor\n global split_factors\n dev = ivy.default(dev, default_device())\n split_f", "d_id": 53677, "documentation": { "docstring": "\n Set the global split factor for a given device, which can be used to scale batch splitting chunk sizes for the\n device across the codebase.\n\n :param factor: The factor to set the device-specific split factor to.\n :type factor: float\n :param dev: The device to set the split factor for. 
Sets the default device by default.\n :type dev: str, optional\n ", "n_words": 59, "vocab_size": 38, "n_whitespaces": 81, "language": "en" } }, { "id": 196977, "commit_id": "3ebd6862a0c33fcf357d9f4ac5c2a8fd80a98675", "repo": "sympy", "path": "sympy/testing/runtests.py", "file_name": "runtests.py", "fun_name": "run", "commit_message": "Enable doctests in Markdown files", "code": "def run(self, test, compileflags=None, out=None, clear_globs=True):\n \n self.test = test\n\n # Remove ``` from the end of example, which may appear in Markdown\n # files\n for example in test.examples:\n example.want = example.want.replace('```\\n', '')\n example.exc_msg = example.exc_msg and example.exc_msg.replace('```\\n', '')\n\n\n if compileflags is None:\n compileflags = pdoctest._extract_future_flags(test.globs)\n\n save_stdout = sys.stdout\n if out is None:\n out = save_stdout.write\n sys.stdout = self._fakeout\n\n # Patch pdb.set_trace to restore sys.stdout during interactive\n # debugging (so it's not still redirected to self._fakeout).\n # Note that the interactive output will go to *our*\n # save_stdout, even if that's not the real sys.stdout; this\n # allows us to write test cases for the set_trace behavior.\n save_set_trace = pdb.set_trace\n self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)\n self.debugger.reset()\n pdb.set_trace = self.debugger.set_trace\n\n # Patch linecache.getlines, so we can see the example's source\n # when we're inside the debugger.\n self.save_linecache_getlines = pdoctest.linecache.getlines\n linecache.getlines = self.__patched_linecache_getlines\n\n # Fail for deprecation warnings\n with raise_on_deprecated():\n try:\n return self.__run(test, compileflags, out)\n finally:\n sys.stdout = save_stdout\n pdb.set_trace = save_set_trace\n linecache.getlines = self.save_linecache_getlines\n if clear_globs:\n test.globs.clear()\n\n\n# We have to override the name mangled methods.\nmonkeypatched_methods = [\n 'patched_linecache_getlines',\n 'run',\n 'record_outcome'\n]\nfor method in monkeypatched_methods:\n oldname = '_DocTestRunner__' + method\n newname = '_SymPyDocTestRunner__' + method\n setattr(SymPyDocTestRunner, newname, getattr(DocTestRunner, oldname))\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 539, "n_words": 197, "vocab_size": 129, "complexity": 7, "nloc": 26, "token_counts": 195, "n_ast_nodes": 392, "n_identifiers": 40, "random_cut": "def run(self, test, compileflags=None, out=None, clear_globs=True):\n \n self.test = test\n\n # Remove ``` from the end of example, which may appear in Markdown\n # files\n for example in test.examples:\n example.want = example.want.replace('```\\n', '')\n example.exc_msg = example.exc_msg and example.exc_msg.replace('```\\n', '')\n\n\n if compileflags is None:\n compileflags = pdoctest._extract_future_flags(test.globs)\n\n save_stdout = sys.stdout\n if out is None:\n out = save_stdout.write\n sys.stdout = self._fakeout\n\n # Patch pdb.set_trace to restore sys.stdout during interactive\n # debugging (so it's not still redirected to self._fakeout).\n # Note that the interactive output will go to *our*\n # save_stdout, even if that's not the real sys.stdout; this\n # allows us to write test cases for the set_trace behavior.\n save_set_trace = pdb.set_trace\n self.debugger = pdoctest._OutputRedirectingPdb(", "d_id": 48271, "documentation": { "docstring": "\n Run the examples in ``test``, and display the results using the\n writer 
function ``out``.\n\n The examples are run in the namespace ``test.globs``. If\n ``clear_globs`` is true (the default), then this namespace will\n be cleared after the test runs, to help with garbage\n collection. If you would like to examine the namespace after\n the test completes, then use ``clear_globs=False``.\n\n ``compileflags`` gives the set of flags that should be used by\n the Python compiler when running the examples. If not\n specified, then it will default to the set of future-import\n flags that apply to ``globs``.\n\n The output of each example is checked using\n ``SymPyDocTestRunner.check_output``, and the results are\n formatted by the ``SymPyDocTestRunner.report_*`` methods.\n ", "n_words": 111, "vocab_size": 72, "n_whitespaces": 220, "language": "en" } }, { "id": 29303, "commit_id": "d90be220d6b687d08153934a51354011a3cb5ca1", "repo": "saleor", "path": "saleor/graphql/product/tests/queries/test_product_variants_query.py", "file_name": "test_product_variants_query.py", "fun_name": "_fetch_all_variants", "commit_message": "Split test_product.py and test_variant.py into multiple files (#11173)\n\n* Split test_product.py into multiple files\r\n\r\n* Split test_variant.py into multiple files", "code": "def _fetch_all_variants(client, variables={}, permissions=None):\n query = \n response = client.post_graphql(\n query, variables, permissions=permissions, check_no_permissions=False\n )\n content = get_graphql_content(response)\n return content[\"data\"][\"productVariants\"]\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 18, "token_counts": 49, "n_ast_nodes": 78, "n_identifiers": 10, "random_cut": "def _fetch_all_variants(client, variables={}, permissions=None):\n query = \n response = client.post_graphql(\n query, variables, permissions=permissions, check_no_permissions=False\n )\n content = get_graphql_content(response)\n return content[\"data\"][\"productVariants\"]\n\n", "d_id": 5217, "documentation": { "docstring": "\n query fetchAllVariants($channel: String) {\n productVariants(first: 10, channel: $channel) {\n totalCount\n edges {\n node {\n id\n }\n }\n }\n }\n ", "n_words": 19, "vocab_size": 13, "n_whitespaces": 165, "language": "en" } }, { "id": 19350, "commit_id": "c6bdd48715adcbe17c4146b7cae3b0fc569f7bde", "repo": "PythonRobotics", "path": "ArmNavigation/arm_obstacle_navigation/arm_obstacle_navigation.py", "file_name": "arm_obstacle_navigation.py", "fun_name": "astar_torus", "commit_message": "docs: Fix a few typos (#695)\n\nThere are small typos in:\r\n- ArmNavigation/arm_obstacle_navigation/arm_obstacle_navigation.py\r\n- ArmNavigation/arm_obstacle_navigation/arm_obstacle_navigation_2.py\r\n- docs/modules/slam/FastSLAM1/FastSLAM1_main.rst\r\n- docs/modules/slam/ekf_slam/ekf_slam_main.rst\r\n\r\nFixes:\r\n- Should read `configuration` rather than `configuation`.\r\n- Should read `trajectory` rather than `tracjectory`.\r\n- Should read `prediction` rather than `prediciton`.\r\n\r\nSigned-off-by: Tim Gates ", "code": "def astar_torus(grid, start_node, goal_node):\n \n colors = ['white', 'black', 'red', 'pink', 'yellow', 'green', 'orange']\n levels = [0, 1, 2, 3, 4, 5, 6, 7]\n cmap, norm = from_levels_and_colors(levels, colors)\n\n grid[start_node] = 4\n grid[goal_node] = 5\n\n parent_map = [[() for _ in range(M)] for _ in range(M)]\n\n heuristic_map = calc_heuristic_map(M, goal_node)\n\n explored_heuristic_map 
= np.full((M, M), np.inf)\n distance_map = np.full((M, M), np.inf)\n explored_heuristic_map[start_node] = heuristic_map[start_node]\n distance_map[start_node] = 0\n while True:\n grid[start_node] = 4\n grid[goal_node] = 5\n\n current_node = np.unravel_index(\n np.argmin(explored_heuristic_map, axis=None), explored_heuristic_map.shape)\n min_distance = np.min(explored_heuristic_map)\n if (current_node == goal_node) or np.isinf(min_distance):\n break\n\n grid[current_node] = 2\n explored_heuristic_map[current_node] = np.inf\n\n i, j = current_node[0], current_node[1]\n\n neighbors = find_neighbors(i, j)\n\n for neighbor in neighbors:\n if grid[neighbor] == 0 or grid[neighbor] == 5:\n distance_map[neighbor] = distance_map[current_node] + 1\n explored_heuristic_map[neighbor] = heuristic_map[neighbor]\n parent_map[neighbor[0]][neighbor[1]] = current_node\n grid[neighbor] = 3\n\n if np.isinf(explored_heuristic_map[goal_node]):\n route = []\n print(\"No route found.\")\n else:\n route = [goal_node]\n while parent_map[route[0][0]][route[0][1]] != ():\n route.insert(0, parent_map[route[0][0]][route[0][1]])\n\n print(\"The route found covers %d grid cells.\" % len(route))\n for i in range(1, len(route)):\n grid[route[i]] = 6\n plt.cla()\n # for stopping simulation with the esc key.\n plt.gcf().canvas.mpl_connect('key_release_event',\n lambda event: [exit(0) if event.key == 'escape' else None])\n plt.imshow(grid, cmap=cmap, norm=norm, interpolation=None)\n plt.show()\n plt.pause(1e-2)\n\n return route\n\n", "url": "https://github.com/AtsushiSakai/PythonRobotics.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 552, "n_words": 192, "vocab_size": 134, "complexity": 13, "nloc": 47, "token_counts": 475, "n_ast_nodes": 721, "n_identifiers": 49, "random_cut": "def astar_torus(grid, start_node, goal_node):\n \n colors = ['white', 'black', 'red', 'pink', 'yellow', 'green', 'orange']\n levels = [0, 1, 2, 3, 4, 5, 6, 7]\n cmap, norm = from_levels_and_colors(levels, colors)\n\n grid[start_node] = 4\n grid[goal_node] = 5\n\n parent_map = [[() for _ in range(M)] for _ in range(M)]\n\n heuristic_map = calc_heuristic_map(M, goal_node)\n\n explored_heuristic_map = np.full((M, M), np.inf)\n distance_map = np.full((M, M), np.inf)\n explored_heuristic_map[start_node] = heuristic_map[start_node]\n distance_map[start_node] = 0\n while True:\n grid[start_node] = 4\n grid[goal_node] = 5\n\n current_node = np.unravel_index(\n np.argmin(explored_heuristic_map, axis=None), explored_heuristic_map.shape)\n min_distance = np.min(explored_heuristic_map)\n if (current_node == goal_node) or np.isinf(min_distance):\n break\n\n grid[current_node] = 2\n explored_heuristic_map[current_node] = np.inf\n\n i, j = current_node[0], current_node[1]\n\n neighbors = find_neighbors(i, j)\n\n for neighbor in neighbors:\n if grid[neighbor] == 0 or grid[neighbor] == 5:\n distance_map[neighbor] = distance_map[current_node] + 1\n explored_heuristic_map[neighbor] = heuristic_map[neighbor]\n parent_map[neighbor[0]][neighbo", "d_id": 2940, "documentation": { "docstring": "\n Finds a path between an initial and goal joint configuration using\n the A* Algorithm on a tororiadal grid.\n\n Args:\n grid: An occupancy grid (ndarray)\n start_node: Initial joint configuration (tuple)\n goal_node: Goal joint configuration (tuple)\n\n Returns:\n Obstacle-free route in joint space from start_node to goal_node\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 88, "language": "en" } }, { "id": 257234, 
"commit_id": "f8e02310bf0dfbd1ab79a1c3c73434e0aeba4f4b", "repo": "haystack", "path": "haystack/pipelines/base.py", "file_name": "base.py", "fun_name": "root_node", "commit_message": "Validate YAML files without loading the nodes (#2438)\n\n* Remove BasePipeline and make a module for RayPipeline\r\n\r\n* Can load pipelines from yaml, plenty of issues left\r\n\r\n* Extract graph validation logic into _add_node_to_pipeline_graph & refactor load_from_config and add_node to use it\r\n\r\n* Fix pipeline tests\r\n\r\n* Move some tests out of test_pipeline.py and create MockDenseRetriever\r\n\r\n* myoy and pylint (silencing too-many-public-methods)\r\n\r\n* Fix issue found in some yaml files and in schema files\r\n\r\n* Fix paths to YAML and fix some typos in Ray\r\n\r\n* Fix eval tests\r\n\r\n* Simplify MockDenseRetriever\r\n\r\n* Fix Ray test\r\n\r\n* Accidentally pushed merge coinflict, fixed\r\n\r\n* Typo in schemas\r\n\r\n* Typo in _json_schema.py\r\n\r\n* Slightly reduce noisyness of version validation warnings\r\n\r\n* Fix version logs tests\r\n\r\n* Fix version logs tests again\r\n\r\n* remove seemingly unused file\r\n\r\n* Add check and test to avoid adding the same node to the pipeline twice\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Revert config to pipeline_config\r\n\r\n* Remo0ve unused import\r\n\r\n* Complete reverting to pipeline_config\r\n\r\n* Some more stray config=\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Feedback\r\n\r\n* Move back other_nodes tests into pipeline tests temporarily\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fixing tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fixing ray and standard pipeline tests\r\n\r\n* Rename colliding load() methods in dense retrievers and faiss\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix mypy on ray.py as well\r\n\r\n* Add check for no root node\r\n\r\n* Fix tests to use load_from_directory and load_index\r\n\r\n* Try to workaround the disabled add_node of RayPipeline\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix Ray test\r\n\r\n* Fix FAISS tests\r\n\r\n* Relax class check in _add_node_to_pipeline_graph\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Try to fix mypy in ray.py\r\n\r\n* unused import\r\n\r\n* Try another fix for Ray\r\n\r\n* Fix connector tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix ray\r\n\r\n* Update Documentation & Code Style\r\n\r\n* use BaseComponent.load() in pipelines/base.py\r\n\r\n* another round of feedback\r\n\r\n* stray BaseComponent.load()\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix FAISS tests too\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>\r\nCo-authored-by: tstadel <60758086+tstadel@users.noreply.github.com>", "code": "def root_node(self) -> Optional[str]:\n \n if len(self.graph.nodes) < 1:\n return None\n return list(self.graph.nodes)[0] # List conversion is required, see networkx docs\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 37, "n_ast_nodes": 61, "n_identifiers": 8, "random_cut": "def root_node(self) -> Optional[str]:\n \n if len(self.graph.nodes) < 1:\n retur", "d_id": 75049, "documentation": { "docstring": "\n Returns the root node of the pipeline's graph.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 23, "language": "en" } }, { "id": 197989, "commit_id": 
"ea7fed2718f07bac46d4e154bd4e7ec31a4289e7", "repo": "sympy", "path": "sympy/core/add.py", "file_name": "add.py", "fun_name": "as_coefficients_dict", "commit_message": "22531: as_coefficients_dict accepts symbols", "code": "def as_coefficients_dict(self, *syms):\n \n if not syms:\n d = defaultdict(list)\n for ai in self.args:\n c, m = ai.as_coeff_Mul()\n d[m].append(c)\n for k, v in d.items():\n if len(v) == 1:\n d[k] = v[0]\n else:\n d[k] = Add(*v)\n di = defaultdict(int)\n di.update(d)\n return di\n else:\n d = defaultdict(list)\n ind, dep = self.as_independent(*syms, as_Add=True)\n for i in Add.make_args(dep):\n c, x = i.as_independent(*syms, as_Add=False)\n d[x].append(c)\n d = {k: Add(*d[k]) for k in d}\n d.update({S.One: ind})\n return d\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 352, "n_words": 71, "vocab_size": 47, "complexity": 7, "nloc": 23, "token_counts": 187, "n_ast_nodes": 297, "n_identifiers": 29, "random_cut": "def as_coefficients_dict(self, *syms):\n \n i", "d_id": 48760, "documentation": { "docstring": "Return a dictionary mapping terms to their Rational coefficient.\n Since the dictionary is a defaultdict, inquiries about terms which\n were not present will return a coefficient of 0. If an expression is\n not an Add it is considered to have a single term.\n\n If symbols `syms` are provided, any multiplicative terms\n independent of them will be considered a coefficient and a\n regular dictionary of syms-dependent generators as keys and\n their corresponding coefficients as values will be returned.\n\n Examples\n ========\n\n >>> from sympy import exp\n >>> from sympy.abc import a, x\n >>> (3*x + a*x + 4).as_coefficients_dict()\n {1: 4, x: 3, a*x: 1}\n >>> _[a]\n 0\n >>> (3*a*x).as_coefficients_dict()\n {a*x: 3}\n\n >>> (3*exp(x)*x + a/x + 2).as_coefficients_dict(x)\n {1: 2, 1/x: a, x*exp(x): 3}\n ", "n_words": 121, "vocab_size": 83, "n_whitespaces": 261, "language": "en" } }, { "id": 83227, "commit_id": "b0ce4f1bce8031881addecb1e86073483517f392", "repo": "zulip", "path": "zerver/lib/test_classes.py", "file_name": "test_classes.py", "fun_name": "verify_emoji_code_foreign_keys", "commit_message": "docs: Fix many spelling mistakes.\n\nSigned-off-by: Anders Kaseorg ", "code": "def verify_emoji_code_foreign_keys(self) -> None:\n \n dct = {}\n\n for row in RealmEmoji.objects.all():\n dct[row.id] = row\n\n if not dct:\n raise AssertionError(\"test needs RealmEmoji rows\")\n\n count = 0\n for row in Reaction.objects.filter(reaction_type=Reaction.REALM_EMOJI):\n realm_emoji_id = int(row.emoji_code)\n assert realm_emoji_id in dct\n self.assertEqual(dct[realm_emoji_id].name, row.emoji_name)\n self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id)\n count += 1\n\n for row in UserStatus.objects.filter(reaction_type=UserStatus.REALM_EMOJI):\n realm_emoji_id = int(row.emoji_code)\n assert realm_emoji_id in dct\n self.assertEqual(dct[realm_emoji_id].name, row.emoji_name)\n self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id)\n count += 1\n\n if count == 0:\n raise AssertionError(\"test is meaningless without any pertinent rows\")\n\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 272, "n_words": 73, "vocab_size": 40, "complexity": 6, "nloc": 29, "token_counts": 179, "n_ast_nodes": 282, "n_identifiers": 23, "random_cut": "def 
verify_emoji_code_foreign_keys(self) -> None:\n \n dct = {}\n\n for row in RealmEmoji.objects.all():\n dct[row.id] = row\n\n if not dct:\n raise AssertionError(\"test needs RealmEmoji rows\")\n\n count = 0\n for row in Reaction.objects.filter(reaction_type=Reaction.REALM_EMOJI):\n realm_emoji_id = int(row.emoji_code)\n assert realm_emoji_id in dct\n self.assertEqual(dct[realm_emoji_id].name, row.emoji_name)\n self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id)\n count += 1\n\n for row in UserStatus.objects.filter(reaction_type=UserStatus.RE", "d_id": 17632, "documentation": { "docstring": "\n DB tables that refer to RealmEmoji use int(emoji_code) as the\n foreign key. Those tables tend to de-normalize emoji_name due\n to our inheritance-based setup. This helper makes sure those\n invariants are intact, which is particularly tricky during\n the import/export process (or during conversions from things\n like Slack/RocketChat/MatterMost/etc.).\n ", "n_words": 46, "vocab_size": 41, "n_whitespaces": 96, "language": "en" } }, { "id": 65988, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/erpnext_integrations/doctype/mpesa_settings/mpesa_settings.py", "file_name": "mpesa_settings.py", "fun_name": "format_string_to_json", "commit_message": "style: format code with black", "code": "def format_string_to_json(balance_info):\n\tWorking Account|KES|481000.00|481000.00|0.00|0.00\n\tbalance_dict = frappe._dict()\n\tfor account_info in balance_info.split(\"&\"):\n\t\taccount_info = account_info.split(\"|\")\n\t\tbalance_dict[account_info[0]] = dict(\n\t\t\tcurrent_balance=fmt_money(account_info[2], currency=\"KES\"),\n\t\t\tavailable_balance=fmt_money(account_info[3], currency=\"KES\"),\n\t\t\treserved_balance=fmt_money(account_info[4], currency=\"KES\"),\n\t\t\tuncleared_balance=fmt_money(account_info[5], currency=\"KES\"),\n\t\t)\n\treturn dumps(balance_dict)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 16, "n_words": 28, "vocab_size": 22, "complexity": 2, "nloc": 11, "token_counts": 103, "n_ast_nodes": 166, "n_identifiers": 15, "random_cut": "def format_string_to_json(balance_info):\n\tWorking Account|KES|481000.00|481000.00|0.00|0.00\n\tbalance_dict = frappe._dict()\n\tfor account_info in balance_info.split(\"&\"):\n\t\taccount_info = account_info.split(\"|\")\n\t\tbalance_dict[account_info[0]] = dict(\n\t\t\tcurrent_balance=fmt_money(account_info[2], currency=\"KES\"),\n\t\t\tavailable_balance=fmt_money(account_info[3], currency=\"KES\"),\n\t\t\treserved_balance=fmt_money(account_info[4], currency=\"KES\"),\n\t\t\tuncleared_balance=fmt_money(account_info[5], currency=\"KES\"),\n\t\t)\n\treturn dumps(balance_dict)\n\n", "d_id": 14080, "documentation": { "docstring": "\n\tFormat string to json.\n\n\te.g: \n\t=> {'Working Account': {'current_balance': '481000.00',\n\t 'available_balance': '481000.00',\n\t 'reserved_balance': '0.00',\n\t 'uncleared_balance': '0.00'}}\n\t", "n_words": 16, "vocab_size": 15, "n_whitespaces": 35, "language": "en" } }, { "id": 170943, "commit_id": "d13c9e034ce8a1d738766c4b1cec80c76f5523be", "repo": "pandas", "path": "pandas/io/xml.py", "file_name": "xml.py", "fun_name": "_validate_path", "commit_message": "STYLE: fix pylint: no-else-raise (#49520)\n\n* fix pylint: no-else-raise\r\n\r\n* fix possible imbalanced tuple unpacking warning\r\n\r\nCo-authored-by: carlotta ", "code": "def _validate_path(self) -> 
list[Any]:\n \n\n msg = (\n \"xpath does not return any nodes or attributes. \"\n \"Be sure to specify in `xpath` the parent nodes of \"\n \"children and attributes to parse. \"\n \"If document uses namespaces denoted with \"\n \"xmlns, be sure to define namespaces and \"\n \"use them in xpath.\"\n )\n try:\n elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces)\n children = [ch for el in elems for ch in el.findall(\"*\")]\n attrs = {k: v for el in elems for k, v in el.attrib.items()}\n\n if elems is None:\n raise ValueError(msg)\n\n if elems is not None:\n if self.elems_only and children == []:\n raise ValueError(msg)\n if self.attrs_only and attrs == {}:\n raise ValueError(msg)\n if children == [] and attrs == {}:\n raise ValueError(msg)\n\n except (KeyError, SyntaxError):\n raise SyntaxError(\n \"You have used an incorrect or unsupported XPath \"\n \"expression for etree library or you used an \"\n \"undeclared namespace prefix.\"\n )\n\n return elems\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 495, "n_words": 148, "vocab_size": 86, "complexity": 14, "nloc": 35, "token_counts": 160, "n_ast_nodes": 268, "n_identifiers": 23, "random_cut": "def _validate_path(self) -> list[Any]:\n \n\n msg = (\n \"xpath does not return any nodes or attributes. \"\n \"Be sure to specify in `xpath` the parent nodes of", "d_id": 40626, "documentation": { "docstring": "\n Notes\n -----\n `etree` supports limited XPath. If user attempts a more complex\n expression syntax error will raise.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 53, "language": "en" } }, { "id": 153058, "commit_id": "58bbcc37477866d19c8b092a0e1974a4f0baa586", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py", "file_name": "partition.py", "fun_name": "mask", "commit_message": "REFACTOR-#2656: Update modin to fit algebra (code only) (#3717)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: Rehan Durrani ", "code": "def mask(self, row_labels, col_labels):\n \n new_obj = super().mask(row_labels, col_labels)\n if isinstance(row_labels, slice) and isinstance(\n self._length_cache, ObjectIDType\n ):\n new_obj._length_cache = compute_sliced_len.remote(\n row_labels, self._length_cache\n )\n if isinstance(col_labels, slice) and isinstance(\n self._width_cache, ObjectIDType\n ):\n new_obj._width_cache = compute_sliced_len.remote(\n col_labels, self._width_cache\n )\n return new_obj\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 183, "n_words": 38, "vocab_size": 26, "complexity": 5, "nloc": 15, "token_counts": 86, "n_ast_nodes": 131, "n_identifiers": 13, "random_cut": "def mask(self, row_labels, col_labels):\n \n new_obj = ", "d_id": 35242, "documentation": { "docstring": "\n Lazily create a mask that extracts the indices provided.\n\n Parameters\n ----------\n row_labels : list-like, slice or label\n The row labels for the rows to extract.\n col_labels : list-like, slice or label\n The column labels for the columns to extract.\n\n Returns\n -------\n PandasOnRayDataframePartition\n A new ``PandasOnRayDataframePartition`` object.\n ", "n_words": 46, "vocab_size": 34, "n_whitespaces": 143, "language": "en" } }, { "id": 51540, "commit_id": 
"7cd67aba38c19a835c3229d9b4be21798c5c8673", "repo": "PaddleHub", "path": "modules/image/classification/efficientnetb0_imagenet/processor.py", "file_name": "processor.py", "fun_name": "postprocess", "commit_message": "update efficientnetb0_imagenet (#2041)\n\n* update efficientnetb0_imagenet\r\n\r\n* remove unused print", "code": "def postprocess(data_out, label_list, top_k):\n \n output = []\n for result in data_out:\n result_i = softmax(result)\n output_i = {}\n indexs = np.argsort(result_i)[::-1][0:top_k]\n for index in indexs:\n label = label_list[index].split(',')[0]\n output_i[label] = float(result_i[index])\n output.append(output_i)\n return output\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 102, "n_words": 33, "vocab_size": 25, "complexity": 3, "nloc": 11, "token_counts": 86, "n_ast_nodes": 138, "n_identifiers": 17, "random_cut": "def postprocess(data_out, label_list, top_k):\n \n output = []\n for result in data_out:\n result_i = softmax(result)\n output_i = {}\n indexs = np.argsort(result_i)[::-1][0:top_k]\n for index in indexs:\n label = label_list[index].split(',')[0]\n output_i[label] = float(result_i[index])\n output.append(output_i)\n return output\n", "d_id": 10347, "documentation": { "docstring": "\n Postprocess output of network, one image at a time.\n\n Args:\n data_out (numpy.ndarray): output data of network.\n label_list (list): list of label.\n top_k (int): Return top k results.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 58, "language": "en" } }, { "id": 13415, "commit_id": "b44d767f22bd862cdb75926ba388c14f5db0323c", "repo": "jina", "path": "jina/serve/executors/__init__.py", "file_name": "__init__.py", "fun_name": "requests", "commit_message": "fix: fix bug inheritance, requests nested dict (#5380)", "code": "def requests(self):\n \n if hasattr(self, '_requests'):\n return self._requests\n else:\n if not hasattr(self, 'requests_by_class'):\n self.requests_by_class = {}\n if self.__class__.__name__ not in self.requests_by_class:\n self.requests_by_class[self.__class__.__name__] = {}\n # we need to copy so that different instances with different (requests) in input do not disturb one another\n self._requests = copy.copy(self.requests_by_class[self.__class__.__name__])\n return self._requests\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 164, "n_words": 47, "vocab_size": 34, "complexity": 4, "nloc": 10, "token_counts": 83, "n_ast_nodes": 137, "n_identifiers": 8, "random_cut": "def requests(self):\n \n if hasattr(self, '_requests'):\n return self._requests\n else:\n if not hasattr(self, 'requests_by_class'):\n ", "d_id": 2635, "documentation": { "docstring": "\n Get the request dictionary corresponding to this specific class\n\n :return: Returns the requests corresponding to the specific Executor instance class\n ", "n_words": 20, "vocab_size": 14, "n_whitespaces": 42, "language": "en" } }, { "id": 33860, "commit_id": "8c2618e6aac3473da7757fb230690ffd4aea4c32", "repo": "transformers", "path": "src/transformers/pipelines/text2text_generation.py", "file_name": "text2text_generation.py", "fun_name": "__call__", "commit_message": "Fixing t2t pipelines lists outputs. 
(#15008)\n\nBackward compatibility broken in\r\nhttps://github.com/huggingface/transformers/pull/14988", "code": "def __call__(self, *args, **kwargs):\n r\n\n result = super().__call__(*args, **kwargs)\n if isinstance(args[0], list) and all(isinstance(el, str) for el in args[0]):\n return [res[0] for res in result]\n return result\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 27, "vocab_size": 23, "complexity": 5, "nloc": 32, "token_counts": 68, "n_ast_nodes": 102, "n_identifiers": 12, "random_cut": "def __call__(self, *args, **kwargs):\n r\n\n result = sup", "d_id": 6170, "documentation": { "docstring": "\n Generate the output text(s) using text(s) given as inputs.\n\n Args:\n args (`str` or `List[str]`):\n Input text for the encoder.\n return_tensors (`bool`, *optional*, defaults to `False`):\n Whether or not to include the tensors of predictions (as token indices) in the outputs.\n return_text (`bool`, *optional*, defaults to `True`):\n Whether or not to include the decoded texts in the outputs.\n clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):\n Whether or not to clean up the potential extra spaces in the text output.\n truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):\n The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`\n (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's\n max_length instead of throwing an error down the line.\n generate_kwargs:\n Additional keyword arguments to pass along to the generate method of the model (see the generate method\n corresponding to your framework [here](./model#generative-models)).\n\n Return:\n A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:\n\n - **generated_text** (`str`, present when `return_text=True`) -- The generated text.\n - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token\n ids of the generated text.\n ", "n_words": 188, "vocab_size": 114, "n_whitespaces": 464, "language": "en" } }, { "id": 22145, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "rewind_body", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def rewind_body(prepared_request):\n \n body_seek = getattr(prepared_request.body, \"seek\", None)\n if body_seek is not None and isinstance(\n prepared_request._body_position, integer_types\n ):\n try:\n body_seek(prepared_request._body_position)\n except OSError:\n raise UnrewindableBodyError(\n \"An error occurred when rewinding request body for redirect.\"\n )\n else:\n raise UnrewindableBodyError(\"Unable to rewind request body for redirect.\")\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 133, "n_words": 42, "vocab_size": 37, "complexity": 4, "nloc": 13, "token_counts": 56, "n_ast_nodes": 97, "n_identifiers": 10, "random_cut": "def rewind_body(prepared_request):\n \n body_seek = getattr(prepared_request.body, \"seek\", None)\n if body_seek is not None and isinstance(\n prepared_request._body_p", "d_id": 4217, "documentation": { "docstring": "Move file pointer back to its recorded starting position\n so it can be read again on redirect.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 23, "language": "en" } }, { "id": 323181, "commit_id": "44a290e94d1becd1f09fddc3d873f9e19c9d6919", "repo": "PaddleNLP", "path": "paddlenlp/trainer/utils/helper.py", "file_name": "helper.py", "fun_name": "nested_concat", "commit_message": "[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)\n\n* add some datasets for finetune.\r\n\r\n* support fine tune for all tastks.\r\n\r\n* add trainer prototype.\r\n\r\n* init verison for paddlenlp trainer.\r\n\r\n* refine trainer.\r\n\r\n* update for some details.\r\n\r\n* support multi-cards training evaluation.\r\n\r\n* support load from ckpt.\r\n\r\n* support for export inference model.\r\n\r\n* first version of trainer.\r\n\r\n* seq cls support clue.\r\n\r\n* trainer support for token classification and question answersing tasks.\r\n\r\n* fix as reviews.\r\n\r\nCo-authored-by: Zeyu Chen ", "code": "def nested_concat(tensors, new_tensors, padding_index=-100):\n \n assert type(tensors) == type(\n new_tensors\n ), f\"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}.\"\n if isinstance(tensors, (list, tuple)):\n return type(tensors)(nested_concat(\n t, n, padding_index=padding_index)\n for t, n in zip(tensors, new_tensors))\n elif isinstance(tensors, paddle.Tensor):\n return paddle_pad_and_concatenate(\n tensors, new_tensors, padding_index=padding_index)\n elif isinstance(tensors, np.ndarray):\n return numpy_pad_and_concatenate(\n tensors, new_tensors, padding_index=padding_index)\n else:\n raise TypeError(\n f\"Unsupported type for concatenation: got {type(tensors)}\")\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 192, "n_words": 64, "vocab_size": 50, "complexity": 5, "nloc": 17, "token_counts": 116, "n_ast_nodes": 200, "n_identifiers": 18, "random_cut": "def nested_concat(tensors, new_tensors, padding_index=-100):\n \n assert type(tensors) == type(\n new_tensors\n ), f\"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}.\"\n if isinstance(tensors, (list, tuple)):\n return type(tensors)(nested_concat(\n t, n, padding_index=padding_index)\n ", "d_id": 118400, "documentation": { "docstring": "\n Concat the `new_tensors` to 
`tensors` on the first dim and pad them on the second if needed. Works for tensors or\n nested list/tuples of tensors.\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 35, "language": "en" } }, { "id": 53269, "commit_id": "23365cf7727c45f38ad983d610ffec5c15ceca21", "repo": "prefect", "path": "src/prefect/cli/orion.py", "file_name": "orion.py", "fun_name": "kubernetes_manifest", "commit_message": "Add kubernetes manifest commands", "code": "def kubernetes_manifest():\n \n\n template = Template(\n (prefect.__module_path__ / \"cli\" / \"templates\" / \"kubernetes.yaml\").read_text()\n )\n manifest = template.substitute(\n {\n \"image_name\": get_prefect_image_name(),\n }\n )\n print(manifest)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 72, "n_words": 22, "vocab_size": 18, "complexity": 1, "nloc": 10, "token_counts": 44, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def kubernetes_manifest():\n \n\n tem", "d_id": 10764, "documentation": { "docstring": "\n Generates a kubernetes manifest for to deploy Orion to a cluster.\n\n Example:\n $ prefect orion kubernetes-manifest | kubectl apply -f -\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 38, "language": "en" } }, { "id": 86690, "commit_id": "ceee9dfd8d6fed70d34546e7b46ebb7bf1d49745", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_dynamic_sampling.py", "file_name": "test_project_dynamic_sampling.py", "fun_name": "test_queries_when_requested_project_is_head_of_trace", "commit_message": "feat(dynamic-sampling): Improve empty transaction breakdown message [TET-338] (#39539)\n\nThis PR add new attribute parentProjectBreakdown to\r\n/api/0/projects///dynamic-sampling/distribution/\r\napi:\r\n```\r\n{\r\n \"projectBreakdown\": null,\r\n \"sampleSize\": 0,\r\n \"startTimestamp\": null,\r\n \"endTimestamp\": null,\r\n \"parentProjectBreakdown\": [\r\n {\r\n \"projectId\": 1,\r\n \"percentage\": 0.9,\r\n \"project\": \"sentry\"\r\n },\r\n {\r\n \"projectId\": 2,\r\n \"percentage\": 0.1,\r\n \"project\": \"javascript\"\r\n }\r\n ]\r\n}\r\n```\r\n\r\nTODO:\r\n- [x] Update src/sentry/snuba/referrer.py\r\nhttps://github.com/getsentry/sentry/blob/0fbbf1626f86399b1ca4a2781d66ef96aac69de7/src/sentry/snuba/referrer.py#L208-L210\r\n- [x] Add missing tests\r\n\r\nCo-authored-by: Andrii Soldatenko \r\nCo-authored-by: ahmedetefy ", "code": "def test_queries_when_requested_project_is_head_of_trace(self, mock_query, mock_querybuilder):\n \n # Case A: Head of trace project\n self.login_as(self.user)\n heart = self.create_project(\n name=\"Heart\", slug=\"heart\", teams=[self.team], fire_project_created=True\n )\n mock_query.side_effect = [\n {\"data\": [{\"count()\": 1000}]},\n ]\n mock_querybuilder.side_effect = [\n {\n \"data\": [\n {\n \"trace\": \"6503ee33b7bc43aead1facaa625a5dba\",\n \"id\": \"6ddc83ee612b4e89b95b5278c8fd188f\",\n \"random_number() AS random_number\": 4255299100,\n \"is_root\": 1,\n },\n {\n \"trace\": \"6503ee33b7bc43aead1facaa625a5dba\",\n \"id\": \"0b127a578f8440c793f9ba1de595229f\",\n \"random_number() AS random_number\": 3976019453,\n \"is_root\": 1,\n },\n ]\n },\n {\n \"data\": [\n {\n \"project\": self.project.slug,\n \"project_id\": self.project.id,\n \"count\": 2,\n \"root_count\": 2,\n },\n {\n \"project\": heart.slug,\n \"project_id\": heart.id,\n \"count\": 1,\n \"root_count\": 0,\n },\n ]\n },\n ]\n end_time = timezone.now()\n start_time = end_time - timedelta(hours=1)\n query = 
\"environment:dev\"\n requested_sample_size = 2\n\n calls = self.generate_fetch_transactions_count_query(\n query, start_time, end_time, requested_sample_size\n )\n\n snuba_query_random_transactions = random_transactions_snuba_query(\n query, requested_sample_size, start_time, end_time, self.project\n )\n snuba_query_project_stats = project_stats_snuba_query(\n query,\n start_time,\n end_time,\n self.project,\n trace_ids=[\"6503ee33b7bc43aead1facaa625a5dba\"] * 2,\n )\n\n with Feature({\"organizations:server-side-sampling\": True}):\n response = self.client.get(\n f\"{self.endpoint}?sampleSize={requested_sample_size}&query={query}\"\n )\n assert response.status_code == 200\n assert mock_query.mock_calls == calls\n assert len(mock_querybuilder.call_args_list) == 2\n self.assert_mocked_query_calls(\n snuba_query_random_transactions, snuba_query_project_stats, mock_querybuilder\n )\n\n response_data = response.json()\n assert response_data[\"projectBreakdown\"] == [\n {\"project_id\": self.project.id, \"project\": self.project.slug, \"count()\": 2},\n {\"project_id\": heart.id, \"project\": heart.slug, \"count()\": 1},\n ]\n assert response_data[\"parentProjectBreakdown\"] == [\n {\"project\": self.project.slug, \"projectId\": self.project.id, \"percentage\": 1.0}\n ]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 1253, "n_words": 183, "vocab_size": 103, "complexity": 1, "nloc": 77, "token_counts": 384, "n_ast_nodes": 644, "n_identifiers": 43, "random_cut": "def test_queries_when_requested_project_is_head_of_trace(self, mock_query, mock_querybuilder):\n \n # Case A: Head of trace project\n self.login_as(self.user)\n heart = self.create_project(\n name=\"Heart\", slug=\"heart\", teams=[self.team], fire_project_created=True\n )\n mock_query.side_effect = [\n {\"data\": [{\"count()\": 1000}]},\n ]\n mock_querybuilder.side_effect = [\n {\n \"data\": [\n {\n \"trace\": \"6503ee33b7bc43aead1facaa625a5dba\",\n \"id\": \"6ddc83ee612b4e89b95b5278c8fd188f\",\n \"random_number() AS random_number\": 42", "d_id": 18149, "documentation": { "docstring": "\n Case A: Requesting for a project (bar) that is root but is a head of distributed traces\n Example of smart query response (DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_PROJECT_STATS):\n |---------+-------+------|\n | project | count | root |\n |---------+-------+------|\n | bar | 100 | 100 |\n | heart | 5 | 0 |\n |---------+-------+------|\n ", "n_words": 47, "vocab_size": 28, "n_whitespaces": 127, "language": "en" } }, { "id": 66180, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/leave_ledger_entry/leave_ledger_entry.py", "file_name": "leave_ledger_entry.py", "fun_name": "validate_leave_allocation_against_leave_application", "commit_message": "style: format code with black", "code": "def validate_leave_allocation_against_leave_application(ledger):\n\t\n\tleave_application_records = frappe.db.sql_list(\n\t\t,\n\t\t(ledger.employee, ledger.leave_type, ledger.from_date, ledger.to_date),\n\t)\n\n\tif leave_application_records:\n\t\tfrappe.throw(\n\t\t\t_(\"Leave allocation {0} is linked with the Leave Application {1}\").format(\n\t\t\t\tledger.transaction_name, \", \".join(leave_application_records)\n\t\t\t)\n\t\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 18, "n_words": 29, "vocab_size": 27, "complexity": 
2, "nloc": 20, "token_counts": 61, "n_ast_nodes": 100, "n_identifiers": 15, "random_cut": "def validate_leave_allocation_against_leave_application(ledger):\n\t\n\tleave_app", "d_id": 14129, "documentation": { "docstring": "Checks that leave allocation has no leave application against it\n\t\tSELECT transaction_name\n\t\tFROM `tabLeave Ledger Entry`\n\t\tWHERE\n\t\t\temployee=%s\n\t\t\tAND leave_type=%s\n\t\t\tAND transaction_type='Leave Application'\n\t\t\tAND from_date>=%s\n\t\t\tAND to_date<=%s\n\t", "n_words": 27, "vocab_size": 23, "n_whitespaces": 18, "language": "en" } }, { "id": 209346, "commit_id": "a738a0b375a5599187626c9a9b081f7c25392f69", "repo": "scapy", "path": "scapy/contrib/pnio_rpc.py", "file_name": "pnio_rpc.py", "fun_name": "dce_rpc_endianess", "commit_message": "MS-RPCE support (#3674)\n\n* Add DCE/RPC\r\n* Add tests to DCERPC5 / PNIO_RPC fixes\r\n* Support for NDR fields in DCERPC\r\n* Fully implement KRB5_GSS\r\n* Support also RFC4121", "code": "def dce_rpc_endianess(pkt):\n \n try:\n endianness = pkt.underlayer.endian\n except AttributeError:\n # handle the case where a PNIO class is\n # built without its DCE-RPC under-layer\n # i.e there is no endianness indication\n return \"!\"\n if endianness == 0: # big endian\n return \">\"\n elif endianness == 1: # little endian\n return \"<\"\n else:\n return \"!\"\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 129, "n_words": 53, "vocab_size": 39, "complexity": 4, "nloc": 11, "token_counts": 38, "n_ast_nodes": 78, "n_identifiers": 6, "random_cut": "def dce_rpc_endianess(pkt):\n \n try:\n endianness = pkt.underlayer.endian\n except AttributeError:\n # handle the case where a PN", "d_id": 52657, "documentation": { "docstring": "determine the symbol for the endianness of a the DCE/RPC", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 53590, "commit_id": "b25d9d283b714c719f363176d49892188c50dffd", "repo": "prefect", "path": "src/prefect/flow_runners.py", "file_name": "flow_runners.py", "fun_name": "_get_extra_hosts", "commit_message": "Add pattern for loading CLI defaults from settings\n\nAlso, renames ORION_HOST to API_URL and adds utils to `Settings` to retrieve things by the envar key", "code": "def _get_extra_hosts(self, docker_client) -> Dict[str, str]:\n \n if sys.platform == \"linux\" and (\n # Do not warn if the user has specified a host manually that does not use\n # a local address\n \"PREFECT_API_URL\" not in self.env\n or re.search(\n \".*(localhost)|(127.0.0.1)|(host.docker.internal).*\",\n self.env[\"PREFECT_API_URL\"],\n )\n ):\n user_version = packaging.version.parse(docker_client.version()[\"Version\"])\n required_version = packaging.version.parse(\"20.10.0\")\n\n if user_version < required_version:\n warnings.warn(\n \"`host.docker.internal` could not be automatically resolved to your \"\n \"local ip address. 
This feature is not supported on Docker Engine \"\n f\"v{user_version}, upgrade to v{required_version}+ if you \"\n \"encounter issues.\"\n )\n return {}\n else:\n # Compatibility for linux -- https://github.com/docker/cli/issues/2290\n # Only supported by Docker v20.10.0+ which is our minimum recommend version\n return {\"host.docker.internal\": \"host-gateway\"}\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 424, "n_words": 108, "vocab_size": 87, "complexity": 5, "nloc": 25, "token_counts": 99, "n_ast_nodes": 188, "n_identifiers": 17, "random_cut": "def _get_extra_hosts(self, docker_client) -> Dict[str, str]:\n \n if sys.platform == \"linux\" and (\n # Do not", "d_id": 10853, "documentation": { "docstring": "\n A host.docker.internal -> host-gateway mapping is necessary for communicating\n with the API on Linux machines. Docker Desktop on macOS will automatically\n already have this mapping.\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 54, "language": "en" } }, { "id": 215927, "commit_id": "a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857", "repo": "salt", "path": "tests/pytests/unit/modules/test_win_certutil.py", "file_name": "test_win_certutil.py", "fun_name": "test_del_store", "commit_message": "Add tests, fix state module", "code": "def test_del_store():\n \n with patch(\"salt.modules.win_certutil.get_cert_serial\") as cert_serial_mock:\n cmd_mock = MagicMock(\n return_value=(\n \"CertInfo\\r\\n\"\n \"================ Certificate 0 ================\\r\\n\"\n \"Serial Number: 180720d39cd2db3244ba037417241e90\\r\\n\"\n \"OtherStuff\"\n )\n )\n cache_mock = MagicMock(return_value=\"/tmp/cert.cer\")\n cert_serial_mock.return_value = \"ABCDEF\"\n with patch.dict(\n certutil.__salt__, {\"cmd.run\": cmd_mock, \"cp.cache_file\": cache_mock}\n ), patch(\"os.path.exists\", MagicMock(return_value=True)):\n certutil.del_store(\"salt://path/to/file\", \"TrustedPublisher\")\n cmd_mock.assert_called_once_with(\n 'certutil.exe -delstore TrustedPublisher \"ABCDEF\"'\n )\n cache_mock.assert_called_once_with(\"salt://path/to/file\", \"base\")\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 247, "n_words": 47, "vocab_size": 42, "complexity": 1, "nloc": 20, "token_counts": 93, "n_ast_nodes": 188, "n_identifiers": 12, "random_cut": "def test_del_store():\n \n with patch(\"salt.modules.win_certutil.get_cert_serial\") as cert_serial_mock:\n cmd_mock = MagicMock(\n return_value=(\n \"CertInfo\\r\\n\"\n \"================ Certificate 0 ================\\r\\n\"\n \"Serial Number: 180720d39cd2db3244ba037417241e90\\r\\n\"\n \"OtherStuff\"\n )\n )\n cache_mock = MagicMock(return_value=\"/tmp/cert.cer\")\n cert_serial_mock.return_value = \"ABCDEF\"\n with patch.dict(\n certutil.__salt__, {\"cmd.run\": cmd_mock, \"cp.cache_file\": cache_mock}\n ), patch(\"os.path.exists\", MagicMock(return_value=True)):\n certutil.del_store(\"salt://path/to/file\", \"TrustedPublisher\")\n cmd_mock.assert_called_once_with(\n 'certutil.exe -delstore TrustedPublisher \"ABCDEF\"'\n ", "d_id": 54254, "documentation": { "docstring": "\n Test removing a certificate to a specific store\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 15, "language": "en" } }, { "id": 320622, "commit_id": "9c4169c7b7d96a10012a72c70fc38c6154f7481f", "repo": "qutebrowser", "path": "tests/conftest.py", "file_name": "conftest.py", "fun_name": 
"_select_backend", "commit_message": "tests: Remove some unused imports", "code": "def _select_backend(config):\n \n backend_arg = config.getoption('--qute-backend')\n backend_env = os.environ.get('QUTE_TESTS_BACKEND')\n\n backend = backend_arg or backend_env or _auto_select_backend()\n\n # Fail early if selected backend is not available\n # pylint: disable=unused-import\n if backend == 'webkit':\n import PyQt5.QtWebKitWidgets\n elif backend == 'webengine':\n import PyQt5.QtWebEngineWidgets\n else:\n raise utils.Unreachable(backend)\n\n return backend\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 95, "n_words": 44, "vocab_size": 31, "complexity": 5, "nloc": 11, "token_counts": 62, "n_ast_nodes": 113, "n_identifiers": 15, "random_cut": "def _select_backend(config):\n \n backend_arg = config.getoption('--qute-backend')\n backend_env = os.", "d_id": 117233, "documentation": { "docstring": "Select the backend for running tests.\n\n The backend is auto-selected in the following manner:\n 1. Use QtWebKit if available\n 2. Otherwise use QtWebEngine as a fallback\n\n Auto-selection is overridden by either passing a backend via\n `--qute-backend=` or setting the environment variable\n `QUTE_TESTS_BACKEND=`.\n\n Args:\n config: pytest config\n\n Raises:\n ImportError if the selected backend is not available.\n\n Returns:\n The selected backend as a string (e.g. 'webkit').\n ", "n_words": 64, "vocab_size": 49, "n_whitespaces": 115, "language": "en" } }, { "id": 196342, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/logic/boolalg.py", "file_name": "boolalg.py", "fun_name": "equals", "commit_message": "Updated import locations", "code": "def equals(self, other):\n \n from sympy.logic.inference import satisfiable\n from sympy.core.relational import Relational\n\n if self.has(Relational) or other.has(Relational):\n raise NotImplementedError('handling of relationals')\n return self.atoms() == other.atoms() and \\\n not satisfiable(Not(Equivalent(self, other)))\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 85, "n_words": 28, "vocab_size": 26, "complexity": 4, "nloc": 7, "token_counts": 71, "n_ast_nodes": 113, "n_identifiers": 15, "random_cut": "def equals(self, other):\n \n from sympy.logic.inference import satisfiable\n from sympy.core.relational import Relational\n\n if self.has(Relational) or other.has(Relational):\n raise NotImplementedError('handling of relationals')\n return self.atoms() == other.atoms() and \\\n not satisfiable(No", "d_id": 47842, "documentation": { "docstring": "\n Returns True if the given formulas have the same truth table.\n For two formulas to be equal they must have the same literals.\n\n Examples\n ========\n\n >>> from sympy.abc import A, B, C\n >>> from sympy import And, Or, Not\n >>> (A >> B).equals(~B >> ~A)\n True\n >>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C)))\n False\n >>> Not(And(A, Not(A))).equals(Or(B, Not(B)))\n False\n\n ", "n_words": 58, "vocab_size": 42, "n_whitespaces": 150, "language": "en" } }, { "id": 67999, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/utils.py", "file_name": "utils.py", "fun_name": "get_latest_stock_qty", "commit_message": "style: format code with black", "code": "def get_latest_stock_qty(item_code, warehouse=None):\n\tvalues, 
condition = [item_code], \"\"\n\tif warehouse:\n\t\tlft, rgt, is_group = frappe.db.get_value(\"Warehouse\", warehouse, [\"lft\", \"rgt\", \"is_group\"])\n\n\t\tif is_group:\n\t\t\tvalues.extend([lft, rgt])\n\t\t\tcondition += \"and exists (\\\n\t\t\t\tselect name from `tabWarehouse` wh where wh.name = tabBin.warehouse\\\n\t\t\t\tand wh.lft >= %s and wh.rgt <= %s)\"\n\n\t\telse:\n\t\t\tvalues.append(warehouse)\n\t\t\tcondition += \" AND warehouse = %s\"\n\n\tactual_qty = frappe.db.sql(\n\t\t.format(\n\t\t\tcondition\n\t\t),\n\t\tvalues,\n\t)[0][0]\n\n\treturn actual_qty\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 45, "n_words": 64, "vocab_size": 52, "complexity": 3, "nloc": 20, "token_counts": 98, "n_ast_nodes": 165, "n_identifiers": 16, "random_cut": "def get_latest_stock_qty(item_code, warehouse=None):\n\tvalues, condition = [item_code], \"\"\n\tif warehouse:\n\t\tlft, rgt, is_group = frappe.db.get_value(\"Warehouse\", warehouse, [\"lft\", \"rgt\", \"is_group\"])\n\n\t\tif is_group:\n\t\t\tvalues.extend([lft, rgt])\n\t\t\tcondition += \"and exists (\\\n\t\t\t\tselect", "d_id": 14703, "documentation": { "docstring": "select sum(actual_qty) from tabBin\n\t\twhere item_code=%s {0}", "n_words": 7, "vocab_size": 7, "n_whitespaces": 5, "language": "en" } }, { "id": 259125, "commit_id": "67a3feed2fe4e82c1cc129c34b9e223b94a8d531", "repo": "scikit-learn", "path": "sklearn/kernel_approximation.py", "file_name": "kernel_approximation.py", "fun_name": "get_feature_names_out", "commit_message": "ENH Adds get_feature_names_out for AdditiveChi2Sampler (#22137)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def get_feature_names_out(self, input_features=None):\n \n input_features = _check_feature_names_in(\n self, input_features, generate_names=True\n )\n est_name = self.__class__.__name__.lower()\n\n names_list = [f\"{est_name}_{name}_sqrt\" for name in input_features]\n\n for j in range(1, self.sample_steps):\n cos_names = [f\"{est_name}_{name}_cos{j}\" for name in input_features]\n sin_names = [f\"{est_name}_{name}_sin{j}\" for name in input_features]\n names_list.extend(cos_names + sin_names)\n\n return np.asarray(names_list, dtype=object)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 138, "n_words": 45, "vocab_size": 31, "complexity": 5, "nloc": 11, "token_counts": 94, "n_ast_nodes": 176, "n_identifiers": 21, "random_cut": "def get_feature_names_out(self, input_features=None):\n \n input_features = _check_feature_names_in(\n self, input_features, generate_names=True\n )\n est_name = self.__class__.__nam", "d_id": 75584, "documentation": { "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Only used to validate feature names with the names seen in :meth:`fit`.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n ", "n_words": 39, "vocab_size": 32, "n_whitespaces": 110, "language": "en" } }, { "id": 75312, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_templatetags.py", "file_name": "test_templatetags.py", "fun_name": "test_render_valid_image_as_context_variable", "commit_message": "Reformat with 
black", "code": "def test_render_valid_image_as_context_variable(self):\n \n context = {\"image\": self.image, \"image_node\": \"fake value\"}\n node = ImageNode(Variable(\"image\"), \"original\", \"image_node\")\n\n rendered = node.render(context)\n\n self.assertEqual(rendered, \"\")\n self.assertIsInstance(context[\"image_node\"], Rendition)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 6, "token_counts": 59, "n_ast_nodes": 108, "n_identifiers": 12, "random_cut": "def test_render_valid_image_as_context_variable(self):\n \n context = {\"image\": self.image, \"image_node\": \"fake value\"}\n node = ImageNode(Variable(\"image\"), \"original\", \"image_node\")\n\n rendered = node.render(context)", "d_id": 16391, "documentation": { "docstring": "\n Tests that an ImageNode with a valid image and a context variable name\n renders an empty string and puts a rendition in the context variable\n ", "n_words": 25, "vocab_size": 19, "n_whitespaces": 47, "language": "en" } }, { "id": 195934, "commit_id": "4f34fcc3406452ace4a70d541064f2dfdcee9205", "repo": "sympy", "path": "sympy/polys/rootisolation.py", "file_name": "rootisolation.py", "fun_name": "dup_cauchy_lower_bound", "commit_message": "Add `dup_...` funcs for Cauchy bounds.", "code": "def dup_cauchy_lower_bound(f, K):\n \n g = dup_reverse(f)\n if len(g) < 2:\n raise PolynomialError('Polynomial has no non-zero roots.')\n if K.is_ZZ:\n K = K.get_field()\n b = dup_cauchy_upper_bound(g, K)\n return K.one / b\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 29, "vocab_size": 25, "complexity": 3, "nloc": 8, "token_counts": 53, "n_ast_nodes": 89, "n_identifiers": 12, "random_cut": "def dup_cauchy_lower_bound(f, K):\n \n g = dup_reverse(f)\n if len(g) < 2:\n raise PolynomialError('Polynomial has no non-zero roots.')\n if K.is_ZZ:\n K = K.get_field()\n b = dup_cauchy_upper_bound(g, K)\n return ", "d_id": 47479, "documentation": { "docstring": "Compute the Cauchy lower bound on the absolute value of all non-zero\n roots of f, real or complex.", "n_words": 18, "vocab_size": 16, "n_whitespaces": 23, "language": "en" } }, { "id": 154142, "commit_id": "4548012a6372b8ce79d7e07c9ae13fd7444a91c8", "repo": "modin", "path": "modin/core/io/column_stores/parquet_dispatcher.py", "file_name": "parquet_dispatcher.py", "fun_name": "call_deploy", "commit_message": "FIX-#4756: Correctly propagate `storage_options` in `read_parquet` (#4764)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Alexey Prutskov \r\nSigned-off-by: Karthik Velayutham ", "code": "def call_deploy(cls, fname, col_partitions, storage_options, **kwargs):\n \n from pyarrow.parquet import ParquetFile\n from modin.core.storage_formats.pandas.parsers import ParquetFileToRead\n\n # If we don't have any columns to read, we should just return an empty\n # set of references.\n if len(col_partitions) == 0:\n return []\n\n filesystem, parquet_files = cls.get_fsspec_files(fname, storage_options)\n\n row_groups_per_file = []\n num_row_groups = 0\n # Count up the total number of row groups across all files and\n # keep track of row groups per file to use later.\n for file in parquet_files:\n with filesystem.open(file) as f:\n row_groups = ParquetFile(f).num_row_groups\n row_groups_per_file.append(row_groups)\n num_row_groups += row_groups\n\n 
# step determines how many row groups are going to be in a partition\n step = compute_chunksize(\n num_row_groups,\n NPartitions.get(),\n min_block_size=1,\n )\n current_partition_size = 0\n file_index = 0\n partition_files = [] # 2D array - each element contains list of chunks to read\n row_groups_used_in_current_file = 0\n total_row_groups_added = 0\n # On each iteration, we add a chunk of one file. That will\n # take us either to the end of a partition, or to the end\n # of a file.\n while total_row_groups_added < num_row_groups:\n if current_partition_size == 0:\n partition_files.append([])\n partition_file = partition_files[-1]\n file_path = parquet_files[file_index]\n row_group_start = row_groups_used_in_current_file\n row_groups_left_in_file = (\n row_groups_per_file[file_index] - row_groups_used_in_current_file\n )\n row_groups_left_for_this_partition = step - current_partition_size\n if row_groups_left_for_this_partition <= row_groups_left_in_file:\n # File has at least what we need to finish partition\n # So finish this partition and start a new one.\n num_row_groups_to_add = row_groups_left_for_this_partition\n current_partition_size = 0\n else:\n # File doesn't have enough to complete this partition. Add\n # it into current partition and go to next file.\n num_row_groups_to_add = row_groups_left_in_file\n current_partition_size += num_row_groups_to_add\n if num_row_groups_to_add == row_groups_left_in_file:\n file_index += 1\n row_groups_used_in_current_file = 0\n else:\n row_groups_used_in_current_file += num_row_groups_to_add\n partition_file.append(\n ParquetFileToRead(\n file_path, row_group_start, row_group_start + num_row_groups_to_add\n )\n )\n total_row_groups_added += num_row_groups_to_add\n\n assert (\n total_row_groups_added == num_row_groups\n ), \"row groups added does not match total num of row groups across parquet files\"\n\n all_partitions = []\n for files_to_read in partition_files:\n all_partitions.append(\n [\n cls.deploy(\n cls.parse,\n files_for_parser=files_to_read,\n columns=cols,\n num_returns=3,\n storage_options=storage_options,\n **kwargs,\n )\n for cols in col_partitions\n ]\n )\n return all_partitions\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 1287, "n_words": 327, "vocab_size": 182, "complexity": 9, "nloc": 69, "token_counts": 287, "n_ast_nodes": 460, "n_identifiers": 50, "random_cut": "def call_deploy(cls, fname, col_partitions, storage_options, **kwargs):\n \n from pyarrow.parquet import ParquetFile\n from modin.core.storage_formats.pandas.parsers import ParquetFileToRead\n\n # If we don't have any columns to read, we should just return an empty\n # set of references.\n if len(col_partitions) == 0:\n return []\n\n filesystem, parquet_files = cls.get_fsspec_files(fname, storage_options)\n\n row_groups_per_file = []\n num_row_groups = 0\n # Count up the total number of row groups across all files and\n # keep track of row groups per file to use later.\n for file ", "d_id": 35807, "documentation": { "docstring": "\n Deploy remote tasks to the workers with passed parameters.\n\n Parameters\n ----------\n fname : str, path object or file-like object\n Name of the file to read.\n col_partitions : list\n List of arrays with columns names that should be read\n by each partition.\n storage_options : dict\n Parameters for specific storage engine.\n **kwargs : dict\n Parameters of deploying read_* function.\n\n Returns\n -------\n List\n Array with 
references to the task deploy result for each partition.\n ", "n_words": 71, "vocab_size": 52, "n_whitespaces": 215, "language": "en" } }, { "id": 153459, "commit_id": "2d40797b2b700d81d4db4a4cd023d563edf6431f", "repo": "modin", "path": "modin/db_conn.py", "file_name": "db_conn.py", "fun_name": "get_connection", "commit_message": "FEAT-#979: Enable reading from SQL server. (#4279)\n\nCo-authored-by: eavidan \r\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: mvashishtha ", "code": "def get_connection(self):\n \n if self.lib == _PSYCOPG_LIB_NAME:\n import psycopg2\n\n return psycopg2.connect(*self.args, **self.kwargs)\n if self.lib == _SQLALCHEMY_LIB_NAME:\n from sqlalchemy import create_engine\n\n return create_engine(*self.args, **self.kwargs).connect()\n\n raise UnsupportedDatabaseException(\"Unsupported database library\")\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 98, "n_words": 26, "vocab_size": 21, "complexity": 3, "nloc": 8, "token_counts": 63, "n_ast_nodes": 106, "n_identifiers": 12, "random_cut": "def get_connection(self):\n \n if self.lib == _PSYCOPG_LIB_NAME:\n import psycopg2\n\n return psycopg2.connect(*self.args, **self.kwargs)\n if self.lib == _SQLALCHEMY_LIB_NAME:\n from sqlalchemy import create_engine\n\n ", "d_id": 35407, "documentation": { "docstring": "\n Make the database connection and get it.\n\n For psycopg2, pass all arguments to psycopg2.connect() and return the\n result of psycopg2.connect(). For sqlalchemy, pass all arguments to\n sqlalchemy.create_engine() and return the result of calling connect()\n on the engine.\n\n Returns\n -------\n Any\n The open database connection.\n ", "n_words": 44, "vocab_size": 30, "n_whitespaces": 119, "language": "en" } }, { "id": 269459, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "argmax", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def argmax(x, axis=-1):\n \n return tf.argmax(x, axis)\n\n\n@keras_export(\"keras.backend.argmin\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.argmin\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 12, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 62, "n_identifiers": 10, "random_cut": "def argmax(x, axis=-1):\n \n return tf.argmax(x, axis)\n\n\n@keras_export(\"keras.backend.argmin\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "d_id": 80095, "documentation": { "docstring": "Returns the index of the maximum value along an axis.\n\n Args:\n x: Tensor or variable.\n axis: axis along which to perform the reduction.\n\n Returns:\n A tensor.\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 56, "language": "en" } }, { "id": 199955, "commit_id": "f68e8de4252200cfc74b9433d00f77c4510ac68d", "repo": "sympy", "path": "sympy/core/facts.py", "file_name": "facts.py", "fun_name": "print_rules", "commit_message": "refactor", "code": "def print_rules(self) -> Iterator[str]:\n \n yield from self._defined_facts_lines()\n yield ''\n yield ''\n yield from self._full_implications_lines()\n yield ''\n yield ''\n yield 
from self._prereq_lines()\n yield ''\n yield ''\n yield from self._beta_rules_lines()\n yield ''\n yield ''\n yield \"generated_assumptions = {'defined_facts': defined_facts, 'full_implications': full_implications,\"\n yield \" 'prereq': prereq, 'beta_rules': beta_rules, 'beta_triggers': beta_triggers}\"\n yield ''\n yield ''\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 184, "n_words": 51, "vocab_size": 24, "complexity": 1, "nloc": 18, "token_counts": 63, "n_ast_nodes": 140, "n_identifiers": 8, "random_cut": "def print_rules(self) -> Iterator[str]:\n \n yield from self._defined_facts_lines()\n yield ''\n yield ''\n yield from self._full_implications_lines()\n yield ''\n yield ''\n yield from self._prereq_lines()\n yield ''\n yield ''\n yield from self._beta_rules_lines()\n yield ''\n yield ''\n yield \"generated_assumptions = {'defined_facts': defined_facts, 'full", "d_id": 49448, "documentation": { "docstring": " Returns a generator with lines to represent the facts and rules ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 222610, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/cmd.py", "file_name": "cmd.py", "fun_name": "ensure_string", "commit_message": "add python 3.10.4 for windows", "code": "def ensure_string(self, option, default=None):\n \n self._ensure_stringlike(option, \"string\", default)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 36, "n_identifiers": 5, "random_cut": "def ensure_string(self, option, default=None):\n \n self._ensure_stringlike(option, \"strin", "d_id": 56673, "documentation": { "docstring": "Ensure that 'option' is a string; if not defined, set it to\n 'default'.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 27, "language": "en" } }, { "id": 49815, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/nn.py", "file_name": "nn.py", "fun_name": "update_ema", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def update_ema(target_params, source_params, rate=0.99):\n \n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 27, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 3, "token_counts": 47, "n_ast_nodes": 70, "n_identifiers": 11, "random_cut": "def update_ema(target_params, source_params, rate=0.99):\n \n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1", "d_id": 9926, "documentation": { "docstring": "\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n ", "n_words": 38, "vocab_size": 27, "n_whitespaces": 57, "language": "en" } }, { "id": 21526, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": 
"pipenv/patched/notpip/_vendor/platformdirs/android.py", "file_name": "android.py", "fun_name": "_android_folder", "commit_message": "Vendor in pip 22.1.2", "code": "def _android_folder() -> str | None:\n \n try:\n # First try to get path to android app via pyjnius\n from jnius import autoclass\n\n Context = autoclass(\"android.content.Context\") # noqa: N806\n result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()\n except Exception:\n # if fails find an android folder looking path on the sys.path\n pattern = re.compile(r\"/data/(data|user/\\d+)/(.+)/files\")\n for path in sys.path:\n if pattern.match(path):\n result = path.split(\"/files\")[0]\n break\n else:\n result = None\n return result\n\n\n@lru_cache(maxsize=1)", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "@lru_cache(maxsize=1)", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 188, "n_words": 68, "vocab_size": 52, "complexity": 4, "nloc": 15, "token_counts": 86, "n_ast_nodes": 164, "n_identifiers": 19, "random_cut": "def _android_folder() -> str | None:\n \n try:\n # First try to get path to android app via pyjnius\n from jnius import autoclass\n\n ", "d_id": 3904, "documentation": { "docstring": ":return: base folder for the Android OS or None if cannot be found", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 209060, "commit_id": "e2fc7dddb40a7b80f2e65ad6593c0b10080019d0", "repo": "scapy", "path": "scapy/volatile.py", "file_name": "volatile.py", "fun_name": "de_bruijn", "commit_message": "Add CyclicPattern class for generation of payload data (#3508)\n\n* Add CyclicPattern class for generation of payload data\r\n\r\n* minor enhancment\r\n\r\n* fix python2\r\n\r\n* fix python2\r\n\r\n* use six\r\n\r\n* fix flake", "code": "def de_bruijn(charset, n, maxlen):\n # type: (str, int, int) -> str\n \n k = len(charset)\n a = [0] * k * n\n sequence = [] # type: List[str]\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 63, "n_words": 27, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 44, "n_ast_nodes": 50, "n_identifiers": 8, "random_cut": "def de_bruijn(charset, n, maxlen):\n # type: (str, int, int) -> str\n \n k = len(char", "d_id": 52587, "documentation": { "docstring": "\n Generate the De Bruijn Sequence up to `maxlen` characters\n for the charset `charset` and subsequences of length `n`.\n Algorithm modified from wikipedia\n https://en.wikipedia.org/wiki/De_Bruijn_sequence\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 59, "language": "en" } }, { "id": 101901, "commit_id": "dab823a3eb7a5257cb1e0818ee10ed234d3de97f", "repo": "faceswap", "path": "lib/gui/display_command.py", "file_name": "display_command.py", "fun_name": "_add_option_refresh", "commit_message": "Typing - lib.gui.display_command", "code": "def _add_option_refresh(self) -> None:\n \n logger.debug(\"Adding refresh option\")\n btnrefresh = ttk.Button(self.optsframe,\n image=get_images().icons[\"reload\"],\n command=lambda x=\"update\": preview_trigger().set(x)) # type:ignore\n btnrefresh.pack(padx=2, side=tk.RIGHT)\n Tooltip(btnrefresh,\n text=_(\"Preview updates at every model save. 
Click to refresh now.\"),\n wrap_length=200)\n logger.debug(\"Added refresh option\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 168, "n_words": 33, "vocab_size": 30, "complexity": 1, "nloc": 11, "token_counts": 86, "n_ast_nodes": 147, "n_identifiers": 24, "random_cut": "def _add_option_refresh(self) -> None:\n \n logger.debug(\"Adding refresh option\")\n btnrefresh = ttk.Button(self.optsframe,\n image=get_images().icons[\"reload\"],\n command=lambda x=\"update\": preview_trigger().set(x)) # type:ignore\n btnrefresh.pack(padx=2, side=tk.RIGHT)\n Tooltip(btnrefresh,\n text=_(\"Preview updates at every model save. Click to refresh now.\"),\n wrap_length=200)\n logger.debug(\"Added refresh option\")\n", "d_id": 21283, "documentation": { "docstring": " Add refresh button to refresh preview immediately ", "n_words": 7, "vocab_size": 6, "n_whitespaces": 8, "language": "en" } }, { "id": 242808, "commit_id": "7fa92c67b1471a66739c4768cdef616c27675981", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "close", "commit_message": "[Private] class names should be CamelCase", "code": "def close(self):\n \n try:\n if hasattr(self, \"_close__fp\"):\n self._close__fp()\n if self.fp:\n self.fp.close()\n self.fp = None\n except Exception as msg:\n logger.debug(\"Error closing: %s\", msg)\n\n if getattr(self, \"map\", None):\n self.map = None\n\n # Instead of simply setting to None, we're setting up a\n # deferred error that will better explain that the core image\n # object is gone.\n self.im = DeferredError(ValueError(\"Operation on closed image\"))\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 201, "n_words": 60, "vocab_size": 51, "complexity": 5, "nloc": 12, "token_counts": 77, "n_ast_nodes": 138, "n_identifiers": 14, "random_cut": "def close(self):\n \n try:\n if hasattr(self, \"_close__fp\"):\n self._close__fp()\n if self.fp:\n self.fp.close()\n self.fp = None\n except Exception as msg:\n logger.debug(\"Error closing: %s\", msg)\n\n if getat", "d_id": 69929, "documentation": { "docstring": "\n Closes the file pointer, if possible.\n\n This operation will destroy the image core and release its memory.\n The image data will be unusable afterward.\n\n This function is required to close images that have multiple frames or\n have not had their file read and closed by the\n :py:meth:`~PIL.Image.Image.load` method. 
See :ref:`file-handling` for\n more information.\n ", "n_words": 53, "vocab_size": 45, "n_whitespaces": 110, "language": "en" } }, { "id": 195871, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/solvers/diophantine/diophantine.py", "file_name": "diophantine.py", "fun_name": "diop_general_sum_of_squares", "commit_message": "Improved documentation formatting", "code": "def diop_general_sum_of_squares(eq, limit=1):\n r\n var, coeff, diop_type = classify_diop(eq, _dict=False)\n\n if diop_type == GeneralSumOfSquares.name:\n return set(GeneralSumOfSquares(eq).solve(limit=limit))\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 31, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 37, "token_counts": 47, "n_ast_nodes": 73, "n_identifiers": 12, "random_cut": "def diop_general_sum_of_squares(eq, limit=1):\n r\n var, coeff, diop_type = classify_diop(eq, _dict=False)\n\n if diop_type == GeneralSumOfSquares.name:\n return set(GeneralSumOfSquares(eq).solve(limit=limit))\n\n", "d_id": 47458, "documentation": { "docstring": "\n Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.\n\n Returns at most ``limit`` number of solutions.\n\n Usage\n =====\n\n ``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which\n is assumed to be zero. Also, ``eq`` should be in the form,\n `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.\n\n Details\n =======\n\n When `n = 3` if `k = 4^a(8m + 7)` for some `a, m \\in Z` then there will be\n no solutions. Refer to [1]_ for more details.\n\n Examples\n ========\n\n >>> from sympy.solvers.diophantine.diophantine import diop_general_sum_of_squares\n >>> from sympy.abc import a, b, c, d, e\n >>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345)\n {(15, 22, 22, 24, 24)}\n\n Reference\n =========\n\n .. [1] Representing an integer as a sum of three squares, [online],\n Available:\n http://www.proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares\n ", "n_words": 138, "vocab_size": 98, "n_whitespaces": 216, "language": "en" } }, { "id": 186806, "commit_id": "f7e61edcb2ea3195c9889c407a08e6dffb7f60dc", "repo": "certbot", "path": "acme/acme/messages.py", "file_name": "messages.py", "fun_name": "resolved_combinations", "commit_message": "deprecate more attributes in acme (#9369)\n\n* deprecate more attributes in acme\r\n\r\n* Deprecate .Authorization.combinations by renaming the field and\r\n deprecating in getters/setters\r\n\r\n* Silence deprecation warnings from our own imports of acme.mixins\r\n\r\nCo-authored-by: Brad Warren ", "code": "def resolved_combinations(self) -> Tuple[Tuple[ChallengeBody, ...], ...]:\n \n warnings.warn(\n \"acme.messages.Authorization.resolved_combinations is deprecated and will be \"\n \"removed in a future release.\", DeprecationWarning)\n return tuple(tuple(self.challenges[idx] for idx in combo)\n for combo in self.combinations) # pylint: disable=not-an-iterable\n\n\n@Directory.register", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "@Directory.register", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 97, "n_words": 34, "vocab_size": 31, "complexity": 3, "nloc": 11, "token_counts": 50, "n_ast_nodes": 87, "n_identifiers": 14, "random_cut": "def resolved_combinations(self) -> Tuple[Tuple[ChallengeBody, ...], ...]:\n ", "d_id": 45631, "documentation": { "docstring": "Combinations with challenges instead of indices.\n\n .. 
deprecated: 1.30.0\n\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 23, "language": "en" } }, { "id": 259434, "commit_id": "75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc", "repo": "scikit-learn", "path": "sklearn/_loss/tests/test_loss.py", "file_name": "test_loss.py", "fun_name": "test_tweedie_log_identity_consistency", "commit_message": "ENH migrate GLMs / TweedieRegressor to linear loss (#22548)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. Fan ", "code": "def test_tweedie_log_identity_consistency(p):\n \n half_tweedie_log = HalfTweedieLoss(power=p)\n half_tweedie_identity = HalfTweedieLossIdentity(power=p)\n n_samples = 10\n y_true, raw_prediction = random_y_true_raw_prediction(\n loss=half_tweedie_log, n_samples=n_samples, seed=42\n )\n y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction)\n\n # Let's compare the loss values, up to some constant term that is dropped\n # in HalfTweedieLoss but not in HalfTweedieLossIdentity.\n loss_log = half_tweedie_log.loss(\n y_true=y_true, raw_prediction=raw_prediction\n ) + half_tweedie_log.constant_to_optimal_zero(y_true)\n loss_identity = half_tweedie_identity.loss(\n y_true=y_true, raw_prediction=y_pred\n ) + half_tweedie_identity.constant_to_optimal_zero(y_true)\n # Note that HalfTweedieLoss ignores different constant terms than\n # HalfTweedieLossIdentity. Constant terms means terms not depending on\n # raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses\n # give the same values.\n assert_allclose(loss_log, loss_identity)\n\n # For gradients and hessians, the constant terms do not matter. We have, however,\n # to account for the chain rule, i.e. with x=raw_prediction\n # gradient_log(x) = d/dx loss_log(x)\n # = d/dx loss_identity(exp(x))\n # = exp(x) * gradient_identity(exp(x))\n # Similarly,\n # hessian_log(x) = exp(x) * gradient_identity(exp(x))\n # + exp(x)**2 * hessian_identity(x)\n gradient_log, hessian_log = half_tweedie_log.gradient_hessian(\n y_true=y_true, raw_prediction=raw_prediction\n )\n gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(\n y_true=y_true, raw_prediction=y_pred\n )\n assert_allclose(gradient_log, y_pred * gradient_identity)\n assert_allclose(\n hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity\n )\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 383, "n_words": 174, "vocab_size": 109, "complexity": 1, "nloc": 25, "token_counts": 155, "n_ast_nodes": 255, "n_identifiers": 25, "random_cut": "def test_tweedie_log_identity_consistency(p):\n \n half_tweedie_log = HalfTweedieLoss(power=p)\n half_tweedie_identity = HalfTweedieLossIdentity(power=p)\n n_samples = 10\n y_true, raw_prediction = random_y_true_raw_prediction(\n loss=half_tweedie_log, n_samples=n_samples, seed=42\n )\n y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction)\n\n # Let's compare the loss values, up to some constant term that is dropped\n # in HalfTweedieLoss but not in HalfTweedieLossIdentity.\n loss_log = half_tweedie_log.loss(\n y_true=y_true, raw_prediction=raw_prediction\n ) + half_tweedie_log.constant_to_optimal_zero(y_true)\n loss_identity = half_tweedie_identity.loss(\n y_true=y_true, raw_prediction=y_pred\n ) + half_tweedie_identity.constant_to_optimal_zero(y_true)\n # Note that HalfTweedieLoss ignores different constant terms than\n # HalfTweedieLos", "d_id": 75768, "documentation": { "docstring": 
"Test for identical losses when only the link function is different.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 196851, "commit_id": "1eeb01e15f06c6692a5bfd6fd2d2a3002d864a07", "repo": "sympy", "path": "sympy/integrals/transforms.py", "file_name": "transforms.py", "fun_name": "laplace_transform", "commit_message": "Fix a few docstring formatting issues", "code": "def laplace_transform(f, t, s, legacy_matrix=True, **hints):\n r\n\n debug('\\n***** laplace_transform(%s, %s, %s)'%(f, t, s))\n\n if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'):\n\n conds = not hints.get('noconds', False)\n\n if conds and legacy_matrix:\n SymPyDeprecationWarning(\n feature=\"laplace_transform of a Matrix with noconds=False (default)\",\n useinstead=\"the option legacy_matrix=False to get the new behaviour\",\n issue=21504,\n deprecated_since_version=\"1.9\"\n ).warn()\n return f.applyfunc(lambda fij: laplace_transform(fij, t, s, **hints))\n else:\n elements_trans = [laplace_transform(fij, t, s, **hints) for fij in f]\n if conds:\n elements, avals, conditions = zip(*elements_trans)\n f_laplace = type(f)(*f.shape, elements)\n return f_laplace, Max(*avals), And(*conditions)\n else:\n return type(f)(*f.shape, elements_trans)\n\n return LaplaceTransform(f, t, s).doit(**hints)\n\n\n@_noconds_(True)", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "@_noconds_(True)", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 306, "n_words": 89, "vocab_size": 71, "complexity": 7, "nloc": 85, "token_counts": 196, "n_ast_nodes": 315, "n_identifiers": 33, "random_cut": "def laplace_transform(f, t, s, legacy_matrix=True, **hints):\n r\n\n debug('\\n***** laplace_transform(%s, %s, %s)'%(f, t, s))\n\n if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'):\n\n conds = not hints.get('noconds', False)\n\n if conds and legacy_matrix:\n SymPyDeprecationWarning(\n feature=\"laplace_transform of a Matrix with noconds=False (default)\",\n useinstead=\"the option legacy_matrix=False to get the new behaviour\",\n issue=21504,\n deprecated_since_version=\"1.9\"\n ).warn()\n return f.applyfunc(lambda fij: laplace_transform(fij, t, s, **hints))\n else:\n elements_trans = [laplace_transform(fij, t, s, **hints) for fij in f]\n if c", "d_id": 48218, "documentation": { "docstring": "\n Compute the Laplace Transform `F(s)` of `f(t)`,\n\n .. math :: F(s) = \\int_{0^{-}}^\\infty e^{-st} f(t) \\mathrm{d}t.\n\n Explanation\n ===========\n\n For all sensible functions, this converges absolutely in a\n half-plane\n\n .. math :: a < \\operatorname{Re}(s)\n\n This function returns ``(F, a, cond)`` where ``F`` is the Laplace\n transform of ``f``, `a` is the half-plane of convergence, and `cond` are\n auxiliary convergence conditions.\n\n The implementation is rule-based, and if you are interested in which\n rules are applied, and whether integration is attemped, you can switch\n debug information on by setting ``sympy.SYMPY_DEBUG=True``.\n\n The lower bound is `0-`, meaning that this bound should be approached\n from the lower side. This is only necessary if distributions are involved.\n At present, it is only done if `f(t)` contains ``DiracDelta``, in which\n case the Laplace transform is computed implicitly as\n\n .. 
math :: F(s) = \\lim_{\\tau\\to 0^{-}} \\int_{\\tau}^\\infty e^{-st} f(t) \\mathrm{d}t\n\n by applying rules.\n\n If the integral cannot be fully computed in closed form, this function\n returns an unevaluated :class:`LaplaceTransform` object.\n\n For a description of possible hints, refer to the docstring of\n :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,\n only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``).\n\n .. deprecated:: 1.9\n Legacy behavior for matrices where ``laplace_transform`` with\n ``noconds=False`` (the default) returns a Matrix whose elements are\n tuples. The behavior of ``laplace_transform`` for matrices will change\n in a future release of SymPy to return a tuple of the transformed\n Matrix and the convergence conditions for the matrix as a whole. Use\n ``legacy_matrix=False`` to enable the new behavior.\n\n Examples\n ========\n\n >>> from sympy import DiracDelta, exp, laplace_transform\n >>> from sympy.abc import t, s, a\n >>> laplace_transform(t**4, t, s)\n (24/s**5, 0, True)\n >>> laplace_transform(t**a, t, s)\n (gamma(a + 1)/(s*s**a), 0, re(a) > -1)\n >>> laplace_transform(DiracDelta(t)-a*exp(-a*t),t,s)\n (s/(a + s), Max(0, -a), True)\n\n See Also\n ========\n\n inverse_laplace_transform, mellin_transform, fourier_transform\n hankel_transform, inverse_hankel_transform\n\n ", "n_words": 300, "vocab_size": 192, "n_whitespaces": 463, "language": "en" } }, { "id": 256315, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "test/benchmarks/nq_to_squad.py", "file_name": "nq_to_squad.py", "fun_name": "reduce_annotations", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. 
Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def reduce_annotations(anno_types, answers):\n \n for at in set(anno_types):\n assert at in (\"no_answer\", \"short_answer\")\n if anno_types.count(\"short_answer\") >= anno_types.count(\"no_answer\"):\n majority = \"short_answer\"\n is_impossible = False\n else:\n majority = \"no_answer\"\n is_impossible = True\n answers = [a for at, a in zip(anno_types, answers) if at == majority]\n reduction = len(anno_types) - len(answers)\n assert reduction < 3\n if not is_impossible:\n global n_no_ans\n n_no_ans += reduction\n else:\n global n_short\n n_short += reduction\n answers = []\n return answers, is_impossible\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 171, "n_words": 71, "vocab_size": 45, "complexity": 6, "nloc": 20, "token_counts": 112, "n_ast_nodes": 191, "n_identifiers": 14, "random_cut": "def reduce_annotations(anno_types, answers):\n \n for at in set(anno_types):\n assert at in (\"no_answer\", \"short_answer\")\n if anno_types.count(\"short_answer\") >= anno_types.count(\"no_answer\"):\n majority = \"short_answer\"\n is_impossible = False\n else:\n majority = \"no_answer\"\n is_impossible = True\n answers = [a for at, a in zip(anno_types, answers) if at == majority]\n reduction = len(anno_types) - ", "d_id": 74867, "documentation": { "docstring": "\n In cases where there is annotator disagreement, this fn picks either only the short_answers or only the no_answers,\n depending on which is more numerous, with a bias towards picking short_answers.\n\n Note: By this stage, all long_answer annotations and all samples with yes/no answer have been removed.\n This leaves just no_answer and short_answers", "n_words": 52, "vocab_size": 44, "n_whitespaces": 64, "language": "en" } }, { "id": 308405, "commit_id": "d0c4f0fec4216e4193da716001b5e13e1e3f2106", "repo": "core", "path": "homeassistant/components/mqtt/cover.py", "file_name": "cover.py", "fun_name": "async_open_cover", "commit_message": "Add mqtt encoding support for publishing (#62739)\n\n* encoding support for mqtt publishing - todo tests\r\n\r\n* signature allows None values for qos and retain\r\n\r\n* common test for mqtt publishing encoding\r\n\r\n* better test with command templates\r\n\r\n* more tests\r\n\r\n* fix tests alarm control panel+tests light basic\r\n\r\n* tests light json and template\r\n\r\n* add tests vacuum and fix tests light_template", "code": "async def async_open_cover(self, **kwargs):\n \n await mqtt.async_publish(\n self.hass,\n self._config.get(CONF_COMMAND_TOPIC),\n self._config[CONF_PAYLOAD_OPEN],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n self._config[CONF_ENCODING],\n )\n if self._optimistic:\n # Optimistically assume that cover has changed state.\n self._state = STATE_OPEN\n if self._config.get(CONF_GET_POSITION_TOPIC):\n self._position = self.find_percentage_in_range(\n 
self._config[CONF_POSITION_OPEN], COVER_PAYLOAD\n )\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 222, "n_words": 35, "vocab_size": 32, "complexity": 3, "nloc": 16, "token_counts": 98, "n_ast_nodes": 150, "n_identifiers": 22, "random_cut": "async def async_open_cover(self, **kwargs):\n \n await mqtt.async_publish(\n self.ha", "d_id": 107162, "documentation": { "docstring": "Move the cover up.\n\n This method is a coroutine.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 23, "language": "en" } }, { "id": 20042, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distro.py", "file_name": "distro.py", "fun_name": "_parse_distro_release_content", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _parse_distro_release_content(line):\n # type: (str) -> Dict[str, str]\n \n matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])\n distro_info = {}\n if matches:\n # regexp ensures non-None\n distro_info[\"name\"] = matches.group(3)[::-1]\n if matches.group(2):\n distro_info[\"version_id\"] = matches.group(2)[::-1]\n if matches.group(1):\n distro_info[\"codename\"] = matches.group(1)[::-1]\n elif line:\n distro_info[\"name\"] = line.strip()\n return distro_info\n\n\n_distro = LinuxDistribution()\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 176, "n_words": 43, "vocab_size": 32, "complexity": 5, "nloc": 12, "token_counts": 109, "n_ast_nodes": 201, "n_identifiers": 10, "random_cut": "def _parse_distro_release_content(line):\n # type: (str) -> Dict[str, str]\n \n matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])\n distro_info = {}\n if matches:\n # regexp ensures non-None\n distro_info[\"name\"] = matches.group(3", "d_id": 3191, "documentation": { "docstring": "\n Parse a line from a distro release file.\n\n Parameters:\n * line: Line from the distro release file. 
Must be a unicode string\n or a UTF-8 encoded byte string.\n\n Returns:\n A dictionary containing all information items.\n ", "n_words": 35, "vocab_size": 28, "n_whitespaces": 97, "language": "en" } }, { "id": 37755, "commit_id": "a8fa2f91f409a0657937016b983b74f58a07ae72", "repo": "transformers", "path": "src/transformers/modeling_utils.py", "file_name": "modeling_utils.py", "fun_name": "load_sharded_checkpoint", "commit_message": "Make Trainer compatible with sharded checkpoints (#17053)\n\n* Make Trainer compatible with sharded checkpoints\r\n\r\n* Add doc", "code": "def load_sharded_checkpoint(model, folder, strict=True):\n \n # Load the index\n index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)\n if not os.path.isfile(index_file):\n raise ValueError(f\"Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}.\")\n\n with open(index_file, \"r\", encoding=\"utf-8\") as f:\n index = json.load(f)\n\n shard_files = list(set(index[\"weight_map\"].values()))\n\n # If strict=True, error before loading any of the state dicts.\n loaded_keys = index[\"weight_map\"].keys()\n model_keys = model.state_dict().keys()\n missing_keys = [key for key in model_keys if key not in loaded_keys]\n unexpected_keys = [key for key in loaded_keys if key not in model_keys]\n if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):\n error_message = f\"Error(s) in loading state_dict for {model.__class__.__name__}\"\n if len(missing_keys) > 0:\n str_missing_keys = \",\".join([f'\"{k}\"' for k in missing_keys])\n error_message += f\"\\nMissing key(s): {str_missing_keys}.\"\n if len(unexpected_keys) > 0:\n str_unexpected_keys = \",\".join([f'\"{k}\"' for k in unexpected_keys])\n error_message += f\"\\nMissing key(s): {str_unexpected_keys}.\"\n raise RuntimeError(error_message)\n\n for shard_file in shard_files:\n state_dict = torch.load(os.path.join(folder, shard_file))\n model.load_state_dict(state_dict, strict=False)\n\n # Make sure memory is fred before we load the next state dict.\n del state_dict\n gc.collect()\n\n # Return the same thing as PyTorch load_state_dict function.\n return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 333, "n_words": 167, "vocab_size": 104, "complexity": 14, "nloc": 26, "token_counts": 264, "n_ast_nodes": 468, "n_identifiers": 45, "random_cut": "def load_sharded_checkpoint(model, folder, strict=True):\n \n # Load the index\n index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)\n if not os.path.isfile(index_file):\n raise ValueError(f\"Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}.\")\n\n with open(index_file, \"r\", encoding=\"utf-8\") as f:\n index = json.load(f)\n\n shard_files = list(set(index[\"weight_map\"].values()))\n\n # If strict=True, error before loading any of the state dicts.\n loaded_keys = index[\"weight_map\"].keys()\n model_keys = model.state_dict().keys()\n missing_keys = [key for key in model_keys if key not in loaded_keys]\n unexpected_keys = [key for key in loaded_keys if key not in model_keys]\n if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):\n error_message = f\"Error(s) in loading state_dict for {model.__class__.__name__}\"\n if len(missing_keys) > 0:\n str_missing_keys = \",", "d_id": 6866, "documentation": { "docstring": "\n This is the same as\n 
[`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict)\n but for a sharded checkpoint.\n\n This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being\n loaded in the model.\n\n Args:\n model (`torch.nn.Module`): The model in which to load the checkpoint.\n folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint.\n strict (`bool`, *optional`, defaults to `True`):\n Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.\n\n Returns:\n `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields\n - `missing_keys` is a list of str containing the missing keys\n - `unexpected_keys` is a list of str containing the unexpected keys\n ", "n_words": 115, "vocab_size": 67, "n_whitespaces": 201, "language": "en" } }, { "id": 65661, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "customer_query", "commit_message": "style: format code with black", "code": "def customer_query(doctype, txt, searchfield, start, page_len, filters):\n\tconditions = []\n\tcust_master_name = frappe.defaults.get_user_default(\"cust_master_name\")\n\n\tif cust_master_name == \"Customer Name\":\n\t\tfields = [\"name\", \"customer_group\", \"territory\"]\n\telse:\n\t\tfields = [\"name\", \"customer_name\", \"customer_group\", \"territory\"]\n\n\tfields = get_fields(\"Customer\", fields)\n\n\tsearchfields = frappe.get_meta(\"Customer\").get_search_fields()\n\tsearchfields = \" or \".join(field + \" like %(txt)s\" for field in searchfields)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\t**{\n\t\t\t\t\"fields\": \", \".join(fields),\n\t\t\t\t\"scond\": searchfields,\n\t\t\t\t\"mcond\": get_match_cond(doctype),\n\t\t\t\t\"fcond\": get_filters_cond(doctype, filters, conditions).replace(\"%\", \"%%\"),\n\t\t\t}\n\t\t),\n\t\t{\"txt\": \"%%%s%%\" % txt, \"_txt\": txt.replace(\"%\", \"\"), \"start\": start, \"page_len\": page_len},\n\t)\n\n\n# searches for supplier\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 62, "n_words": 86, "vocab_size": 69, "complexity": 3, "nloc": 30, "token_counts": 172, "n_ast_nodes": 322, "n_identifiers": 27, "random_cut": "def customer_query(doctype, txt, searchfield, start, page_len, filters):\n\tconditions = []\n\tcust_master_name = frappe.defaults.get_user_default(\"cust_master_name\")\n\n\tif cust_master_name == \"Customer Name\":\n\t\tfields = [\"name\", \"customer_group\", \"territory\"]\n\telse:\n\t\tfields = [\"name\", \"customer_name\", \"customer_group\", \"territory\"]\n\n\tfields = get_fields(\"Customer\", fields)\n\n\tsearchfields = frappe.get_meta(\"Customer\").get_search_fields()\n\tsearchfields = \" or \".join(field + \" like %(txt)s\" for field in searchfields)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\t**{\n\t\t\t\t\"fields\": \", \".join(fields),\n\t\t\t\t\"scond\": searchfields,\n\t\t\t\t\"mcond\": get_match_cond(doctype),\n\t\t\t\t\"fc", "d_id": 13980, "documentation": { "docstring": "select {fields} from `tabCustomer`\n\t\twhere docstatus < 2\n\t\t\tand ({scond}) and disabled=0\n\t\t\t{fcond} 
{mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, customer_name\n\t\tlimit %(start)s, %(page_len)s", "n_words": 33, "vocab_size": 27, "n_whitespaces": 23, "language": "en" } }, { "id": 85773, "commit_id": "72e351082168f68cbaa5700a51e8ed577222e887", "repo": "sentry", "path": "src/sentry/tagstore/base.py", "file_name": "base.py", "fun_name": "get_group_tag_value_count", "commit_message": "feat(perf_issues): Fix `GroupTagKeyDetailsEndpoint` to work for performance issues (#38860)\n\nThis allows this endpoint to return results for performance issues.", "code": "def get_group_tag_value_count(self, group, environment_id, key):\n \n raise NotImplementedError\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 22, "n_identifiers": 6, "random_cut": "def get_group_tag_value_count(self, group, environment_id, key):\n \n raise No", "d_id": 18042, "documentation": { "docstring": "\n >>> get_group_tag_value_count(group, 3, 'key1')\n ", "n_words": 4, "vocab_size": 4, "n_whitespaces": 19, "language": "en" } }, { "id": 47522, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker):\n \n dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'\n task_id_1 = 'dummy'\n session = settings.Session()\n with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, session=session):\n task1 = EmptyOperator(task_id=task_id_1)\n\n self.scheduler_job = SchedulerJob(subdir=os.devnull)\n\n dr1 = dag_maker.create_dagrun(state=state)\n ti = dr1.get_task_instance(task1.task_id, session)\n ti.state = State.SCHEDULED\n session.merge(ti)\n session.commit()\n\n with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:\n self.scheduler_job._enqueue_task_instances_with_queued_state([ti])\n ti.refresh_from_db()\n assert ti.state == State.NONE\n mock_queue_command.assert_not_called()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 174, "n_words": 47, "vocab_size": 38, "complexity": 1, "nloc": 17, "token_counts": 139, "n_ast_nodes": 233, "n_identifiers": 35, "random_cut": "def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker):\n \n dag_i", "d_id": 9144, "documentation": { "docstring": "This tests that task instances whose dagrun is in finished state are not queued", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 61907, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/compat.py", "file_name": "compat.py", "fun_name": "match_hostname", "commit_message": "upd; format", "code": "def match_hostname(cert, hostname):\n \n if not cert:\n raise 
ValueError(\"empty or no certificate, match_hostname needs a \"\n \"SSL socket or SSL context with either \"\n \"CERT_OPTIONAL or CERT_REQUIRED\")\n dnsnames = []\n san = cert.get('subjectAltName', ())\n for key, value in san:\n if key == 'DNS':\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n if not dnsnames:\n # The subject is only checked when there is no dNSName entry\n # in subjectAltName\n for sub in cert.get('subject', ()):\n for key, value in sub:\n # XXX according to RFC 2818, the most specific Common Name\n # must be used.\n if key == 'commonName':\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n if len(dnsnames) > 1:\n raise CertificateError(\"hostname %r \"\n \"doesn't match either of %s\"\n % (hostname, ', '.join(map(repr, dnsnames))))\n elif len(dnsnames) == 1:\n raise CertificateError(\"hostname %r \"\n \"doesn't match %r\"\n % (hostname, dnsnames[0]))\n else:\n raise CertificateError(\"no appropriate commonName or \"\n \"subjectAltName fields were found\")\n\n\ntry:\n from types import SimpleNamespace as Container\nexcept ImportError: # pragma: no cover", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 633, "n_words": 155, "vocab_size": 106, "complexity": 12, "nloc": 30, "token_counts": 166, "n_ast_nodes": 314, "n_identifiers": 21, "random_cut": "def match_hostname(cert, hostname):\n \n if not cert:\n raise ValueError(\"empty or no certificate, match_hostname needs a \"\n \"SSL socket or SSL context with either \"\n \"CERT_OPTIONAL or CERT_REQUIRED\")\n dnsnames = []\n san = cert.get('subjectAltName', ())\n for key, value in san:\n if key == 'DNS':\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n if not dnsnames:\n # The subject is only checked when there is no dNSName entry\n # in subjectAltName\n for sub in cert.get('subject', ()):\n for key, value in sub:\n # XXX according to RFC 2818, the most specific Common Name\n # must be used.\n if key == 'commonName':\n", "d_id": 12746, "documentation": { "docstring": "Verify that *cert* (in decoded format as returned by\n SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125\n rules are followed, but IP addresses are not accepted for *hostname*.\n\n CertificateError is raised on failure. 
On success, the function\n returns nothing.\n ", "n_words": 40, "vocab_size": 36, "n_whitespaces": 76, "language": "en" } }, { "id": 104860, "commit_id": "f51b6994db27ea69261ef919fb7775928f9ec10b", "repo": "datasets", "path": "src/datasets/iterable_dataset.py", "file_name": "iterable_dataset.py", "fun_name": "take", "commit_message": "Stream private zipped images (#4173)\n\n* keep track of repo_id and token to decode remote images\r\n\r\n* add test\r\n\r\n* fix\r\n\r\n* docstrings + comments\r\n\r\n* fix string_to_dict\r\n\r\n* fix tests", "code": "def take(self, n) -> \"IterableDataset\":\n \n ex_iterable = TakeExamplesIterable(self._ex_iterable, n)\n return iterable_dataset(\n ex_iterable=ex_iterable,\n info=self._info.copy(),\n split=self._split,\n format_type=self._format_type,\n shuffling=copy.deepcopy(self._shuffling),\n token_per_repo_id=self._token_per_repo_id,\n )\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 112, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 29, "token_counts": 67, "n_ast_nodes": 106, "n_identifiers": 19, "random_cut": "def take(self, n) -> \"IterableDataset\":\n \n ex_iterable = TakeExamplesIterable(self._ex_iterable, n)\n return iterable_dataset(\n ex_iterable=ex_iterable,\n info=self._info.copy(),\n split=self._split,\n format_type=self._format_type,\n shuffling=copy.deepcopy(self._shuffling),\n token_per_repo_id=self._token_per_repo_id,", "d_id": 22008, "documentation": { "docstring": "\n Create a new IterableDataset with only the first ``n`` elements.\n\n Args:\n n (:obj:`int`): number of elements to take.\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\", split=\"train\", streaming=True)\n >>> small_ds = ds.take(2)\n >>> list(small_ds)\n [{'label': 1,\n 'text': 'the rock is destined to be the 21st century\\'s new \" conan \" and that he\\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},\n {'label': 1,\n 'text': 'the gorgeously elaborate continuation of \" the lord of the rings \" trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\\'s expanded vision of j . r . r . 
tolkien\\'s middle-earth .'}]\n ```\n ", "n_words": 117, "vocab_size": 90, "n_whitespaces": 230, "language": "en" } }, { "id": 94333, "commit_id": "39cfdcb446e74732c67ce07d7dd8d8d5ace471b1", "repo": "sentry", "path": "tests/sentry/event_manager/test_event_manager.py", "file_name": "test_event_manager.py", "fun_name": "test_category_match_group", "commit_message": "test(event_manager): Fix incorrect invocations of manager.save (#36615)", "code": "def test_category_match_group(self):\n \n from sentry.grouping.enhancer import Enhancements\n\n enhancement = Enhancements.from_config_string(\n ,\n )\n\n event = make_event(\n platform=\"native\",\n exception={\n \"values\": [\n {\n \"type\": \"Hello\",\n \"stacktrace\": {\n \"frames\": [\n {\n \"function\": \"foo\",\n },\n {\n \"function\": \"bar\",\n },\n ]\n },\n }\n ]\n },\n )\n\n manager = EventManager(event)\n manager.normalize()\n\n grouping_config = {\n \"enhancements\": enhancement.dumps(),\n \"id\": \"mobile:2021-02-12\",\n }\n\n manager.get_data()[\"grouping_config\"] = grouping_config\n event1 = manager.save(self.project.id)\n\n event2 = Event(event1.project_id, event1.event_id, data=event1.data)\n\n assert event1.get_hashes().hashes == event2.get_hashes(grouping_config).hashes\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 615, "n_words": 66, "vocab_size": 47, "complexity": 1, "nloc": 38, "token_counts": 154, "n_ast_nodes": 265, "n_identifiers": 29, "random_cut": "def test_category_match_group(self):\n \n from sentry.grouping.enhancer import Enhancements\n\n enhancement = Enhancements.from_config_string(\n ,\n )\n\n event = make_event(\n platform=\"native\",\n exception={\n \"values\": [\n {\n \"type\": \"Hello\",\n \"stacktrace\": {\n \"frames\": [\n {\n \"function\": \"foo\",\n },\n {\n \"function\": \"bar\",\n },", "d_id": 19068, "documentation": { "docstring": "\n Regression test to ensure categories are applied consistently and don't\n produce hash mismatches.\n \n function:foo category=foo_like\n category:foo_like -group\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 73, "language": "en" } }, { "id": 96559, "commit_id": "542484c0cd71625e62e086f3f7c5aaf85360f724", "repo": "sentry", "path": "src/sentry/plugins/bases/notify.py", "file_name": "notify.py", "fun_name": "notify", "commit_message": "fix(plugins): Silence error (#32042)", "code": "def notify(self, notification, raise_exception=False):\n \n event = notification.event\n try:\n return self.notify_users(\n event.group, event, triggering_rules=[r.label for r in notification.rules]\n )\n except (\n ApiError,\n HTTPError,\n InvalidIdentity,\n PluginError,\n SSLError,\n UrllibHTTPError,\n ) as err:\n self.logger.info(\n \"notification-plugin.notify-failed\",\n extra={\n \"error\": str(err),\n \"plugin\": self.slug,\n \"project_id\": event.group.project_id,\n \"organization_id\": event.group.project.organization_id,\n },\n )\n if raise_exception:\n raise err\n return False\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 366, "n_words": 48, "vocab_size": 45, "complexity": 4, "nloc": 26, "token_counts": 114, "n_ast_nodes": 175, "n_identifiers": 26, "random_cut": "def notify(self, notification, raise_exception=False):\n \n event = notification.event\n try:\n return self.notify_users(\n event.group, event, triggering_rules=[r.label for r in notification.rules]\n )\n except (\n 
ApiError,\n HTTPError,\n InvalidIdentity,\n PluginError,\n SSLError,\n UrllibHTTPError,\n ) as err:\n self.logger.info(\n \"notification-plugin.notify-failed\",\n extra={\n \"error\": str(err),\n \"plugin\": self.slug,\n \"project_id\": event.group.project_id,\n \"organization_id\": event.group.project.organization_id,\n },\n )\n if raise_exception:\n ", "d_id": 19326, "documentation": { "docstring": "\n This calls the notify_users method of the plugin.\n Normally this method eats the error and logs it but if we\n set raise_exception=True like we do for the test plugin button,\n the exception is raised\n ", "n_words": 34, "vocab_size": 28, "n_whitespaces": 70, "language": "en" } }, { "id": 82298, "commit_id": "a3110e1ff24085373898c7d2a85f628abeb8518d", "repo": "django-cms", "path": "cms/tests/test_rendering.py", "file_name": "test_rendering.py", "fun_name": "test_processors", "commit_message": "Enabled isort workflow (#7200)\n\n* Ran isort\r\n\r\n* Enabled isort workflow\r\n\r\nCo-authored-by: Vinit Kumar ", "code": "def test_processors(self):\n \n from djangocms_text_ckeditor.cms_plugins import TextPlugin\n\n from cms.plugin_pool import plugin_pool\n\n instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0]\n\n load_from_string = self.load_template_from_string\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 51, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 27, "token_counts": 169, "n_ast_nodes": 69, "n_identifiers": 14, "random_cut": "def test_processors(self):\n \n from djangocms_text_ckeditor.cms_plugins import TextPlugin\n\n from cms.plugin_pool import plugin_pool\n\n instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0]\n\n load_from_string = self.load_template_from_string\n", "d_id": 17345, "documentation": { "docstring": "\n Tests that plugin processors and plugin context processors can be defined\n in settings and are working and that extra plugin context processors can be\n passed to PluginContext.\n ", "n_words": 27, "vocab_size": 17, "n_whitespaces": 56, "language": "en" } }, { "id": 316447, "commit_id": "7cd68381f1d4f58930ffd631dfbfc7159d459832", "repo": "core", "path": "tests/test_config_entries.py", "file_name": "test_config_entries.py", "fun_name": "test_unique_id_ignore", "commit_message": "Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)", "code": "async def test_unique_id_ignore(hass, manager):\n \n async_setup_entry = AsyncMock(return_value=False)\n mock_integration(hass, MockModule(\"comp\", async_setup_entry=async_setup_entry))\n mock_entity_platform(hass, \"config_flow.comp\", None)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 25, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 24, "token_counts": 185, "n_ast_nodes": 63, "n_identifiers": 9, "random_cut": "async def test_unique_id_ignore(hass, manager):\n \n async_setup_entry = AsyncMock(return_value=False)\n mock_integration(hass, MockModule(\"comp\", async_setup_entry=async_setup_entry))\n mock_entity_platform(hass, \"config_flow.comp\", None)\n", "d_id": 115025, "documentation": { "docstring": "Test that we can ignore flows that are in progress and have a unique ID.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 212840, "commit_id": "b3680477c755277192715b343e9cd4254de7c45e", "repo": "PySimpleGUI", "path": 
"PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "bind", "commit_message": "Added propagate parameter to the Element.bind and Window.bind methods. Indicates whether tkinter should propagate the event to the corresponding element/window or stop with the user callback", "code": "def bind(self, bind_string, key, propagate=True):\n \n if not self._is_window_created('tried Window.bind'):\n return\n self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate))\n self.user_bind_dict[bind_string] = key\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 58, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 54, "n_ast_nodes": 85, "n_identifiers": 10, "random_cut": "def bind(self, bind_string, key, propagate=True):\n \n if not self._is_window_created('tried Window.bind'):\n return\n self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate))\n self.user_bind_d", "d_id": 53448, "documentation": { "docstring": "\n Used to add tkinter events to a Window.\n The tkinter specific data is in the Window's member variable user_bind_event\n :param bind_string: The string tkinter expected in its bind function\n :type bind_string: (str)\n :param key: The event that will be generated when the tkinter event occurs\n :type key: str | int | tuple | object\n :param propagate: If True then tkinter will be told to propagate the event\n :type propagate: (bool)\n ", "n_words": 70, "vocab_size": 46, "n_whitespaces": 157, "language": "en" } }, { "id": 211000, "commit_id": "d409ec06779e9de0cdbd76af4dc2c00b4b58ccb0", "repo": "PaddleDetection", "path": "ppdet/modeling/heads/cascade_head.py", "file_name": "cascade_head.py", "fun_name": "forward", "commit_message": "upgrade cascade model (#6346)\n\n* add reg_class_agnostic\r\n\r\n* add loss_rpn_bbox", "code": "def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):\n \n targets = []\n if self.training:\n rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs)\n targets_list = [targets]\n self.assigned_rois = (rois, rois_num)\n self.assigned_targets = targets\n\n pred_bbox = None\n head_out_list = []\n for i in range(self.num_cascade_stages):\n if i > 0:\n rois, rois_num = self._get_rois_from_boxes(pred_bbox,\n inputs['im_shape'])\n if self.training:\n rois, rois_num, targets = self.bbox_assigner(\n rois, rois_num, inputs, i, is_cascade=True)\n targets_list.append(targets)\n\n rois_feat = self.roi_extractor(body_feats, rois, rois_num)\n bbox_feat = self.head(rois_feat, i)\n scores = self.bbox_score_list[i](bbox_feat)\n deltas = self.bbox_delta_list[i](bbox_feat)\n\n # TODO (lyuwenyu) Is it correct for only one class ?\n if not self.reg_class_agnostic and i < self.num_cascade_stages - 1:\n deltas = deltas.reshape([-1, self.num_classes, 4])\n labels = scores[:, :-1].argmax(axis=-1)\n deltas = deltas[paddle.arange(deltas.shape[0]), labels]\n\n head_out_list.append([scores, deltas, rois])\n pred_bbox = self._get_pred_bbox(deltas, rois, self.bbox_weight[i])\n\n if self.training:\n loss = {}\n for stage, value in enumerate(zip(head_out_list, targets_list)):\n (scores, deltas, rois), targets = value\n loss_stage = self.get_loss(scores, deltas, targets, rois,\n self.bbox_weight[stage])\n for k, v in loss_stage.items():\n loss[k + \"_stage{}\".format(\n stage)] = v / self.num_cascade_stages\n\n return loss, bbox_feat\n else:\n scores, deltas, 
self.refined_rois = self.get_prediction(\n head_out_list)\n return (deltas, scores), self.head\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 759, "n_words": 167, "vocab_size": 107, "complexity": 10, "nloc": 41, "token_counts": 390, "n_ast_nodes": 585, "n_identifiers": 52, "random_cut": "def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):\n \n targets = []\n if self.training:\n rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs)\n targets_list = [targets]\n self.assigned_rois = (rois, rois_num)\n self.assigned_targets = targets\n\n pred_bbox = None\n head_out_list = []\n for i in range(self.num_cascade_stages):\n if i > 0:\n rois, rois_num = self._get_rois_from_boxes(pred_bbox,\n inputs['im_shape'])\n if self.training:\n rois, rois_num, targets = self.bbox_assigner(\n rois, rois_num, inputs, i, is_cascade=True)\n targets_list.append(targets)\n\n rois_feat = self.roi_extractor(body_feats, rois, rois_num)\n bbox_feat = self.head(rois_feat, i)\n scores = self.bbox_score_list[i](bbox_feat)\n deltas = self.bbox_delta_list[i](bbox_feat)\n\n ", "d_id": 52997, "documentation": { "docstring": "\n body_feats (list[Tensor]): Feature maps from backbone\n rois (Tensor): RoIs generated from RPN module\n rois_num (Tensor): The number of RoIs in each image\n inputs (dict{Tensor}): The ground-truth of image\n ", "n_words": 28, "vocab_size": 22, "n_whitespaces": 64, "language": "en" } }, { "id": 223601, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "get_fws", "commit_message": "add python 3.10.4 for windows", "code": "def get_fws(value):\n \n newvalue = value.lstrip()\n fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')\n return fws, newvalue\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 24, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 64, "n_identifiers": 7, "random_cut": "def get_fws(value):\n \n newvalue = value.lstrip()\n fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')\n return fws, newvalue\n", "d_id": 56997, "documentation": { "docstring": "FWS = 1*WSP\n\n This isn't the RFC definition. We're using fws to represent tokens where\n folding can be done, but when we are parsing the *un*folding has already\n been done so we don't need to watch out for CRLF.\n\n ", "n_words": 39, "vocab_size": 36, "n_whitespaces": 52, "language": "en" } }, { "id": 158218, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "load_data_snli", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def load_data_snli(batch_size, num_steps=50):\n \n num_workers = d2l.get_dataloader_workers()\n data_dir = d2l.download_extract('SNLI')\n train_data = read_snli(data_dir, True)\n test_data = read_snli(data_dir, False)\n train_set = SNLIDataset(train_data, num_steps)\n test_set = SNLIDataset(test_data, num_steps, train_set.vocab)\n train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True,\n num_workers=num_workers)\n test_iter = gluon.data.DataLoader(test_set, batch_size, shuffle=False,\n num_workers=num_workers)\n return train_iter, test_iter, train_set.vocab\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 147, "n_words": 42, "vocab_size": 32, "complexity": 1, "nloc": 12, "token_counts": 109, "n_ast_nodes": 165, "n_identifiers": 21, "random_cut": "def load_data_snli(batch_size, num_steps=50):\n \n num_workers = d2l.get_dataloader_workers()\n data_dir = 
d2l.download_extract('SNLI')\n train_data = read_snli(data_dir, True)\n test_data = read_snli(data_dir, False)\n train_set = SNLIDataset(train_data,", "d_id": 37386, "documentation": { "docstring": "Download the SNLI dataset and return data iterators and vocabulary.\n\n Defined in :numref:`sec_natural-language-inference-and-dataset`", "n_words": 13, "vocab_size": 12, "n_whitespaces": 15, "language": "en" } }, { "id": 151197, "commit_id": "86aa875bc9d5edeba04f908fe45b011e52045c83", "repo": "freqtrade", "path": "freqtrade/freqai/utils.py", "file_name": "utils.py", "fun_name": "plot_feature_importance", "commit_message": "plot features as html instead of png", "code": "def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None:\n \n try:\n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n except ImportError:\n logger.exception(\"Module plotly not found \\n Please install using `pip3 install plotly`\")\n exit(1)\n\n from freqtrade.plot.plotting import store_plot_file\n\n # Gather feature importance from model\n if \"catboost.core\" in str(model.__class__):\n feature_importance = model.get_feature_importance()\n elif \"lightgbm.sklearn\" in str(model.__class__):\n feature_importance = model.feature_importances_\n else:\n raise NotImplementedError(f\"Cannot extract feature importance for {model.__class__}\")\n\n # Data preparation\n fi_df = pd.DataFrame({\n \"feature_names\": np.array(feature_names),\n \"feature_importance\": np.array(feature_importance)\n })\n fi_df_top = fi_df.nlargest(count_max, \"feature_importance\")[::-1]\n fi_df_worst = fi_df.nsmallest(count_max, \"feature_importance\")[::-1]\n\n # Plotting", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 189, "n_words": 84, "vocab_size": 67, "complexity": 4, "nloc": 37, "token_counts": 229, "n_ast_nodes": 261, "n_identifiers": 34, "random_cut": "def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None:\n \n try:\n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n except ImportError:\n logger.exception(\"Module plotly not found \\n Please install using `pip3 install plotly`\")\n exit(1)\n\n from freqtrade.plot.plotting import store_plot_file\n\n # Gather feature importance from model\n if \"c", "d_id": 34972, "documentation": { "docstring": "\n Plot Best and Worst Features by importance for CatBoost model.\n Called once per sub-train.\n Usage: plot_feature_importance(\n model=model,\n feature_names=dk.training_features_list,\n pair=pair,\n train_dir=dk.data_path)\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 89, "language": "en" } }, { "id": 205205, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/sqlite3/introspection.py", "file_name": "introspection.py", "fun_name": "get_primary_key_column", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_primary_key_column(self, cursor, table_name):\n \n cursor.execute(\n \"PRAGMA table_info(%s)\" % self.connection.ops.quote_name(table_name)\n )\n for _, name, *_, pk in cursor.fetchall():\n if pk:\n return name\n return None\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 95, "n_words": 23, "vocab_size": 22, "complexity": 3, "nloc": 8, "token_counts": 50, "n_ast_nodes": 80, "n_identifiers": 12, 
"random_cut": "def get_primary_key_column(self, cursor, table_name):\n \n cursor.execute(\n \"PRAGMA table_info(%s)\" % self.connection.ops.quote_name(table_name)\n )\n for _, name, *_, pk in cursor.fetchall():\n if pk:\n return name\n return", "d_id": 51034, "documentation": { "docstring": "Return the column name of the primary key for the given table.", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 82603, "commit_id": "d38f4a1cc7fc6b9e06a01622dd584329b73b410d", "repo": "django-cms", "path": "cms/utils/setup.py", "file_name": "setup.py", "fun_name": "validate_settings", "commit_message": "fix: Adds a deprecation warning for SEND_BROKEN_LINK_EMAILS (#7420)\n\n* Fix:\t\ttoolbar bug 3.10.rc1\r\n\r\n* Feat:\tDark mode support, including input from @marksweb, bugfix for tooltips\r\n\r\n* Upstream change to be able to merge\r\n\r\n* Feat: Dark mode support, including input from @marksweb, bugfix for tooltips\r\n\r\n* Revert \"Fix:\t\ttoolbar bug 3.10.rc1\"\r\n\r\nThis reverts commit 592a2b604e8f72b8e9c948e83163394cc6e8fe3d.\r\n\r\n* Fix:\t\tRecommit toolbar fix (??)\r\n\r\n* Fix:\t\tAfter lint failure: Remove spaces added by PyCharm\r\n\r\n* Fix:\t\tWizzard button color\r\n\r\n* Fix:\t\tCorrect toolbar according to cms_path\r\nFix:\t\tAvoid unnecessary toolbar loading\r\n\r\n* TASK: use isort to sort imports\r\n\r\n* Fix:\tMove CMS.API.Toolbar.get_color_scheme to CMS.API.Helpers.getColorScheme and CMS.API.Toolbar.set_color_scheme to CMS.API.Helpers.setColorScheme\r\n\r\n* Fix:\t\tTypo in comment\r\n\r\n* Fix:\t\tTypos in comments\r\n\r\n* Fix:\t\tTypos in comments\r\n\r\n* Add:\t\tChangelog entry\r\n\r\n* Fix:\t\tbase unit test for js frontend\r\n\r\n* Add:\t\tBasic set/get color scheme test\r\n\r\n* fix:\tdeprecate SEND_BROKEN_LINK_EMAILS setting\r\n\r\n* fix: flake8 w504\r\n\r\nCo-authored-by: Vinit Kumar \r\nCo-authored-by: Simon Krull \r\nCo-authored-by: Mark Walker ", "code": "def validate_settings():\n \n try:\n django_backend = [x for x in settings.TEMPLATES\n if x['BACKEND'] == 'django.template.backends.django.DjangoTemplates'][0]\n except IndexError:\n raise ImproperlyConfigured(\n \"django CMS requires django.template.context_processors.request in \"\n \"'django.template.backends.django.DjangoTemplates' context processors.\"\n )\n\n context_processors = django_backend.get('OPTIONS', {}).get('context_processors', [])\n if ('django.core.context_processors.request' not in context_processors and # noqa: W504\n 'django.template.context_processors.request' not in context_processors):\n raise ImproperlyConfigured(\"django CMS requires django.template.context_processors.request in \"\n \"'django.template.backends.django.DjangoTemplates' context processors.\")\n\n if (\n hasattr(settings, \"SEND_BROKEN_LINK_EMAILS\") and # noqa: W504\n \"django.middleware.common.BrokenLinkEmailsMiddleware\" not in getattr(settings, \"MIDDLEWARE\", [])\n ):\n warnings.warn('The setting \"SEND_BROKEN_LINK_EMAILS\" will not be honored by django CMS as of version 4.1. 
'\n 'Add \"django.middleware.common.BrokenLinkEmailsMiddleware\" to your MIDDLEWARE settings '\n 'instead.', DeprecationWarning)\n\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 301, "n_words": 95, "vocab_size": 68, "complexity": 8, "nloc": 21, "token_counts": 108, "n_ast_nodes": 201, "n_identifiers": 14, "random_cut": "def validate_settings():\n \n try:\n django_backend = [x for x in settings.TEMPLATES\n if x['BACKEND'] == 'django.template.backends.django.DjangoTemplates'][0]\n except IndexError:\n raise ImproperlyConfigured(\n \"django CMS requires django.template.context_processors.request in \"\n \"'django.template.backends.django.DjangoTemplates' context processors.\"\n ", "d_id": 17455, "documentation": { "docstring": "\n Check project settings file for required options\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 152965, "commit_id": "0bdc482d6f1682e103b4c4d7ee7c4d505d2d3b1c", "repo": "modin", "path": "modin/config/envvars.py", "file_name": "envvars.py", "fun_name": "get", "commit_message": "REFACTOR-#3768: change 'compute_chunksize' signature (#3769)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Anatoly Myachev ", "code": "def get(cls):\n \n min_partition_size = super().get()\n assert min_partition_size > 0, \"`min_partition_size` should be > 0\"\n return min_partition_size\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 23, "n_ast_nodes": 42, "n_identifiers": 4, "random_cut": "def get(cls):\n \n min_partition_size = super().get()\n assert min_partition_size > 0, \"`min_partition_size` should be > 0\"\n return min_partition_size\n\n", "d_id": 35209, "documentation": { "docstring": "\n Get ``MinPartitionSize`` with extra checks.\n\n Returns\n -------\n int\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 44, "language": "en" } }, { "id": 124089, "commit_id": "ac831fded416381ad3c7fe2ba135eaa4aaab5879", "repo": "ray", "path": "python/ray/tune/examples/pbt_function.py", "file_name": "pbt_function.py", "fun_name": "pbt_function", "commit_message": "[air] update documentation to use `session.report` (#26051)\n\nUpdate documentation to use `session.report`.\r\n\r\nNext steps:\r\n1. Update our internal caller to use `session.report`. Most importantly, CheckpointManager and DataParallelTrainer.\r\n2. Update `get_trial_resources` to use PGF notions to incorporate the requirement of ResourceChangingScheduler. @Yard1 \r\n3. After 2 is done, change all `tune.get_trial_resources` to `session.get_trial_resources`\r\n4. [internal implementation] remove special checkpoint handling logic from huggingface trainer. 
Optimize the flow for checkpoint conversion with `session.report`.\r\n\r\nCo-authored-by: Antoni Baum ", "code": "def pbt_function(config):\n \n lr = config[\"lr\"]\n accuracy = 0.0 # end = 1000\n start = 0\n if session.get_checkpoint():\n state = session.get_checkpoint().to_dict()\n accuracy = state[\"acc\"]\n start = state[\"step\"]\n\n midpoint = 100 # lr starts decreasing after acc > midpoint\n q_tolerance = 3 # penalize exceeding lr by more than this multiple\n noise_level = 2 # add gaussian noise to the acc increase\n # triangle wave:\n # - start at 0.001 @ t=0,\n # - peak at 0.01 @ t=midpoint,\n # - end at 0.001 @ t=midpoint * 2,\n for step in range(start, 100):\n if accuracy < midpoint:\n optimal_lr = 0.01 * accuracy / midpoint\n else:\n optimal_lr = 0.01 - 0.01 * (accuracy - midpoint) / midpoint\n optimal_lr = min(0.01, max(0.001, optimal_lr))\n\n # compute accuracy increase\n q_err = max(lr, optimal_lr) / min(lr, optimal_lr)\n if q_err < q_tolerance:\n accuracy += (1.0 / q_err) * random.random()\n elif lr > optimal_lr:\n accuracy -= (q_err - q_tolerance) * random.random()\n accuracy += noise_level * np.random.normal()\n accuracy = max(0, accuracy)\n\n checkpoint = None\n if step % 3 == 0:\n checkpoint = Checkpoint.from_dict({\"acc\": accuracy, \"step\": start})\n\n session.report(\n {\n \"mean_accuracy\": accuracy,\n \"cur_lr\": lr,\n \"optimal_lr\": optimal_lr, # for debugging\n \"q_err\": q_err, # for debugging\n \"done\": accuracy > midpoint * 2, # this stops the training process\n },\n checkpoint=checkpoint,\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 531, "n_words": 207, "vocab_size": 114, "complexity": 7, "nloc": 37, "token_counts": 253, "n_ast_nodes": 407, "n_identifiers": 25, "random_cut": "def pbt_function(config):\n \n lr = config[\"lr\"]\n accuracy = 0.0 # end = 1000\n start = 0\n if session.get_checkpoint():\n state = session.get_checkpoint().to_dict()\n accuracy = state[\"acc\"]\n start = state[\"step\"]\n\n midpoint = 100 # lr starts decreasing after acc > midpoint\n q_tolerance = 3 # penalize exceeding lr by more than this multiple\n noise_level = 2 # add gaussian noise to the acc increase\n # triangle wave:\n # - start at 0.001 @ t=0,\n # - peak at 0.01 @ t=midpoint,\n # - end at 0.001 @ t=midpoint * 2,\n for step in range(start, 100):\n if accuracy < midpoint:\n optimal_lr = 0.01 * accuracy / midpoint\n else:\n optimal_lr = 0.01 - 0.01 * (accuracy - midpoint) / midpoint\n optimal_lr = min(0.01, max(0.001, optimal_lr))\n\n # compute accuracy increase\n q_err = max(lr, optimal_lr) / min(lr, optimal_lr)\n if q_err < q_tolerance:\n accuracy", "d_id": 27510, "documentation": { "docstring": "Toy PBT problem for benchmarking adaptive learning rate.\n\n The goal is to optimize this trainable's accuracy. The accuracy increases\n fastest at the optimal lr, which is a function of the current accuracy.\n\n The optimal lr schedule for this problem is the triangle wave as follows.\n Note that many lr schedules for real models also follow this shape:\n\n best lr\n ^\n | /\\\n | / \\\n | / \\\n | / \\\n ------------> accuracy\n\n In this problem, using PBT with a population of 2-4 is sufficient to\n roughly approximate this lr schedule. Higher population sizes will yield\n faster convergence. 
Training will not converge without PBT.\n ", "n_words": 104, "vocab_size": 71, "n_whitespaces": 177, "language": "en" } }, { "id": 284498, "commit_id": "54a1b6f545a0016c576e9e00eef5c003d229dacf", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/options/hedge/hedge_model.py", "file_name": "hedge_model.py", "fun_name": "add_hedge_option", "commit_message": "Feature/hedge (#1768)\n\n* [Bug] Incorrect log for reddit keys. #1733 fix\r\n\r\n* Create new feature-hedge\r\n\r\n* Significantly improve code of hedge menu\r\n\r\n* More robust\r\n\r\n* Robustness\r\n\r\n* Fix tests\r\n\r\n* Fix can't multiply sequence by non-int of type 'numpy.float64' error\r\n\r\n* Temporary fix of singular matrix error. Return first feasible solution\r\n\r\n* Update Hugo Documentation\r\n\r\n* Combining menus and cleaning up code\r\n\r\n* Tidy up call_exp\r\n\r\n* Update tests Round 1\r\n\r\n* Update tests Round 2\r\n\r\n* Fix linting error\r\n\r\n* Fix linting?\r\n\r\n* Fixed glitch\r\n\r\nCo-authored-by: JerBouma \r\nCo-authored-by: James Maslek \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\nCo-authored-by: didierlopes.eth ", "code": "def add_hedge_option(price, implied_volatility, strike, days, side):\n \n # Determine delta position given the option\n delta = calc_delta(price, implied_volatility, strike, days, 0, side)\n\n # Determine gamma position given the option\n gamma = calc_gamma(price, implied_volatility, strike, days, 0)\n\n # Determine vega position given the option\n vega = calc_vega(price, implied_volatility, strike, days, 0)\n\n return delta, gamma, vega\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 77, "n_words": 53, "vocab_size": 25, "complexity": 1, "nloc": 5, "token_counts": 64, "n_ast_nodes": 88, "n_identifiers": 12, "random_cut": "def add_hedge_option(price, implied_volatility, strike, days, side):\n \n # Determine delta position given the option\n delta = calc_delta(price, implied_volatility, strike, days, 0, side)\n\n # Determine gamma position given the option\n gamma = calc_gamma(price, implied_volatility, strike, days, 0)\n\n # Determine vega position given the option\n vega = calc_vega(price, implied_volatility, strike, days, 0)\n\n return delta, gamma, vega\n\n", "d_id": 84764, "documentation": { "docstring": "Determine the delta, gamma and vega value of the portfolio and/or options.\n\n Parameters\n ----------\n price: int\n The price.\n implied_volatility: float\n The implied volatility.\n strike: float\n The strike price.\n days: float\n The amount of days until expiration. 
Use annual notation thus a month would be 30 / 360.\n sign: int\n Whether you have a long (1) or short (-1) position\n\n Returns\n -------\n delta: float\n gamma: float\n portfolio: float\n ", "n_words": 67, "vocab_size": 54, "n_whitespaces": 141, "language": "en" } }, { "id": 223645, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/charset.py", "file_name": "charset.py", "fun_name": "header_encode", "commit_message": "add python 3.10.4 for windows", "code": "def header_encode(self, string):\n \n codec = self.output_codec or 'us-ascii'\n header_bytes = _encode(string, codec)\n # 7bit/8bit encodings return the string unchanged (modulo conversions)\n encoder_module = self._get_encoder(header_bytes)\n if encoder_module is None:\n return string\n return encoder_module.header_encode(header_bytes, codec)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 93, "n_words": 33, "vocab_size": 26, "complexity": 3, "nloc": 7, "token_counts": 47, "n_ast_nodes": 78, "n_identifiers": 9, "random_cut": "def header_encode(self, string):\n \n codec = self.output_codec or 'us-ascii'\n header_bytes = _encode(string, codec)\n # 7bit/8bit encodings return the string unchanged (modulo conversions)\n encoder_module = self._get_encoder(header_bytes)\n if encoder_module is None:\n return string\n return encoder", "d_id": 57028, "documentation": { "docstring": "Header-encode a string by converting it first to bytes.\n\n The type of encoding (base64 or quoted-printable) will be based on\n this charset's `header_encoding`.\n\n :param string: A unicode string for the header. It must be possible\n to encode this string to bytes using the character set's\n output codec.\n :return: The encoded string, with RFC 2047 chrome.\n ", "n_words": 55, "vocab_size": 47, "n_whitespaces": 113, "language": "en" } }, { "id": 42583, "commit_id": "f019fbedb3d2b6a2e6b58ec1b38db612b106568b", "repo": "nltk", "path": "nltk/corpus/reader/bcp47.py", "file_name": "bcp47.py", "fun_name": "data_dict", "commit_message": "Support both iso639-3 codes and BCP-47 language tags (#3060)\n\n* Add support for iso639-3 language codes\r\n\r\n* Add support for retired language codes\r\n\r\n* Move langnames.py to the top-level\r\n\r\n* Add langcode() function\r\n\r\n* Add iso639retired dictionary\r\n\r\n* Improve wrapper functions\r\n\r\n* Add module docstring with doctest\r\n\r\n* Add 2-letter language codes\r\n\r\n* Add regular expression check\r\n\r\n* Improve inverse lookup of retired codes\r\n\r\n* Support BCP-47\r\n\r\n* Avoid deprecated langcodes\r\n\r\n* Set stack level for warnings to warn on the langname call\r\n\r\nNow it throws e.g.\r\n```\r\n...\\nltk_3060.py:9: UserWarning: Shortening 'smo' to 'sm'\r\n print(f\"{lang}: {langname(code)}\")\r\n```\r\n\r\nRather than\r\n```\r\n...\\nltk\\langnames.py:64: UserWarning: Shortening zha to za\r\n warn(f\"Shortening {code} to {code2}\")\r\n```\r\n\r\n* Dict key membership is equivalent to dict membership\r\n\r\n* Resolve bug: subtag -> tag\r\n\r\n* Capitalize BCP47 in CorpusReader name\r\n\r\n* Reimplement removed type hint changes from #3081\r\n\r\nCo-authored-by: Tom Aarsen ", "code": "def data_dict(self, records):\n \n self.version = records[0].replace(\"File-Date:\", \"\").strip()\n dic = {}\n dic[\"deprecated\"] = {}\n for label in [\n \"language\",\n \"extlang\",\n \"script\",\n \"region\",\n \"variant\",\n \"redundant\",\n \"grandfathered\",\n ]:\n 
dic[\"deprecated\"][label] = {}\n for record in records[1:]:\n fields = [field.split(\": \") for field in record.strip().split(\"\\n\")]\n typ = fields[0][1]\n tag = fields[1][1]\n if typ not in dic:\n dic[typ] = {}\n subfields = {}\n for field in fields[2:]:\n if len(field) == 2:\n [key, val] = field\n if key not in subfields:\n subfields[key] = [val]\n else: # multiple value\n subfields[key].append(val)\n else: # multiline field\n subfields[key][-1] += \" \" + field[0].strip()\n if (\n \"Deprecated\" not in record\n and typ == \"language\"\n and key == \"Description\"\n ):\n self.langcode[subfields[key][-1]] = tag\n for key in subfields:\n if len(subfields[key]) == 1: # single value\n subfields[key] = subfields[key][0]\n if \"Deprecated\" in record:\n dic[\"deprecated\"][typ][tag] = subfields\n else:\n dic[typ][tag] = subfields\n return dic\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 720, "n_words": 137, "vocab_size": 75, "complexity": 14, "nloc": 44, "token_counts": 294, "n_ast_nodes": 484, "n_identifiers": 20, "random_cut": "def data_dict(self, records):\n \n self.version = records[0].replace(\"File-Date:\", \"\").strip()\n dic = {}\n dic[\"deprecated\"] = {}\n for label in [\n \"language\",\n \"extlang\",\n \"script\",\n \"region\",\n \"variant\",\n \"redundant\",\n \"grandfathered\",\n ", "d_id": 7639, "documentation": { "docstring": "Convert the BCP-47 language subtag registry to a dictionary", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 291740, "commit_id": "c576a68d336bc91fd82c299d9b3e5dfdc1c14960", "repo": "core", "path": "tests/test_core.py", "file_name": "test_core.py", "fun_name": "test_async_add_hass_job_schedule_partial_coroutinefunction", "commit_message": "Upgrade pytest-aiohttp (#82475)\n\n* Upgrade pytest-aiohttp\r\n\r\n* Make sure executors, tasks and timers are closed\r\n\r\nSome test will trigger warnings on garbage collect, these warnings\r\nspills over into next test.\r\n\r\nSome test trigger tasks that raise errors on shutdown, these spill\r\nover into next test.\r\n\r\nThis is to mimic older pytest-aiohttp and it's behaviour on test\r\ncleanup.\r\n\r\nDiscussions on similar changes for pytest-aiohttp are here:\r\nhttps://github.com/pytest-dev/pytest-asyncio/pull/309\r\n\r\n* Replace loop with event_loop\r\n\r\n* Make sure time is frozen for tests\r\n\r\n* Make sure the ConditionType is not async\r\n\r\n /home-assistant/homeassistant/helpers/template.py:2082: RuntimeWarning: coroutine 'AsyncMockMixin._execute_mock_call' was never awaited\r\n def wrapper(*args, **kwargs):\r\n Enable tracemalloc to get traceback where the object was allocated.\r\n See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.\r\n\r\n* Increase litejet press tests with a factor 10\r\n\r\nThe times are simulated anyway, and we can't stop the normal\r\nevent from occuring.\r\n\r\n* Use async handlers for aiohttp\r\n\r\ntests/components/motioneye/test_camera.py::test_get_still_image_from_camera\r\ntests/components/motioneye/test_camera.py::test_get_still_image_from_camera\r\ntests/components/motioneye/test_camera.py::test_get_stream_from_camera\r\ntests/components/motioneye/test_camera.py::test_get_stream_from_camera\r\ntests/components/motioneye/test_camera.py::test_camera_option_stream_url_template\r\ntests/components/motioneye/test_camera.py::test_camera_option_stream_url_template\r\n 
/Users/joakim/src/hass/home-assistant/venv/lib/python3.9/site-packages/aiohttp/web_urldispatcher.py:189: DeprecationWarning: Bare functions are deprecated, use async ones\r\n warnings.warn(\r\n\r\n* Switch to freezegun in modbus tests\r\n\r\nThe tests allowed clock to tick in between steps\r\n\r\n* Make sure skybell object are fully mocked\r\n\r\nOld tests would trigger attempts to post to could services:\r\n\r\n```\r\nDEBUG:aioskybell:HTTP post https://cloud.myskybell.com/api/v3/login/ Request with headers: {'content-type': 'application/json', 'accept': '*/*', 'x-skybell-app-id': 'd2b542c7-a7e4-4e1e-b77d-2b76911c7c46', 'x-skybell-client-id': '1f36a3c0-6dee-4997-a6db-4e1c67338e57'}\r\n```\r\n\r\n* Fix sorting that broke after rebase", "code": "def test_async_add_hass_job_schedule_partial_coroutinefunction(event_loop):\n \n hass = MagicMock(loop=MagicMock(wraps=event_loop))\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 8, "token_counts": 82, "n_ast_nodes": 34, "n_identifiers": 6, "random_cut": "def test_async_add_hass_job_schedule_partial_coroutinefunction(event_loop):\n \n", "d_id": 90844, "documentation": { "docstring": "Test that we schedule partial coros and add jobs to the job pool.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 258256, "commit_id": "d114a994f1af71d3721cecd14da6f6b4592043b8", "repo": "haystack", "path": "haystack/utils/squad_data.py", "file_name": "squad_data.py", "fun_name": "to_label_objs", "commit_message": "refactor: update Squad data (#3513)\n\n* refractor the to_squad data class\r\n\r\n* fix the validation label\r\n\r\n* refractor the to_squad data class\r\n\r\n* fix the validation label\r\n\r\n* add the test for the to_label object function\r\n\r\n* fix the tests for to_label_objects\r\n\r\n* move all the test related to squad data to one file\r\n\r\n* remove unused imports\r\n\r\n* revert tiny_augmented.json\r\n\r\nCo-authored-by: ZanSara ", "code": "def to_label_objs(self, answer_type=\"generative\"):\n \n df_labels = self.df[[\"id\", \"question\", \"answer_text\", \"answer_start\", \"context\", \"document_id\"]]\n record_dicts = df_labels.to_dict(\"records\")\n labels = [\n Label(\n query=record[\"question\"],\n answer=Answer(answer=record[\"answer_text\"], answer_type=answer_type),\n is_correct_answer=True,\n is_correct_document=True,\n id=record[\"id\"],\n origin=record.get(\"origin\", \"gold-label\"),\n document=Document(content=record.get(\"context\"), id=str(record[\"document_id\"])),\n )\n for record in record_dicts\n ]\n return labels\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 216, "n_words": 36, "vocab_size": 32, "complexity": 2, "nloc": 16, "token_counts": 124, "n_ast_nodes": 206, "n_identifiers": 22, "random_cut": "def to_label_objs(self, answer_type=\"generative\"):\n \n df_labels = self.df[[\"id\", \"question\", \"answer_text\", \"answer_start\", \"context\", \"document_id\"]]\n record_dicts = df_labels.to_dict(\"records\")\n labels = [\n Label(\n query=record[\"question\"],\n answer=Answer(answer=record[\"answer_text\"], answer_type=answer_type),\n ", "d_id": 75208, "documentation": { "docstring": "Export all labels stored in this object to haystack.Label objects", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { 
"id": 319781, "commit_id": "53baed03895f28f24113d376b089e3ef281b34ed", "repo": "paperless-ngx", "path": "src/documents/tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_api_get_storage_path", "commit_message": "Increases test coverage of storage paths", "code": "def test_api_get_storage_path(self):\n \n response = self.client.get(\"/api/storage_paths/\", format=\"json\")\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data[\"count\"], 1)\n\n resp_storage_path = response.data[\"results\"][0]\n self.assertEqual(resp_storage_path[\"id\"], self.sp1.id)\n self.assertEqual(resp_storage_path[\"path\"], self.sp1.path)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 19, "vocab_size": 16, "complexity": 1, "nloc": 8, "token_counts": 94, "n_ast_nodes": 155, "n_identifiers": 13, "random_cut": "def test_api_get_storage_path(self):\n \n response = self.client.get(\"/api/storage_paths/\", format=\"json\")\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data[\"count\"], 1)\n\n resp_storage_path = response.data[\"results\"][0]\n self.assertEqual(resp_storage_path[\"id\"], self.sp1.i", "d_id": 116994, "documentation": { "docstring": "\n GIVEN:\n - API request to get all storage paths\n WHEN:\n - API is called\n THEN:\n - Existing storage paths are returned\n ", "n_words": 21, "vocab_size": 16, "n_whitespaces": 83, "language": "en" } }, { "id": 260439, "commit_id": "ae51c13af76af206e6815d0ca0d0052f73167caa", "repo": "scikit-learn", "path": "sklearn/manifold/tests/test_mds.py", "file_name": "test_mds.py", "fun_name": "test_normalize_metric_warning", "commit_message": "ENH Calculate normed stress (Stress-1) in `manifold.MDS` (#22562)\n\nCo-authored-by: Chiara Marmo \r\nCo-authored-by: Roth E Conrad \r\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Thomas J. 
Fan ", "code": "def test_normalize_metric_warning():\n \n msg = \"Normalized stress is not supported\"\n sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])\n with pytest.raises(ValueError, match=msg):\n mds.smacof(sim, metric=True, normalized_stress=True)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 52, "n_words": 33, "vocab_size": 29, "complexity": 1, "nloc": 5, "token_counts": 82, "n_ast_nodes": 117, "n_identifiers": 13, "random_cut": "def test_normalize_metric_warning():\n \n msg = \"Normalized stress is not supported\"\n sim = np.array([[0, 5, 3, 4],", "d_id": 76249, "documentation": { "docstring": "\n Test that a UserWarning is emitted when using normalized stress with\n metric-MDS.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 22, "language": "en" } }, { "id": 64188, "commit_id": "c2ecc7a2d1da839423fd768821b1f77ddcf7f53d", "repo": "erpnext", "path": "erpnext/patches/v13_0/add_bin_unique_constraint.py", "file_name": "add_bin_unique_constraint.py", "fun_name": "delete_and_patch_duplicate_bins", "commit_message": "refactor: patch for fixing broken bins\n\nfix(patch): delete fully broken bins\n\nif bin doesn't have item_code or warehouse then it's not recoverable.", "code": "def delete_and_patch_duplicate_bins():\n\n\tduplicate_bins = frappe.db.sql(, as_dict=1)\n\n\tfor duplicate_bin in duplicate_bins:\n\t\texisting_bins = frappe.get_list(\"Bin\",\n\t\t\t\tfilters={\n\t\t\t\t\t\"item_code\": duplicate_bin.item_code,\n\t\t\t\t\t\"warehouse\": duplicate_bin.warehouse\n\t\t\t\t\t},\n\t\t\t\tfields=[\"name\"],\n\t\t\t\torder_by=\"creation\",)\n\n\t\t# keep last one\n\t\texisting_bins.pop()\n\n\t\tfor broken_bin in existing_bins:\n\t\t\tfrappe.delete_doc(\"Bin\", broken_bin.name)\n\n\t\tqty_dict = {\n\t\t\t\"reserved_qty\": get_reserved_qty(duplicate_bin.item_code, duplicate_bin.warehouse),\n\t\t\t\"indented_qty\": get_indented_qty(duplicate_bin.item_code, duplicate_bin.warehouse),\n\t\t\t\"ordered_qty\": get_ordered_qty(duplicate_bin.item_code, duplicate_bin.warehouse),\n\t\t\t\"planned_qty\": get_planned_qty(duplicate_bin.item_code, duplicate_bin.warehouse),\n\t\t\t\"actual_qty\": get_balance_qty_from_sle(duplicate_bin.item_code, duplicate_bin.warehouse)\n\t\t}\n\n\t\tupdate_bin_qty(duplicate_bin.item_code, duplicate_bin.warehouse, qty_dict)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 32, "n_words": 54, "vocab_size": 47, "complexity": 3, "nloc": 30, "token_counts": 158, "n_ast_nodes": 254, "n_identifiers": 25, "random_cut": "def delete_and_patch_duplicate_bins():\n\n\tduplicate_bins = frappe.db.sql(, as_dict=1)\n\n\tfor duplicate_bin in duplicate_bins:\n\t\texisting_bins = frappe.get_list(\"Bin\",\n\t\t\t\tfilters={\n\t\t\t\t\t\"item_code\": duplicate_bin.item_code,\n\t\t\t\t\t\"warehouse\": duplicate_bin.warehouse\n\t\t\t\t\t},\n\t\t\t\tfields=[\"name\"],\n\t\t\t\torder_by=\"creation\",)\n\n\t\t# keep last one\n\t\texisting_bins.pop()\n\n\t\tfor broken_bin in existing_bins:\n\t\t\tfrappe.delete_doc(\"Bin\", broken_bin.name)\n\n\t\tqty_dict = {\n\t\t\t\"reserved_qty\": get_reserved_qty(duplicate_bin.item_code, duplicate_bin.warehouse),\n\t\t\t\"indented_qty\": get_indented_qty(duplicate_bin.item_code, duplicate_bin.warehouse),\n\t\t\t\"ordered_qty\": get_ordered_qty(duplicate_bin.item_code, duplicate_bin.warehouse),\n\t\t\t\"planned_qty\": 
get_planned_qty(duplicate_bin.item_code, duplicate_bin.warehouse),\n\t\t\t\"actual_qty\": get_bal", "d_id": 13576, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\titem_code, warehouse, count(*) as bin_count\n\t\tFROM\n\t\t\ttabBin\n\t\tGROUP BY\n\t\t\titem_code, warehouse\n\t\tHAVING\n\t\t\tbin_count > 1\n\t", "n_words": 16, "vocab_size": 14, "n_whitespaces": 8, "language": "en" } }, { "id": 127689, "commit_id": "8840be1942a69b2595a05c5c5556b0daec7abbcd", "repo": "ray", "path": "dashboard/modules/job/tests/test_job_agent.py", "file_name": "test_job_agent.py", "fun_name": "test_stop_long_running_job", "commit_message": "[Job Submission][refactor 4/N] Complete the remaining interfaces on JobAgent (#28533)\n\nSigned-off-by: Catch-Bull \r\njust need to implement stop_job, and I remove get_job_info because we can access JobInfoStorage without call `ray.init`.", "code": "async def test_stop_long_running_job(job_sdk_client):\n \n agent_client, head_client = job_sdk_client\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n path = Path(tmp_dir)\n driver_script = \n test_script_file = path / \"test_script.py\"\n with open(test_script_file, \"w+\") as file:\n file.write(driver_script)\n\n runtime_env = {\"working_dir\": tmp_dir}\n runtime_env = upload_working_dir_if_needed(runtime_env, tmp_dir, logger=logger)\n runtime_env = RuntimeEnv(**runtime_env).to_dict()\n\n request = validate_request_type(\n {\"runtime_env\": runtime_env, \"entrypoint\": \"python test_script.py\"},\n JobSubmitRequest,\n )\n submit_result = await agent_client.submit_job_internal(request)\n job_id = submit_result.submission_id\n\n resp = await agent_client.stop_job_internal(job_id)\n assert resp.stopped is True\n\n wait_for_condition(\n partial(\n _check_job, client=head_client, job_id=job_id, status=JobStatus.STOPPED\n ),\n timeout=10,\n )\n\n\n@pytest.mark.asyncio", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.asyncio", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 269, "n_words": 74, "vocab_size": 57, "complexity": 1, "nloc": 30, "token_counts": 152, "n_ast_nodes": 269, "n_identifiers": 40, "random_cut": "async def test_stop_long_running_job(job_sdk_client):\n \n agent_client, head_client = job_sdk_client\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n path = Path(tmp_dir)\n driver_script = \n test_script_file = path / \"test_script.py\"\n with open(test_script_file, \"w+\") as file:\n file.write(driver_script)\n\n runtime_env = {\"working_dir\": tmp_dir}\n runtime_env = upload_working_dir_if_needed(runtime_env, tmp_dir, logg", "d_id": 28504, "documentation": { "docstring": "\n Submit a job that runs for a while and stop it in the middle.\n \nprint('Hello !')\nimport time\ntime.sleep(300) # This should never finish\nraise RuntimeError('Intentionally failed.')\n ", "n_words": 27, "vocab_size": 26, "n_whitespaces": 38, "language": "en" } }, { "id": 281459, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py", "file_name": "dd_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing 
rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n source_txt = CRYPTO_SOURCES.get(self.source, \"?\") if self.source != \"\" else \"\"\n help_text = f\n console.print(text=help_text, menu=\"Stocks - Due Diligence\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 41, "token_counts": 42, "n_ast_nodes": 86, "n_identifiers": 12, "random_cut": "def print_help(self):\n \n source_txt = CRYPTO_SOURCES.get(self.source, \"?\") if self.source != \"\" else \"\"\n help_text = f\n console.print(text=help_text, menu=\"Stocks - Due Dil", "d_id": 83776, "documentation": { "docstring": "Print help[cmds]\n load load a specific cryptocurrency for analysis\n\n[param]Coin: [/param]{self.current_coin}\n[param]Source: [/param]{source_txt}\n\n[src]Glassnode[/src]\n active active addresses\n nonzero addresses with non-zero balances\n change 30d change of supply held on exchange wallets\n eb total balance held on exchanges (in percentage and units)\n[src]Coinglass[/src]\n oi open interest per exchange\n[src]CoinPaprika[/src]\n basic basic information about loaded coin\n ps price and supply related metrics for loaded coin\n mkt all markets for loaded coin\n ex all exchanges where loaded coin is listed\n twitter tweets for loaded coin\n events events related to loaded coin\n[src]CoinGecko[/src]\n info basic information about 
loaded coin\n market market stats about loaded coin\n ath all time high related stats for loaded coin\n atl all time low related stats for loaded coin\n web found websites for loaded coin e.g forum, homepage\n social social portals urls for loaded coin, e.g reddit, twitter\n score different kind of scores for loaded coin, e.g developer score, sentiment score\n dev github, bitbucket coin development statistics\n bc links to blockchain explorers for loaded coin\n[src]Binance[/src]\n binbook show order book\n balance show coin balance\n[src]Coinbase[/src]\n cbbook show order book\n trades show last trades\n stats show coin stats[/cmds]\n", "n_words": 187, "vocab_size": 107, "n_whitespaces": 499, "language": "en" } }, { "id": 89255, "commit_id": "07558e31bd672fab58cff55cf4e9cf0e02b36654", "repo": "sentry", "path": "tests/sentry/integrations/github/test_client.py", "file_name": "test_client.py", "fun_name": "test_get_cached_repo_files_with_all_files", "commit_message": "feat(derive-code-mappings): Add caching support for fetching files (#41777)\n\nThis improves the readability of the code and separates caching logic to their respective functions.\r\nThis allows getting files for a repo with caching support without having to call `get_trees_for_org`.\r\nThere will be a follow up PR to improve the caching logic.\r\n\r\nCo-authored-by: Mark Story ", "code": "def test_get_cached_repo_files_with_all_files(self):\n \n responses.add(\n method=responses.GET,\n url=f\"https://api.github.com/repos/{self.repo.name}/git/trees/master?recursive=1\",\n status=200,\n json={\n \"tree\": [\n {\"type\": \"blob\", \"path\": \"src/foo.py\"},\n {\"type\": \"blob\", \"path\": \"README\"},\n ]\n },\n )\n repo_key = f\"github:repo:{self.repo.name}:all\"\n assert cache.get(repo_key) is None\n with mock.patch(\"sentry.integrations.github.client.get_jwt\", return_value=b\"jwt_token_1\"):\n files = self.client.get_cached_repo_files(self.repo.name, \"master\")\n assert files == [\"src/foo.py\"]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 225, "n_words": 38, "vocab_size": 32, "complexity": 1, "nloc": 17, "token_counts": 103, "n_ast_nodes": 201, "n_identifiers": 20, "random_cut": "def test_get_cached_repo_files_with_all_files(self):\n \n responses.add(\n method=responses.GET,", "d_id": 18522, "documentation": { "docstring": "Fetch files for repo. All files rather than just source code files", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 204604, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/base.py", "file_name": "base.py", "fun_name": "check_migrations", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def check_migrations(self):\n \n from django.db.migrations.executor import MigrationExecutor\n\n try:\n executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])\n except ImproperlyConfigured:\n # No databases are configured (or the dummy one)\n return\n\n plan = executor.migration_plan(executor.loader.graph.leaf_nodes())\n if plan:\n apps_waiting_migration = sorted(\n {migration.app_label for migration, backwards in plan}\n )\n self.stdout.write(\n self.style.NOTICE(\n \"\\nYou have %(unapplied_migration_count)s unapplied migration(s). 
\"\n \"Your project may not work properly until you apply the \"\n \"migrations for app(s): %(apps_waiting_migration)s.\"\n % {\n \"unapplied_migration_count\": len(plan),\n \"apps_waiting_migration\": \", \".join(apps_waiting_migration),\n }\n )\n )\n self.stdout.write(\n self.style.NOTICE(\"Run 'python manage.py migrate' to apply them.\")\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 421, "n_words": 79, "vocab_size": 69, "complexity": 4, "nloc": 25, "token_counts": 117, "n_ast_nodes": 201, "n_identifiers": 26, "random_cut": "def check_migrations(self):\n \n from django.db.migrations.executor import MigrationExecutor\n\n try:\n executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])\n except ImproperlyConfigured:\n # No databases are configured (or the dummy one)\n return\n\n plan = executor.migration_plan(executor.loader.graph.leaf_nodes())\n if plan:\n apps_waiting_migration = sorted(\n {migration.app_label for migration, backwards in plan}\n )\n self.stdout.write(\n self.style.NOTICE(\n \"\\nYou have %(unapplied_migration_count)s unapplied migration(s). \"\n \"Your project may not work properly until you apply the \"\n \"migrations for app(s): %(apps_waiting_migration)s.\"\n % {\n \"unapplied_migration_count\": len(plan),\n \"apps_waiting_migration\": \", \".join(app", "d_id": 50810, "documentation": { "docstring": "\n Print a warning if the set of migrations on disk don't match the\n migrations in the database.\n ", "n_words": 17, "vocab_size": 14, "n_whitespaces": 39, "language": "en" } }, { "id": 176449, "commit_id": "cc1db275efc709cb964ce88abbfa877798d58c10", "repo": "networkx", "path": "networkx/algorithms/chordal.py", "file_name": "chordal.py", "fun_name": "find_induced_nodes", "commit_message": "Minor improvements from general code readthrough (#5414)\n\n* Add deprecated directive to reversed docstring.\r\n\r\n* Add missing dep directives to shpfiles.\r\n\r\n* Remove defn of INF sentinel.\r\n\r\n* typo.\r\n\r\n* str -> comment in forloop.\r\n\r\n* STY: appropriate casing for var name.", "code": "def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize):\n \n if not is_chordal(G):\n raise nx.NetworkXError(\"Input graph is not chordal.\")\n\n H = nx.Graph(G)\n H.add_edge(s, t)\n induced_nodes = set()\n triplet = _find_chordality_breaker(H, s, treewidth_bound)\n while triplet:\n (u, v, w) = triplet\n induced_nodes.update(triplet)\n for n in triplet:\n if n != s:\n H.add_edge(s, n)\n triplet = _find_chordality_breaker(H, s, treewidth_bound)\n if induced_nodes:\n # Add t and the second node in the induced path from s to t.\n induced_nodes.add(t)\n for u in G[s]:\n if len(induced_nodes & set(G[u])) == 2:\n induced_nodes.add(u)\n break\n return induced_nodes\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 232, "n_words": 82, "vocab_size": 60, "complexity": 8, "nloc": 21, "token_counts": 149, "n_ast_nodes": 233, "n_identifiers": 24, "random_cut": "def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize):\n \n if not is_chordal(G):\n raise nx.NetworkXError(\"Input graph is not chordal.\")\n\n H = nx.Graph(G)\n H.add_edge(s, t)\n induced_nodes = set()\n triplet = _find_chordality_breaker(H, s, treewidth_bound)\n while triplet:\n (u, v, w) = triplet\n induced_nodes.update(triplet)\n for n in triplet:\n if n != s:\n H.add_edge(s, n)\n triplet = _find_chordality_breaker(H, s, 
tre", "d_id": 41910, "documentation": { "docstring": "Returns the set of induced nodes in the path from s to t.\n\n Parameters\n ----------\n G : graph\n A chordal NetworkX graph\n s : node\n Source node to look for induced nodes\n t : node\n Destination node to look for induced nodes\n treewidth_bound: float\n Maximum treewidth acceptable for the graph H. The search\n for induced nodes will end as soon as the treewidth_bound is exceeded.\n\n Returns\n -------\n induced_nodes : Set of nodes\n The set of induced nodes in the path from s to t in G\n\n Raises\n ------\n NetworkXError\n The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.\n If the input graph is an instance of one of these classes, a\n :exc:`NetworkXError` is raised.\n The algorithm can only be applied to chordal graphs. If the input\n graph is found to be non-chordal, a :exc:`NetworkXError` is raised.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G = nx.generators.classic.path_graph(10)\n >>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2)\n >>> sorted(induced_nodes)\n [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n Notes\n -----\n G must be a chordal graph and (s,t) an edge that is not in G.\n\n If a treewidth_bound is provided, the search for induced nodes will end\n as soon as the treewidth_bound is exceeded.\n\n The algorithm is inspired by Algorithm 4 in [1]_.\n A formal definition of induced node can also be found on that reference.\n\n References\n ----------\n .. [1] Learning Bounded Treewidth Bayesian Networks.\n Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008.\n http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf\n ", "n_words": 239, "vocab_size": 126, "n_whitespaces": 416, "language": "en" } }, { "id": 170548, "commit_id": "f9ff3796329e4bedb4a5477739f5eb8d2e40761d", "repo": "pandas", "path": "pandas/conftest.py", "file_name": "conftest.py", "fun_name": "any_skipna_inferred_dtype", "commit_message": "STYLE fix: pylint \"consider-using-from\" (#49335)\n\n* use from import\r\n\r\n* delete empty file\r\n\r\nCo-authored-by: carlotta \r\nCo-authored-by: cfabian ", "code": "def any_skipna_inferred_dtype(request):\n \n inferred_dtype, values = request.param\n values = np.array(values, dtype=object) # object dtype to avoid casting\n\n # correctness of inference tested in tests/dtypes/test_inference.py\n return inferred_dtype, values\n\n\n# ----------------------------------------------------------------\n# Misc\n# ----------------------------------------------------------------\n@pytest.fixture", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 45, "n_words": 33, "vocab_size": 24, "complexity": 1, "nloc": 4, "token_counts": 29, "n_ast_nodes": 60, "n_identifiers": 11, "random_cut": "def any_skipna_inferred_dtype(request):\n \n inferred_dtype, values = request.param\n values = np.array(values, dtype=object) #", "d_id": 40573, "documentation": { "docstring": "\n Fixture for all inferred dtypes from _libs.lib.infer_dtype\n\n The covered (inferred) types are:\n * 'string'\n * 'empty'\n * 'bytes'\n * 'mixed'\n * 'mixed-integer'\n * 'mixed-integer-float'\n * 'floating'\n * 'integer'\n * 'decimal'\n * 'boolean'\n * 'datetime64'\n * 'datetime'\n * 'date'\n * 'timedelta'\n * 'time'\n * 'period'\n * 'interval'\n\n Returns\n -------\n inferred_dtype : str\n The string for the inferred dtype from _libs.lib.infer_dtype\n values : np.ndarray\n An array of object dtype that will be inferred to have\n 
`inferred_dtype`\n\n Examples\n --------\n >>> from pandas._libs import lib\n >>>\n >>> def test_something(any_skipna_inferred_dtype):\n ... inferred_dtype, values = any_skipna_inferred_dtype\n ... # will pass\n ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype\n ", "n_words": 100, "vocab_size": 68, "n_whitespaces": 230, "language": "en" } }, { "id": 9409, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py", "file_name": "upfirdn_2d.py", "fun_name": "downsample_2d", "commit_message": "initialize ostec", "code": "def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):\n r\n\n assert isinstance(factor, int) and factor >= 1\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * gain\n p = k.shape[0] - factor\n return _simple_upfirdn_2d(x, k, down=factor, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)\n\n#----------------------------------------------------------------------------\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 43, "vocab_size": 36, "complexity": 3, "nloc": 28, "token_counts": 87, "n_ast_nodes": 153, "n_identifiers": 16, "random_cut": "def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):\n r\n\n assert isinstance(factor, int)", "d_id": 1609, "documentation": { "docstring": "Downsample a batch of 2D images with the given filter.\n\n Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`\n and downsamples each image with the given filter. The filter is normalized so that\n if the input pixels are constant, they will be scaled by the specified `gain`.\n Pixels outside the image are assumed to be zero, and the filter is padded with\n zeros so that its shape is a multiple of the downsampling factor.\n\n Args:\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).\n The default is `[1] * factor`, which corresponds to average pooling.\n factor: Integer downsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).\n impl: Name of the implementation to use. 
Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the shape `[N, C, H // factor, W // factor]` or\n `[N, H // factor, W // factor, C]`, and same datatype as `x`.\n ", "n_words": 181, "vocab_size": 106, "n_whitespaces": 327, "language": "en" } }, { "id": 168224, "commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", "repo": "pandas", "path": "pandas/core/groupby/grouper.py", "file_name": "grouper.py", "fun_name": "_check_deprecated_resample_kwargs", "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", "code": "def _check_deprecated_resample_kwargs(kwargs, origin):\n \n # Deprecation warning of `base` and `loffset` since v1.1.0:\n # we are raising the warning here to be able to set the `stacklevel`\n # properly since we need to raise the `base` and `loffset` deprecation\n # warning from three different cases:\n # core/generic.py::NDFrame.resample\n # core/groupby/groupby.py::GroupBy.resample\n # core/groupby/grouper.py::Grouper\n # raising these warnings from TimeGrouper directly would fail the test:\n # tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base\n\n if kwargs.get(\"base\", None) is not None:\n warnings.warn(\n \"'base' in .resample() and in Grouper() is deprecated.\\n\"\n \"The new arguments that you should use are 'offset' or 'origin'.\\n\"\n '\\n>>> df.resample(freq=\"3s\", base=2)\\n'\n \"\\nbecomes:\\n\"\n '\\n>>> df.resample(freq=\"3s\", offset=\"2s\")\\n',\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n if kwargs.get(\"loffset\", None) is not None:\n warnings.warn(\n \"'loffset' in .resample() and in Grouper() is deprecated.\\n\"\n '\\n>>> df.resample(freq=\"3s\", loffset=\"8H\")\\n'\n \"\\nbecomes:\\n\"\n \"\\n>>> from pandas.tseries.frequencies import to_offset\"\n '\\n>>> df = df.resample(freq=\"3s\").mean()'\n '\\n>>> df.index = df.index.to_timestamp() + to_offset(\"8H\")\\n',\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 373, "n_words": 136, "vocab_size": 85, "complexity": 3, "nloc": 22, "token_counts": 83, "n_ast_nodes": 176, "n_identifiers": 11, "random_cut": "def _check_deprecated_resample_kwargs(kwargs, origin):\n \n # Deprecation warning of `base` and `loffset` since v1.1.0:\n # we are raising the warning here to be able to set the `stacklevel`\n # properly since we need to raise the `base` and `loffset` deprecation\n # warning from three different cases:\n # core/generic.py::NDFrame.resample\n # core/groupby/groupby.py::GroupBy.resample\n # core/groupby/grouper.py::Grouper\n # raising these warnings from TimeGrouper directly would fail the test:\n # tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base\n\n if kwargs.get(\"base\", None) is not None:\n warnings.warn(\n \"'base' in .resample() and in Grouper() is deprecated.\\n\"\n \"The new arguments that you should use are 'offset' or 'origin'.\\n\"\n '\\n>>> df.resample(freq=\"3s\", base=2)\\n'\n \"\\nbecomes:\\n\"\n '\\n>>> df.resample(freq=\"3s\", offset=\"2s\")\\n',\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n ", "d_id": 40239, "documentation": { "docstring": "\n Check for use of deprecated parameters in ``resample`` and related functions.\n\n Raises the appropriate warnings if these parameters are detected.\n Only sets an approximate ``stacklevel`` for the warnings (see #37603, #36629).\n\n 
Parameters\n ----------\n kwargs : dict\n Dictionary of keyword arguments to check for deprecated parameters.\n origin : object\n From where this function is being called; either Grouper or TimeGrouper. Used\n to determine an approximate stacklevel.\n ", "n_words": 65, "vocab_size": 54, "n_whitespaces": 111, "language": "en" } }, { "id": 44887, "commit_id": "1b568d73e1dfb838a3a0446e3a6063b9f27f04b8", "repo": "airflow", "path": "airflow/providers/google/cloud/hooks/datacatalog.py", "file_name": "datacatalog.py", "fun_name": "get_conn", "commit_message": "Extract ClientInfo to module level (#21554)", "code": "def get_conn(self) -> DataCatalogClient:\n \n if not self._client:\n self._client = DataCatalogClient(credentials=self._get_credentials(), client_info=CLIENT_INFO)\n return self._client\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 45, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 36, "n_ast_nodes": 60, "n_identifiers": 8, "random_cut": "def get_conn(self) -> DataCatalogClient:\n \n", "d_id": 8400, "documentation": { "docstring": "Retrieves client library object that allow access to Cloud Data Catalog service.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 56932, "commit_id": "574d10ff7612661b37801c811862f18998521d58", "repo": "prefect", "path": "src/prefect/blocks/kubernetes.py", "file_name": "kubernetes.py", "fun_name": "get_api_client", "commit_message": "organizational changes for the KubernetesClusterConfig and add from_environment classmethod", "code": "def get_api_client(self) -> ApiClient:\n \n try:\n return new_client_from_config_dict(\n config_dict=self.config, context=self.context\n )\n except ConfigException:\n raise\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 82, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 10, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "def get_api_client(self) -> ApiClient:\n \n try:\n return new_client_from_config_dict(\n config_dict=self.config, context=self.context\n )\n ", "d_id": 11591, "documentation": { "docstring": "\n Returns an instance of the kubernetes api client with a specific context\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 182433, "commit_id": "57a05c7bbd14728f0dbde8b8e55d6f086362c35e", "repo": "textual", "path": "src/textual/_arrangement.py", "file_name": "_arrangement.py", "fun_name": "cuts", "commit_message": "ws", "code": "def cuts(self) -> list[list[int]]:\n \n if self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for _ in range(height)]\n\n if self.map is not None:\n for region, order, clip in self.map.values():\n region = region.intersection(clip)\n if region and (region in screen_region):\n region_cuts = region.x_extents\n for y in region.y_range:\n cuts_sets[y].update(region_cuts)\n\n # Sort the cuts for each line\n self._cuts = [sorted(cut_set) for cut_set in cuts_sets]\n return self._cuts\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 258, "n_words": 75, "vocab_size": 51, "complexity": 9, "nloc": 23, "token_counts": 143, "n_ast_nodes": 218, "n_identifiers": 25, 
"random_cut": "def cuts(self) -> list[list[int]]:\n \n if self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for", "d_id": 43822, "documentation": { "docstring": "Get vertical cuts.\n\n A cut is every point on a line where a widget starts or ends.\n\n Returns:\n list[list[int]]: A list of cuts for every line.\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 58, "language": "en" } }, { "id": 270861, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer_utils.py", "file_name": "base_layer_utils.py", "fun_name": "is_subclassed", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def is_subclassed(layer):\n \n return (\n layer.__module__.find(\"keras.engine\") == -1\n and layer.__module__.find(\"keras.layers\") == -1\n )\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 12, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 32, "n_ast_nodes": 58, "n_identifiers": 4, "random_cut": "def is_subclassed(layer):\n \n return (\n la", "d_id": 80576, "documentation": { "docstring": "Returns True if the object is a subclassed layer or subclassed model.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 196925, "commit_id": "0b4d5fa57d64b1102e51e03ed80013e16053bf96", "repo": "sympy", "path": "sympy/matrices/dense.py", "file_name": "dense.py", "fun_name": "_mat", "commit_message": "Update the deprecation of the _mat and _smat Matrix properties", "code": "def _mat(self):\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.9\",\n active_deprecations_target=\"deprecated-private-matrix-attributes\"\n )\n\n return self.flat()\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 62, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 10, "token_counts": 23, "n_ast_nodes": 42, "n_identifiers": 6, "random_cut": "def _mat(self):\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.9\",\n active_deprecations_target=\"deprecated-private-matrix-attributes\"\n )\n\n return", "d_id": 48249, "documentation": { "docstring": "\n The private _mat attribute of Matrix is deprecated. 
Use the\n .flat() method instead.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 47, "language": "en" } }, { "id": 266037, "commit_id": "ea6d86e6c4bb6037465410db6205a7471bc81a6c", "repo": "netbox", "path": "netbox/extras/tests/test_customfields.py", "file_name": "test_customfields.py", "fun_name": "test_missing_required_field", "commit_message": "Closes #10052: The cf attribute now returns deserialized custom field data", "code": "def test_missing_required_field(self):\n \n cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True)\n cf3.save()\n cf3.content_types.set([ContentType.objects.get_for_model(Site)])\n\n site = Site(name='Test Site', slug='test-site')\n\n # Set custom field data with a required field omitted\n site.custom_field_data['foo'] = 'abc'\n with self.assertRaises(ValidationError):\n site.clean()\n\n site.custom_field_data['baz'] = 'def'\n site.clean()\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 115, "n_words": 34, "vocab_size": 28, "complexity": 1, "nloc": 10, "token_counts": 92, "n_ast_nodes": 165, "n_identifiers": 22, "random_cut": "def test_missing_required_field(self):\n \n cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True)\n cf3.save()\n cf3.conte", "d_id": 78274, "documentation": { "docstring": "\n Check that a ValidationError is raised if any required custom fields are not present.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 78230, "commit_id": "524cab82e33b43463b746c3df1a80657b3ae874a", "repo": "wagtail", "path": "wagtail/admin/tests/test_templatetags.py", "file_name": "test_templatetags.py", "fun_name": "test_basic", "commit_message": "Introduce new template fragment composition tags", "code": "def test_basic(self):\n context = Context({})\n\n template = \n\n expected = \n\n self.assertHTMLEqual(expected, Template(template).render(context))\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 40, "n_words": 11, "vocab_size": 9, "complexity": 1, "nloc": 15, "token_counts": 34, "n_ast_nodes": 60, "n_identifiers": 9, "random_cut": "def test_basic(self):\n context = Context({})\n\n template = \n\n expected = \n\n self.assertHTMLEqual(expected, Template(", "d_id": 16736, "documentation": { "docstring": "\n {% load wagtailadmin_tags %}\n {% fragment as my_fragment %}\n

Hello, World\n {% endfragment %}\n Text coming after:\n {{ my_fragment }}\n \n Text coming after:\n Hello, World
    \n ", "n_words": 25, "vocab_size": 15, "n_whitespaces": 129, "language": "en" } }, { "id": 167732, "commit_id": "f65417656ba8c59438d832b6e2a431f78d40c21c", "repo": "pandas", "path": "pandas/core/arrays/sparse/accessor.py", "file_name": "accessor.py", "fun_name": "to_dense", "commit_message": "TYP: more return annotations in core/ (#47618)\n\n* TYP: more return annotations in core/\r\n\r\n* from __future__ import annotations\r\n\r\n* more __future__", "code": "def to_dense(self) -> Series:\n \n from pandas import Series\n\n return Series(\n self._parent.array.to_dense(),\n index=self._parent.index,\n name=self._parent.name,\n )\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 75, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 32, "token_counts": 42, "n_ast_nodes": 67, "n_identifiers": 8, "random_cut": "def to_dense(self) -> Series:\n \n from pandas import Series\n\n return Series(\n self._parent.array.to_dense", "d_id": 40094, "documentation": { "docstring": "\n Convert a Series from sparse values to dense.\n\n .. versionadded:: 0.25.0\n\n Returns\n -------\n Series:\n A Series with the same values, stored as a dense array.\n\n Examples\n --------\n >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))\n >>> series\n 0 0\n 1 1\n 2 0\n dtype: Sparse[int64, 0]\n\n >>> series.sparse.to_dense()\n 0 0\n 1 1\n 2 0\n dtype: int64\n ", "n_words": 54, "vocab_size": 39, "n_whitespaces": 217, "language": "en" } }, { "id": 63296, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "replaceHTMLEntity", "commit_message": "upd; format", "code": "def replaceHTMLEntity(t):\n \n return _htmlEntityMap.get(t.entity)\n\n# it's easy to get these comment structures wrong - they're very common, so may as well make them available\ncStyleComment = Combine(Regex(r\"/\\*(?:[^*]|\\*(?!/))*\") + '*/').setName(\"C style comment\")\n\"Comment of the form ``/* ... */``\"\n\nhtmlComment = Regex(r\"\").setName(\"HTML comment\")\n\"Comment of the form ````\"\n\nrestOfLine = Regex(r\".*\").leaveWhitespace().setName(\"rest of line\")\ndblSlashComment = Regex(r\"//(?:\\\\\\n|[^\\n])*\").setName(\"// comment\")\n\"Comment of the form ``// ... (to end of line)``\"\n\ncppStyleComment = Combine(Regex(r\"/\\*(?:[^*]|\\*(?!/))*\") + '*/' | dblSlashComment).setName(\"C++ style comment\")\n\"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`\"\n\njavaStyleComment = cppStyleComment\n\"Same as :class:`cppStyleComment`\"\n\npythonStyleComment = Regex(r\"#.*\").setName(\"Python style comment\")\n\"Comment of the form ``# ... 
(to end of line)``\"\n\n_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')\n + Optional(Word(\" \\t\")\n + ~Literal(\",\") + ~LineEnd()))).streamline().setName(\"commaItem\")\ncommaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default=\"\")).setName(\"commaSeparatedList\")\n\n\n# some other useful expressions - using lower-case class name since we are really using this as a namespace", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 207, "n_words": 141, "vocab_size": 91, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 347, "n_identifiers": 30, "random_cut": "def replaceHTMLEntity(t):\n \n return _htmlEntityMap.get(t.entity)\n\n# it's easy to get these comment structures wrong - they're very common, so may as well make them available\ncStyleComment = Combine(Regex(r\"/\\*(?:[^*]|\\*(?!/))*\") + '*/').setName(\"C style comment\")\n\"Comment of the form ``/* ... */``\"\n\nhtmlComment = Regex(r\"\").setName(\"HTML comment\")\n\"Comment of the form ````\"\n\nrestOfLine = Regex(r\".*\").leaveWhitespace().setName(\"rest of line\")\ndblSlashComment = Regex(r\"//(?:\\\\\\n|[^\\n])*\").setName(\"// comment\")\n\"Comment of the form ``// ... (to end of line)``\"\n\ncppStyleComment = Combine(R", "d_id": 13236, "documentation": { "docstring": "Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or\nquoted strings, separated by commas.\n\nThis expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.\n", "n_words": 34, "vocab_size": 31, "n_whitespaces": 31, "language": "en" } }, { "id": 6800, "commit_id": "9ae57a93ee4011c3d56cb0284ec07a861f3c88ff", "repo": "ludwig", "path": "tests/integration_tests/utils.py", "file_name": "utils.py", "fun_name": "read_csv_with_nan", "commit_message": "Adds regression tests for #2020 (#2021)\n\n* fixes nans in dask df engine\r\n\r\n* adds tests\r\n\r\n* fixes with logs\r\n\r\n* fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* cleanup\r\n\r\n* checking accuracy closeness\r\n\r\n* investigates ray batcher dropping samples with logs\r\n\r\n* clean up for PR review\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* cleanup\r\n\r\n* add missing test param\r\n\r\n* updated sampling to nan_percent% of rows in each col\r\n\r\n* cleanup\r\n\r\nCo-authored-by: Geoffrey Angus \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def read_csv_with_nan(path, nan_percent=0.0):\n \n df = pd.read_csv(path)\n if nan_percent > 0:\n num_rows = len(df)\n for col in df.columns:\n for row in random.sample(range(num_rows), int(round(nan_percent * num_rows))):\n df[col].iloc[row] = np.nan\n return df\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 81, "n_words": 29, "vocab_size": 24, "complexity": 4, "nloc": 8, "token_counts": 76, "n_ast_nodes": 116, "n_identifiers": 19, "random_cut": "def read_csv_with_nan(path, nan_percent=0.0):\n \n df = pd.read_csv(path)\n if nan_percent > 0:\n num_rows = len(df)\n for col in df.columns:\n for row in random.sampl", "d_id": 1072, "documentation": { "docstring": 
"Converts `nan_percent` of samples in each row of the CSV at `path` to NaNs.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 50719, "commit_id": "a6790a651a12eb391060e533868bf0ba197f6f7e", "repo": "PaddleHub", "path": "modules/image/text_to_image/stable_diffusion/diffusers/models/resnet.py", "file_name": "resnet.py", "fun_name": "_upsample_2d", "commit_message": "Add stable diffusion module", "code": "def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1):\n \n\n assert isinstance(factor, int) and factor >= 1\n\n # Setup filter kernel.\n if k is None:\n k = [1] * factor\n\n # setup kernel\n k = np.asarray(k, dtype=np.float32)\n if k.ndim == 1:\n k = np.outer(k, k)\n k /= np.sum(k)\n\n k = k * (gain * (factor**2))\n\n if self.use_conv:\n convH = w.shape[2]\n convW = w.shape[3]\n inC = w.shape[1]\n\n p = (k.shape[0] - factor) - (convW - 1)\n\n stride = (factor, factor)\n # Determine data dimensions.\n stride = [1, 1, factor, factor]\n output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW)\n output_padding = (\n output_shape[0] - (x.shape[2] - 1) * stride[0] - convH,\n output_shape[1] - (x.shape[3] - 1) * stride[1] - convW,\n )\n assert output_padding[0] >= 0 and output_padding[1] >= 0\n inC = w.shape[1]\n num_groups = x.shape[1] // inC\n\n # Transpose weights.\n w = paddle.reshape(w, (num_groups, -1, inC, convH, convW))\n w = w[..., ::-1, ::-1].transpose([0, 2, 1, 3, 4])\n w = paddle.reshape(w, (num_groups * inC, -1, convH, convW))\n\n x = F.conv2d_transpose(x, w, stride=stride, output_padding=output_padding, padding=0)\n\n x = upfirdn2d_native(x, paddle.to_tensor(k), pad=((p + 1) // 2 + factor - 1, p // 2 + 1))\n else:\n p = k.shape[0] - factor\n x = upfirdn2d_native(x, paddle.to_tensor(k), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2))\n\n return x\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 586, "n_words": 219, "vocab_size": 112, "complexity": 6, "nloc": 33, "token_counts": 434, "n_ast_nodes": 670, "n_identifiers": 36, "random_cut": "def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1):\n \n\n assert isinstance(factor, int) and factor >= 1\n\n # Setup filter kernel.\n if k is None:\n k = [1] * factor\n\n # setup kernel\n k = np.asarray(k, dtype=np.float32)\n if k.ndim == 1:\n k = np.outer(k, k)\n k /= np.sum(k)\n\n k = k * (gain * (factor**2))\n\n if self.use_conv:\n convH = w.shape[2]\n convW = w.shape[3]\n inC = w.shape[1]\n\n p = (k.shape[0] - factor) - (convW - 1)\n\n stride = (factor, factor)\n # Determine data dimensions.\n stride = [1, 1, factor, factor]\n output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW)\n output_padding = (\n output_shape[0] - (x.shape[2] - 1) * stride[0] - convH,\n output_shape[1] - (x.shape[3] - 1) * stride[1] - convW,\n )\n assert output_padding[0] >= 0 and output_padding[1] >= 0\n inC = w.shape[1]\n num_groups = x.shape[1] // inC\n\n # Transpose weights.\n w = paddle.reshape(w, (num_groups, -1, inC, convH, convW))\n w = w[..., ::-1, ::-1].transpose([0, 2, 1, 3, 4])\n w = paddle.reshape(w, (num_groups * inC, -1, convH, convW))\n\n x = F.conv2d_transpose(x, w, st", "d_id": 10203, "documentation": { "docstring": "Fused `upsample_2d()` followed by `Conv2d()`.\n\n Args:\n Padding is performed only once at the beginning, not between the operations. 
The fused op is considerably more\n efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary:\n order.\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,\n C]`.\n w: Weight tensor of the shape `[filterH, filterW, inChannels,\n outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as\n `x`.\n ", "n_words": 139, "vocab_size": 102, "n_whitespaces": 256, "language": "en" } }, { "id": 267980, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/docker_util.py", "file_name": "docker_util.py", "fun_name": "get_network_names", "commit_message": "ansible-test - Use more native type hints. (#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def get_network_names(self) -> t.Optional[t.List[str]]:\n \n if self.networks is None:\n return None\n\n return sorted(self.networks)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 44, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 34, "n_ast_nodes": 55, "n_identifiers": 8, "random_cut": "def get_network_names(self) -> t.Optional[t.List[str]]:\n \n i", "d_id": 79255, "documentation": { "docstring": "Return a list of the network names the container is attached to.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 67035, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/projects/report/project_wise_stock_tracking/project_wise_stock_tracking.py", "file_name": "project_wise_stock_tracking.py", "fun_name": "get_delivered_items_cost", "commit_message": "style: format code with black", "code": "def get_delivered_items_cost():\n\tdn_items = frappe.db.sql(\n\t\t,\n\t\tas_dict=1,\n\t)\n\n\tsi_items = frappe.db.sql(\n\t\t,\n\t\tas_dict=1,\n\t)\n\n\tdn_item_map = {}\n\tfor item in dn_items:\n\t\tdn_item_map.setdefault(item.project, item.amount)\n\n\tfor item in si_items:\n\t\tdn_item_map.setdefault(item.project, item.amount)\n\n\treturn dn_item_map\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 16, "n_words": 31, "vocab_size": 19, "complexity": 3, "nloc": 22, "token_counts": 74, "n_ast_nodes": 116, "n_identifiers": 12, "random_cut": "def get_delivered_items_cost():\n\tdn_items = frappe.db.sql(\n\t\t,\n\t\tas_dict=1,\n\t)\n\n\tsi_items = 
frappe.db.sql(\n\t\t,\n\t\tas_dict=1,\n\t)\n\n\tdn_item_map = {}\n\tfor item in dn_items:\n\t\tdn_item_map.setdefault(item.project, item.amount)\n\n\tfor item in si_items:", "d_id": 14413, "documentation": { "docstring": "select dn.project, sum(dn_item.base_net_amount) as amount\n\t\tfrom `tabDelivery Note` dn, `tabDelivery Note Item` dn_item\n\t\twhere dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != ''\n\t\tgroup by dn.projectselect si.project, sum(si_item.base_net_amount) as amount\n\t\tfrom `tabSales Invoice` si, `tabSales Invoice Item` si_item\n\t\twhere si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1\n\t\tand si.is_pos = 1 and ifnull(si.project, '') != ''\n\t\tgroup by si.project", "n_words": 65, "vocab_size": 40, "n_whitespaces": 57, "language": "en" } }, { "id": 200574, "commit_id": "6c55ca197b0f795047d8f8ee0d871ab36600d560", "repo": "sympy", "path": "sympy/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "__new__", "commit_message": "move dummy index deduping to TensMul.__new__\n\nAlso removed _eval_subs and _xreplace. All tests pass.", "code": "def __new__(cls, *args, **kw_args):\n is_canon_bp = kw_args.get('is_canon_bp', False)\n args = list(map(_sympify, args))\n\n \n free = [get_free_indices(arg) for arg in args]\n free = set(itertools.chain(*free)) #flatten free\n newargs = []\n for arg in args:\n dum_this = set(get_dummy_indices(arg))\n dum_other = [get_dummy_indices(a) for a in newargs]\n dum_other = set(itertools.chain(*dum_other)) #flatten dum_other\n free_this = set(get_free_indices(arg))\n if len(dum_this.intersection(free)) > 0:\n exclude = free_this.union(free, dum_other)\n newarg = TensMul._dedupe_indices(arg, exclude, arg._index_structure)\n else:\n newarg = arg\n newargs.append(newarg)\n\n args = newargs\n\n # Flatten:\n args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])]\n\n args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False)\n\n # Data for indices:\n index_types = [i.tensor_index_type for i in indices]\n index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp)\n\n obj = TensExpr.__new__(cls, *args)\n obj._indices = indices\n obj._index_types = index_types[:]\n obj._index_structure = index_structure\n obj._free = index_structure.free[:]\n obj._dum = index_structure.dum[:]\n obj._free_indices = {x[0] for x in obj.free}\n obj._rank = len(obj.free)\n obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum)\n obj._coeff = S.One\n obj._is_canon_bp = is_canon_bp\n return obj\n\n index_types = property(lambda self: self._index_types)\n free = property(lambda self: self._free)\n dum = property(lambda self: self._dum)\n free_indices = property(lambda self: self._free_indices)\n rank = property(lambda self: self._rank)\n ext_rank = property(lambda self: self._ext_rank)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 508, "n_words": 186, "vocab_size": 107, "complexity": 10, "nloc": 37, "token_counts": 348, "n_ast_nodes": 648, "n_identifiers": 61, "random_cut": "def __new__(cls, *args, **kw_args):\n is_canon_bp = kw_args.get('is_canon_bp', False)\n args = list(map(_sympify, args))\n\n \n free = [get_free_indices(arg) for arg in args]\n free = set(itertools.chain(*free)) #flatten free\n newargs = []\n for arg in args:\n dum_this = set(get_dummy_indices(arg))\n dum_other = [get_dummy_indices(a) 
for a in newargs]\n dum_other = set(itertools.chain(*dum_other)) #flatten dum_other\n free_this = set(get_free_indices(arg))\n if len(dum_this.intersect", "d_id": 49709, "documentation": { "docstring": "\n If the internal dummy indices in one arg conflict with the free indices of the remaining args, we need to rename those internal dummy indices.\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 40, "language": "en" } }, { "id": 96402, "commit_id": "146fba432a32568be7d0b884dae0c39a6c33a11f", "repo": "sentry", "path": "tests/sentry/incidents/test_action_handlers.py", "file_name": "test_action_handlers.py", "fun_name": "test_context_for_crash_rate_alert", "commit_message": "fix(metric_alerts): Make sure critical triggers resolve properly when no action is set on a warning trigger (#31883)\n\n### Problem\r\nIf we have an alert set up like:\r\n- Warning: 50. Action: None\r\n- Critical: 100. Action: Slack\r\n\r\nThen if we go from critical -> warning state the slack resolve action will fail to fire.\r\n\r\n### Cause\r\nThe reason this happens is related to a previous fix. For an alert like\r\n- Warning: 50. Action: Slack\r\n- Critical: 100. Action: Slack\r\n\r\nWhen going from critical -> warning the critical action would be marked as resolved. This would\r\ncause a slack notification with `Resolved` to be sent to the channel. This is misleading, because\r\nthe alert is still active, just in the warning state. What we want here is to fire a warning\r\nnotification instead.\r\n\r\nThe initial fix for this was that when we resolved a critical trigger, we’d check and see whether\r\nthere was an active warning trigger. If so, we’d send a warning trigger fire to our actions, rather\r\nthan a critical trigger resolve. This works ok for many cases, but fails when the actions on the\r\nwarning trigger are different to those on the critical trigger.\r\n\r\n### Fix\r\nSubstituting the warning trigger for the critical trigger causes us subtle bugs. So, instead of\r\nthis, when triggering fires/resolves on our action handlers we will also pass along the incident\r\nstate change that the trigger/resolve caused the incident to go into.\r\n\r\nSo if a critical trigger resolves, we check what state it would have put the incident in. If\r\nthere’s a warning trigger, then the state is warning. If no warning trigger, the state is closed.\r\nThis state is then used to appropriately generate the messages that we send to users via our\r\nvarious actions.\r\n\r\nSo now, If we have an alert set up like:\r\n- Warning: 50. Action: None\r\n- Critical: 100. Action: Slack\r\n\r\nIf this goes from\r\n- critical -> warning OR critical -> resolved we will send `IncidentStatus.WARNING` to any actions\r\nrelated to the critical trigger. \r\n- warning -> resolved We do nothing since there are no actions on the warning trigger\r\n\r\nIf we have an alert set up like:\r\n- Warning: 50. Action: Slack\r\n- Critical: 100. Action: Slack\r\n\r\nIf this goes from:\r\n- critical -> warning: critical trigger, `IncidentStatus.Warning`\r\n- warning -> resolved: warning trigger, `IncidentStatus.Closed`\r\n- critical -> resolved: Since we de-dupe triggers to avoid spamming the user, we will select the\r\nwarning trigger here, and send `IncidentStatus.closed`\r\n\r\nIf we have an alert set up like:\r\n- Warning: 50. Action: Slack\r\n- Critical: 100. Action: Pagerduty\r\n\r\nIf this goes from:\r\n- critical -> warning: critical trigger, `IncidentStatus.Warning` sent to Pagerduty. 
Nothing sent\r\nto Slack\r\n- warning -> resolved: warning trigger, `IncidentStatus.Closed` sent to Slack. Nothing sent to\r\nPagerduty\r\n- critical -> resolved: Critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Warning\r\ntrigger, `IncidentStatus.Closed` sent to Slack. We don’t de-dupe here since the actions are\r\ndifferent.", "code": "def test_context_for_crash_rate_alert(self):\n \n status = TriggerStatus.ACTIVE\n incident = self.create_incident()\n alert_rule = self.create_alert_rule(\n aggregate=\"percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate\"\n )\n alert_rule_trigger = self.create_alert_rule_trigger(alert_rule)\n action = self.create_alert_rule_trigger_action(\n alert_rule_trigger=alert_rule_trigger, triggered_for_incident=incident\n )\n assert (\n generate_incident_trigger_email_context(\n self.project, incident, action.alert_rule_trigger, status, IncidentStatus.CRITICAL\n )[\"aggregate\"]\n == \"percentage(sessions_crashed, sessions)\"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 178, "n_words": 38, "vocab_size": 32, "complexity": 1, "nloc": 16, "token_counts": 76, "n_ast_nodes": 124, "n_identifiers": 19, "random_cut": "def test_context_for_crash_rate_alert(self):\n \n status = TriggerStatus.ACTIVE\n incident = self.create_incident()\n alert_rule = self.create_alert_rule(\n aggregate=\"percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate\"\n )\n alert_rule_trigger = self.create_alert_rule_trigger(alert_rule)\n action = self.create_alert_rule_trigger_action(\n alert_rule_trigger=alert_rule_trigger, triggered_for_incident=incident\n )\n assert (\n generate_incident_trigger_email_context(\n self.project, incident, action.alert_rule_trigger, status, IncidentStatus.CRITICAL\n )[\"aggregate\"]\n == \"percentage(sessions_crashed, sessions)\"\n )", "d_id": 19308, "documentation": { "docstring": "\n Test that ensures the metric name for Crash rate alerts excludes the alias\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 206293, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/loaders/base.py", "file_name": "base.py", "fun_name": "get_template", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_template(self, template_name, skip=None):\n \n tried = []\n\n for origin in self.get_template_sources(template_name):\n if skip is not None and origin in skip:\n tried.append((origin, \"Skipped to avoid recursion\"))\n continue\n\n try:\n contents = self.get_contents(origin)\n except TemplateDoesNotExist:\n tried.append((origin, \"Source does not exist\"))\n continue\n else:\n return Template(\n contents,\n origin,\n origin.template_name,\n self.engine,\n )\n\n raise TemplateDoesNotExist(template_name, tried=tried)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 302, "n_words": 49, "vocab_size": 43, "complexity": 5, "nloc": 19, "token_counts": 98, "n_ast_nodes": 155, "n_identifiers": 13, "random_cut": "def get_template(self, template_name, skip=None):\n \n tried = []\n\n for origin in self.get_template_sources(template_name):\n if skip is n", "d_id": 51471, "documentation": { "docstring": "\n Call self.get_template_sources() and return a Template object for\n the first template matching template_name. 
If skip is provided, ignore\n template origins in skip. This is used to avoid recursion during\n template extending.\n ", "n_words": 31, "vocab_size": 28, "n_whitespaces": 67, "language": "en" } }, { "id": 275289, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/optimizer.py", "file_name": "optimizer.py", "fun_name": "from_config", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def from_config(cls, config):\n \n if \"learning_rate\" in config:\n if isinstance(config[\"learning_rate\"], dict):\n config[\"learning_rate\"] = learning_rate_schedule.deserialize(\n config[\"learning_rate\"]\n )\n return cls(**config)\n\n\nbase_optimizer_keyword_args = \n\n\n# pylint: disable=g-classes-have-attributes\n@keras_export(\"keras.optimizers.experimental.Optimizer\", v1=[])", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.optimizers.experimental.Optimizer\", v1=[])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 103, "n_words": 24, "vocab_size": 21, "complexity": 3, "nloc": 7, "token_counts": 44, "n_ast_nodes": 104, "n_identifiers": 10, "random_cut": "def from_config(cls, config):\n \n if \"learning_rate\" in config:\n if isinstance(config[\"learning_rate\"], dict):\n config[\"learning_rate\"] = learning_rate_schedule.deserialize(\n config[\"learning_rate\"]\n )\n return cls(", "d_id": 81373, "documentation": { "docstring": "Creates an optimizer from its config.\n\n This method is the reverse of `get_config`, capable of instantiating the\n same optimizer from the config dictionary.\n\n Args:\n config: A Python dictionary, typically the output of get_config.\n\n Returns:\n An optimizer instance.\n name: String. The name to use\n for momentum accumulator weights created by\n the optimizer.\n clipnorm: Float. If set, the gradient of each weight is individually\n clipped so that its norm is no higher than this value.\n clipvalue: Float. If set, the gradient of each weight is clipped to be no\n higher than this value.\n global_clipnorm: Float. If set, the gradient of all weights is clipped so\n that their global norm is no higher than this value.\n use_ema: Boolean, defaults to False. If True, exponential moving average\n (EMA) is applied. EMA consists of computing an exponential moving\n average of the weights of the model (as the weight values change after\n each training batch), and periodically overwriting the weights with\n their moving average.\n ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is\n the momentum to use when computing the EMA of the model's weights:\n `new_average = ema_momentum * old_average + (1 - ema_momentum) *\n current_variable_value`.\n ema_overwrite_frequency: Int or None, defaults to None. Only used if\n `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we\n overwrite the model variable by its moving average. If None, the optimizer\n does not overwrite model variables in the middle of training, and you\n need to explicitly overwrite the variables at the end of training\n by calling `optimizer.finalize_variable_values()` (which updates the model\n variables in-place). When using the built-in `fit()` training loop, this\n happens automatically after the last epoch, and you don't need to do\n anything.\n jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA\n compilation. 
If no GPU device is found, this flag will be ignored.\n **kwargs: keyword arguments only used for backward compatibility.", "n_words": 306, "vocab_size": 166, "n_whitespaces": 492, "language": "en" } }, { "id": 42764, "commit_id": "595981c8ad3cfeb4ad7a4514d00060e978aa9d81", "repo": "airflow", "path": "airflow/providers/amazon/aws/log/s3_task_handler.py", "file_name": "s3_task_handler.py", "fun_name": "close", "commit_message": "Light Refactor and Clean-up AWS Provider (#23907)", "code": "def close(self):\n \n # When application exit, system shuts down all handlers by\n # calling close method. Here we check if logger is already\n # closed to prevent uploading the log to remote storage multiple\n # times when `logging.shutdown` is called.\n if self.closed:\n return\n\n super().close()\n\n if not self.upload_on_close:\n return\n\n local_loc = os.path.join(self.local_base, self.log_relative_path)\n remote_loc = os.path.join(self.remote_base, self.log_relative_path)\n if os.path.exists(local_loc):\n # read log and remove old logs to get just the latest additions\n log = pathlib.Path(local_loc).read_text()\n self.s3_write(log, remote_loc)\n\n # Mark closed so we don't double write if close is called twice\n self.closed = True\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 238, "n_words": 92, "vocab_size": 68, "complexity": 4, "nloc": 12, "token_counts": 93, "n_ast_nodes": 158, "n_identifiers": 19, "random_cut": "def close(self):\n \n # When application exit, system shuts down all handlers by\n # calling close method. Here we check if logger is already\n # closed to prevent uploading the log to remote storage multiple\n # times when `logging.shutdown` is called.\n if self.closed:\n return\n\n super().close()\n\n if not self.upload_on_close:\n return\n\n local_loc = os.path.join(self.local_base, self.log_relative_path)\n remote_loc = os.path.join(self.remote_base, self.log_relative_path)\n if os.path.exists(local_loc):\n # read log and remove old logs to get just the latest additions\n log = pathlib.Path(local_loc).read_text()\n self.s3_write(log, remote_loc)\n\n # Mark closed so we don't double write if close is called twice\n self.clo", "d_id": 7726, "documentation": { "docstring": "Close and upload local log file to remote storage S3.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 203167, "commit_id": "c5c7a15b09368a58340d3a65ba9d1f1441e92dc8", "repo": "django", "path": "tests/view_tests/views.py", "file_name": "views.py", "fun_name": "safestring_in_template_exception", "commit_message": "Fixed #33461 -- Escaped template errors in the technical 500 debug page.", "code": "def safestring_in_template_exception(request):\n \n template = Template('{% extends \"\" %}')\n try:\n template.render(Context())\n except Exception:\n return technical_500_response(request, *sys.exc_info())\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 41, "n_words": 15, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 37, "n_ast_nodes": 67, "n_identifiers": 10, "random_cut": "def safestring_in_template_exception(request):\n \n template = Template('{% extends \"\" %}')\n try:\n template.render(Cont", "d_id": 50242, "documentation": { "docstring": "\n Trigger an exception in the template machinery which causes a SafeString\n to be inserted as args[0] of the Exception.\n ", 
"n_words": 19, "vocab_size": 18, "n_whitespaces": 29, "language": "en" } }, { "id": 316439, "commit_id": "7cd68381f1d4f58930ffd631dfbfc7159d459832", "repo": "core", "path": "tests/test_config_entries.py", "file_name": "test_config_entries.py", "fun_name": "test_unique_id_in_progress", "commit_message": "Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)", "code": "async def test_unique_id_in_progress(hass, manager):\n \n mock_integration(hass, MockModule(\"comp\"))\n mock_entity_platform(hass, \"config_flow.comp\", None)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 17, "token_counts": 127, "n_ast_nodes": 45, "n_identifiers": 6, "random_cut": "async def test_unique_id_in_progress(hass, manager):\n \n mock_integration(hass, Mo", "d_id": 115017, "documentation": { "docstring": "Test that we abort if there is already a flow in progress with same unique id.", "n_words": 16, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 250140, "commit_id": "3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b", "repo": "synapse", "path": "tests/storage/test_event_chain.py", "file_name": "test_event_chain.py", "fun_name": "test_simple", "commit_message": "Require types in tests.storage. (#14646)\n\nAdds missing type hints to `tests.storage` package\r\nand does not allow untyped definitions.", "code": "def test_simple(self) -> None:\n \n\n event_factory = self.hs.get_event_builder_factory()\n bob = \"@creator:test\"\n alice = \"@alice:test\"\n room_id = \"!room:test\"\n\n # Ensure that we have a rooms entry so that we generate the chain index.\n self.get_success(\n self.store.store_room(\n room_id=room_id,\n room_creator_user_id=\"\",\n is_public=True,\n room_version=RoomVersions.V6,\n )\n )\n\n create = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Create,\n \"state_key\": \"\",\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"create\"},\n },\n ).build(prev_event_ids=[], auth_event_ids=[])\n )\n\n bob_join = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": bob,\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"bob_join\"},\n },\n ).build(prev_event_ids=[], auth_event_ids=[create.event_id])\n )\n\n power = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.PowerLevels,\n \"state_key\": \"\",\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"power\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, bob_join.event_id],\n )\n )\n\n alice_invite = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": alice,\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"alice_invite\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],\n )\n )\n\n alice_join = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": alice,\n \"sender\": alice,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"alice_join\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, alice_invite.event_id, power.event_id],\n )\n )\n\n power_2 = self.get_success(\n 
event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.PowerLevels,\n \"state_key\": \"\",\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"power_2\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],\n )\n )\n\n bob_join_2 = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": bob,\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"bob_join_2\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],\n )\n )\n\n alice_join2 = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": alice,\n \"sender\": alice,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"alice_join2\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[\n create.event_id,\n alice_join.event_id,\n power_2.event_id,\n ],\n )\n )\n\n events = [\n create,\n bob_join,\n power,\n alice_invite,\n alice_join,\n bob_join_2,\n power_2,\n alice_join2,\n ]\n\n expected_links = [\n (bob_join, create),\n (power, create),\n (power, bob_join),\n (alice_invite, create),\n (alice_invite, power),\n (alice_invite, bob_join),\n (bob_join_2, power),\n (alice_join2, power_2),\n ]\n\n self.persist(events)\n chain_map, link_map = self.fetch_chains(events)\n\n # Check that the expected links and only the expected links have been\n # added.\n self.assertEqual(len(expected_links), len(list(link_map.get_additions())))\n\n for start, end in expected_links:\n start_id, start_seq = chain_map[start.event_id]\n end_id, end_seq = chain_map[end.event_id]\n\n self.assertIn(\n (start_seq, end_seq), list(link_map.get_links_between(start_id, end_id))\n )\n\n # Test that everything can reach the create event, but the create event\n # can't reach anything.\n for event in events[1:]:\n self.assertTrue(\n link_map.exists_path_from(\n chain_map[event.event_id], chain_map[create.event_id]\n ),\n )\n\n self.assertFalse(\n link_map.exists_path_from(\n chain_map[create.event_id],\n chain_map[event.event_id],\n ),\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 2689, "n_words": 338, "vocab_size": 148, "complexity": 3, "nloc": 175, "token_counts": 808, "n_ast_nodes": 1276, "n_identifiers": 55, "random_cut": "def test_simple(self) -> None:\n \n\n event_factory = self.hs.get_event_builder_factory()\n bob = \"@creator:test\"\n alice = \"@alice:test\"\n room_id = \"!room:test\"\n\n # Ensure that we have a rooms entry so that we generate the chain index.\n self.get_success(\n self.store.store_room(\n room_id=room_id,\n room_creator_user_id=\"\",\n is_public=True,\n room_version=RoomVersions.V6,\n )\n )\n\n create = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Create,\n \"state_key\": \"\",\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"create\"},\n },\n ).build(prev_event_ids=[], auth_event_ids=[])\n )\n\n bob_join = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": bob,\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"bob_join\"},\n },\n ).build(prev_event_ids=[], auth_event_ids=[create.event_id])\n )\n\n power = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n 
\"type\": EventTypes.PowerLevels,\n \"state_key\": \"\",\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"power\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, bob_join.event_id],\n )\n )\n\n alice_invite = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": alice,\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"alice_invite\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],\n )\n )\n\n alice_join = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": alice,\n \"sender\": alice,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"alice_join\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, alice_invite.event_id, power.event_id],\n )\n )\n\n power_2 = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.PowerLevels,\n \"state_key\": \"\",\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"power_2\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],\n )\n )\n\n bob_join_2 = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": bob,\n \"sender\": bob,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"bob_join_2\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],\n )\n )\n\n alice_join2 = self.get_success(\n event_factory.for_room_version(\n RoomVersions.V6,\n {\n \"type\": EventTypes.Member,\n \"state_key\": alice,\n \"sender\": alice,\n \"room_id\": room_id,\n \"content\": {\"tag\": \"alice_join2\"},\n },\n ).build(\n prev_event_ids=[],\n auth_event_ids=[\n create.event_id,\n alice_join.event_id,\n power_2.event_id,\n ],\n )\n )\n\n events = [\n create,\n bob_join,\n power,\n alice_invite,\n alice_join,\n bob_join_2,\n power_2,\n alice_join2,\n ]\n\n expected_links = [\n (bob_join, create),\n (power, create),\n (power, bob_join),\n (alice_invite, create),\n (alice_invite, power),\n (alice_invite, bob_join),\n (bob_join_2, power),\n (alice_join2, power_2),\n ]\n\n self.persist(events)\n chain_map, link_map = self.fetch_chains(events)\n\n # Check that the expected links and only the expected links have been\n # added.\n self.assertEqual(len(expected_links), len(list(link_map.get_additions())))\n\n for start, end in expected_links:\n start_id, start_seq = chain_map[start.event_id]\n end_id, end_seq = chain_map[end.event_id]\n\n ", "d_id": 73285, "documentation": { "docstring": "Test that the example in `docs/auth_chain_difference_algorithm.md`\n works.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 21, "language": "en" } }, { "id": 295382, "commit_id": "ccd5ada3414b8b51835a7a29b2e5c2a70464987f", "repo": "core", "path": "homeassistant/components/withings/common.py", "file_name": "common.py", "fun_name": "_do_retry", "commit_message": "Fix withings race condition for access token (#69107)", "code": "async def _do_retry(self, func, attempts=3) -> Any:\n \n # pylint: disable=no-self-use\n exception = None\n for attempt in range(1, attempts + 1):\n _LOGGER.debug(\"Attempt %s of %s\", attempt, attempts)\n try:\n return await func()\n except Exception as exception1: # pylint: disable=broad-except\n _LOGGER.debug(\n \"Failed attempt %s of %s (%s)\", 
attempt, attempts, exception1\n )\n # Make each backoff pause a little bit longer\n await asyncio.sleep(0.5 * attempt)\n exception = exception1\n continue\n\n if exception:\n raise exception\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 273, "n_words": 69, "vocab_size": 56, "complexity": 4, "nloc": 19, "token_counts": 83, "n_ast_nodes": 135, "n_identifiers": 14, "random_cut": "async def _do_retry(self, func, attempts=3) -> Any:\n \n # pylint: disable=no-self-use\n exception = None\n for attempt in range(1, attempts + 1):\n _LOGGER.debug(\"Attempt %s of %s\", attempt, attempts)\n try:\n return await func()\n except Exception as exception1: # pylint: disable=broad-except\n _LOGGER.debug(\n \"Failed attempt %s of %s (%s)\", attempt, attempts, exce", "d_id": 94400, "documentation": { "docstring": "Retry a function call.\n\n Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 31, "language": "en" } }, { "id": 218881, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/refactor.py", "file_name": "refactor.py", "fun_name": "refactor_doctest", "commit_message": "add python 3.10.4 for windows", "code": "def refactor_doctest(self, block, lineno, indent, filename):\n \n try:\n tree = self.parse_block(block, lineno, indent)\n except Exception as err:\n if self.logger.isEnabledFor(logging.DEBUG):\n for line in block:\n self.log_debug(\"Source: %s\", line.rstrip(\"\\n\"))\n self.log_error(\"Can't parse docstring in %s line %s: %s: %s\",\n filename, lineno, err.__class__.__name__, err)\n return block\n if self.refactor_tree(tree, filename):\n new = str(tree).splitlines(keepends=True)\n # Undo the adjustment of the line numbers in wrap_toks() below.\n clipped, new = new[:lineno-1], new[lineno-1:]\n assert clipped == [\"\\n\"] * (lineno-1), clipped\n if not new[-1].endswith(\"\\n\"):\n new[-1] += \"\\n\"\n block = [indent + self.PS1 + new.pop(0)]\n if new:\n block += [indent + self.PS2 + line for line in new]\n return block\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 343, "n_words": 97, "vocab_size": 66, "complexity": 8, "nloc": 20, "token_counts": 195, "n_ast_nodes": 313, "n_identifiers": 30, "random_cut": "def refactor_doctest(self, block, lineno, indent, filename):\n \n try:\n tree = self.parse_block(block, lineno, indent)\n except Exception as err:\n if self.logger.isEnabledFor(logging.DEBUG):\n for line in block:\n self.log_debug(\"Source: %s\", line.rstrip(\"\\n\"))\n self.log_error(\"Can't parse docstring in %s line %s: %s: %s\",\n filename, lineno, err.__class__.__name__, err)\n return block\n if self.refactor_tree(tree, filename):\n new = str(tree).splitlines(keepends=True)\n # Undo the adjustment of the line numbers in wrap_toks() below.\n clipped, new = new[:lineno-1], new[lineno-1:]\n assert clipped == [\"\\n\"] * (lineno-1), clipped\n if not new[-1].endswith(\"\\n\"):\n new[-1] += \"\\n\"\n block = [indent + self.PS1 + new.pop(0)]\n if new:\n ", "d_id": 55526, "documentation": { "docstring": "Refactors one doctest.\n\n A doctest is given as a block of lines, the first of which starts\n with \">>>\" (possibly indented), while the remaining lines start\n with \"...\" (identically indented).\n\n ", "n_words": 30, "vocab_size": 27, 
"n_whitespaces": 58, "language": "en" } }, { "id": 115819, "commit_id": "cbe6767de6152a78348a8047244e5e3305b24e04", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/bigquery_handler/bigquery_handler.py", "file_name": "bigquery_handler.py", "fun_name": "get_tables", "commit_message": "Add handler", "code": "def get_tables(self, dataset_id) -> Response:\n \n client = self.connect()\n result = client.list_tables(dataset_id)\n return result\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 41, "n_words": 13, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 27, "n_ast_nodes": 46, "n_identifiers": 8, "random_cut": "def get_tables(self, dataset_id) -> Response:\n \n client = self.connect()\n ", "d_id": 25567, "documentation": { "docstring": "\n Get a list with all of the tabels in BigQuery\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 262841, "commit_id": "1a7d704ffbabb433007e3ba04750c2f13ade48e5", "repo": "pyinstaller", "path": "PyInstaller/depend/dylib.py", "file_name": "dylib.py", "fun_name": "mac_set_relative_dylib_deps", "commit_message": "Fix typos (#6782) [skip ci]", "code": "def mac_set_relative_dylib_deps(libname, distname):\n \n\n from macholib import util\n from macholib.MachO import MachO\n\n # Ignore bootloader; otherwise PyInstaller fails with exception like\n # 'ValueError: total_size > low_offset (288 > 0)'\n if os.path.basename(libname) in _BOOTLOADER_FNAMES:\n return\n\n # Determine how many directories up ('../') is the directory with shared dynamic libraries.\n # E.g., ./qt4_plugins/images/ -> ./../../\n parent_dir = ''\n # Check if distname is not only base filename.\n if os.path.dirname(distname):\n parent_level = len(os.path.dirname(distname).split(os.sep))\n parent_dir = parent_level * (os.pardir + os.sep)\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 130, "n_words": 76, "vocab_size": 61, "complexity": 5, "nloc": 21, "token_counts": 141, "n_ast_nodes": 132, "n_identifiers": 17, "random_cut": "def mac_set_relative_dylib_deps(libname, distname):\n \n\n from macholib import util\n from macholib.MachO import MachO\n\n # Ignore bootloader; otherwise PyInstaller fails with exception like\n # 'ValueError: total_size > low_offset (288 > 0)'\n if os.path.basename(libname) in _BOOTLOADER_FNAMES:\n return\n\n # Determine how many directories up ('../') is the directory with shared dynamic libraries.\n # E.g., ./qt4_plugins/images/", "d_id": 77401, "documentation": { "docstring": "\n On Mac OS set relative paths to dynamic library dependencies of `libname`.\n\n Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH. There are known some issues with\n DYLD_LIBRARY_PATH. Relative paths is more flexible mechanism.\n\n Current location of dependent libraries is derived from the location of the library path (paths start with\n '@loader_path').\n\n 'distname' path of the library relative to dist directory of frozen executable. We need this to determine the level\n of directory level for @loader_path of binaries not found in dist directory.\n\n For example, Qt5 plugins are not in the same directory as Qt*.dylib files. Without using\n '@loader_path/../..' 
for Qt plugins, Mac OS would not be able to resolve shared library dependencies,\n and Qt plugins will not be loaded.\n ", "n_words": 120, "vocab_size": 78, "n_whitespaces": 203, "language": "en" } }, { "id": 276381, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/testing_infra/test_utils.py", "file_name": "test_utils.py", "fun_name": "get_v2_optimizer", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_v2_optimizer(name, **kwargs):\n \n try:\n return _V2_OPTIMIZER_MAP[name](**kwargs)\n except KeyError:\n raise ValueError(\n \"Could not find requested v2 optimizer: {}\\nValid choices: {}\".format(\n name, list(_V2_OPTIMIZER_MAP.keys())\n )\n )\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 90, "n_words": 23, "vocab_size": 22, "complexity": 2, "nloc": 9, "token_counts": 42, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def get_v2_optimizer(name, **kwargs):\n \n try:\n return _V2_OPTIMIZER_MAP[name](**kwargs)\n except KeyError:\n ", "d_id": 81643, "documentation": { "docstring": "Get the v2 optimizer requested.\n\n This is only necessary until v2 are the default, as we are testing in Eager,\n and Eager + v1 optimizers fail tests. When we are in v2, the strings alone\n should be sufficient, and this mapping can theoretically be removed.\n\n Args:\n name: string name of Keras v2 optimizer.\n **kwargs: any kwargs to pass to the optimizer constructor.\n\n Returns:\n Initialized Keras v2 optimizer.\n\n Raises:\n ValueError: if an unknown name was passed.\n ", "n_words": 75, "vocab_size": 58, "n_whitespaces": 116, "language": "en" } }, { "id": 218525, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "_prefix_from_ip_int", "commit_message": "add python 3.10.4 for windows", "code": "def _prefix_from_ip_int(cls, ip_int):\n \n trailing_zeroes = _count_righthand_zero_bits(ip_int,\n cls._max_prefixlen)\n prefixlen = cls._max_prefixlen - trailing_zeroes\n leading_ones = ip_int >> trailing_zeroes\n all_ones = (1 << prefixlen) - 1\n if leading_ones != all_ones:\n byteslen = cls._max_prefixlen // 8\n details = ip_int.to_bytes(byteslen, 'big')\n msg = 'Netmask pattern %r mixes zeroes & ones'\n raise ValueError(msg % details)\n return prefixlen\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 197, "n_words": 52, "vocab_size": 40, "complexity": 2, "nloc": 12, "token_counts": 74, "n_ast_nodes": 120, "n_identifiers": 14, "random_cut": "def _prefix_from_ip_int(cls, ip_int):\n \n trailing_zeroes = _count_righthand_zero_bits(ip_int,\n cls._max_prefixlen)\n prefixlen = cls._max_prefixlen - trailing_zeroes\n leading_ones = ip_int >> trailing_zeroes\n ", "d_id": 55366, "documentation": { "docstring": "Return prefix length from the bitwise netmask.\n\n Args:\n ip_int: An integer, the netmask in expanded bitwise format\n\n Returns:\n An integer, the prefix length.\n\n Raises:\n ValueError: If the input intermingles zeroes & ones\n ", "n_words": 32, "vocab_size": 25, "n_whitespaces": 93, "language": "en" } }, { "id": 111228, "commit_id": "91acc3ea75d219ad07ed2b106e7b8bdcb01516dd", "repo": "spaCy", "path": "spacy/pipeline/entity_linker.py", "file_name": "entity_linker.py", "fun_name": "batch_has_learnable_example", 
"commit_message": "Fix entity linker batching (#9669)\n\n* Partial fix of entity linker batching\r\n\r\n* Add import\r\n\r\n* Better name\r\n\r\n* Add `use_gold_ents` option, docs\r\n\r\n* Change to v2, create stub v1, update docs etc.\r\n\r\n* Fix error type\r\n\r\nHonestly no idea what the right type to use here is.\r\nConfigValidationError seems wrong. Maybe a NotImplementedError?\r\n\r\n* Make mypy happy\r\n\r\n* Add hacky fix for init issue\r\n\r\n* Add legacy pipeline entity linker\r\n\r\n* Fix references to class name\r\n\r\n* Add __init__.py for legacy\r\n\r\n* Attempted fix for loss issue\r\n\r\n* Remove placeholder V1\r\n\r\n* formatting\r\n\r\n* slightly more interesting train data\r\n\r\n* Handle batches with no usable examples\r\n\r\nThis adds a test for batches that have docs but not entities, and a\r\ncheck in the component that detects such cases and skips the update step\r\nas thought the batch were empty.\r\n\r\n* Remove todo about data verification\r\n\r\nCheck for empty data was moved further up so this should be OK now - the\r\ncase in question shouldn't be possible.\r\n\r\n* Fix gradient calculation\r\n\r\nThe model doesn't know which entities are not in the kb, so it generates\r\nembeddings for the context of all of them.\r\n\r\nHowever, the loss does know which entities aren't in the kb, and it\r\nignores them, as there's no sensible gradient.\r\n\r\nThis has the issue that the gradient will not be calculated for some of\r\nthe input embeddings, which causes a dimension mismatch in backprop.\r\nThat should have caused a clear error, but with numpyops it was causing\r\nnans to happen, which is another problem that should be addressed\r\nseparately.\r\n\r\nThis commit changes the loss to give a zero gradient for entities not in\r\nthe kb.\r\n\r\n* add failing test for v1 EL legacy architecture\r\n\r\n* Add nasty but simple working check for legacy arch\r\n\r\n* Clarify why init hack works the way it does\r\n\r\n* Clarify use_gold_ents use case\r\n\r\n* Fix use gold ents related handling\r\n\r\n* Add tests for no gold ents and fix other tests\r\n\r\n* Use aligned ents function (not working)\r\n\r\nThis doesn't actually work because the \"aligned\" ents are gold-only. 
But\r\nif I have a different function that returns the intersection, *then*\r\nthis will work as desired.\r\n\r\n* Use proper matching ent check\r\n\r\nThis changes the process when gold ents are not used so that the\r\nintersection of ents in the pred and gold is used.\r\n\r\n* Move get_matching_ents to Example\r\n\r\n* Use model attribute to check for legacy arch\r\n\r\n* Rename flag\r\n\r\n* bump spacy-legacy to lower 3.0.9\r\n\r\nCo-authored-by: svlandeg ", "code": "def batch_has_learnable_example(self, examples):\n \n\n for eg in examples:\n for ent in eg.predicted.ents:\n candidates = list(self.get_candidates(self.kb, ent))\n if candidates:\n return True\n\n return False\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 102, "n_words": 21, "vocab_size": 18, "complexity": 4, "nloc": 7, "token_counts": 44, "n_ast_nodes": 69, "n_identifiers": 11, "random_cut": "def batch_has_learnable_example(self, examples):\n \n\n for eg in examples:\n for ent in eg.predicted.ents:\n candidates = list(self.get_candidates(self.kb, ent))\n if candidates:\n return True\n\n return False\n", "d_id": 24361, "documentation": { "docstring": "Check if a batch contains a learnable example.\n\n If one isn't present, then the update step needs to be skipped.\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 34, "language": "en" } }, { "id": 81164, "commit_id": "452744b67e02823879e722fe574984a2d760ed60", "repo": "awx", "path": "awx/main/tasks/callback.py", "file_name": "callback.py", "fun_name": "get_delayed_update_fields", "commit_message": "Delay update of artifacts and error fields until final job save (#11832)\n\n* Delay update of artifacts until final job save\r\n\r\nSave tracebacks from receptor module to callback object\r\n\r\nMove receptor traceback check up to be more logical\r\n\r\nUse new mock_me fixture to avoid DB call with me method\r\n\r\nUpdate the special runner message to the delay_update pattern\r\n\r\n* Move special runner message into post-processing of callback fields", "code": "def get_delayed_update_fields(self):\n \n self.extra_update_fields['emitted_events'] = self.event_ct\n if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):\n self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE)\n return self.extra_update_fields\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 42, "n_ast_nodes": 76, "n_identifiers": 8, "random_cut": "def get_delayed_update_fields(self):\n \n self.extra_update_fields['emitted_events'] = self.event_ct\n if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):\n self.delay_up", "d_id": 17165, "documentation": { "docstring": "Return finalized dict of all fields that should be saved along with the job status change", "n_words": 16, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 70442, "commit_id": "d964675ee8fcb7ea58681ac8869733a86d58e4ec", "repo": "wagtail", "path": "wagtail/search/tests/test_indexed_class.py", "file_name": "test_indexed_class.py", "fun_name": "get_checks_result", "commit_message": "add check for correct search_fields on pages\n\n- fixes #4940", "code": "def get_checks_result(warning_id=None):\n \n checks_result = checks.run_checks()\n if warning_id:\n return 
[\n warning for warning in\n checks_result if warning.id == warning_id]\n return checks_result\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 89, "n_words": 20, "vocab_size": 15, "complexity": 4, "nloc": 7, "token_counts": 34, "n_ast_nodes": 56, "n_identifiers": 7, "random_cut": "def get_checks_result(warning_id=None):\n \n checks_result = checks.run_checks()\n if warning_id:\n return [\n warning for warning in\n ", "d_id": 15508, "documentation": { "docstring": "Run Django checks on any with the 'search' tag used when registering the check", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 167693, "commit_id": "9612375ca28ade056f15d4338f1bfde5d045c9fc", "repo": "pandas", "path": "pandas/core/config_init.py", "file_name": "config_init.py", "fun_name": "use_bottleneck_cb", "commit_message": "TYP: return values in core/*.py (#47587)\n\n* TYP: return values in core/*.py\r\n\r\n* fix test\r\n\r\n* to_html\r\n\r\n* to_html part 2\r\n\r\n* DataFrame.query\r\n\r\n* more overloads\r\n\r\n* fix query?\r\n\r\n* increase stacklevel by one\r\n\r\n* fix rename_axis\r\n\r\n* and an overload for DataFrame.eval\r\n\r\n* address comments\r\n\r\n* fix typevar", "code": "def use_bottleneck_cb(key) -> None:\n from pandas.core import nanops\n\n nanops.set_use_bottleneck(cf.get_option(key))\n\n\nuse_numexpr_doc = \n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 16, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 46, "n_identifiers": 9, "random_cut": "def use_bottleneck_cb(key) -> None:\n ", "d_id": 40077, "documentation": { "docstring": "\n: bool\n Use the numexpr library to accelerate computation if it is installed,\n the default is True\n Valid values: False,True\n", "n_words": 20, "vocab_size": 18, "n_whitespaces": 28, "language": "en" } }, { "id": 164647, "commit_id": "c055dc4e6be9fc1b68d873a1ace286322dadd5e1", "repo": "pandas", "path": "pandas/tests/io/test_stata.py", "file_name": "test_stata.py", "fun_name": "test_repeated_column_labels", "commit_message": "TST: Don't use autouse fixture in test_stata (#45831)", "code": "def test_repeated_column_labels(self, datapath):\n # GH 13923, 25772\n msg = \n with pytest.raises(ValueError, match=msg):\n read_stata(\n datapath(\"io\", \"data\", \"stata\", \"stata15.dta\"),\n convert_categoricals=True,\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 92, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 16, "token_counts": 40, "n_ast_nodes": 73, "n_identifiers": 10, "random_cut": "def test_repeated_column_labels(self, datapath):\n # GH 13923, 2577", "d_id": 39578, "documentation": { "docstring": "\nValue labels for column ethnicsn are not unique. 
These cannot be converted to\npandas categoricals.\n\nEither read the file with `convert_categoricals` set to False or use the\nlow level interface in `StataReader` to separately read the values and the\nvalue_labels.\n\nThe repeated labels are:\\n-+\\nwolof\n", "n_words": 44, "vocab_size": 37, "n_whitespaces": 38, "language": "en" } }, { "id": 212643, "commit_id": "acaae54a1ade24b2e55f7274ae4db747160a38db", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "string_width_in_pixels", "commit_message": "Enable Text class methods to be called prior to any windows being created: string_width_in_pixels, char_height_in_pixels, char_width_in_pixels. Removed destruction of hidden master root from popup_get_file & popup_get_folder (was old code)", "code": "def string_width_in_pixels(cls, font, string):\n \n\n # if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the measurement can happen\n if Window.NumOpenWindows == 0:\n root = tk.Tk()\n else:\n root = None\n\n size = 0\n try:\n size = tkinter.font.Font(font=font).measure(string) # string's width\n except Exception as e:\n _error_popup_with_traceback('Exception retrieving string width in pixels', e)\n\n if root is not None:\n root.destroy()\n\n return size\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 190, "n_words": 70, "vocab_size": 56, "complexity": 4, "nloc": 13, "token_counts": 75, "n_ast_nodes": 128, "n_identifiers": 17, "random_cut": "def string_width_in_pixels(cls, font, string):\n \n\n # if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the meas", "d_id": 53306, "documentation": { "docstring": "\n Get the with of the supplied string in pixels for the font being passed in.\n If an error occurs, 0 will be returned\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. 
Styles: italic * roman bold normal underline overstrike, to be measured\n :type font: (str or (str, int[, str]) or None)\n :param string: the string to measure\n :type string: str\n :return: Width in pixels of string\n :rtype: (int)\n ", "n_words": 76, "vocab_size": 57, "n_whitespaces": 160, "language": "en" } }, { "id": 276243, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saving_utils.py", "file_name": "saving_utils.py", "fun_name": "model_call_inputs", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def model_call_inputs(model, keep_original_batch_size=False):\n \n input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)\n if input_specs is None:\n return None, None\n input_specs = _enforce_names_consistency(input_specs)\n return input_specs\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 19, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 38, "n_ast_nodes": 63, "n_identifiers": 7, "random_cut": "def model_call_inputs(model, keep_original_batch_size=False):\n \n input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)\n if input_specs is None:\n return None, None\n input_specs =", "d_id": 81601, "documentation": { "docstring": "Inspect model to get its input signature.\n\n The model's input signature is a list with a single (possibly-nested) object.\n This is due to the Keras-enforced restriction that tensor inputs must be\n passed in as the first argument.\n\n For example, a model with input {'feature1': , 'feature2': }\n will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}]\n\n Args:\n model: Keras Model object.\n keep_original_batch_size: A boolean indicating whether we want to keep using\n the original batch size or set it to None. 
Default is `False`, which means\n that the batch dim of the returned input signature will always be set to\n `None`.\n\n Returns:\n A tuple containing `(args, kwargs)` TensorSpecs of the model call function\n inputs.\n `kwargs` does not contain the `training` argument.\n ", "n_words": 119, "vocab_size": 87, "n_whitespaces": 189, "language": "en" } }, { "id": 178927, "commit_id": "abfb99b0a05dd76d2ecc6ebc20732a271857c6c8", "repo": "Nuitka", "path": "nuitka/plugins/standard/DataFileCollectorPlugin.py", "file_name": "DataFileCollectorPlugin.py", "fun_name": "_getSubDirectoryFolders", "commit_message": "Plugins: Massive cleanup of data file handling\n\n* Move data file handling out of standalone only, allowing support\n for other modes as well.\n\n* Attach logger and tags to data file objects.", "code": "def _getSubDirectoryFolders(self, module, sub_dirs):\n \n\n module_dir = module.getCompileTimeDirectory()\n file_list = []\n\n data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs]\n\n # Gather the full file list, probably makes no sense to include bytecode files\n file_list = sum(\n (\n getFileList(\n data_dir, ignore_dirs=(\"__pycache__\",), ignore_suffixes=(\".pyc\",)\n )\n for data_dir in data_dirs\n ),\n [],\n )\n\n if not file_list:\n msg = \"No files or folders found for '%s' in subfolder(s) %r (%r).\" % (\n module.getFullName(),\n sub_dirs,\n data_dirs,\n )\n self.warning(msg)\n\n is_package = (\n module.isCompiledPythonPackage() or module.isUncompiledPythonPackage()\n )\n\n # We need to preserve the package target path in the dist folder.\n if is_package:\n package_part = module.getFullName().asPath()\n else:\n package = module.getFullName().getPackageName()\n\n if package is None:\n package_part = \"\"\n else:\n package_part = package.asPath()\n\n item_set = OrderedSet()\n\n for f in file_list:\n target = os.path.join(package_part, os.path.relpath(f, module_dir))\n\n dir_name = os.path.dirname(target)\n item_set.add(dir_name)\n\n return self.makeIncludedEmptyDirectories(\n source_path=module_dir,\n dest_paths=item_set,\n reason=\"Subdirectories of module %s\" % module.getFullName(),\n tags=\"config\",\n )\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 595, "n_words": 139, "vocab_size": 95, "complexity": 8, "nloc": 42, "token_counts": 232, "n_ast_nodes": 376, "n_identifiers": 40, "random_cut": "def _getSubDirectoryFolders(self, module, sub_dirs):\n \n\n module_dir = module.getCompileTimeDirectory()\n file_list = []\n\n data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs]\n\n # Gather the full file list, probably makes no sense to include bytecode files\n file_list = sum(\n (\n getFileList(\n data_dir, ignore_dirs=(\"__pycache__\",), ignore_suffixes=(\".pyc\",)\n )\n for data_dir in data_dirs\n ),\n [],\n )\n\n if not file_list:\n msg = \"No files or folders found for '%s' in subfolder(s) %r (%r).\" % (\n module.getFullName(),\n sub_dirs,\n data_dirs,\n )\n self.warning(msg)\n\n is_package = (\n module.isCompiledPythonPackage() or module.isUncompiledPythonPackage()\n )\n\n # We need to preserve the package target path in the dist folder.\n if is_package:\n package_part = module.getFullName().asPath()\n else:\n package = module.getFullName().getPackageName()\n\n if package is None:\n package_part = \"\"\n else:\n package_part = package.asPath()\n\n item_set = OrderedSet()\n\n for f in file_list:\n target = os.path.join(package_part, os.path.relpath(f, 
module_dir))\n\n dir_name = os.path.dirname(target)\n item_set.", "d_id": 42862, "documentation": { "docstring": "Get dirnames in given subdirs of the module.\n\n Notes:\n All dirnames in folders below one of the sub_dirs are recursively\n retrieved and returned shortened to begin with the string of subdir.\n Args:\n module: module object\n sub_dirs: sub folder name(s) - tuple\n Returns:\n makeIncludedEmptyDirectories of found dirnames.\n ", "n_words": 46, "vocab_size": 39, "n_whitespaces": 129, "language": "en" } }, { "id": 85795, "commit_id": "35ec251212b82e5d9468062a3ab5945d8e739002", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_orderby_percentile_with_many_fields_one_entity_no_data", "commit_message": "feat(metrics): Support rate for derived metric [TET-129 TET-127] (#38792)\n\nAdds support for operation `rate` to be able to compute performance\r\nrelated metrics such as tpm, tps, epm, eps\r\n\r\nThis PR achieves this by:\r\n- Defining rate as a derived operation that produces its own SnQL rather\r\nthan trying to compute the data sketch aggregate and using that directly\r\n- Replaces `filter_conditions_func` that used to just produce a snql\r\ncondition to be used a conditional aggregate with `snql_func` that\r\ninstead produces a SnQL function\r\n- Replaces the logic in `get_entity` on MetricsExpression to determine\r\nthe entity from the MRI rather than from the aggregate applied", "code": "def test_orderby_percentile_with_many_fields_one_entity_no_data(self):\n \n for metric in [\n TransactionMRI.MEASUREMENTS_FCP.value,\n \"transaction\",\n ]:\n perf_indexer_record(self.organization.id, metric)\n response = self.get_success_response(\n self.organization.slug,\n field=[\n f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n f\"p50({TransactionMetricKey.MEASUREMENTS_FCP.value})\",\n ],\n statsPeriod=\"1h\",\n interval=\"1h\",\n groupBy=[\"project_id\", \"transaction\"],\n orderBy=f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n useCase=\"performance\",\n )\n groups = response.data[\"groups\"]\n assert len(groups) == 0\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 233, "n_words": 33, "vocab_size": 32, "complexity": 2, "nloc": 20, "token_counts": 94, "n_ast_nodes": 181, "n_identifiers": 23, "random_cut": "def test_orderby_percentile_with_many_fields_one_entity_no_data(self):\n \n for metric in [\n TransactionMRI.MEASUREMENTS_FCP.value,\n \"transaction\",\n ]:\n perf_indexer_record(self.organization.id, metric)\n response = self.get_success_response(\n self.organization.slug,\n field=[\n f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n f\"p50({TransactionMetricKey.MEASUREMENTS_FCP.value})\",\n ],\n statsPeriod=\"1h\",\n interval=\"1h\",\n groupBy=[\"project_id\", \"transaction\"],\n orderBy=f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n useCase=\"performance\",\n )\n groups = response.data[\"groups\"]\n assert", "d_id": 18045, "documentation": { "docstring": "\n Test that ensures that when metrics data is available then an empty response is returned\n gracefully\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 38, "language": "en" } }, { "id": 101563, "commit_id": "7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5", "repo": "faceswap", "path": "lib/training/preview_tk.py", "file_name": "preview_tk.py", "fun_name": "_add_save_button", 
"commit_message": "Training - Use custom preview pop-out", "code": "def _add_save_button(self) -> None:\n \n logger.debug(\"Adding save button\")\n button = tk.Button(self,\n text=\"Save\",\n cursor=\"hand2\",\n command=lambda: self.save_var.set(True))\n button.pack(side=tk.LEFT)\n logger.debug(\"Added save burron: '%s'\", button)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 133, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 61, "n_ast_nodes": 104, "n_identifiers": 15, "random_cut": "def _add_save_button(self) -> None:\n \n logger.debug(\"Adding save button\")\n button = tk.Button(self,\n text=\"Save\",\n cursor=\"hand2\",\n command=lambda: self.save_var.set(True))\n button.pack(side=tk.LEFT)\n logger.debug(\"Added save burron: '%s'\", button)\n", "d_id": 20973, "documentation": { "docstring": " Add a save button for saving out original preview ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 53433, "commit_id": "a9e67e2311c1e4a056b9e740cc739360896aab92", "repo": "prefect", "path": "src/prefect/context.py", "file_name": "context.py", "fun_name": "temporary_environ_defaults", "commit_message": "Introduce basic profile context management", "code": "def temporary_environ_defaults(**kwargs):\n \n old_env = os.environ.copy()\n\n try:\n for var in kwargs:\n # TODO: Consider warning on conflicts\n os.environ.setdefault(var, str(kwargs[var]))\n\n yield {var: os.environ[var] for var in kwargs}\n\n finally:\n for var in kwargs:\n if old_env.get(var):\n os.environ[var] = old_env[var]\n else:\n os.environ.pop(var, None)\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 145, "n_words": 38, "vocab_size": 29, "complexity": 6, "nloc": 12, "token_counts": 92, "n_ast_nodes": 147, "n_identifiers": 11, "random_cut": "def temporary_environ_defaults(**kwargs):\n \n old_env = os.environ.copy()\n\n try:\n for var in kwargs:\n # TODO: Consider warning on conflicts\n os.environ.setdefault(var, str(kwargs[var]))\n\n yield {var: os.environ[var] for var in kwargs}\n\n finally:\n for var in kwargs:\n if old_env.get(var):\n os.environ[var] = old_env[var]\n else:\n os.environ.pop(var, None)\n\n", "d_id": 10804, "documentation": { "docstring": "\n Temporarily override default values in os.environ.\n\n Yields a dictionary of the key/value pairs matching the provided keys.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 27, "language": "en" } }, { "id": 91706, "commit_id": "7f60db924ea37f34e0cfe6856777239e2a2ffe13", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_details.py", "file_name": "test_organization_metric_details.py", "fun_name": "test_same_entity_multiple_metric_ids_missing_data", "commit_message": "feat(metrics): make indexer more configurable (#35604)\n\nThis makes the sentry_metrics indexer more configurable in the following ways, to enable indexing on the ingest-performance-metrics topic:\r\n\r\n- configurable input Kafka topic\r\n- configurable output Kafka topic\r\n- configurable model from which to pull index results\r\n- tags for internal metrics to distinguish between the two modes operationally", "code": "def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics):\n \n mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2\n _indexer_record(self.organization.id, 
\"metric_foo_doe\")\n self.store_session(\n self.build_session(\n project_id=self.project.id,\n started=(time.time() // 60) * 60,\n status=\"ok\",\n release=\"foobar@2.0\",\n errors=2,\n )\n )\n response = self.get_response(\n self.organization.slug,\n \"derived_metric.multiple_metrics\",\n )\n assert response.status_code == 404\n assert response.json()[\"detail\"] == (\n \"Not all the requested metrics or the constituent metrics in \"\n \"['derived_metric.multiple_metrics'] have data in the dataset\"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 264, "n_words": 53, "vocab_size": 43, "complexity": 1, "nloc": 21, "token_counts": 97, "n_ast_nodes": 169, "n_identifiers": 22, "random_cut": "def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics):\n \n mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2\n _indexer_record(self.organization.id, \"metric_foo_doe\")\n self.store_sessi", "d_id": 18790, "documentation": { "docstring": "\n Test when not requested metrics have data in the dataset\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 276932, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/kernelized_utils.py", "file_name": "kernelized_utils.py", "fun_name": "_align_matrices", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _align_matrices(x, y):\n \n x_matrix = _to_matrix(x)\n y_matrix = _to_matrix(y)\n x_shape = x_matrix.shape\n y_shape = y_matrix.shape\n if y_shape[1] != x_shape[1]: # dimensions do not match.\n raise ValueError(\n \"The outermost dimensions of the input tensors should match. \"\n f\"Received y = {y_shape[1]} vs x = {x_shape[1]}.\"\n )\n\n x_tile = tf.tile(tf.expand_dims(x_matrix, 1), [1, y_shape[0], 1])\n y_tile = tf.tile(tf.expand_dims(y_matrix, 0), [x_shape[0], 1, 1])\n return x_tile, y_tile\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 126, "n_words": 62, "vocab_size": 51, "complexity": 2, "nloc": 13, "token_counts": 104, "n_ast_nodes": 176, "n_identifiers": 15, "random_cut": "def _align_matrices(x, y):\n \n x_matrix = _to_matrix(x)\n y_matrix = _to_matrix(y)\n x_shape = x_matrix.shape\n y_shape = y_matrix.shape\n if y_shape[1] != x_shape[1]: # dimensions do ", "d_id": 81784, "documentation": { "docstring": "Aligns x and y tensors to allow computations over pairs of their rows.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 248366, "commit_id": "b83bc5fab57b37f75a79d02213d6032c586fd36e", "repo": "synapse", "path": "tests/storage/test_events.py", "file_name": "test_events.py", "fun_name": "test_prune_gap_if_dummy_local", "commit_message": "Pull out less state when handling gaps mk2 (#12852)", "code": "def test_prune_gap_if_dummy_local(self):\n \n\n body = self.helper.send(self.room_id, body=\"Test\", tok=self.token)\n\n body = self.helper.send_event(\n self.room_id, type=EventTypes.Dummy, content={}, tok=self.token\n )\n local_message_event_id = body[\"event_id\"]\n self.assert_extremities([local_message_event_id])\n\n # Advance the clock for many days to make the old extremity \"old\". We\n # also set the depth to \"lots\".\n self.reactor.advance(7 * 24 * 60 * 60)\n\n # Fudge a second event which points to an event we don't have. 
This is a\n # state event so that the state changes (otherwise we won't prune the\n # extremity as they'll have the same state group).\n remote_event_2 = event_from_pdu_json(\n {\n \"type\": EventTypes.Member,\n \"state_key\": \"@user:other2\",\n \"content\": {\"membership\": Membership.JOIN},\n \"room_id\": self.room_id,\n \"sender\": \"@user:other2\",\n \"depth\": 10000,\n \"prev_events\": [\"$some_unknown_message\"],\n \"auth_events\": [],\n \"origin_server_ts\": self.clock.time_msec(),\n },\n RoomVersions.V6,\n )\n\n state_before_gap = self.get_success(\n self.state.get_current_state_ids(self.room_id)\n )\n\n self.persist_event(remote_event_2, state=state_before_gap)\n\n # Check the new extremity is just the new remote event.\n self.assert_extremities([remote_event_2.event_id, local_message_event_id])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 454, "n_words": 131, "vocab_size": 96, "complexity": 1, "nloc": 27, "token_counts": 191, "n_ast_nodes": 320, "n_identifiers": 32, "random_cut": "def test_prune_gap_if_dummy_local(self):\n \n\n body = self.helper.send(self.room_id, body=\"Test\", tok=self.token)\n\n body = self.helper.send_event(\n self.room_id, type=EventTypes.Dummy, content={}, tok=self.token\n )\n local_message_event_id = body[\"event_id\"]\n self.assert_extremities([local_message_event_id])\n\n # Advance the clock for many days to make the old extremity \"old\". We\n # also set the depth to \"lots\".\n self.reactor.advance(7 * 24 * 60 * 60)\n\n # Fudge a second event which points to an event we don't have. This is a\n # state event so that the state changes (otherwise we won't prune the\n # extremity as they'll have the same state group).\n remote_event_2 = event_from_pdu_json(\n {\n \"type\": EventTypes.Member,\n \"state_key\": \"@user:other2\",\n \"content\": {\"membership\": Membership.JOIN},\n \"room_id\": self.room_id,\n \"sender\": \"@user:other2\",\n \"depth\": 10000,\n \"prev_events\": [\"$some_unknown_message\"],\n \"auth", "d_id": 72243, "documentation": { "docstring": "Test that we don't drop extremities after a gap when the previous\n extremity is a local dummy event and points to local events.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 37, "language": "en" } }, { "id": 269147, "commit_id": "e61cbc52fd3b0170769c120e9b8dabc8c4205322", "repo": "keras", "path": "keras/saving/saved_model/save_impl.py", "file_name": "save_impl.py", "fun_name": "_reset_layer_losses", "commit_message": "Support Keras saving/loading for ShardedVariables with arbitrary partitions.\n\nPiperOrigin-RevId: 439837516", "code": "def _reset_layer_losses(parent_layer):\n \n losses_dict = {}\n for layer in utils.list_all_layers_and_sublayers(parent_layer):\n losses_dict[layer] = {\n 'losses': layer._losses[:],\n 'eager_losses': layer._eager_losses[:]\n }\n with utils.no_automatic_dependency_tracking_scope(layer):\n layer._losses = []\n layer._eager_losses = []\n return losses_dict\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 64, "n_words": 27, "vocab_size": 22, "complexity": 2, "nloc": 11, "token_counts": 66, "n_ast_nodes": 113, "n_identifiers": 9, "random_cut": "def _reset_layer_losses(parent_layer):\n \n losses_dict = {}\n for layer in utils.list_all_layers_and_sublayers(parent_layer):\n losses_dict[layer] = {\n 'losses': layer._losses[:],\n 'eager_losses", "d_id": 79929, 
"documentation": { "docstring": "Resets losses of layer and its sublayers, and returns original losses.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 206971, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_changelist/tests.py", "file_name": "tests.py", "fun_name": "test_result_list_editable_html", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_result_list_editable_html(self):\n \n new_parent = Parent.objects.create(name=\"parent\")\n new_child = Child.objects.create(name=\"name\", parent=new_parent)\n request = self.factory.get(\"/child/\")\n request.user = self.superuser\n m = ChildAdmin(Child, custom_site)\n\n # Test with list_editable fields\n m.list_display = [\"id\", \"name\", \"parent\"]\n m.list_display_links = [\"id\"]\n m.list_editable = [\"name\"]\n cl = m.get_changelist_instance(request)\n FormSet = m.get_changelist_formset(request)\n cl.formset = FormSet(queryset=cl.result_list)\n template = Template(\n \"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}\"\n )\n context = Context({\"cl\": cl, \"opts\": Child._meta})\n table_output = template.render(context)\n # make sure that hidden fields are in the correct place\n hiddenfields_div = (\n '
    '\n ''\n \"
    \"\n ) % new_child.id\n self.assertInHTML(\n hiddenfields_div, table_output, msg_prefix=\"Failed to find hidden fields\"\n )\n\n # make sure that list editable fields are rendered in divs correctly\n editable_name_field = (\n ''\n )\n self.assertInHTML(\n '%s' % editable_name_field,\n table_output,\n msg_prefix='Failed to find \"name\" list_editable field',\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 438, "n_words": 139, "vocab_size": 99, "complexity": 1, "nloc": 34, "token_counts": 186, "n_ast_nodes": 330, "n_identifiers": 40, "random_cut": "def test_result_list_editable_html(self):\n \n new_parent = Parent.objects.create(name=\"parent\")\n new_child = Child.objects.create(name=\"name\", parent=new_parent)\n request = self.factory.get(\"/child/\")\n request.user = self.superuser\n m = ChildAdmin(Child, custom_site)\n\n # Test with list_editable fields\n m.list_display = [\"id\", \"name\", \"parent\"]\n m.list_display_links = [\"id\"]\n m.list_editable = [\"name\"]\n cl = m.get_changelist_instance(request)\n FormSet = m.get_changelist_formset(request)\n cl.formset = FormSet(queryset=cl.result_list)\n template = Template(\n \"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}\"\n )\n context = Context({\"cl\": cl, \"opts\": Child._meta})\n table_output = template.render(context)\n # make sure that hidden fields are in the correct place\n hiddenfields_div = (\n '
    '\n ''\n \"
    \"\n ) % new_child.id\n self.assertInHTML(\n ", "d_id": 51815, "documentation": { "docstring": "\n Regression tests for #11791: Inclusion tag result_list generates a\n table and this checks that the items are nested within the table\n element tags.\n Also a regression test for #13599, verifies that hidden fields\n when list_editable is enabled are rendered in a div outside the\n table.\n ", "n_words": 45, "vocab_size": 37, "n_whitespaces": 95, "language": "en" } }, { "id": 77580, "commit_id": "5994cc43dfc5cc1ed891ab78eff3a3bcf56f6830", "repo": "wagtail", "path": "wagtail/admin/tests/ui/test_tables.py", "file_name": "test_tables.py", "fun_name": "test_title_column", "commit_message": "Allow passing arbitrary link attributes to TitleColumn", "code": "def test_title_column(self):\n root_page = Page.objects.filter(depth=2).first()\n blog = Site.objects.create(\n hostname=\"blog.example.com\", site_name=\"My blog\", root_page=root_page\n )\n gallery = Site.objects.create(\n hostname=\"gallery.example.com\", site_name=\"My gallery\", root_page=root_page\n )\n data = [blog, gallery]\n\n table = Table(\n [\n TitleColumn(\n \"hostname\",\n url_name=\"wagtailsites:edit\",\n link_classname=\"choose-site\",\n link_attrs={\"data-chooser\": \"yes\"},\n ),\n Column(\"site_name\", label=\"Site name\"),\n ],\n data,\n )\n\n html = self.render_component(table)\n self.assertHTMLEqual(\n html,\n \n % (blog.pk, gallery.pk),\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 337, "n_words": 51, "vocab_size": 40, "complexity": 1, "nloc": 51, "token_counts": 136, "n_ast_nodes": 223, "n_identifiers": 27, "random_cut": "def test_title_column(self):\n root_page = Page.objects.filter(depth=2).first()\n blog = Site.objects.create(\n ", "d_id": 16677, "documentation": { "docstring": "\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    HostnameSite name
    \n \n My blog
    \n \n My gallery
    \n ", "n_words": 37, "vocab_size": 25, "n_whitespaces": 530, "language": "en" } }, { "id": 34270, "commit_id": "841d979190319098adc8101f9820a02ee3be4c8b", "repo": "transformers", "path": "src/transformers/models/realm/tokenization_realm.py", "file_name": "tokenization_realm.py", "fun_name": "_clean_text", "commit_message": "Add FastTokenizer to REALM (#15211)\n\n* Remove BertTokenizer abstraction\r\n\r\n* Add FastTokenizer to REALM\r\n\r\n* Fix config archive map\r\n\r\n* Fix copies\r\n\r\n* Update realm.mdx\r\n\r\n* Apply suggestions from code review", "code": "def _clean_text(self, text):\n \n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 149, "n_words": 32, "vocab_size": 26, "complexity": 6, "nloc": 11, "token_counts": 65, "n_ast_nodes": 112, "n_identifiers": 11, "random_cut": "def _clean_text(self, text):\n \n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append", "d_id": 6233, "documentation": { "docstring": "Performs invalid character removal and whitespace cleanup on text.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 284723, "commit_id": "0e03b9e9e41aaa61cdec5d674a9f2c64ab8d3394", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/crypto_controller.py", "file_name": "crypto_controller.py", "fun_name": "call_candle", "commit_message": "refactoring load, changed chart to candle (#1838)\n\n* refactoring load, changed chart to candle\r\n\r\n* updating load\r\n\r\n* refactor done, missing tests\r\n\r\n* fixed chart\r\n\r\n* refactor\r\n\r\n* linting\r\n\r\n* tests failing\r\n\r\n* fix minh issues\r\n\r\n* auto completion for load\r\n\r\n* linting\r\n\r\n* Tests : cryptocurrency/controller ; remove mocking of functions which are not used anymore\r\n\r\n* Cryptocurrency/Controller : call_headlines ; fix bug\r\n\r\n* Tests : cryptocurrency/controller ; mock function\r\n\r\n* Tests : cryptocurrency/due_diligence ; fix expected output\r\n\r\n* cryptocurrency/due_diligence ; mock functions\r\n\r\nCo-authored-by: Chavithra \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: James Maslek ", "code": "def call_candle(self, other_args):\n \n if self.symbol:\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"candle\",\n description=,\n )\n\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES\n )\n\n if ns_parser:\n plot_chart(\n symbol=self.symbol,\n currency=self.current_currency,\n prices_df=self.current_df,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 258, "n_words": 27, "vocab_size": 23, "complexity": 3, "nloc": 18, "token_counts": 72, "n_ast_nodes": 111, "n_identifiers": 20, "random_cut": "def call_candle(self, other_args):\n \n if self.symbol:\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"candle\",\n description=,\n )\n\n ns_parser = parse_known_args_and_warn(\n 
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES\n )\n\n if ns_parser:\n plot_chart(\n ", "d_id": 84941, "documentation": { "docstring": "Process candle commandDisplay chart for loaded coin. You can specify currency vs which you want\n to show chart and also number of days to get data for.", "n_words": 27, "vocab_size": 25, "n_whitespaces": 41, "language": "en" } }, { "id": 289805, "commit_id": "fe7402375d2f899a7edd6ac326d2c1998b4c43da", "repo": "core", "path": "tests/components/bayesian/test_binary_sensor.py", "file_name": "test_binary_sensor.py", "fun_name": "test_load_values_when_added_to_hass", "commit_message": "Bayesian - support `unique_id:` (#79879)\n\n* support unique_id\r\n\r\n* adds test for unique_ids", "code": "async def test_load_values_when_added_to_hass(hass):\n \n\n config = {\n \"binary_sensor\": {\n \"name\": \"Test_Binary\",\n \"platform\": \"bayesian\",\n \"unique_id\": \"3b4c9563-5e84-4167-8fe7-8f507e796d72\",\n \"device_class\": \"connectivity\",\n \"observations\": [\n {\n \"platform\": \"state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"to_state\": \"off\",\n \"prob_given_true\": 0.8,\n \"prob_given_false\": 0.4,\n }\n ],\n \"prior\": 0.2,\n \"probability_threshold\": 0.32,\n }\n }\n\n hass.states.async_set(\"sensor.test_monitored\", \"off\")\n await hass.async_block_till_done()\n\n assert await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n entity_registry = async_get_entities(hass)\n assert (\n entity_registry.entities[\"binary_sensor.test_binary\"].unique_id\n == \"bayesian-3b4c9563-5e84-4167-8fe7-8f507e796d72\"\n )\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n assert state.attributes.get(\"device_class\") == \"connectivity\"\n assert state.attributes.get(\"observations\")[0][\"prob_given_true\"] == 0.8\n assert state.attributes.get(\"observations\")[0][\"prob_given_false\"] == 0.4\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 355, "n_words": 72, "vocab_size": 55, "complexity": 1, "nloc": 33, "token_counts": 183, "n_ast_nodes": 319, "n_identifiers": 14, "random_cut": "async def test_load_values_when_added_to_hass(hass):\n \n\n config = {\n \"binary_sensor\": {\n ", "d_id": 88940, "documentation": { "docstring": "Test that sensor initializes with observations of relevant entities.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 47940, "commit_id": "6a3d6cc32b4e3922d259c889460fe82e0ebf3663", "repo": "airflow", "path": "tests/providers/databricks/operators/test_databricks_sql.py", "file_name": "test_databricks_sql.py", "fun_name": "test_copy_with_target_credential", "commit_message": "Update to the released version of DBSQL connector\n\nAlso added additional parameters for further customization of connection\nif it's required", "code": "def test_copy_with_target_credential(self):\n expression = \"col1, col2\"\n op = DatabricksCopyIntoOperator(\n file_location=COPY_FILE_LOCATION,\n file_format='CSV',\n table_name='test',\n task_id=TASK_ID,\n expression_list=expression,\n storage_credential='abc',\n credential={'AZURE_SAS_TOKEN': 'abc'},\n )\n assert (\n op._create_sql_query()\n == f.strip()\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 157, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 18, "token_counts": 60, "n_ast_nodes": 109, 
"n_identifiers": 16, "random_cut": "def test_copy_with_target_credential(self):\n expression = \"col1, col2\"\n op = DatabricksCopyIntoOperator(\n file_location=COPY_FILE_LOCATION,\n file_format='CSV',\n table_name='test',\n task_id=TASK_ID,\n expression_list=expression,\n storage_credential='abc',\n credential={'AZURE_SAS_TOKEN': 'abc'},\n )\n asse", "d_id": 9308, "documentation": { "docstring": "COPY INTO test WITH (CREDENTIAL abc)\nFROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') ))\nFILEFORMAT = CSV\n", "n_words": 20, "vocab_size": 16, "n_whitespaces": 17, "language": "en" } }, { "id": 60607, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/commands/debug.py", "file_name": "debug.py", "fun_name": "show_actual_vendor_versions", "commit_message": "upd; format", "code": "def show_actual_vendor_versions(vendor_txt_versions):\n # type: (Dict[str, str]) -> None\n \n for module_name, expected_version in vendor_txt_versions.items():\n extra_message = ''\n actual_version = get_vendor_version_from_module(module_name)\n if not actual_version:\n extra_message = ' (Unable to locate actual module version, using'\\\n ' vendor.txt specified version)'\n actual_version = expected_version\n elif parse_version(actual_version) != parse_version(expected_version):\n extra_message = ' (CONFLICT: vendor.txt suggests version should'\\\n ' be {})'.format(expected_version)\n logger.info('%s==%s%s', module_name, actual_version, extra_message)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 189, "n_words": 58, "vocab_size": 45, "complexity": 4, "nloc": 12, "token_counts": 71, "n_ast_nodes": 126, "n_identifiers": 12, "random_cut": "def show_actual_vendor_versions(vendor_txt_versions):\n # type: (Dict[str, str]) -> None\n \n for module_name, expected_version in vendor_txt_versions.items():\n extra_message = ''\n actual_version = get_vendor_version_from_module(module_name)\n if not actual_version:\n extra_message = ' (Un", "d_id": 12219, "documentation": { "docstring": "Log the actual version and print extra info if there is\n a conflict or if the actual version could not be imported.\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 28, "language": "en" } }, { "id": 106467, "commit_id": "556862bc911bb54435b7b0b01451789b884b0390", "repo": "youtube-dl", "path": "youtube_dl/utils.py", "file_name": "utils.py", "fun_name": "escape_rfc3986", "commit_message": "[utils] Ensure RFC3986 encoding result is unicode", "code": "def escape_rfc3986(s):\n \n if sys.version_info < (3, 0) and isinstance(s, compat_str):\n s = s.encode('utf-8')\n # ensure unicode: after quoting, it can always be converted\n return compat_str(compat_urllib_parse.quote(s, b\"%/;:@&=+$,!~*'()?#[]\"))\n\n", "url": "https://github.com/ytdl-org/youtube-dl.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 26, "vocab_size": 26, "complexity": 3, "nloc": 4, "token_counts": 45, "n_ast_nodes": 75, "n_identifiers": 9, "random_cut": "def escape_rfc3986(s):\n \n if sys.version_info < (3, 0) and isinstance(s, compat_str):\n s = s.encode('utf-", "d_id": 22383, "documentation": { "docstring": "Escape non-ASCII characters as suggested by RFC 3986", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 281122, "commit_id": 
"ea964109d654394cc0a5237e6ec5510ba6404097", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py", "file_name": "dd_controller.py", "fun_name": "call_social", "commit_message": "Crypto menu refactor (#1119)\n\n* enabled some crypto commands in dd to be called independent of source loaded\r\n\r\n* support for coin_map_df in all dd functions + load ta and plot chart refactor\r\n\r\n* updated tests and removed coingecko scrapping where possible\r\n\r\n* removed ref of command from hugo\r\n\r\n* updated pycoingecko version\r\n\r\n* refactoring load\r\n\r\n* refactored load to fetch prices; pred can run independent of source now\r\n\r\n* load by default usd on cp/cg and usdt on cb/bin\r\n\r\n* updated to rich for formatting and updated dependencies\r\n\r\n* fixed changes requested\r\n\r\n* update docs\r\n\r\n* revert discord requirements\r\n\r\n* removed absolute from calculate change for price\r\n\r\n* fixing pr issues\r\n\r\n* fix loading issue when similar coins exist, move coins to home, fill n/a\r\n\r\n* update docs for coins\r\n\r\n* adds load to ta and pred menu", "code": "def call_social(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"social\",\n description=,\n )\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n\n if ns_parser:\n pycoingecko_view.display_social(\n self.coin_map_df[\"CoinGecko\"], export=ns_parser.export\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 158, "n_words": 24, "vocab_size": 21, "complexity": 2, "nloc": 15, "token_counts": 63, "n_ast_nodes": 100, "n_identifiers": 18, "random_cut": "def call_social(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"social\",\n description=,\n )\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n\n if ns_parser:\n pycoingecko_view.display_social(\n self.coin_map_df[\"CoinGecko\"", "d_id": 83534, "documentation": { "docstring": "Process social commandShows social media corresponding to loaded coin. 
You can find there name of\n telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.", "n_words": 25, "vocab_size": 23, "n_whitespaces": 35, "language": "en" } }, { "id": 85476, "commit_id": "6d7681529f68a87e41d4c11a4aa1e6732cb15ade", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_project_event_details.py", "file_name": "test_project_event_details.py", "fun_name": "test_ignores_different_group", "commit_message": "feat(perf issues): Return prev/next for transaction events (#38274)\n\n* feat(perf issues): Return prev/next for transaction events", "code": "def test_ignores_different_group(self):\n \n url = reverse(\n \"sentry-api-0-project-event-details\",\n kwargs={\n \"event_id\": self.next_transaction_event.event_id,\n \"project_slug\": self.next_transaction_event.project.slug,\n \"organization_slug\": self.next_transaction_event.project.organization.slug,\n },\n )\n with self.feature(\"organizations:performance-issues\"):\n response = self.client.get(url, format=\"json\", data={\"group_id\": self.group.id})\n\n assert response.status_code == 200, response.content\n assert response.data[\"id\"] == str(self.next_transaction_event.event_id)\n assert response.data[\"nextEventID\"] is None\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 174, "n_words": 36, "vocab_size": 32, "complexity": 1, "nloc": 14, "token_counts": 117, "n_ast_nodes": 195, "n_identifiers": 21, "random_cut": "def test_ignores_different_group(self):\n \n url = reverse(\n \"sentry-api-0-project-event-details\",\n kwargs={\n \"event_id\": self.next_transaction_event.event_id,\n \"project_slug\": self.next_transaction_event.project.slug,\n \"organization_slug\": self.next_transacti", "d_id": 17999, "documentation": { "docstring": "Test that a different group's events aren't attributed to the one that was passed", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 200571, "commit_id": "b5f5ec455e7d003fa214a215475a3fa2407760cc", "repo": "sympy", "path": "sympy/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "_dedupe_indices", "commit_message": "_dedupe_indices: convert to staticmethod\n\nindex_structure is now an additional argument", "code": "def _dedupe_indices(new, exclude, index_structure):\n \n inds_self = set(exclude)\n dums_new = set(get_dummy_indices(new))\n\n conflicts = dums_new.intersection(inds_self)\n if len(conflicts) == 0:\n return None\n\n \n inds_self.update(dums_new)\n self_args_free = [(i, None) for i in inds_self]\n gen = index_structure._get_generator_for_dummy_indices(self_args_free)\n repl = {}\n for d in conflicts:\n if -d in repl.keys():\n continue\n newname = gen(d.tensor_index_type)\n new_d = d.func(newname, *d.args[1:])\n repl[d] = new_d\n repl[-d] = -new_d\n\n if len(repl) == 0:\n return None\n\n new_renamed = new._replace_indices(repl)\n return new_renamed\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 258, "n_words": 67, "vocab_size": 45, "complexity": 6, "nloc": 26, "token_counts": 150, "n_ast_nodes": 242, "n_identifiers": 26, "random_cut": "def _dedupe_indices(new, exclude, index_structure):\n \n inds_self = set(exclude)\n dums_new = set(get_dummy_indices(new))\n\n conflicts = dums_new.intersection(inds_self)\n if len(conflicts) == 0:\n return None\n\n \n inds_self.update(dums_new)\n self_args_free = [(i, None) for i in 
inds_self]\n gen = index_structure._get_generator_for_dummy_indices(self_args_free)\n repl = {}\n for d in conflicts:\n if -d in repl.keys():\n continue\n newname = gen(d.te", "d_id": 49706, "documentation": { "docstring": "\n exclude: set\n new: TensExpr\n index_structure: _IndexStructure (required to generate new dummy indices)\n\n If ``new`` has any dummy indices that are in ``exclude``, return a version\n of new with those indices replaced. If no replacements are needed,\n return None\n \n ``self_args_free`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``.\n Since the latter does not use the index position for anything, we just\n set it as ``None`` here.\n ", "n_words": 63, "vocab_size": 53, "n_whitespaces": 142, "language": "en" } }, { "id": 110082, "commit_id": "3804cdd8f1771065f9c8616c57357c2b190c3a05", "repo": "matplotlib", "path": "lib/matplotlib/spines.py", "file_name": "spines.py", "fun_name": "get_window_extent", "commit_message": "Fix issue with space allocated for single tick that should not be there\n\nCo-authored-by: Antony Lee ", "code": "def get_window_extent(self, renderer=None):\n \n # make sure the location is updated so that transforms etc are correct:\n self._adjust_location()\n bb = super().get_window_extent(renderer=renderer)\n if self.axis is None or not self.axis.get_visible():\n return bb\n bboxes = [bb]\n drawn_ticks = self.axis._update_ticks()\n\n major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None)\n minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None)\n for tick in [major_tick, minor_tick]:\n if tick is None:\n continue\n bb0 = bb.frozen()\n tickl = tick._size\n tickdir = tick._tickdir\n if tickdir == 'out':\n padout = 1\n padin = 0\n elif tickdir == 'in':\n padout = 0\n padin = 1\n else:\n padout = 0.5\n padin = 0.5\n padout = padout * tickl / 72 * self.figure.dpi\n padin = padin * tickl / 72 * self.figure.dpi\n\n if tick.tick1line.get_visible():\n if self.spine_type == 'left':\n bb0.x0 = bb0.x0 - padout\n bb0.x1 = bb0.x1 + padin\n elif self.spine_type == 'bottom':\n bb0.y0 = bb0.y0 - padout\n bb0.y1 = bb0.y1 + padin\n\n if tick.tick2line.get_visible():\n if self.spine_type == 'right':\n bb0.x1 = bb0.x1 + padout\n bb0.x0 = bb0.x0 - padin\n elif self.spine_type == 'top':\n bb0.y1 = bb0.y1 + padout\n bb0.y0 = bb0.y0 - padout\n bboxes.append(bb0)\n\n return mtransforms.Bbox.union(bboxes)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 711, "n_words": 174, "vocab_size": 80, "complexity": 13, "nloc": 42, "token_counts": 330, "n_ast_nodes": 534, "n_identifiers": 39, "random_cut": "def get_window_extent(self, renderer=None):\n \n # make sure the location is updated so that transforms etc are correct:\n self._adjust_location()\n bb = super().get_window_extent(renderer=renderer)\n if self.axis is None or not self.axis.get_visible():\n return bb\n bboxes = [bb]\n drawn_ticks = self.axis._update_ticks()\n\n major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None)\n minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None)\n for tick in [major_tick, minor_tick]:\n if tick is None:\n continue\n bb0 = bb.frozen()\n tickl = tick._size\n tickdir = tick._tickdir\n if tickdir == 'out':\n padout = 1\n padin = 0\n elif tickdir == 'in':\n padout = 0\n padin = 1\n else:\n padout = 0.5\n padin = 0.5\n padout = padout * tickl / 72 * self.figure.dpi\n padin = padin * tickl / 72 * 
self.figure.dpi\n\n if tick.tick1line.get_visible():\n if self.spine_type == 'left':\n bb0.x0 = bb0.x0 - padout\n bb0.x1 = bb0.x1 + padin\n elif self.spine_type == 'bottom':\n bb0.y0 = bb0.y0 - padout\n bb0.y1 = bb0.y1 + padin\n\n if tick.tick2line.get_visible():\n if self.spine_type == 'right':\n bb0.x1 = bb0.x1 + padout\n bb0.x0 = bb0.x0 - padin\n elif self.spine_type == 'top':\n bb0.y1 = bb0.y1 + padout\n bb0.y0 = bb0.y0 - padout\n ", "d_id": 23916, "documentation": { "docstring": "\n Return the window extent of the spines in display space, including\n padding for ticks (but not their labels)\n\n See Also\n --------\n matplotlib.axes.Axes.get_tightbbox\n matplotlib.axes.Axes.get_window_extent\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 73, "language": "en" } }, { "id": 198533, "commit_id": "99ede53223eafb56b2c2b4ab7b8a6764b628c9d9", "repo": "sympy", "path": "sympy/physics/continuum_mechanics/truss.py", "file_name": "truss.py", "fun_name": "apply_support", "commit_message": "remove_load method added along with other changes", "code": "def apply_support(self, location, type):\n \n if location not in self._node_labels:\n raise ValueError(\"Support must be added on a known node\")\n\n else:\n self._supports[location] = type\n\n if type == \"pinned\":\n self._loads['R_'+str(location)+'_x']= []\n self._loads['R_'+str(location)+'_y']= []\n\n elif type == \"roller\":\n self._loads['R_'+str(location)+'_y']= []\n if 'R_'+str(location)+'_x' in list(self._loads):\n self._loads.pop('R_'+str(location)+'_x')\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 157, "n_words": 41, "vocab_size": 32, "complexity": 5, "nloc": 12, "token_counts": 123, "n_ast_nodes": 221, "n_identifiers": 11, "random_cut": "def apply_support(self, location, type):\n \n if location not in self._node_labels:\n raise ValueError(\"Support must be added on a known node\")\n\n else:\n self._supports[location] = type\n\n if type == \"pinned\":\n self._loads['R_'+str(location)+'_x']= []\n self._loads['R_'+", "d_id": 48985, "documentation": { "docstring": "\n This method adds a pinned or roller support at a particular node\n\n Parameters\n ==========\n\n location: String or Symbol\n Label of the Node at which support is added.\n\n type: String\n Type of the support being provided at the node.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> t = Truss()\n >>> t.add_node('A', 0, 0)\n >>> t.add_node('B', 3, 0)\n >>> t.apply_support('A', 'pinned')\n >>> t.supports\n {'A': 'pinned', 'B': 'none'}\n ", "n_words": 66, "vocab_size": 50, "n_whitespaces": 194, "language": "en" } }, { "id": 303692, "commit_id": "54fc17e10de0752c03d6b95153c3d8168f76ea44", "repo": "core", "path": "homeassistant/components/xiaomi_miio/vacuum.py", "file_name": "vacuum.py", "fun_name": "timers", "commit_message": "Improve type hints in xiaomi_miio vacuum entities (#76563)\n\nCo-authored-by: Teemu R. 
", "code": "def timers(self) -> list[dict[str, Any]]:\n \n return [\n {\n \"enabled\": timer.enabled,\n \"cron\": timer.cron,\n \"next_schedule\": as_utc(timer.next_schedule),\n }\n for timer in self.coordinator.data.timers\n ]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 119, "n_words": 20, "vocab_size": 20, "complexity": 2, "nloc": 10, "token_counts": 52, "n_ast_nodes": 83, "n_identifiers": 13, "random_cut": "def timers(self) -> list[dict[str, Any]]:\n \n retu", "d_id": 102508, "documentation": { "docstring": "Get the list of added timers of the vacuum cleaner.", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 262205, "commit_id": "5169d4eb32407ca0278046aaffc56ca6f9e9ef32", "repo": "TTS", "path": "TTS/tts/utils/visual.py", "file_name": "visual.py", "fun_name": "plot_avg_pitch", "commit_message": "Plot pitch over input characters", "code": "def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False):\n \n old_fig_size = plt.rcParams[\"figure.figsize\"]\n if fig_size is not None:\n plt.rcParams[\"figure.figsize\"] = fig_size\n\n fig, ax = plt.subplots()\n\n x = np.array(range(len(chars)))\n my_xticks = [c for c in chars]\n plt.xticks(x, my_xticks)\n\n ax.set_xlabel(\"characters\")\n ax.set_ylabel(\"freq\")\n\n ax2 = ax.twinx()\n ax2.plot(pitch, linewidth=5.0, color=\"red\")\n ax2.set_ylabel(\"F0\")\n\n plt.rcParams[\"figure.figsize\"] = old_fig_size\n if not output_fig:\n plt.close()\n return fig\n\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 51, "vocab_size": 39, "complexity": 4, "nloc": 26, "token_counts": 142, "n_ast_nodes": 233, "n_identifiers": 27, "random_cut": "def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False):\n \n old_fig_size = plt.rcParams[\"figure.figsize\"]\n if fig_size is not None:\n plt.rcParams[\"figure.figsize\"] = fig_size\n\n fig, ax = plt.subplots()\n\n x = np.array(range(len(chars)))\n", "d_id": 77142, "documentation": { "docstring": "Plot pitch curves on top of the input characters.\n\n Args:\n pitch (np.array): Pitch values.\n chars (str): Characters to place to the x-axis.\n\n Shapes:\n pitch: :math:`(T,)`\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 55, "language": "en" } }, { "id": 197047, "commit_id": "e0dc14eca132f37c5f49369eb4051eae37c9b119", "repo": "sympy", "path": "sympy/ntheory/generate.py", "file_name": "generate.py", "fun_name": "composite", "commit_message": "Refactored import ordering in functions", "code": "def composite(nth):\n \n n = as_int(nth)\n if n < 1:\n raise ValueError(\"nth must be a positive integer; composite(1) == 4\")\n composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18]\n if n <= 10:\n return composite_arr[n - 1]\n\n a, b = 4, sieve._list[-1]\n if n <= b - primepi(b) - 1:\n while a < b - 1:\n mid = (a + b) >> 1\n if mid - primepi(mid) - 1 > n:\n b = mid\n else:\n a = mid\n if isprime(a):\n a -= 1\n return a\n\n from sympy.functions.elementary.exponential import log\n from sympy.functions.special.error_functions import li\n a = 4 # Lower bound for binary search\n b = int(n*(log(n) + log(log(n)))) # Upper bound for the search.\n\n while a < b:\n mid = (a + b) >> 1\n if mid - li(mid) - 1 > n:\n b = mid\n else:\n a = mid + 1\n\n n_composites = a - primepi(a) - 1\n while n_composites > n:\n if not isprime(a):\n n_composites -= 1\n a -= 1\n if 
isprime(a):\n a -= 1\n return a\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 402, "n_words": 170, "vocab_size": 76, "complexity": 12, "nloc": 36, "token_counts": 250, "n_ast_nodes": 389, "n_identifiers": 23, "random_cut": "def composite(nth):\n \n n = as_int(nth)\n if n < 1:\n raise ValueError(\"nth must be a positive integer; composite(1) == 4\")\n composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18]\n if n <= 10:\n return composite_arr[n - 1]\n\n a, b = 4, sieve._list[-1]\n if n <= b - primepi(b) - 1:\n while a < b - 1:\n mid = (a + b) >> 1\n ", "d_id": 48304, "documentation": { "docstring": " Return the nth composite number, with the composite numbers indexed as\n composite(1) = 4, composite(2) = 6, etc....\n\n Examples\n ========\n\n >>> from sympy import composite\n >>> composite(36)\n 52\n >>> composite(1)\n 4\n >>> composite(17737)\n 20000\n\n See Also\n ========\n\n sympy.ntheory.primetest.isprime : Test if n is prime\n primerange : Generate all primes in a given range\n primepi : Return the number of primes less than or equal to n\n prime : Return the nth prime\n compositepi : Return the number of positive composite numbers less than or equal to n\n ", "n_words": 87, "vocab_size": 53, "n_whitespaces": 210, "language": "en" } }, { "id": 160496, "commit_id": "2215054472616df563faa4613734426c790d4217", "repo": "numpy", "path": "numpy/lib/twodim_base.py", "file_name": "twodim_base.py", "fun_name": "histogram2d", "commit_message": "DEP: Remove `normed=` keyword argument from histogroms\n\nThe normed keyword argument has been deprecated for a long time.\nThis removes it, replacing its position with the new density\nargument.", "code": "def histogram2d(x, y, bins=10, range=None, density=None, weights=None):\n \n from numpy import histogramdd\n\n if len(x) != len(y):\n raise ValueError('x and y must have the same length.')\n\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1 and N != 2:\n xedges = yedges = asarray(bins)\n bins = [xedges, yedges]\n hist, edges = histogramdd([x, y], bins, range, density, weights)\n return hist, edges[0], edges[1]\n\n\n@set_module('numpy')", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "@set_module('numpy')", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 122, "n_words": 64, "vocab_size": 50, "complexity": 5, "nloc": 13, "token_counts": 114, "n_ast_nodes": 184, "n_identifiers": 19, "random_cut": "def histogram2d(x, y, bins=10, range=None, density=None, weights=None):\n \n from numpy import histogramdd\n\n if len(x) != len(y):\n raise ValueError('x and y must have the same length.')\n\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1 and N != 2:\n xedges = yedges = asarray(bins)\n bins = [xedges, yedges]\n hist, edges = histogramdd([x, y], bins, range, density, weights)\n return hist, edges[0], edges[1]\n\n\n@set_module('numpy')", "d_id": 38645, "documentation": { "docstring": "\n Compute the bi-dimensional histogram of two data samples.\n\n Parameters\n ----------\n x : array_like, shape (N,)\n An array containing the x coordinates of the points to be\n histogrammed.\n y : array_like, shape (N,)\n An array containing the y coordinates of the points to be\n histogrammed.\n bins : int or array_like or [int, int] or [array, array], optional\n The bin specification:\n\n * If int, the number of bins for the two dimensions (nx=ny=bins).\n * If array_like, the bin edges for the two dimensions\n 
(x_edges=y_edges=bins).\n * If [int, int], the number of bins in each dimension\n (nx, ny = bins).\n * If [array, array], the bin edges in each dimension\n (x_edges, y_edges = bins).\n * A combination [int, array] or [array, int], where int\n is the number of bins and array is the bin edges.\n\n range : array_like, shape(2,2), optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range\n will be considered outliers and not tallied in the histogram.\n density : bool, optional\n If False, the default, returns the number of samples in each bin.\n If True, returns the probability *density* function at the bin,\n ``bin_count / sample_count / bin_area``.\n weights : array_like, shape(N,), optional\n An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.\n Weights are normalized to 1 if `density` is True. If `density` is\n False, the values of the returned histogram are equal to the sum of\n the weights belonging to the samples falling into each bin.\n\n Returns\n -------\n H : ndarray, shape(nx, ny)\n The bi-dimensional histogram of samples `x` and `y`. Values in `x`\n are histogrammed along the first dimension and values in `y` are\n histogrammed along the second dimension.\n xedges : ndarray, shape(nx+1,)\n The bin edges along the first dimension.\n yedges : ndarray, shape(ny+1,)\n The bin edges along the second dimension.\n\n See Also\n --------\n histogram : 1D histogram\n histogramdd : Multidimensional histogram\n\n Notes\n -----\n When `density` is True, then the returned histogram is the sample\n density, defined such that the sum over bins of the product\n ``bin_value * bin_area`` is 1.\n\n Please note that the histogram does not follow the Cartesian convention\n where `x` values are on the abscissa and `y` values on the ordinate\n axis. Rather, `x` is histogrammed along the first dimension of the\n array (vertical), and `y` along the second dimension of the array\n (horizontal). This ensures compatibility with `histogramdd`.\n\n Examples\n --------\n >>> from matplotlib.image import NonUniformImage\n >>> import matplotlib.pyplot as plt\n\n Construct a 2-D histogram with variable bin width. First define the bin\n edges:\n\n >>> xedges = [0, 1, 3, 5]\n >>> yedges = [0, 2, 3, 4, 6]\n\n Next we create a histogram H with random bin content:\n\n >>> x = np.random.normal(2, 1, 100)\n >>> y = np.random.normal(1, 1, 100)\n >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\n >>> # Histogram does not follow Cartesian convention (see Notes),\n >>> # therefore transpose H for visualization purposes.\n >>> H = H.T\n\n :func:`imshow ` can only display square bins:\n\n >>> fig = plt.figure(figsize=(7, 3))\n >>> ax = fig.add_subplot(131, title='imshow: square bins')\n >>> plt.imshow(H, interpolation='nearest', origin='lower',\n ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])\n \n\n :func:`pcolormesh ` can display actual edges:\n\n >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',\n ... aspect='equal')\n >>> X, Y = np.meshgrid(xedges, yedges)\n >>> ax.pcolormesh(X, Y, H)\n \n\n :class:`NonUniformImage ` can be used to\n display actual bin edges with interpolation:\n\n >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',\n ... 
aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])\n >>> im = NonUniformImage(ax, interpolation='bilinear')\n >>> xcenters = (xedges[:-1] + xedges[1:]) / 2\n >>> ycenters = (yedges[:-1] + yedges[1:]) / 2\n >>> im.set_data(xcenters, ycenters, H)\n >>> ax.images.append(im)\n >>> plt.show()\n\n It is also possible to construct a 2-D histogram without specifying bin\n edges:\n\n >>> # Generate non-symmetric test data\n >>> n = 10000\n >>> x = np.linspace(1, 100, n)\n >>> y = 2*np.log(x) + np.random.rand(n) - 0.5\n >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges\n >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)\n\n Now we can plot the histogram using\n :func:`pcolormesh `, and a\n :func:`hexbin ` for comparison.\n\n >>> # Plot histogram using pcolormesh\n >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)\n >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')\n >>> ax1.plot(x, 2*np.log(x), 'k-')\n >>> ax1.set_xlim(x.min(), x.max())\n >>> ax1.set_ylim(y.min(), y.max())\n >>> ax1.set_xlabel('x')\n >>> ax1.set_ylabel('y')\n >>> ax1.set_title('histogram2d')\n >>> ax1.grid()\n\n >>> # Create hexbin plot for comparison\n >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')\n >>> ax2.plot(x, 2*np.log(x), 'k-')\n >>> ax2.set_title('hexbin')\n >>> ax2.set_xlim(x.min(), x.max())\n >>> ax2.set_xlabel('x')\n >>> ax2.grid()\n\n >>> plt.show()\n ", "n_words": 747, "vocab_size": 356, "n_whitespaces": 1295, "language": "en" } }, { "id": 292448, "commit_id": "b19bf9b147f4321e89d1f7f01e68337f2102f460", "repo": "core", "path": "homeassistant/components/dlna_dms/dms.py", "file_name": "dms.py", "fun_name": "available", "commit_message": "Add dlna_dms integration to support DLNA Digital Media Servers (#66437)", "code": "def available(self) -> bool:\n \n return self._device is not None and self._device.profile_device.available\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 23, "n_ast_nodes": 38, "n_identifiers": 5, "random_cut": "def available(self) -> bool:\n \n return self._device is not None and self._device.profile_device.available\n", "d_id": 91534, "documentation": { "docstring": "Device is available when we have a connection to it.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 270328, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/distributed_training_utils_v1.py", "file_name": "distributed_training_utils_v1.py", "fun_name": "_get_input_from_iterator", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _get_input_from_iterator(iterator, model):\n \n next_element = iterator.get_next()\n\n # `len(nest.flatten(x))` is going to not count empty elements such as {}.\n # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is\n # going to get flattened in `_prepare_feed_values` to work around that. 
Empty\n # elements are going to get filtered out as part of the flattening.\n if len(tf.nest.flatten(next_element)) == len(model.inputs):\n x = next_element\n y = None\n sample_weights = None\n elif len(tf.nest.flatten(next_element)) == (\n len(model.inputs) + len(model.outputs)\n ):\n x, y = next_element\n sample_weights = None\n else:\n x, y, sample_weights = next_element\n\n # Validate that all the elements in x and y are of the same type and shape.\n validate_distributed_dataset_inputs(\n model._distribution_strategy, x, y, sample_weights\n )\n return x, y, sample_weights\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 215, "n_words": 115, "vocab_size": 67, "complexity": 3, "nloc": 17, "token_counts": 108, "n_ast_nodes": 176, "n_identifiers": 16, "random_cut": "def _get_input_from_iterator(iterator, model):\n \n next_element = iterator.get_next()\n\n # `len(nest.flatten(x))` is going to not count empty elements such as {}.\n # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is\n # going to get flattened in `_prepare_feed_values` to work around that. Empty\n # elements are going to get filtered out as part of the flattening.\n if len(tf.nest.flatten(next_element)) == len(model.inputs):\n x = next_element\n y = None\n sample_weights = None\n elif len(tf.nest.flatten(next_element)) == (\n len(model.inputs) + len(model.outputs)\n ):\n x, y = next_element\n sample_weights = None\n else:\n x, y, sample_weights = next_element\n\n # Validate that all the elements in x and y are of the same type and shape.\n validate_distributed_dataset_inputs(\n ", "d_id": 80431, "documentation": { "docstring": "Get elements from the iterator and verify the input shape and type.", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 160501, "commit_id": "84eeca630ec9c5bf580bc456035c87d8591c1389", "repo": "numpy", "path": "numpy/core/multiarray.py", "file_name": "multiarray.py", "fun_name": "inner", "commit_message": "DIC: Misc RST reformatting.\n\nThis contains various RST reformatting.\n\nOne, moving `(C)` one line up, is specific to a bug in tree-sitter-rst\nthat mis parses this section. 
Another is adding one black line for a\nsimilar reason where `..` is seen as section underline by\ntree-sitter-rst.\n\nThis is some shuffling of section underline: try to be consitant,\n`=`, then `-`, then `~`, with this refactor there is also no more\nsection that use backticks as underline.\n\nNote in particular that non-consitency of underline lead to a problem in\ndatetime64 section where \"weekmasks\" (underlined with `-`) were actually\na level-4 heading instead of a level 2 or 3 I guess, and thus were\nnested under the `busday_count()` section.\n\nYou'll note also 2 formulas that are under double-quotes as they are not\nreferences.", "code": "def inner(a, b):\n \n return (a, b)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 12, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 35, "n_identifiers": 6, "random_cut": "def inner(a, b):\n \n return (a, b)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)", "d_id": 38647, "documentation": { "docstring": "\n inner(a, b, /)\n\n Inner product of two arrays.\n\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : array_like\n If `a` and `b` are nonscalar, their last dimensions must match.\n\n Returns\n -------\n out : ndarray\n If `a` and `b` are both\n scalars or both 1-D arrays then a scalar is returned; otherwise\n an array is returned.\n ``out.shape = (*a.shape[:-1], *b.shape[:-1])``\n\n Raises\n ------\n ValueError\n If both `a` and `b` are nonscalar and their last dimensions have\n different sizes.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : Generalised matrix product, using second last dimension of `b`.\n einsum : Einstein summation convention.\n\n Notes\n -----\n For vectors (1-D arrays) it computes the ordinary inner-product::\n\n np.inner(a, b) = sum(a[:]*b[:])\n\n More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``::\n\n np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))\n\n or explicitly::\n\n np.inner(a, b)[i0,...,ir-2,j0,...,js-2]\n = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])\n\n In addition `a` or `b` may be scalars, in which case::\n\n np.inner(a,b) = a*b\n\n Examples\n --------\n Ordinary inner product for vectors:\n\n >>> a = np.array([1,2,3])\n >>> b = np.array([0,1,0])\n >>> np.inner(a, b)\n 2\n\n Some multidimensional examples:\n\n >>> a = np.arange(24).reshape((2,3,4))\n >>> b = np.arange(4)\n >>> c = np.inner(a, b)\n >>> c.shape\n (2, 3)\n >>> c\n array([[ 14, 38, 62],\n [ 86, 110, 134]])\n\n >>> a = np.arange(2).reshape((1,1,2))\n >>> b = np.arange(6).reshape((3,2))\n >>> c = np.inner(a, b)\n >>> c.shape\n (1, 1, 3)\n >>> c\n array([[[1, 3, 5]]])\n\n An example where `b` is a scalar:\n\n >>> np.inner(np.eye(2), 7)\n array([[7., 0.],\n [0., 7.]])\n\n ", "n_words": 260, "vocab_size": 162, "n_whitespaces": 521, "language": "en" } }, { "id": 106987, "commit_id": "c25cf96cfb7e6fc9ad75347cb2a32193c501e82c", "repo": "matplotlib", "path": "lib/matplotlib/collections.py", "file_name": "collections.py", "fun_name": "contains", "commit_message": "Switch transOffset to offset_transform.\n\nNote that most APIs *previously* already accepted *offset_transform* as\nkwarg, due to 
the presence of the `set_offset_transform` setter. Prefer\nthat name (shortening it to `offset_trf` for local variables).\n\nBackcompat for the old `transOffset` name is kept in most places by\nintroducing a property alias.", "code": "def contains(self, mouseevent):\n \n inside, info = self._default_contains(mouseevent)\n if inside is not None:\n return inside, info\n\n if not self.get_visible():\n return False, {}\n\n pickradius = (\n float(self._picker)\n if isinstance(self._picker, Number) and\n self._picker is not True # the bool, not just nonzero or 1\n else self._pickradius)\n\n if self.axes:\n self.axes._unstale_viewLim()\n\n transform, offset_trf, offsets, paths = self._prepare_points()\n\n # Tests if the point is contained on one of the polygons formed\n # by the control points of each of the paths. A point is considered\n # \"on\" a path if it would lie within a stroke of width 2*pickradius\n # following the path. If pickradius <= 0, then we instead simply check\n # if the point is *inside* of the path instead.\n ind = _path.point_in_path_collection(\n mouseevent.x, mouseevent.y, pickradius,\n transform.frozen(), paths, self.get_transforms(),\n offsets, offset_trf, pickradius <= 0)\n\n return len(ind) > 0, dict(ind=ind)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 347, "n_words": 135, "vocab_size": 89, "complexity": 6, "nloc": 19, "token_counts": 148, "n_ast_nodes": 231, "n_identifiers": 29, "random_cut": "def contains(self, mouseevent):\n \n inside, info = self._default_contains(mouseevent)\n if inside is not None:\n return inside, info\n\n if not self.get_visible():\n return False, {}\n\n pickradius = (\n float(self._picker)\n if isinstance(self._picker, Number) and\n self._picker is not True # the bool, not just nonzero or 1\n else self._pickradius)\n\n if self.axes:\n self.axes._unstale_viewLim()\n\n transform, offset_trf, offsets, paths = self._prepare_points()\n\n # Tests if the point is contained on one of the polygons formed\n # by the control points of each of the paths. 
A point is considered\n ", "d_id": 22539, "documentation": { "docstring": "\n Test whether the mouse event occurred in the collection.\n\n Returns ``bool, dict(ind=itemlist)``, where every item in itemlist\n contains the event.\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 49, "language": "en" } }, { "id": 10849, "commit_id": "13edc16d806fb5d77a6849551178ccc75937f25f", "repo": "jina", "path": "jina/orchestrate/deployments/__init__.py", "file_name": "__init__.py", "fun_name": "deployments", "commit_message": "refactor: rename pod to deployment (#4230)\n\n* refactor: rename pod to deployment\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: undo daemon mistake\r\n\r\n* refactor: leftover cleanup\r\n\r\n* fix: more test fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more tests\r\n\r\n* fix: fix more tests\r\n\r\n* refactor: fix more tests\r\n\r\n* refactor: more tests fixes\r\n\r\n* refactor: rename pea to pod\r\n\r\n* refactor: adjust docs\r\n\r\n* refactor: complete pea renaming\r\n\r\n* refactor: more fixes\r\n\r\n* fix: pea_type in k8s yamls\r\n\r\n* fix: adjust pod args name\r\n\r\n* refactor: rename peapods parser folder\r\n\r\n* fix: da init\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def deployments(self) -> List[Dict]:\n \n return [\n {\n 'name': self.name,\n 'head_host': self.head_host,\n 'head_port_in': self.head_port_in,\n }\n ]\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 103, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 12, "token_counts": 34, "n_ast_nodes": 58, "n_identifiers": 7, "random_cut": "def deployments(self) -> List[Dict]:\n \n return [\n {\n 'name': self.name,\n ", "d_id": 1949, "documentation": { "docstring": "Get deployments of the deployment. 
The BaseDeployment just gives one deployment.\n\n :return: list of deployments\n ", "n_words": 15, "vocab_size": 12, "n_whitespaces": 29, "language": "en" } }, { "id": 320685, "commit_id": "bd8c940320b7d8476b422edd9c417703db64f603", "repo": "qutebrowser", "path": "qutebrowser/browser/webkit/http.py", "file_name": "http.py", "fun_name": "parse_content_disposition", "commit_message": "Simplify some syntax\n\nFound via pyupgrade", "code": "def parse_content_disposition(reply):\n \n is_inline = True\n filename = None\n content_disposition_header = b'Content-Disposition'\n # First check if the Content-Disposition header has a filename\n # attribute.\n if reply.hasRawHeader(content_disposition_header):\n # We use the unsafe variant of the filename as we sanitize it via\n # os.path.basename later.\n try:\n value = bytes(reply.rawHeader(content_disposition_header))\n log.network.debug(\"Parsing Content-Disposition: {value!r}\")\n content_disposition = ContentDisposition.parse(value)\n filename = content_disposition.filename()\n except ContentDispositionError as e:\n log.network.error(f\"Error while parsing filename: {e}\")\n else:\n is_inline = content_disposition.is_inline()\n # Then try to get filename from url\n if not filename:\n filename = reply.url().path().rstrip('/')\n # If that fails as well, use a fallback\n if not filename:\n filename = 'qutebrowser-download'\n return is_inline, os.path.basename(filename)\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 251, "n_words": 100, "vocab_size": 68, "complexity": 5, "nloc": 19, "token_counts": 121, "n_ast_nodes": 222, "n_identifiers": 23, "random_cut": "def parse_content_disposition(reply):\n \n is_inline = True\n filename = None\n content_disposition_header = b'Content-Disposition'\n # First check if the Content-Disposition header has a filename\n # attribute.\n if reply.hasRawHeader(content_disposition_header):\n # We use the unsafe variant of the filename as we sanitize it via\n # os.path.basename later.\n try:\n value = bytes(reply.rawHeader(content_disposition_header))\n log.network.debug(\"Parsing Content-Disposition: {value!r}\")\n content_disposition = ContentDisposition.parse(value)\n filename = content_disposition.filename()\n except ContentDispositionError as e:\n log.network.error(f\"Error while parsing filenam", "d_id": 117279, "documentation": { "docstring": "Parse a content_disposition header.\n\n Args:\n reply: The QNetworkReply to get a filename for.\n\n Return:\n A (is_inline, filename) tuple.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 41, "language": "en" } }, { "id": 41391, "commit_id": "a07ef69882ed76e09a0ed43d6f3ea33780c1b2be", "repo": "seaborn", "path": "seaborn/_core/properties.py", "file_name": "properties.py", "fun_name": "_get_categorical_mapping", "commit_message": "Transition mappings->properties, leaving a few loose ends", "code": "def _get_categorical_mapping(self, scale, data):\n \n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is 
None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_colors(colors)\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 415, "n_words": 124, "vocab_size": 89, "complexity": 8, "nloc": 28, "token_counts": 184, "n_ast_nodes": 311, "n_identifiers": 32, "random_cut": "def _get_categorical_mapping(self, scale, data):\n \n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, lis", "d_id": 7413, "documentation": { "docstring": "Define mapping as lookup in list of discrete color values.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 107367, "commit_id": "6010bb43ed01c48c7c403569dd210490b236a853", "repo": "matplotlib", "path": "lib/matplotlib/colorbar.py", "file_name": "colorbar.py", "fun_name": "minorlocator", "commit_message": "MNT: make colorbars locators and formatters properties", "code": "def minorlocator(self, loc):\n \n self._long_axis().set_minor_locator(loc)\n self._minorlocator = loc\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 28, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def minorlocator(self, loc):\n \n self._long_axis().set_minor_locator(loc)", "d_id": 22712, "documentation": { "docstring": "\n Set minor locator being used for colorbar\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 300467, "commit_id": "1d9fb4bca871f97109684419f0f9526a0c151f2d", "repo": "core", "path": "tests/components/recorder/test_models.py", "file_name": "test_models.py", "fun_name": "test_process_datetime_to_timestamp_freeze_time", "commit_message": "Fix process_datetime_to_timestamp and add test coverage (#71755)", "code": "def test_process_datetime_to_timestamp_freeze_time(time_zone, hass):\n \n hass.config.set_time_zone(time_zone)\n utc_now = dt_util.utcnow()\n with freeze_time(utc_now):\n epoch = utc_now.timestamp()\n assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch\n now = dt_util.now()\n assert process_datetime_to_timestamp(now) == epoch\n\n\n@pytest.mark.parametrize(\n \"time_zone\", 
[\"Europe/Berlin\", \"America/Chicago\", \"US/Hawaii\", \"UTC\"]\n)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"time_zone\", [\"Europe/Berlin\", \"America/Chicago\", \"US/Hawaii\", \"UTC\"]\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 71, "n_words": 30, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 61, "n_ast_nodes": 141, "n_identifiers": 16, "random_cut": "def test_process_datetime_to_timestamp_freeze_time(time_zone, hass):\n \n hass.config.set_time_zone(time_zone)\n utc_now = dt_util.utcnow()\n with freeze_time(utc_now):\n epoch = utc_now.timestamp()\n assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch\n now = dt_util.now()\n assert process_datetime_to_timestamp(now) == epoch\n\n\n@pytest.mark.parametrize(\n \"time_zone\", [\"Europe/Berlin\", \"America/Chicago\", \"US/Hawaii", "d_id": 99327, "documentation": { "docstring": "Test we can handle processing database datatimes to timestamps.\n\n This test freezes time to make sure everything matches.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 24, "language": "en" } }, { "id": 101723, "commit_id": "e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_spatially_filter", "commit_message": "Alignments Tool - Typing, Documentation + Re-org", "code": "def _spatially_filter(self) -> np.ndarray:\n \n logger.debug(\"Spatially Filter\")\n assert self._shapes_model is not None\n landmarks_norm = self._normalized[\"landmarks\"]\n # Convert to matrix form\n landmarks_norm_table = np.reshape(landmarks_norm, [68 * 2, landmarks_norm.shape[2]]).T\n # Project onto shapes model and reconstruct\n landmarks_norm_table_rec = self._shapes_model.inverse_transform(\n self._shapes_model.transform(landmarks_norm_table))\n # Convert back to shapes (numKeypoint, num_dims, numFrames)\n landmarks_norm_rec = np.reshape(landmarks_norm_table_rec.T,\n [68, 2, landmarks_norm.shape[2]])\n # Transform back to image co-ordinates\n retval = self._normalized_to_original(landmarks_norm_rec,\n self._normalized[\"scale_factors\"],\n self._normalized[\"mean_coords\"])\n\n logger.debug(\"Spatially Filtered: %s\", retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 306, "n_words": 68, "vocab_size": 53, "complexity": 1, "nloc": 21, "token_counts": 126, "n_ast_nodes": 203, "n_identifiers": 19, "random_cut": "def _spatially_filter(self) -> np.ndarray:\n \n logger.debug(\"Spatially Filter\")\n assert self._shapes_model is not None\n landmarks_norm = self._normalized[\"l", "d_id": 21127, "documentation": { "docstring": " interpret the shapes using our shape model (project and reconstruct)\n\n Returns\n -------\n :class:`numpy.ndarray`\n The filtered landmarks in original coordinate space\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 60, "language": "en" } }, { "id": 196087, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/free_groups.py", "file_name": "free_groups.py", "fun_name": "letter_form", "commit_message": "Updated import locations", "code": "def letter_form(self):\n \n return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)\n for i, j in self.array_form]))\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, 
"n_whitespaces": 48, "n_words": 15, "vocab_size": 14, "complexity": 3, "nloc": 3, "token_counts": 44, "n_ast_nodes": 69, "n_identifiers": 7, "random_cut": "def letter_form(self):\n \n return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)\n for i, j in se", "d_id": 47587, "documentation": { "docstring": "\n The letter representation of a ``FreeGroupElement`` is a tuple\n of generator symbols, with each entry corresponding to a group\n generator. Inverses of the generators are represented by\n negative generator symbols.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> f, a, b, c, d = free_group(\"a b c d\")\n >>> (a**3).letter_form\n (a, a, a)\n >>> (a**2*d**-2*a*b**-4).letter_form\n (a, a, -d, -d, a, -b, -b, -b, -b)\n >>> (a**-2*b**3*d).letter_form\n (-a, -a, b, b, b, d)\n\n See Also\n ========\n\n array_form\n\n ", "n_words": 76, "vocab_size": 56, "n_whitespaces": 203, "language": "en" } }, { "id": 150868, "commit_id": "2b5f0678772bea0abaf4abe93efc55de43ea3e0e", "repo": "freqtrade", "path": "freqtrade/rpc/rpc.py", "file_name": "rpc.py", "fun_name": "_handle_default_message", "commit_message": "Refactoring, minor improvements, data provider improvements", "code": "def _handle_default_message(self, type, data):\n \n logger.debug(f\"Received message from Leader of type {type}: {data}\")\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 26, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 37, "n_identifiers": 6, "random_cut": "def _handle_default_message(self, type, data):\n \n logger.debug(f\"Received message from Leader ", "d_id": 34858, "documentation": { "docstring": "\n Default leader message handler, just logs it. We should never have to\n run this unless the leader sends us some weird message.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 44, "language": "en" } }, { "id": 113318, "commit_id": "f77db747d07d5c90a3a9f70bb17f71d4573f329e", "repo": "nni", "path": "nni/nas/oneshot/pytorch/base_lightning.py", "file_name": "base_lightning.py", "fun_name": "export_probs", "commit_message": "Enhancement of one-shot NAS (v2.9) (#5049)", "code": "def export_probs(self) -> dict[str, Any]:\n \n result = {}\n for module in self.nas_modules:\n try:\n result.update(module.export_probs(memo=result))\n except NotImplementedError:\n warnings.warn(\n 'Some super-modules you have used did not implement export_probs. You might find some logs are missing.',\n UserWarning\n )\n return result\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 170, "n_words": 37, "vocab_size": 36, "complexity": 3, "nloc": 22, "token_counts": 52, "n_ast_nodes": 86, "n_identifiers": 14, "random_cut": "def export_probs(self) -> dict[str, Any]:\n \n result = {}\n for module in self.nas_modules:\n try:\n result.update(module.export_probs(memo=result))\n except NotImplementedE", "d_id": 24885, "documentation": { "docstring": "\n Export the probability of every choice in the search space got chosen.\n\n .. 
note:: If such method of some modules is not implemented, they will be simply ignored.\n\n Returns\n -------\n dict\n In most cases, keys are names of ``nas_modules`` suffixed with ``/`` and choice name.\n Values are the probability / logits depending on the implementation.\n ", "n_words": 55, "vocab_size": 47, "n_whitespaces": 120, "language": "en" } }, { "id": 87624, "commit_id": "0cfac5c8bd83bcc7b96f3294c41a96524b883786", "repo": "sentry", "path": "src/sentry/api/endpoints/organization_releases.py", "file_name": "organization_releases.py", "fun_name": "debounce_update_release_health_data", "commit_message": "fix(releases): Use Release.is_valid_version on adding releases (#40930)", "code": "def debounce_update_release_health_data(organization, project_ids):\n \n # Figure out which projects need to get updates from the snuba.\n should_update = {}\n cache_keys = [\"debounce-health:%d\" % id for id in project_ids]\n cache_data = cache.get_many(cache_keys)\n for project_id, cache_key in zip(project_ids, cache_keys):\n if cache_data.get(cache_key) is None:\n should_update[project_id] = cache_key\n\n if not should_update:\n return\n\n projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())}\n\n # This gives us updates for all release-projects which have seen new\n # health data over the last days. It will miss releases where the last\n # date is longer than what `get_changed_project_release_model_adoptions`\n # considers recent.\n project_releases = release_health.get_changed_project_release_model_adoptions(\n should_update.keys()\n )\n\n # Check which we already have rows for.\n existing = set(\n ReleaseProject.objects.filter(\n project_id__in=[x[0] for x in project_releases],\n release__version__in=[x[1] for x in project_releases],\n ).values_list(\"project_id\", \"release__version\")\n )\n to_upsert = []\n for key in project_releases:\n if key not in existing:\n to_upsert.append(key)\n\n if to_upsert:\n dates = release_health.get_oldest_health_data_for_releases(to_upsert)\n\n for project_id, version in to_upsert:\n project = projects.get(project_id)\n if project is None:\n # should not happen\n continue\n\n # Ignore versions that were saved with an empty string before validation was added\n if not Release.is_valid_version(version):\n continue\n\n # We might have never observed the release. This for instance can\n # happen if the release only had health data so far. For these cases\n # we want to create the release the first time we observed it on the\n # health side.\n release = Release.get_or_create(\n project=project, version=version, date_added=dates.get((project_id, version))\n )\n\n # Make sure that the release knows about this project. 
Like we had before\n # the project might not have been associated with this release yet.\n release.add_project(project)\n\n # Debounce updates for a minute\n cache.set_many(dict(zip(should_update.values(), [True] * len(should_update))), 60)\n\n\n@region_silo_endpoint", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@region_silo_endpoint", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 636, "n_words": 265, "vocab_size": 161, "complexity": 14, "nloc": 36, "token_counts": 268, "n_ast_nodes": 440, "n_identifiers": 48, "random_cut": "def debounce_update_release_health_data(organization, project_ids):\n \n # Figure out which projects need to get updates from the snuba.\n should_update = {}\n cache_keys = [\"debounce-health:%d\" % id for id in project_ids]\n cache_data = cache.get_many(cache_keys)\n for project_id, cache_key in zip(project_ids, cache_keys):\n if cache_data.get(cache_key) is None:\n should_update[project_id] = cache_key\n\n if not should_update:\n return\n\n projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())}\n\n # This gives us updates for all release-projects which have seen new\n # health data over the last days. It will miss releases where the last\n # date is longer than what `get_changed_project_release_model_adoptions`\n # considers recent.\n project_releases = release_health.get_changed_project_release_model_adoptions(\n should_update.keys()\n )\n\n # Check which we already have rows for.\n existing = set(\n ReleaseProject.objects.filter(\n project_id__in=[x[0] for x in project_releases],\n release__version__in=[x[1] for x in project_releases],\n ).values_list(\"project_id\", \"release__version\")\n )\n to_upsert = []\n for key in project_releases:\n if key not in existing:\n to_upsert.append(key)\n\n if to_upsert:\n dates = release_health.get_oldest_health_data_for_releases(to_upsert)\n\n for project_id, version in to_upsert:\n project = projects.get(project_id)\n if project is None:\n # should not happen\n continue\n\n # Ignore versions that were saved with an empty string before validation was added\n ", "d_id": 18323, "documentation": { "docstring": "This causes a flush of snuba health data to the postgres tables once\n per minute for the given projects.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 25, "language": "en" } }, { "id": 222520, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/dis.py", "file_name": "dis.py", "fun_name": "show_code", "commit_message": "add python 3.10.4 for windows", "code": "def show_code(co, *, file=None):\n \n print(code_info(co), file=file)\n\n_Instruction = collections.namedtuple(\"_Instruction\",\n \"opname opcode arg argval argrepr offset starts_line is_jump_target\")\n\n_Instruction.opname.__doc__ = \"Human readable name for operation\"\n_Instruction.opcode.__doc__ = \"Numeric code for operation\"\n_Instruction.arg.__doc__ = \"Numeric argument to operation (if any), otherwise None\"\n_Instruction.argval.__doc__ = \"Resolved arg value (if known), otherwise same as arg\"\n_Instruction.argrepr.__doc__ = \"Human readable description of operation argument\"\n_Instruction.offset.__doc__ = \"Start index of operation within bytecode sequence\"\n_Instruction.starts_line.__doc__ = \"Line started by this opcode (if any), otherwise None\"\n_Instruction.is_jump_target.__doc__ = \"True if other code jumps to here, otherwise False\"\n\n_OPNAME_WIDTH = 20\n_OPARG_WIDTH = 5\n", "url": 
"https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 95, "n_words": 96, "vocab_size": 67, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 171, "n_identifiers": 19, "random_cut": "def show_code(co, *, file=None):\n \n print(code_info(co), file=file)\n\n_Instruction = collections.namedtuple(\"_Instruction\",\n \"opname opcode arg argval argrepr offset starts_line is_jump_target\")\n\n_Instruction.opname.__doc__ = \"Human readable name for operation\"\n_Instruction.opcode.__doc__ = \"Numeric co", "d_id": 56613, "documentation": { "docstring": "Print details of methods, functions, or code to *file*.\n\n If *file* is not provided, the output is printed on stdout.\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 26, "language": "en" } }, { "id": 109381, "commit_id": "a17f4f3bd63e3ca3754f96d7db4ce5197720589b", "repo": "matplotlib", "path": "lib/matplotlib/pyplot.py", "file_name": "pyplot.py", "fun_name": "set_cmap", "commit_message": "MNT: convert tests and internal usage way from using mpl.cm.get_cmap", "code": "def set_cmap(cmap):\n \n cmap = colormaps[cmap]\n\n rc('image', cmap=cmap.name)\n im = gci()\n\n if im is not None:\n im.set_cmap(cmap)\n\n\n@_copy_docstring_and_deprecators(matplotlib.image.imread)", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@_copy_docstring_and_deprecators(matplotlib.image.imread)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 38, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 39, "n_ast_nodes": 82, "n_identifiers": 11, "random_cut": "def set_cmap(cmap):\n \n cmap = colormaps[cmap]\n\n rc('image', cmap=cmap.name)\n im = gci()\n\n if im is not None:\n im.set_cmap(cmap)\n\n", "d_id": 23562, "documentation": { "docstring": "\n Set the default colormap, and applies it to the current image if any.\n\n Parameters\n ----------\n cmap : `~matplotlib.colors.Colormap` or str\n A colormap instance or the name of a registered colormap.\n\n See Also\n --------\n colormaps\n matplotlib.cm.register_cmap\n matplotlib.cm.get_cmap\n ", "n_words": 36, "vocab_size": 33, "n_whitespaces": 74, "language": "en" } }, { "id": 320776, "commit_id": "a20bb67a878b2e68abf8268c1b0a27f018d01352", "repo": "qutebrowser", "path": "qutebrowser/completion/completionwidget.py", "file_name": "completionwidget.py", "fun_name": "completion_item_focus", "commit_message": "mypy: Upgrade to PyQt5-stubs 5.15.6.0\n\nFor some unknown reason, those new stubs cause a *lot* of things now to be\nchecked by mypy which formerly probably got skipped due to Any being implied\nsomewhere.\n\nThe stubs themselves mainly improved, with a couple of regressions too.\n\nIn total, there were some 337 (!) new mypy errors. This commit fixes almost all\nof them, and the next commit improves a fix to get things down to 0 errors\nagain.\n\nOverview of the changes:\n\n==== qutebrowser/app.py\n\n- Drop type ignore due to improved stubs.\n\n==== qutebrowser/browser/browsertab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n This is debatable: I suppose the abstract stuff shouldn't need to know\n anything about the concrete backends at all. But it seems like we cut some\n corners when initially implementing things, and put some code in browsertab.py\n just because the APIs of both backends happened to be compatible. 
Perhaps\n something to reconsider once we drop QtWebKit and hopefully implement a dummy\n backend.\n\n- Add an additional assertion in AbstractAction.run_string. This is already\n covered by the isinstance(member, self.action_base) above it, but that's too\n dynamic for mypy to understand.\n\n- Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x\n and y components), not a single int.\n\n- Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x\n and y components), not a single int.\n\n- Fix the argument types of AbstractScroller.to_perc, as it's possible to pass\n fractional percentages too.\n\n- Specify the type for AbstractHistoryPrivate._history. See above (_widget) re\n this being debatable.\n\n- Fix the return type of AbstractTabPrivate.event_target(), which can be None\n (see #3888).\n\n- Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS\n return value), not None.\n\n- Fix the argument type for AbstractTabPrivate.toggle_inspector: position can\n be None to use the last used position.\n\n- Declare the type of sub-objects of AbstractTab.\n\n- Fix the return value of AbstractTab.icon(), which is the QIcon, not None.\n\n==== qutebrowser/browser/commands.py\n\n- Make sure the active window is a MainWindow (with a .win_id attribute).\n\n==== qutebrowser/browser/downloadview.py\n\n- Add _model() which makes sure that self.model() is a DownloadModel, not None\n or any other model. This is needed because other methods access a variety of\n custom attributes on it, e.g. last_index().\n\n==== qutebrowser/browser/greasemonkey.py\n\n- Add an ignore for AbstractDownload.requested_url which we patch onto the\n downloads. Probably would be nicer to add it as a proper attribute which always\n gets set by the DownloadManager.\n\n==== qutebrowser/browser/hints.py\n\n- Remove type ignores for QUrl.toString().\n- Add a new type ignore for combining different URL flags (which works, but is\n not exactly type safe... still probably a regression in the stubs).\n- Make sure the things we get back from self._get_keyparser are what we actually\n expect. Probably should introduce a TypedDict (and/or overloads for\n _get_keyparser with typing.Literal) to teach mypy about the exact return value.\n See #7098.\n This is needed because we access Hint/NormalKeyParser-specific attributes such\n as .set_inhibited_timout() or .update_bindings().\n\n==== qutebrowser/browser/inspector.py\n\n- Similar changes than in browsertab.py to make some types where we share API\n (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next\n commit.\n\n==== qutebrowser/browser/network/pac.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/qtnetworkdownloads.py\n\n- Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an\n AbstractDownload), so that we can call ._uses_nam() on it.\n\n==== qutebrowser/browser/qutescheme.py\n\n- Remove now unneeded type ignore for QUrl flags.\n\n==== qutebrowser/browser/urlmarks.py\n\n- Specify the type of UrlMarkManager._lineparser, as those only get initialized\n in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist.\n\n==== qutebrowser/browser/webelem.py\n\n- New casts to turn single KeyboardModifier (enum) entries into\n KeyboardModifiers (flags). 
Might not be needed anymore with Qt 6.\n- With that, casting the final value is now unneeded.\n\n==== qutebrowser/browser/webengine/notification.py\n\n- Remove now unneeded type ignore for signal.\n- Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished()\n is a QProcess, not just any QObject.\n\n==== qutebrowser/browser/webengine/webenginedownloads.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webengine/webengineelem.py\n\n- Specify the type of WebEngineElement._tab.\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webengineinspector.py\n\n- See changes to inspector.py and next commit.\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/webengine/webenginequtescheme.py\n\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webenginesettings.py\n\n- Ignore access of .setter attribute which we patch onto QWebEngineProfile.\n Would be nice to have a subclass or wrapper-class instead.\n\n==== qutebrowser/browser/webengine/webenginetab.py\n\n- Specified the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Remove some now-unneeded type ignores for creating FindFlags.\n- Specify more concrete types for WebEngineTab members where we actually need to\n access WebEngine-specific attributes.\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webengine/webview.py\n\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webkit/network/networkreply.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webkit/webkitinspector.py\n\n- See changes to inspector.py and next commit.\n\n==== qutebrowser/browser/webkit/webkittab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Add a type ignore for WebKitAction because our workaround needs to\n treat them as ints (which is allowed by PyQt, even if not type-safe).\n- Add new ignores for findText calls: The text is a QString and can be None; the\n flags are valid despite mypy thinking they aren't (stubs regression?).\n- Specify the type for WebKitHistoryPrivate._history, because we access\n WebKit-specific attributes. See above (_widget) re this being debatable.\n- Make mypy aware that .currentFrame() and .frameAt() can return None (stubs\n regression?).\n- Make sure the .page() and .page().networkAccessManager() are our subclasses\n rather than the more generic QtWebKit objects, as we use custom attributes.\n- Add new type ignores for signals (stubs regression!)\n\n==== qutebrowser/browser/webkit/webpage.py\n\n- Make sure the .networkAccessManager() is our subclass rather than the more\n generic QtWebKit object, as we use custom attributes.\n- Replace a cast by a type ignore. 
The cast didn't work anymore.\n\n==== qutebrowser/browser/webkit/webview.py\n\n- Make sure the .page() is our subclass rather than the more generic QtWebKit\n object, as we use custom attributes.\n\n==== qutebrowser/commands/userscripts.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/completion/completer.py\n\n- Add a new _completion() getter (which ensures it actually gets the completion\n view) rather than accessing the .parent() directly (which could be any QObject).\n\n==== qutebrowser/completion/completiondelegate.py\n\n- Make sure self.parent() is a CompletionView (no helper method as there is only\n one instance).\n- Remove a now-unneeded type ignore for adding QSizes.\n\n==== qutebrowser/completion/completionwidget.py\n\n- Add a ._model() getter which ensures that we get a CompletionModel (with\n custom attributes) rather than Qt's .model() which can be any QAbstractItemModel\n (or None).\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/completion/models/completionmodel.py\n\n- Remove now unneeded type ignores for signals.\n- Ignore a complaint about .set_pattern() not being defined. Completion\n categories don't share any common parent class, so it would be good to introduce\n a typing.Protocol for this. See #7098.\n\n==== qutebrowser/components/misccommands.py\n\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/components/readlinecommands.py\n\n- Make sure QApplication.instance() is a QApplication (and not just a\n QCoreApplication). This includes the former \"not None\" check.\n\n==== qutebrowser/components/scrollcommands.py\n\n- Add basic annotation for \"funcs\" dict. Could have a callable protocol to\n specify it needs a count kwarg, see #7098.\n\n==== qutebrowser/config/stylesheet.py\n\n- Correctly specify that stylesheet apply to QWidgets, not any QObject.\n- Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy\n about this with overloads and protocols (stylesheet for set_register being None\n => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not\n worth the troble. See #7098.\n\n==== qutebrowser/keyinput/keyutils.py\n\n- Remove some now-unneeded type ignores and add a cast for using a single enum\n value as flags. Might need to look at this again with Qt 6 support.\n\n==== qutebrowser/keyinput/modeman.py\n\n- Add a FIXME for using a TypedDict, see comments for hints.py above.\n\n==== qutebrowser/mainwindow/mainwindow.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n- Improve where we cast from WindowType to WindowFlags, no int needed\n- Use new .tab_bar() getter, see below.\n\n==== qutebrowser/mainwindow/prompt.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n\n==== qutebrowser/mainwindow/statusbar/bar.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/statusbar/command.py\n\n- Fix type for setText() override (from QLineEdit): text can be None\n (QString in C++).\n\n==== qutebrowser/mainwindow/statusbar/url.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/tabbedbrowser.py\n\n- Specify that TabDeque manages browser tabs, not any QWidgets. 
It accesses\n AbstractTab-specific attributes.\n- Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access\n .maybe_hide.\n- Fix the annotations for stored marks: Scroll positions are a QPoint, not int.\n- Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and\n .widget(), which ensures that the return values are valid AbstractTabs (or None\n for _tab_by_idx). This is needed because we access AbstractTab-specific\n attributes.\n- For some places, where the tab can be None, continue using .currentTab() but\n add asserts.\n- Remove some now-unneeded [unreachable] ignores, as mypy knows about the None\n possibility now.\n\n==== qutebrowser/mainwindow/tabwidget.py\n\n- Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and\n .widget() are of type TabBar and AbstractTab, respectively.\n- Add additional assertions where we expect ._tab_by_idx() to never be None.\n- Remove dead code in get_tab_fields for handling a None y scroll position. I\n was unable to find any place in the code where this could be set to None.\n- Remove some now-unneeded type ignores and casts, as mypy now knows that\n _type_by_idx() could be None.\n- Work around a strange instance where mypy complains about not being able to\n find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility,\n despite it clearly being shown as a bool *inside* that class without any\n annotation.\n- Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in\n fact a TabWidget.\n\n==== qutebrowser/misc/crashsignal.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/editor.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/ipc.py\n\n- Remove now unneeded type ignores for signals.\n- Add new type ignores for .error() which is both a signal and a getter\n (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was\n renamed to errorOccurred in 5.15.\n\n==== qutebrowser/misc/objects.py\n\n- Make sure mypy knows that objects.app is our custom Application (with custom\n attributes) rather than any QApplication.\n\n==== qutebrowser/utils/objreg.py\n\n- Ignore attr-defined for .win_id attributes. 
Maybe could add a typing.Protocol,\n but ideally, the whole objreg stuff should die one day anyways.\n\n==== tests/unit/completion/test_completer.py\n\n- Make CompletionWidgetStub inherit from CompletionView so that it passes the\n new isinstance() asserts in completer.py (see above).", "code": "def completion_item_focus(self, which, history=False):\n \n if history:\n if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or\n not self._active):\n if which == 'next':\n self._cmd.command_history_next()\n return\n elif which == 'prev':\n self._cmd.command_history_prev()\n return\n else:\n raise cmdutils.CommandError(\"Can't combine --history with \"\n \"{}!\".format(which))\n\n if not self._active:\n return\n\n selmodel = self.selectionModel()\n indices = {\n 'next': lambda: self._next_idx(upwards=False),\n 'prev': lambda: self._next_idx(upwards=True),\n 'next-category': lambda: self._next_category_idx(upwards=False),\n 'prev-category': lambda: self._next_category_idx(upwards=True),\n 'next-page': lambda: self._next_page(upwards=False),\n 'prev-page': lambda: self._next_page(upwards=True),\n }\n idx = indices[which]()\n\n if not idx.isValid():\n return\n\n selmodel.setCurrentIndex(\n idx,\n QItemSelectionModel.ClearAndSelect |\n QItemSelectionModel.Rows)\n\n # if the last item is focused, try to fetch more\n next_idx = self.indexBelow(idx)\n if not self.visualRect(next_idx).isValid():\n self.expandAll()\n\n count = self._model().count()\n if count == 0:\n self.hide()\n elif count == 1 and config.val.completion.quick:\n self.hide()\n elif config.val.completion.show == 'auto':\n self.show()\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 608, "n_words": 114, "vocab_size": 78, "complexity": 14, "nloc": 41, "token_counts": 292, "n_ast_nodes": 494, "n_identifiers": 38, "random_cut": "def completion_item_focus(self, which, history=False):\n \n if history:\n if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or\n not self._active):\n if which == 'next':\n self._cmd.command_history_next()\n return\n ", "d_id": 117343, "documentation": { "docstring": "Shift the focus of the completion menu to another item.\n\n Args:\n which: 'next', 'prev',\n 'next-category', 'prev-category',\n 'next-page', or 'prev-page'.\n history: Navigate through command history if no text was typed.\n ", "n_words": 29, "vocab_size": 28, "n_whitespaces": 101, "language": "en" } }, { "id": 101259, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "tools/manual/detected_faces.py", "file_name": "detected_faces.py", "fun_name": "_background_extract", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def _background_extract(self, output_folder, progress_queue):\n \n _io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True),\n loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))\n\n for frame_idx, (filename, image) in enumerate(_io[\"loader\"].load()):\n logger.trace(\"Outputting frame: %s: %s\", frame_idx, filename)\n src_filename = os.path.basename(filename)\n frame_name = os.path.splitext(src_filename)[0]\n progress_queue.put(1)\n\n for face_idx, face in 
enumerate(self._frame_faces[frame_idx]):\n output = f\"{frame_name}_{face_idx}.png\"\n aligned = AlignedFace(face.landmarks_xy,\n image=image,\n centering=\"head\",\n size=512) # TODO user selectable size\n meta = dict(alignments=face.to_png_meta(),\n source=dict(alignments_version=self._alignments.version,\n original_filename=output,\n face_index=face_idx,\n source_filename=src_filename,\n source_is_video=self._globals.is_video,\n source_frame_dims=image.shape[:2]))\n\n b_image = encode_image(aligned.face, \".png\", metadata=meta)\n _io[\"saver\"].save(output, b_image)\n _io[\"saver\"].close()\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 575, "n_words": 65, "vocab_size": 56, "complexity": 3, "nloc": 24, "token_counts": 232, "n_ast_nodes": 366, "n_identifiers": 58, "random_cut": "def _background_extract(self, output_folder, progress_queue):\n \n _io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True),\n loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))\n\n for frame_idx, (filename, image) in enumerate(_io[\"loader\"].load()):\n logger.trace(\"Outputting frame: %s: %s\", frame_idx, filename)\n src_filename = os.path.basename(filename)\n frame_name = os.path.splitext(src_filename)[0]\n progress_queue.put(1)\n\n for face_idx, face in enumerate(self._frame_faces[frame_idx]):\n output = f\"{frame_name}_{face_idx}.png\"\n aligned = AlignedFace(face.landmarks_xy,\n image=image,\n centering=\"head\",\n size=512) # TODO user selectable size\n meta = dict(alignments=face.to_png_meta(),\n source=dict(alignments_version=self._alignments.version,\n original_filename=output,\n face_index=face_idx,\n source_filename=src_filename,\n source_is_video=self._globals.is_video,\n source_frame_dims=image.shape[:2]))\n\n ", "d_id": 20679, "documentation": { "docstring": " Perform the background extraction in a thread so GUI doesn't become unresponsive.\n\n Parameters\n ----------\n output_folder: str\n The location to save the output faces to\n progress_queue: :class:`queue.Queue`\n The queue to place incremental counts to for updating the GUI's progress bar\n ", "n_words": 39, "vocab_size": 33, "n_whitespaces": 97, "language": "en" } }, { "id": 320361, "commit_id": "55ef0d4a1b62c3abe8500cad97ddeecf9f746b84", "repo": "paperless-ngx", "path": "src/paperless_tesseract/tests/test_checks.py", "file_name": "test_checks.py", "fun_name": "test_multi_part_language", "commit_message": "Fixes language code checks around two part languages", "code": "def test_multi_part_language(self, m):\n \n m.return_value = [\"chi_sim\", \"eng\"]\n\n msgs = check_default_language_available(None)\n\n self.assertEqual(len(msgs), 0)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 59, "n_identifiers": 8, "random_cut": "def test_multi_part_language(self, m):\n \n m.return_value = [\"chi_sim\", \"eng\"]\n\n msgs = check_default_language_available(None)\n\n self.assertEqual(len", "d_id": 117149, "documentation": { "docstring": "\n GIVEN:\n - An OCR language which is multi part (ie chi-sim)\n - The language is correctly formatted\n WHEN:\n - Installed packages are checked\n THEN:\n - No errors are reported\n ", "n_words": 29, "vocab_size": 23, "n_whitespaces": 102, "language": "en" } }, { 
"id": 80512, "commit_id": "443bdc1234682dd0004bae372078512fcf37cce9", "repo": "awx", "path": "awx/main/tasks/callback.py", "file_name": "callback.py", "fun_name": "finished_callback", "commit_message": "Decoupled callback functions from BaseTask Class\n\n--- Removed all callback functions from 'jobs.py' and put them in a new file '/awx/main/tasks/callback.py'\n--- Modified Unit tests unit moved\n--- Moved 'update_model' from jobs.py to /awx/main/utils/update_model.py", "code": "def finished_callback(self, runner_obj):\n \n event_data = {\n 'event': 'EOF',\n 'final_counter': self.event_ct,\n 'guid': self.guid,\n }\n event_data.setdefault(self.event_data_key, self.instance.id)\n self.dispatcher.dispatch(event_data)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 84, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 8, "token_counts": 50, "n_ast_nodes": 85, "n_identifiers": 12, "random_cut": "def finished_callback(self, runner_obj):\n \n ", "d_id": 17072, "documentation": { "docstring": "\n Ansible runner callback triggered on finished run\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 27579, "commit_id": "7ea7916c65357741c3911e307acb58d547a5e91a", "repo": "saleor", "path": "saleor/webhook/observability/tests/test_obfuscation.py", "file_name": "test_obfuscation.py", "fun_name": "test_anonymize_gql_operation_response_with_fragment_spread", "commit_message": "Observability reporter (#9803)\n\n* Initial commit\r\n\r\n* Add observability celery beat task\r\n\r\n* Add observability_reporter_task and observability_send_events\r\n\r\n* Convert payload to camel case\r\n\r\n* Add fakeredis to dev dependencies\r\n\r\n* Add redis buffer tests\r\n\r\n* Refactor buffer\r\n\r\n* Update\r\n\r\n* Optimize buffer\r\n\r\n* Add tests\r\n\r\n* Add types-redis to dev dependencies\r\n\r\n* Refactor\r\n\r\n* Fix after rebase\r\n\r\n* Refactor opentracing\r\n\r\n* Add opentracing to observability tasks\r\n\r\n* Add more tests\r\n\r\n* Fix buffer fixtures\r\n\r\n* Report dropped events\r\n\r\n* Fix buffer tests\r\n\r\n* Refactor get_buffer\r\n\r\n* Refactor unit tests\r\n\r\n* Set Redis connection client_name\r\n\r\n* Refactor redis tests\r\n\r\n* Fix test_get_or_create_connection_pool\r\n\r\n* Fix JsonTruncText comparison\r\n\r\n* Add more generate_event_delivery_attempt_payload tests", "code": "def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory):\n query = \n result = {\"data\": \"result\"}\n sensitive_fields = {\"Product\": {\"name\"}}\n operation_result = gql_operation_factory(query, result=result)\n\n anonymize_gql_operation_response(operation_result, sensitive_fields)\n\n assert operation_result.result[\"data\"] == MASK\n\n\n@pytest.mark.parametrize(\n \"sensitive_fields\",\n [\n {\"NonExistingType\": {}},\n {\"Product\": {\"nonExistingField\"}},\n {\"Node\": {\"id\"}},\n ],\n)", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"sensitive_fields\",\n [\n {\"NonExistingType\": {}},\n {\"Product\": {\"nonExistingField\"}},\n {\"Node\": {\"id\"}},\n ],\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 79, "n_words": 33, "vocab_size": 29, "complexity": 1, "nloc": 23, "token_counts": 49, "n_ast_nodes": 152, "n_identifiers": 11, "random_cut": "def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory):\n query = \n result = {\"data\": 
\"result\"}\n sensitive_fields = {\"", "d_id": 5117, "documentation": { "docstring": "\n fragment ProductFragment on Product {\n id\n name\n }\n query products($first: Int){\n products(channel: \"channel-pln\", first:$first){\n edges{\n node{\n ... ProductFragment\n variants {\n variantName: name\n }\n }\n }\n }\n }", "n_words": 27, "vocab_size": 19, "n_whitespaces": 139, "language": "en" } }, { "id": 176620, "commit_id": "de1d00f20e0bc14f1cc911b3486e50225a8fa168", "repo": "networkx", "path": "networkx/algorithms/bipartite/generators.py", "file_name": "generators.py", "fun_name": "complete_bipartite_graph", "commit_message": "Adjust the usage of nodes_or_number decorator (#5599)\n\n* recorrect typo in decorators.py\r\n\r\n* Update tests to show troubles in current code\r\n\r\n* fix troubles with usage of nodes_or_number\r\n\r\n* fix typo\r\n\r\n* remove nodes_or_number where that makes sense\r\n\r\n* Reinclude nodes_or_numbers and add some tests for nonstandard usage\r\n\r\n* fix typowq\r\n\r\n* hopefully final tweaks (no behavior changes\r\n\r\n* Update test_classic.py\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def complete_bipartite_graph(n1, n2, create_using=None):\n \n G = nx.empty_graph(0, create_using)\n if G.is_directed():\n raise nx.NetworkXError(\"Directed Graph not supported\")\n\n n1, top = n1\n n2, bottom = n2\n if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral):\n bottom = [n1 + i for i in bottom]\n G.add_nodes_from(top, bipartite=0)\n G.add_nodes_from(bottom, bipartite=1)\n if len(G) != len(top) + len(bottom):\n raise nx.NetworkXError(\"Inputs n1 and n2 must contain distinct nodes\")\n G.add_edges_from((u, v) for u in top for v in bottom)\n G.graph[\"name\"] = f\"complete_bipartite_graph({n1}, {n2})\"\n return G\n\n\n@py_random_state(3)", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@py_random_state(3)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 130, "n_words": 74, "vocab_size": 54, "complexity": 8, "nloc": 15, "token_counts": 148, "n_ast_nodes": 250, "n_identifiers": 23, "random_cut": "def complete_bipartite_graph(n1, n2, create_using=None):\n \n G = nx.empty_g", "d_id": 42003, "documentation": { "docstring": "Returns the complete bipartite graph `K_{n_1,n_2}`.\n\n The graph is composed of two partitions with nodes 0 to (n1 - 1)\n in the first and nodes n1 to (n1 + n2 - 1) in the second.\n Each node in the first is connected to each node in the second.\n\n Parameters\n ----------\n n1, n2 : integer or iterable container of nodes\n If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`.\n If a container, the elements are the nodes.\n create_using : NetworkX graph instance, (default: nx.Graph)\n Return graph of this type.\n\n Notes\n -----\n Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are\n containers of nodes. 
If only one of n1 or n2 are integers, that\n integer is replaced by `range` of that integer.\n\n The nodes are assigned the attribute 'bipartite' with the value 0 or 1\n to indicate which bipartite set the node belongs to.\n\n This function is not imported in the main namespace.\n To use it use nx.bipartite.complete_bipartite_graph\n ", "n_words": 166, "vocab_size": 93, "n_whitespaces": 237, "language": "en" } }, { "id": 220698, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/sslproto.py", "file_name": "sslproto.py", "fun_name": "shutdown", "commit_message": "add python 3.10.4 for windows", "code": "def shutdown(self, callback=None):\n \n if self._state == _UNWRAPPED:\n raise RuntimeError('no security layer present')\n if self._state == _SHUTDOWN:\n raise RuntimeError('shutdown in progress')\n assert self._state in (_WRAPPED, _DO_HANDSHAKE)\n self._state = _SHUTDOWN\n self._shutdown_cb = callback\n ssldata, appdata = self.feed_ssldata(b'')\n assert appdata == [] or appdata == [b'']\n return ssldata\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 130, "n_words": 45, "vocab_size": 31, "complexity": 4, "nloc": 11, "token_counts": 79, "n_ast_nodes": 130, "n_identifiers": 13, "random_cut": "def shutdown(self, callback=None):\n \n if self._state == _UNWRAPPED:\n raise RuntimeError('no security layer present')\n if self._state == _SHUTDOWN:\n raise RuntimeError('shutdown in progress')\n assert self._state in (_WRAPPED, _DO_HANDSHAKE)\n self._state = _SHUTDOWN\n self._shutdown_cb = callback\n ssldata, ap", "d_id": 56088, "documentation": { "docstring": "Start the SSL shutdown sequence.\n\n Return a list of ssldata. A ssldata element is a list of buffers\n\n The optional *callback* argument can be used to install a callback that\n will be called when the shutdown is complete. 
The callback will be\n called without arguments.\n ", "n_words": 45, "vocab_size": 32, "n_whitespaces": 80, "language": "en" } }, { "id": 216053, "commit_id": "a5679caf65c7c79cd72841b6e5793b9b693744c9", "repo": "salt", "path": "salt/cloud/clouds/proxmox.py", "file_name": "proxmox.py", "fun_name": "preferred_ip", "commit_message": "Add support for get IP-address from agent", "code": "def preferred_ip(vm_, ips):\n \n proto = config.get_cloud_config_value(\n \"protocol\", vm_, __opts__, default=\"ipv4\", search_global=False\n )\n\n family = socket.AF_INET\n if proto == \"ipv6\":\n family = socket.AF_INET6\n for ip in ips:\n ignore_ip = ignore_cidr(vm_, ip)\n if ignore_ip:\n continue\n try:\n socket.inet_pton(family, ip)\n return ip\n except Exception: # pylint: disable=broad-except\n continue\n return False\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 154, "n_words": 46, "vocab_size": 36, "complexity": 5, "nloc": 17, "token_counts": 78, "n_ast_nodes": 128, "n_identifiers": 18, "random_cut": "def preferred_ip(vm_, ips):\n \n proto = config.get_cloud_config_value(\n \"protocol\", vm_, __opts__, default=\"ipv4\", search_global=False\n )\n\n family = socket.AF_INET\n if proto == \"ipv6\":\n family = socket.AF_INET6\n for ip in ips:\n ignore_ip = ignore_cidr(vm_, ip)\n if ignore_ip:\n continue\n try:\n socket.inet_pton(family, ip)\n return ip\n except Exception: # pylint: disable=broad-except\n continue\n return False\n\n", "d_id": 54359, "documentation": { "docstring": "\n Return either an 'ipv4' (default) or 'ipv6' address depending on 'protocol' option.\n The list of 'ipv4' IPs is filtered by ignore_cidr() to remove any unreachable private addresses.\n ", "n_words": 27, "vocab_size": 26, "n_whitespaces": 37, "language": "en" } }, { "id": 68785, "commit_id": "00ef499739959630cd7cf97419fbb6ca59be05f2", "repo": "erpnext", "path": "erpnext/accounts/doctype/account/account.py", "file_name": "account.py", "fun_name": "get_parent_account", "commit_message": "refactor: use db independent offset syntax (#31345)\n\n* chore: use db independent offset syntax\r\n\r\n* fix: typo\r\n\r\n* style: reformat code to black spec\r\n\r\nCo-authored-by: Ankush Menat ", "code": "def get_parent_account(doctype, txt, searchfield, start, page_len, filters):\n\treturn frappe.db.sql(\n\t\t\n\t\t% (\"%s\", searchfield, \"%s\", \"%s\", \"%s\"),\n\t\t(filters[\"company\"], \"%%%s%%\" % txt, page_len, start),\n\t\tas_list=1,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 17, "n_words": 23, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 56, "n_ast_nodes": 85, "n_identifiers": 11, "random_cut": "def get_parent_account(doctype, txt, searchfield, start, page_len, filters):\n\treturn frappe.db.sql(\n\t\t\n\t\t% (\"%s\", searchfield, \"%s\", \"%s\", \"%s\"),\n\t\t(filters[\"company\"], \"%%%s%%\" % txt, page_len, start),\n\t\tas_list=1,\n", "d_id": 14866, "documentation": { "docstring": "select name from tabAccount\n\t\twhere is_group = 1 and docstatus != 2 and company = %s\n\t\tand %s like %s order by name limit %s offset %s", "n_words": 27, "vocab_size": 19, "n_whitespaces": 24, "language": "en" } }, { "id": 276930, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/kernelized_utils.py", "file_name": "kernelized_utils.py", "fun_name": "exact_laplacian_kernel", 
"commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def exact_laplacian_kernel(x, y, stddev):\n r\n x_aligned, y_aligned = _align_matrices(x, y)\n diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2)\n return tf.exp(-diff_l1_norm / stddev)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 30, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 27, "token_counts": 53, "n_ast_nodes": 81, "n_identifiers": 13, "random_cut": "def exact_laplacian_kernel(x, y, stddev):\n r\n x_aligned, y_aligned = _align_matrices(x, y)\n diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2)\n ", "d_id": 81783, "documentation": { "docstring": "Computes exact Laplacian kernel value(s) for tensors x and y using stddev.\n\n The Laplacian kernel for vectors u, v is defined as follows:\n K(u, v) = exp(-||u-v|| / stddev)\n where the norm is the l1-norm. x, y can be either vectors or matrices. If they\n are vectors, they must have the same dimension. If they are matrices, they\n must have the same number of columns. In the latter case, the method returns\n (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and\n v is a row from y.\n\n Args:\n x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim].\n y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim].\n stddev: The width of the Gaussian kernel.\n\n Returns:\n A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix\n of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for\n all (u,v) pairs where u, v are rows from x and y respectively.\n\n Raises:\n ValueError: if the shapes of x, y are not compatible.\n ", "n_words": 195, "vocab_size": 106, "n_whitespaces": 269, "language": "en" } }, { "id": 259028, "commit_id": "71656844586f212324678804ace73f7a266deb00", "repo": "scikit-learn", "path": "sklearn/manifold/_isomap.py", "file_name": "_isomap.py", "fun_name": "transform", "commit_message": "ENH Isomap supports radius-based neighbors (#19794)\n\nCo-authored-by: Tom Dupré la Tour \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Julien Jerphanion ", "code": "def transform(self, X):\n \n check_is_fitted(self)\n if self.n_neighbors is not None:\n distances, indices = self.nbrs_.kneighbors(X, return_distance=True)\n else:\n distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)\n\n # Create the graph of shortest distances from X to\n # training data via the nearest neighbors of X.\n # This can be done as a single array operation, but it potentially\n # takes a lot of memory. 
To avoid that, use a loop:\n\n n_samples_fit = self.nbrs_.n_samples_fit_\n n_queries = distances.shape[0]\n G_X = np.zeros((n_queries, n_samples_fit))\n for i in range(n_queries):\n G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)\n\n G_X **= 2\n G_X *= -0.5\n\n return self.kernel_pca_.transform(G_X)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 232, "n_words": 93, "vocab_size": 75, "complexity": 3, "nloc": 14, "token_counts": 140, "n_ast_nodes": 213, "n_identifiers": 23, "random_cut": "def transform(self, X):\n \n check_is_fitted(self)\n if self.n_neighbors is not None:\n distances, indices = self.nbrs_.kneighbors(X, return_dis", "d_id": 75534, "documentation": { "docstring": "Transform X.\n\n This is implemented by linking the points X into the graph of geodesic\n distances of the training data. First the `n_neighbors` nearest\n neighbors of X are found in the training data, and from these the\n shortest geodesic distances from each point in X to each point in\n the training data are computed in order to construct the kernel.\n The embedding of X is the projection of this kernel onto the\n embedding vectors of the training set.\n\n Parameters\n ----------\n X : array-like, shape (n_queries, n_features)\n If neighbors_algorithm='precomputed', X is assumed to be a\n distance matrix or a sparse graph of shape\n (n_queries, n_samples_fit).\n\n Returns\n -------\n X_new : array-like, shape (n_queries, n_components)\n X transformed in the new space.\n ", "n_words": 118, "vocab_size": 69, "n_whitespaces": 260, "language": "en" } }, { "id": 189989, "commit_id": "309c9d41eb734ca85a7aea5533f88a6d4ee7c944", "repo": "manim", "path": "manim/mobject/svg/svg_mobject.py", "file_name": "svg_mobject.py", "fun_name": "generate_config_style_dict", "commit_message": "Ported improved implementation of :class:`.SVGMobject` from 3b1b/manim (#2898)\n\n* port SVGMobject from 3b1b/manim\r\n\r\n* added svgelements as dependency\r\n\r\n* revert change of default values\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* set default stroke_width of svg elements to 0 if not set\r\n\r\n* fix handling of circles with different rx/ry\r\n\r\n* turn more methods into staticmethods\r\n\r\n* removed duplicated method\r\n\r\n* set/adapt stroke-width of some test SVGs\r\n\r\n* updated control data\r\n\r\n* forgot some control data\r\n\r\n* fixed init_colors in tex_mobject and text_mobject\r\n\r\n* minor changes, added docstrings\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* module docstring, removed import\r\n\r\n* vector_to_coords changed again\r\n\r\n* nail sphinx version to below 5.1 to fix rtd (?)\r\n\r\n* update test_text control data for science\r\n\r\n* changed Brace to use VMobjectFromSVGPath\r\n\r\n* remove unused classes and methods depending on old SVG path implementation\r\n\r\n* remove style_utils and svg_path modules\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* change test_text to use monospace font\r\n\r\n* restore geometry.polygram\r\n\r\n* added get_mobject_type_class auxiliary method; changed polyline implementation to ad-hoc approach\r\n\r\n* restore test_text to previous version\r\n\r\n* skip Use tags as svgelements already populates 
them\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def generate_config_style_dict(self) -> dict[str, str]:\n \n keys_converting_dict = {\n \"fill\": (\"color\", \"fill_color\"),\n \"fill-opacity\": (\"opacity\", \"fill_opacity\"),\n \"stroke\": (\"color\", \"stroke_color\"),\n \"stroke-opacity\": (\"opacity\", \"stroke_opacity\"),\n \"stroke-width\": (\"stroke_width\",),\n }\n svg_default_dict = self.svg_default\n result = {}\n for svg_key, style_keys in keys_converting_dict.items():\n for style_key in style_keys:\n if svg_default_dict[style_key] is None:\n continue\n result[svg_key] = str(svg_default_dict[style_key])\n return result\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 212, "n_words": 48, "vocab_size": 40, "complexity": 4, "nloc": 17, "token_counts": 104, "n_ast_nodes": 180, "n_identifiers": 12, "random_cut": "def generate_config_style_dict(self) -> dict[str, str]:\n \n keys_converting_dict = {\n \"fill\": (\"color\", \"fill_color\"),\n \"fill-opacity\": (\"opacity\", \"fill_opacity\"),\n \"stroke\": (\"color\", \"stroke_color\"),\n \"stroke-opacity\": (\"opacity\", \"stroke_opacity\"),\n \"stroke-width\": (\"stroke_width\",),\n }\n svg_default_dict = self.svg_default\n result = {}\n for", "d_id": 46283, "documentation": { "docstring": "Generate a dictionary holding the default style information.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 266709, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/bootstrap.py", "file_name": "bootstrap.py", "fun_name": "get_variables", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]]\n \n return dict(\n bootstrap_type=self.bootstrap_type,\n controller='yes' if self.controller else '',\n python_versions=self.python_versions,\n ssh_key_type=self.ssh_key.KEY_TYPE,\n ssh_private_key=self.ssh_key.key_contents,\n ssh_public_key=self.ssh_key.pub_contents,\n )\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 110, "n_words": 22, "vocab_size": 22, "complexity": 2, "nloc": 9, "token_counts": 56, "n_ast_nodes": 87, "n_identifiers": 13, "random_cut": "def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]]\n \n return dict(\n ", "d_id": 78524, "documentation": { "docstring": "The variables to template in the bootstrapping script.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 217321, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/enum.py", "file_name": "enum.py", "fun_name": "_create_pseudo_member_", "commit_message": "add python 3.10.4 for windows", "code": "def _create_pseudo_member_(cls, value):\n \n pseudo_member = cls._value2member_map_.get(value, None)\n if pseudo_member is None:\n # verify all bits are accounted for\n _, extra_flags = _decompose(cls, value)\n if extra_flags:\n raise ValueError(\"%r is not a valid %s\" % (value, cls.__qualname__))\n # construct a singleton enum pseudo-member\n pseudo_member = object.__new__(cls)\n pseudo_member._name_ = None\n pseudo_member._value_ = value\n # use setdefault in case another thread already created a composite\n # with this value\n pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member)\n return pseudo_member\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 224, "n_words": 71, "vocab_size": 54, "complexity": 3, "nloc": 11, "token_counts": 83, "n_ast_nodes": 136, "n_identifiers": 16, "random_cut": "def _create_pseudo_member_(cls, value):\n \n pseudo_member = cls._value2member_map_.get(value, None)\n if pseudo_member is None:\n ", "d_id": 54718, "documentation": { "docstring": "\n Create a composite member iff value contains only members.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 259201, "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", "repo": "scikit-learn", "path": "sklearn/preprocessing/_encoders.py", "file_name": "_encoders.py", "fun_name": "_identify_infrequent", "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to 
description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def _identify_infrequent(self, category_count, n_samples, col_idx):\n \n if isinstance(self.min_frequency, numbers.Integral):\n infrequent_mask = category_count < self.min_frequency\n elif isinstance(self.min_frequency, numbers.Real):\n min_frequency_abs = n_samples * self.min_frequency\n infrequent_mask = category_count < min_frequency_abs\n else:\n infrequent_mask = np.zeros(category_count.shape[0], 
dtype=bool)\n\n n_current_features = category_count.size - infrequent_mask.sum() + 1\n if self.max_categories is not None and self.max_categories < n_current_features:\n # stable sort to preserve original count order\n smallest_levels = np.argsort(category_count, kind=\"mergesort\")[\n : -self.max_categories + 1\n ]\n infrequent_mask[smallest_levels] = True\n\n output = np.flatnonzero(infrequent_mask)\n return output if output.size > 0 else None\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 237, "n_words": 78, "vocab_size": 56, "complexity": 6, "nloc": 16, "token_counts": 146, "n_ast_nodes": 227, "n_identifiers": 26, "random_cut": "def _identify_infrequent(self, category_count, n_samples, col_idx):\n \n if isinstance(self.min_frequency, numbers.Integral):\n infrequent_mask = category_count < self.min_frequency\n elif isinstance(self.min_frequency, numbers.Real):\n min_frequency_abs = n_samples * self.min_frequency\n infrequent_mask = category_count < min_frequency_abs\n else:\n infrequent_mask = np.zeros(category_count.shape[0], dtype=bool)\n\n n_current_features = category_count.size - infrequent_mask.sum() + 1\n if self.max_categories is not None and self.max_categories < n_current_features:\n # stable sort to preserve original co", "d_id": 75639, "documentation": { "docstring": "Compute the infrequent indices.\n\n Parameters\n ----------\n category_count : ndarray of shape (n_cardinality,)\n Category counts.\n\n n_samples : int\n Number of samples.\n\n col_idx : int\n Index of the current category. Only used for the error message.\n\n Returns\n -------\n output : ndarray of shape (n_infrequent_categories,) or None\n If there are infrequent categories, indices of infrequent\n categories. 
Otherwise None.\n ", "n_words": 55, "vocab_size": 41, "n_whitespaces": 173, "language": "en" } }, { "id": 44892, "commit_id": "1b568d73e1dfb838a3a0446e3a6063b9f27f04b8", "repo": "airflow", "path": "airflow/providers/google/cloud/hooks/kubernetes_engine.py", "file_name": "kubernetes_engine.py", "fun_name": "get_conn", "commit_message": "Extract ClientInfo to module level (#21554)", "code": "def get_conn(self) -> container_v1.ClusterManagerClient:\n \n if self._client is None:\n credentials = self._get_credentials()\n self._client = container_v1.ClusterManagerClient(credentials=credentials, client_info=CLIENT_INFO)\n return self._client\n\n # To preserve backward compatibility\n # TODO: remove one day", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 10, "token_counts": 44, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def get_conn(self) -> container_v1.ClusterManagerClient:\n \n if self._client is None:\n credentials = self._get_credentials()\n self._client = container_v1.ClusterManagerClient(credentials=credent", "d_id": 8405, "documentation": { "docstring": "\n Returns ClusterManagerCLinet object.\n\n :rtype: google.cloud.container_v1.ClusterManagerClient\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 27, "language": "en" } }, { "id": 220414, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/coroutines.py", "file_name": "coroutines.py", "fun_name": "iscoroutinefunction", "commit_message": "add python 3.10.4 for windows", "code": "def iscoroutinefunction(func):\n \n return (inspect.iscoroutinefunction(func) or\n getattr(func, '_is_coroutine', None) is _is_coroutine)\n\n\n# Prioritize native coroutine check to speed-up\n# asyncio.iscoroutine.\n_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,\n collections.abc.Coroutine, CoroWrapper)\n_iscoroutine_typecache = set()\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 28, "vocab_size": 26, "complexity": 2, "nloc": 3, "token_counts": 26, "n_ast_nodes": 81, "n_identifiers": 15, "random_cut": "def iscoroutinefunction(func):\n \n return (inspect.isco", "d_id": 55985, "documentation": { "docstring": "Return True if func is a decorated coroutine function.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 316852, "commit_id": "16900dcef15bdb9016feabd12bfec94d61ed4df6", "repo": "core", "path": "homeassistant/helpers/storage.py", "file_name": "storage.py", "fun_name": "async_load", "commit_message": "Make Store a generic class (#74617)", "code": "async def async_load(self) -> _T | None:\n \n if self._load_task is None:\n self._load_task = self.hass.async_create_task(self._async_load())\n\n return await self._load_task\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 49, "n_words": 17, "vocab_size": 14, "complexity": 2, "nloc": 12, "token_counts": 38, "n_ast_nodes": 65, "n_identifiers": 7, "random_cut": "async def async_load(self) -> _T | None:\n \n if self._load_task is None:\n self._load_task = self.hass.async_create_task(self._async_load())\n\n return await self._load_task\n", "d_id": 115428, "documentation": { "docstring": "Load data.\n\n If the expected version and minor 
version do not match the given versions, the\n migrate function will be invoked with migrate_func(version, minor_version, config).\n\n Will ensure that when a call comes in while another one is in progress,\n the second call will wait and return the result of the first call.\n ", "n_words": 52, "vocab_size": 42, "n_whitespaces": 87, "language": "en" } }, { "id": 310210, "commit_id": "03bf2cdd56eb9a0a9ed56d7afb700d5f7d9cf75e", "repo": "core", "path": "homeassistant/components/vera/lock.py", "file_name": "lock.py", "fun_name": "extra_state_attributes", "commit_message": "Remove vera from mypy ignore list (#64474)\n\n* Remove vera from mypy ignore list\r\n\r\n* Fix pylint", "code": "def extra_state_attributes(self) -> dict[str, Any] | None:\n \n data = super().extra_state_attributes or {}\n\n last_user = self.vera_device.get_last_user_alert()\n if last_user is not None:\n data[ATTR_LAST_USER_NAME] = last_user[1]\n\n data[ATTR_LOW_BATTERY] = self.vera_device.get_low_battery_alert()\n return data\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 81, "n_words": 28, "vocab_size": 22, "complexity": 3, "nloc": 13, "token_counts": 63, "n_ast_nodes": 101, "n_identifiers": 13, "random_cut": "def extra_state_attributes(self) -> dict[str, Any] | None:\n \n data = super().extra", "d_id": 108897, "documentation": { "docstring": "Who unlocked the lock and did a low battery alert fire.\n\n Reports on the previous poll cycle.\n changed_by_name is a string like 'Bob'.\n low_battery is 1 if an alert fired, 0 otherwise.\n ", "n_words": 32, "vocab_size": 28, "n_whitespaces": 60, "language": "en" } }, { "id": 208620, "commit_id": "e306c9d3f707de42b47a1e7c4c8034d6862fba5f", "repo": "ipython", "path": "IPython/core/interactiveshell.py", "file_name": "interactiveshell.py", "fun_name": "magic", "commit_message": "Update old deprecation", "code": "def magic(self, arg_s):\n \n warnings.warn(\n \"`magic(...)` is deprecated since IPython 0.13 (warning added in \"\n \"8.1), use run_line_magic(magic_name, parameter_s).\",\n DeprecationWarning,\n stacklevel=2,\n )\n # TODO: should we issue a loud deprecation warning here?\n magic_name, _, magic_arg_s = arg_s.partition(' ')\n magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)\n return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)\n\n #-------------------------------------------------------------------------\n # Things related to macros\n #-------------------------------------------------------------------------\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 153, "n_words": 51, "vocab_size": 48, "complexity": 1, "nloc": 10, "token_counts": 57, "n_ast_nodes": 97, "n_identifiers": 16, "random_cut": "def magic(self, arg_s):\n \n warnings.warn(\n \"`magic(...)` is deprecated since IPython 0.13 (warning added in \"\n \"8.1), use run_line_magic(magic_name, parameter_s).\",\n DeprecationWarning,\n stacklevel=2,\n )\n # TODO: should we issue a loud deprecation warning here?\n magic_name, _, magic_arg_s = arg_s.partition(' ')\n magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)\n return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)\n\n #-------------------------------------------------------------------------\n # Th", "d_id": 52421, "documentation": { "docstring": "\n DEPRECATED\n\n Deprecated since IPython 0.13 (warning added in\n 8.1), use 
run_line_magic(magic_name, parameter_s).\n\n Call a magic function by name.\n\n Input: a string containing the name of the magic function to call and\n any additional arguments to be passed to the magic.\n\n magic('name -opt foo bar') is equivalent to typing at the ipython\n prompt:\n\n In[1]: %name -opt foo bar\n\n To call a magic without arguments, simply use magic('name').\n\n This provides a proper Python function to call IPython's magics in any\n valid Python code you can type at the interpreter, including loops and\n compound statements.\n ", "n_words": 92, "vocab_size": 67, "n_whitespaces": 191, "language": "en" } }, { "id": 60572, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/commands/__init__.py", "file_name": "__init__.py", "fun_name": "create_command", "commit_message": "upd; format", "code": "def create_command(name, **kwargs):\n # type: (str, **Any) -> Command\n \n module_path, class_name, summary = commands_dict[name]\n module = importlib.import_module(module_path)\n command_class = getattr(module, class_name)\n command = command_class(name=name, summary=summary, **kwargs)\n\n return command\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 49, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 6, "token_counts": 52, "n_ast_nodes": 82, "n_identifiers": 13, "random_cut": "def create_command(name, **kwargs):\n # type: (str, **Any) -> Command\n \n module_path, class_name, summary = commands_dict[name]\n module = importlib.import_module(module_path)\n command_class = getattr(module, class_name)\n command = command_cl", "d_id": 12211, "documentation": { "docstring": "\n Create an instance of the Command class with the given name.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 18, "language": "en" } }, { "id": 130580, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/data/dataset.py", "file_name": "dataset.py", "fun_name": "to_pandas_refs", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def to_pandas_refs(self) -> List[ObjectRef[\"pandas.DataFrame\"]]:\n \n\n block_to_df = cached_remote_fn(_block_to_df)\n return [block_to_df.remote(block) for block in self._blocks.get_blocks()]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 15, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 11, "random_cut": "def to_pandas_refs(self) -> List[ObjectRef[\"pandas.DataFrame\"]]:\n \n\n block_to_df = cached_remote_fn(_block_to_df)\n return [block_to_df.remote(b", "d_id": 29316, "documentation": { "docstring": "Convert this dataset into a distributed set of Pandas dataframes.\n\n This is only supported for datasets convertible to Arrow records.\n This function induces a copy of the data. 
For zero-copy access to the\n underlying data, consider using ``.to_arrow()`` or\n ``.get_internal_block_refs()``.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A list of remote Pandas dataframes created from this dataset.\n ", "n_words": 57, "vocab_size": 49, "n_whitespaces": 117, "language": "en" } }, { "id": 160008, "commit_id": "1cacb2ffb1113167a4995f4f4c183f9a8356c2f0", "repo": "numpy", "path": "numpy/lib/shape_base.py", "file_name": "shape_base.py", "fun_name": "hsplit", "commit_message": "DOC: Include special case in `hsplit` doc (#20974)", "code": "def hsplit(ary, indices_or_sections):\n \n if _nx.ndim(ary) == 0:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n if ary.ndim > 1:\n return split(ary, indices_or_sections, 1)\n else:\n return split(ary, indices_or_sections, 0)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "@array_function_dispatch(_hvdsplit_dispatcher)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 64, "n_words": 32, "vocab_size": 28, "complexity": 3, "nloc": 7, "token_counts": 50, "n_ast_nodes": 89, "n_identifiers": 9, "random_cut": "def hsplit(ary, indices_or_sections):\n \n if _nx.ndim(ary) == 0:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n if ary.ndim > 1:\n return split(ary, indices_or_sections, 1)\n else:\n retur", "d_id": 38466, "documentation": { "docstring": "\n Split an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the `split` documentation. `hsplit` is equivalent\n to `split` with ``axis=1``, the array is always split along the second\n axis except for 1-D arrays, where it is split at ``axis=0``.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float64)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[0., 1.]],\n [[4., 5.]]]),\n array([[[2., 3.]],\n [[6., 7.]]])]\n\n With a 1-D array, the split is along axis 0.\n\n >>> x = np.array([0, 1, 2, 3, 4, 5])\n >>> np.hsplit(x, 2)\n [array([0, 1, 2]), array([3, 4, 5])]\n\n ", "n_words": 203, "vocab_size": 116, "n_whitespaces": 562, "language": "en" } }, { "id": 64884, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/loyalty_program/test_loyalty_program.py", "file_name": "test_loyalty_program.py", "fun_name": "get_points_earned", "commit_message": "style: format code with black", "code": "def get_points_earned(self):\n\tdef get_returned_amount():\n\t\treturned_amount = frappe.db.sql(\n\t\t\t,\n\t\t\tself.name,\n\t\t)\n\t\treturn abs(flt(returned_amount[0][0])) if returned_amount else 0\n\n\tlp_details = 
get_loyalty_program_details_with_points(\n\t\tself.customer,\n\t\tcompany=self.company,\n\t\tloyalty_program=self.loyalty_program,\n\t\texpiry_date=self.posting_date,\n\t\tinclude_expired_entry=True,\n\t)\n\tif (\n\t\tlp_details\n\t\tand getdate(lp_details.from_date) <= getdate(self.posting_date)\n\t\tand (not lp_details.to_date or getdate(lp_details.to_date) >= getdate(self.posting_date))\n\t):\n\t\treturned_amount = get_returned_amount()\n\t\teligible_amount = flt(self.grand_total) - cint(self.loyalty_amount) - returned_amount\n\t\tpoints_earned = cint(eligible_amount / lp_details.collection_factor)\n\n\treturn points_earned or 0\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 36, "n_words": 59, "vocab_size": 42, "complexity": 6, "nloc": 18, "token_counts": 114, "n_ast_nodes": 231, "n_identifiers": 27, "random_cut": "def get_points_earned(self):\n\tdef get_returned_amount():\n\t\treturned_amount = frappe.db.sql(\n\t\t\t,\n\t\t\tself.name,\n\t\t)\n\t\treturn abs(flt(returned_amount[0][0])) if returned_amount else 0\n\n\tlp_details = get_loyalty_program_details_with_points(\n\t\tself.customer,\n\t\tcompany=self.company,\n\t\tloyalty_program=self.loyalty_program,\n\t\texpiry_date=self.posting_date,\n\t\tinclude_expired_entry=True,\n\t)\n\tif (\n\t\tlp_details\n\t\tand getdate(lp_details.", "d_id": 13748, "documentation": { "docstring": "\n\t\t\tselect sum(grand_total)\n\t\t\tfrom `tabSales Invoice`\n\t\t\twhere docstatus=1 and is_return=1 and ifnull(return_against, '')=%s\n\t\t", "n_words": 12, "vocab_size": 11, "n_whitespaces": 9, "language": "en" } }, { "id": 100875, "commit_id": "04337e0c5efd442c1ce3e2da193dd8749f1e30d8", "repo": "faceswap", "path": "lib/model/losses_plaid.py", "file_name": "losses_plaid.py", "fun_name": "_get_kernel", "commit_message": "SSIM Updates\n - Standardize DSSIM Function\n - Implement MSSIM function for AMD", "code": "def _get_kernel(self) -> plaidml.tile.Value:\n \n coords = np.arange(self._filter_size, dtype=\"float32\")\n coords -= (self._filter_size - 1) / 2.\n\n kernel = np.square(coords)\n kernel *= -0.5 / np.square(self._filter_sigma)\n kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1))\n kernel = K.constant(np.reshape(kernel, (1, -1)))\n kernel = K.softmax(kernel)\n kernel = K.reshape(kernel, (self._filter_size, self._filter_size, 1, 1))\n return kernel\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 119, "n_words": 49, "vocab_size": 33, "complexity": 1, "nloc": 17, "token_counts": 143, "n_ast_nodes": 214, "n_identifiers": 17, "random_cut": "def _get_kernel(self) -> plaidml.tile.Value:\n \n coords = np.arange(self._filter_size, dtype=\"float32\")\n coords -= (self._filter_size - 1) / 2.\n\n kernel = np.square(coords)\n kernel *= -0.5 / np.square(self._filter_sigma)\n kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1))\n kernel = K.constant(np.reshape(kernel, (1, -1)))\n kernel = K.softmax(kernel)\n kernel = K.reshape(kernel, (", "d_id": 20326, "documentation": { "docstring": " Obtain the base kernel for performing depthwise convolution.\n\n Returns\n -------\n :class:`plaidml.tile.Value`\n The gaussian kernel based on selected size and sigma\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 60, "language": "en" } }, { "id": 70351, "commit_id": "4a848bfb4e3ec1a84a3d36fda577c1ed784de498", "repo": "wagtail", "path": 
"wagtail/core/tests/test_blocks.py", "file_name": "test_blocks.py", "fun_name": "test_default_default", "commit_message": "Implement a ListValue type for ListBlocks", "code": "def test_default_default(self):\n \n block = blocks.ListBlock(blocks.CharBlock(default='chocolate'))\n\n self.assertEqual(list(block.get_default()), ['chocolate'])\n\n block.set_name('test_shoppinglistblock')\n js_args = ListBlockAdapter().js_args(block)\n self.assertEqual(js_args[2], 'chocolate')\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 55, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 65, "n_ast_nodes": 115, "n_identifiers": 13, "random_cut": "def test_default_default(self):\n \n block = blocks.ListBlock(blocks.CharBlock(default='chocolate'))\n\n self.assertEqual(list(block.get_default()), ['chocolate'])\n\n block.set_name('test_shoppinglistblock')\n ", "d_id": 15499, "documentation": { "docstring": "\n if no explicit 'default' is set on the ListBlock, it should fall back on\n a single instance of the child block in its default state.\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 47, "language": "en" } }, { "id": 165521, "commit_id": "a72fa1b400234d3a05342f17c3c0b1e3993a6bd8", "repo": "pandas", "path": "pandas/io/formats/xml.py", "file_name": "xml.py", "fun_name": "convert_empty_str_key", "commit_message": "CLN/DOC: typos (#46328)\n\n* fix typos\r\n\r\n* fix typo\r\n\r\n* fix typo\r\n\r\n* fix typo", "code": "def convert_empty_str_key(self) -> None:\n \n\n if self.namespaces and \"\" in self.namespaces.keys():\n self.namespaces[None] = self.namespaces.pop(\"\", \"default\")\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 14, "vocab_size": 14, "complexity": 3, "nloc": 9, "token_counts": 40, "n_ast_nodes": 71, "n_identifiers": 5, "random_cut": "def convert_empty_str_key(self) -> None:\n \n\n ", "d_id": 39674, "documentation": { "docstring": "\n Replace zero-length string in `namespaces`.\n\n This method will replace '' with None to align to `lxml`\n requirement that empty string prefixes are not allowed.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 53, "language": "en" } }, { "id": 75353, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/tests.py", "file_name": "tests.py", "fun_name": "test_get_with_custom_key_using_default_key", "commit_message": "Reformat with black", "code": "def test_get_with_custom_key_using_default_key(self):\n \n # Generate signature\n signature = generate_signature(self.image.id, \"fill-800x600\")\n\n # Get the image\n response = self.client.get(\n reverse(\n \"wagtailimages_serve_custom_key\",\n args=(signature, self.image.id, \"fill-800x600\"),\n )\n + \"test.png\"\n )\n\n # Check response\n self.assertEqual(response.status_code, 403)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 149, "n_words": 30, "vocab_size": 24, "complexity": 1, "nloc": 10, "token_counts": 58, "n_ast_nodes": 100, "n_identifiers": 13, "random_cut": "def test_get_with_custom_key_using_default_key(self):\n \n # Generate signature\n signature = generate_signature(self.image.id, \"fill-800x600\")\n\n # Get the image\n response = self.client.get(\n reverse(\n \"wagtailimages_serve_custom_key\",\n 
args=(signature, self.image.id, \"fill-800x600\"),\n ", "d_id": 16399, "documentation": { "docstring": "\n Test that that the key can be changed on the view\n\n This tests that the default key no longer works when the key is changed on the view\n ", "n_words": 28, "vocab_size": 17, "n_whitespaces": 50, "language": "en" } }, { "id": 54348, "commit_id": "3e657b429b967fa532d2f97ed7e6809112db3107", "repo": "prefect", "path": "tests/test_engine.py", "file_name": "test_engine.py", "fun_name": "test_timeouts_do_not_hide_crashes", "commit_message": "Fix engine tests; move function to other crash handleres", "code": "async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client):\n \n started = anyio.Event()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 24, "token_counts": 121, "n_ast_nodes": 30, "n_identifiers": 7, "random_cut": "async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client):\n \n started = anyio.Event()\n", "d_id": 11033, "documentation": { "docstring": "\n Since timeouts capture anyio cancellations, we want to ensure that something\n still ends up in a 'Crashed' state if it is cancelled independently from our\n timeout cancellation.\n ", "n_words": 27, "vocab_size": 27, "n_whitespaces": 56, "language": "en" } }, { "id": 111556, "commit_id": "aea16719be04d4d6ab889cd20fe0e323b2c7ffee", "repo": "spaCy", "path": "spacy/tests/pipeline/test_pipe_methods.py", "file_name": "test_pipe_methods.py", "fun_name": "test_enable_disable_conflict_with_config", "commit_message": "Simplify and clarify enable/disable behavior of spacy.load() (#11459)\n\n* Change enable/disable behavior so that arguments take precedence over config options. Extend error message on conflict. Add warning message in case of overwriting config option with arguments.\r\n\r\n* Fix tests in test_serialize_pipeline.py to reflect changes to handling of enable/disable.\r\n\r\n* Fix type issue.\r\n\r\n* Move comment.\r\n\r\n* Move comment.\r\n\r\n* Issue UserWarning instead of printing wasabi message. Adjust test.\r\n\r\n* Added pytest.warns(UserWarning) for expected warning to fix tests.\r\n\r\n* Update warning message.\r\n\r\n* Move type handling out of fetch_pipes_status().\r\n\r\n* Add global variable for default value. 
Use id() to determine whether used values are default value.\r\n\r\n* Fix default value for disable.\r\n\r\n* Rename DEFAULT_PIPE_STATUS to _DEFAULT_EMPTY_PIPES.", "code": "def test_enable_disable_conflict_with_config():\n \n nlp = English()\n nlp.add_pipe(\"tagger\")\n nlp.add_pipe(\"senter\")\n nlp.add_pipe(\"sentencizer\")\n\n with make_tempdir() as tmp_dir:\n nlp.to_disk(tmp_dir)\n # Expected to fail, as config and arguments conflict.\n with pytest.raises(ValueError):\n spacy.load(\n tmp_dir, enable=[\"tagger\"], config={\"nlp\": {\"disabled\": [\"senter\"]}}\n )\n # Expected to succeed without warning due to the lack of a conflicting config option.\n spacy.load(tmp_dir, enable=[\"tagger\"])\n # Expected to succeed with a warning, as disable=[] should override the config setting.\n with pytest.warns(UserWarning):\n spacy.load(\n tmp_dir,\n enable=[\"tagger\"],\n disable=[],\n config={\"nlp\": {\"disabled\": [\"senter\"]}},\n )\n\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 258, "n_words": 72, "vocab_size": 49, "complexity": 1, "nloc": 19, "token_counts": 127, "n_ast_nodes": 235, "n_identifiers": 17, "random_cut": "def test_enable_disable_conflict_with_config():\n \n nlp = English()\n nlp.add_pipe(\"tagger\")\n nlp.add_pipe(\"senter\")\n nlp.add_pipe(\"sentencizer\")\n\n with make_tempdir() as tmp_dir:\n nlp.to_disk(tmp_dir)\n # Expected to fail, as config and arguments conflict.\n with ", "d_id": 24437, "documentation": { "docstring": "Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 126689, "commit_id": "326b5bd1acc6d3d00ab0546e4ae45da6bed501f7", "repo": "ray", "path": "dashboard/modules/job/tests/test_job_manager.py", "file_name": "test_job_manager.py", "fun_name": "test_logs_streaming", "commit_message": "Convert job_manager to be async (#27123)\n\nUpdates jobs api\r\nUpdates snapshot api\r\nUpdates state api\r\n\r\nIncreases jobs api version to 2\r\n\r\nSigned-off-by: Alan Guo aguo@anyscale.com\r\n\r\nWhy are these changes needed?\r\nfollow-up for #25902 (comment)", "code": "async def test_logs_streaming(job_manager):\n \n\n stream_logs_script = \n\n stream_logs_cmd = f'python -c \"{stream_logs_script}\"'\n\n job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd)\n await async_wait_for_condition(\n lambda: \"STREAMED\" in job_manager.get_job_logs(job_id)\n )\n\n job_manager.stop_job(job_id)\n\n\n@pytest.mark.asyncio", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.asyncio", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 51, "n_words": 23, "vocab_size": 20, "complexity": 1, "nloc": 13, "token_counts": 44, "n_ast_nodes": 95, "n_identifiers": 13, "random_cut": "async def test_logs_streaming(job_manager):\n \n\n stream_logs_script = \n\n stream_logs_cmd = f'python -c \"{stream_logs_script}\"'\n\n job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd)\n await async_wait_for_condition(\n lambda: \"STREAMED\" i", "d_id": 28233, "documentation": { "docstring": "Test that logs are streamed during the job, not just at the end.\nimport time\nprint('STREAMED')\nwhile True:\n time.sleep(1)\n", "n_words": 19, "vocab_size": 18, "n_whitespaces": 18, "language": "en" } }, { "id": 272209, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": 
"keras/integration_test/gradient_checkpoint_test.py", "file_name": "gradient_checkpoint_test.py", "fun_name": "_train_no_recompute", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _train_no_recompute(n_steps):\n \n img_dim, n_channels, batch_size = 256, 1, 4\n x, y = _get_dummy_data(img_dim, n_channels, batch_size)\n model = _get_big_cnn_model(\n img_dim, n_channels, num_partitions=3, blocks_per_partition=2\n )\n optimizer = optimizers.SGD()\n losses = []\n tr_vars = model.trainable_variables\n for _ in range(n_steps):\n with tf.GradientTape() as tape:\n logits = model(x)\n loss = _compute_loss(logits, y)\n losses.append(loss)\n grads = tape.gradient(loss, tr_vars) # tr_vars\n optimizer.apply_gradients(zip(grads, tr_vars))\n del grads\n return losses\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 159, "n_words": 60, "vocab_size": 46, "complexity": 2, "nloc": 18, "token_counts": 123, "n_ast_nodes": 195, "n_identifiers": 31, "random_cut": "def _train_no_recompute(n_steps):\n \n img_dim, n_channels, batch_size = 256, 1, 4\n x, y = _get_dummy_data(img_dim, n_channels, batch_size)\n model = _get_big_cnn_model(\n img_dim, n_channels, num_partitions=3, blocks_per_partition=2\n )\n optimizer = optimizers.SGD()\n losses = []\n tr_vars = model.traina", "d_id": 80978, "documentation": { "docstring": "Trains a single large model without gradient checkpointing.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 270367, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/distributed_training_utils_v1.py", "file_name": "distributed_training_utils_v1.py", "fun_name": "_make_replica_execution_function", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _make_replica_execution_function(model, mode):\n \n if mode == ModeKeys.TRAIN:\n func = model.train_on_batch\n elif mode == ModeKeys.TEST:\n func = model.test_on_batch\n else:\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 18, "vocab_size": 14, "complexity": 4, "nloc": 11, "token_counts": 60, "n_ast_nodes": 57, "n_identifiers": 9, "random_cut": "def _make_replica_execution_function(model, mode):\n \n ", "d_id": 80457, "documentation": { "docstring": "A single step of the distributed execution on a replica.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 221252, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/calendar.py", "file_name": "calendar.py", "fun_name": "monthdays2calendar", "commit_message": "add python 3.10.4 for windows", "code": "def monthdays2calendar(self, year, month):\n \n days = list(self.itermonthdays2(year, month))\n return [ days[i:i+7] for i in range(0, len(days), 7) ]\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 3, "token_counts": 48, "n_ast_nodes": 72, "n_identifiers": 10, "random_cut": "def monthdays2calendar(self, year, month):\n \n days = list(self.itermonthdays2(year, month))\n r", "d_id": 56295, "documentation": { "docstring": "\n Return a matrix representing a month's calendar.\n 
Each row represents a week; week entries are\n (day number, weekday number) tuples. Day numbers outside this month\n are zero.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 63, "language": "en" } }, { "id": 126397, "commit_id": "410fe1b5ec9e798d6e7ffbb5844e258d08e323b3", "repo": "ray", "path": "python/ray/serve/drivers.py", "file_name": "drivers.py", "fun_name": "predict_with_route", "commit_message": "[Serve] Support Multiple DAG Entrypoints in DAGDriver (#26573)", "code": "async def predict_with_route(self, route_path, *args, **kwargs):\n \n if route_path not in self.dags:\n raise RayServeException(f\"{route_path} does not exist in dags routes\")\n return await self.dags[route_path].remote(*args, **kwargs)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 55, "n_words": 23, "vocab_size": 21, "complexity": 2, "nloc": 4, "token_counts": 45, "n_ast_nodes": 76, "n_identifiers": 8, "random_cut": "async def predict_with_route(self, route_path, *args, **kwargs):\n \n if route_path not in self.dags:\n raise RayServeExc", "d_id": 28163, "documentation": { "docstring": "Perform inference directly without HTTP for multi dags.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 128796, "commit_id": "f1882f90cf2d91f5d802b7dffd41db5e306d6e6c", "repo": "ray", "path": "python/ray/tune/experiment/trial.py", "file_name": "trial.py", "fun_name": "should_recover", "commit_message": "[tune] Add retry logic for restoring trials. (#29086)\n\nThis is an advanced setting. Consider the following scenario: Due to scheduling glitches, sometimes a restoring\r\ntrial may be scheduled onto a dying node. By setting this env var to a positive number, the trial can be restored\r\nseveral times and hopefully one of the times it will not be put on a dying node. This retry behavior won't increment\r\nthe per trial failure number, which is compared against max_failures.\r\n\r\nSigned-off-by: xwjiang2010 \r\nSigned-off-by: xwjiang2010 <87673679+xwjiang2010@users.noreply.github.com>", "code": "def should_recover(self):\n \n return (\n self.num_failures < self.max_failures\n or self.max_failures < 0\n or (\n self.num_failures == self.max_failures\n and self.num_restore_failures\n < int(os.environ.get(\"TUNE_RESTORE_RETRY_NUM\", 0))\n )\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 133, "n_words": 23, "vocab_size": 15, "complexity": 4, "nloc": 10, "token_counts": 50, "n_ast_nodes": 81, "n_identifiers": 9, "random_cut": "def should_recover(self):\n \n return (\n self.num_failures < self.max_failures\n or self.max_failures < 0\n or (\n self.num_failures == self.max_failures\n and self.num_restore_failures\n < int(os.environ.get(\"TUNE_RESTORE_RETRY_NUM\", 0))\n )\n ", "d_id": 28813, "documentation": { "docstring": "Returns whether the trial qualifies for retrying.\n\n This is if the trial has not failed more than max_failures. 
Note this\n may return true even when there is no checkpoint, either because\n `self.checkpoint_freq` is `0` or because the trial failed before\n a checkpoint has been made.\n ", "n_words": 45, "vocab_size": 36, "n_whitespaces": 80, "language": "en" } }, { "id": 74216, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_locale_model.py", "file_name": "test_locale_model.py", "fun_name": "test_change_root_page_locale_on_locale_deletion", "commit_message": "Reformat with black", "code": "def test_change_root_page_locale_on_locale_deletion(self):\n \n # change 'real' pages first\n Page.objects.filter(depth__gt=1).update(\n locale=Locale.objects.get(language_code=\"fr\")\n )\n self.assertEqual(Page.get_first_root_node().locale.language_code, \"en\")\n Locale.objects.get(language_code=\"en\").delete()\n self.assertEqual(Page.get_first_root_node().locale.language_code, \"fr\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 75, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 78, "n_ast_nodes": 136, "n_identifiers": 14, "random_cut": "def test_change_root_page_locale_on_locale_deletion(self):\n \n # change 'real' pages first\n Page.objects.filter(depth__gt=1).update(\n locale=Locale.objects.get(language_code=\"fr\")\n )\n self.assertEqual(Page.get_first_root_node().locale.language_code, \"en\")\n Locale.objects.get(language_code=\"en\").delete()\n self.assertEqual(Page.get_first_root_node().locale.language_code, \"fr\")\n", "d_id": 16239, "documentation": { "docstring": "\n On deleting the locale used for the root page (but no 'real' pages), the\n root page should be reassigned to a new locale (the default one, if possible)\n ", "n_words": 28, "vocab_size": 23, "n_whitespaces": 50, "language": "en" } }, { "id": 21496, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", "file_name": "tarfile.py", "fun_name": "_proc_pax", "commit_message": "Vendor in pip 22.1.2", "code": "def _proc_pax(self, tarfile):\n \n # Read the header information.\n buf = tarfile.fileobj.read(self._block(self.size))\n\n # A pax header stores supplemental information for either\n # the following file (extended) or all following files\n # (global).\n if self.type == XGLTYPE:\n pax_headers = tarfile.pax_headers\n else:\n pax_headers = tarfile.pax_headers.copy()\n\n # Check if the pax header contains a hdrcharset field. This tells us\n # the encoding of the path, linkpath, uname and gname fields. Normally,\n # these fields are UTF-8 encoded but since POSIX.1-2008 tar\n # implementations are allowed to store them as raw binary strings if\n # the translation to UTF-8 fails.\n match = re.search(br\"\\d+ hdrcharset=([^\\n]+)\\n\", buf)\n if match is not None:\n pax_headers[\"hdrcharset\"] = match.group(1).decode(\"utf8\")\n\n # For the time being, we don't care about anything other than \"BINARY\".\n # The only other value that is currently allowed by the standard is\n # \"ISO-IR 10646 2000 UTF-8\" in other words UTF-8.\n hdrcharset = pax_headers.get(\"hdrcharset\")\n if hdrcharset == \"BINARY\":\n encoding = tarfile.encoding\n else:\n encoding = \"utf8\"\n\n # Parse pax header information. A record looks like that:\n # \"%d %s=%s\\n\" % (length, keyword, value). length is the size\n # of the complete record including the length field itself and\n # the newline. 
keyword and value are both UTF-8 encoded strings.\n regex = re.compile(br\"(\\d+) ([^=]+)=\")\n pos = 0\n while True:\n match = regex.match(buf, pos)\n if not match:\n break\n\n length, keyword = match.groups()\n length = int(length)\n value = buf[match.end(2) + 1:match.start(1) + length - 1]\n\n # Normally, we could just use \"utf8\" as the encoding and \"strict\"\n # as the error handler, but we better not take the risk. For\n # example, GNU tar <= 1.23 is known to store filenames it cannot\n # translate to UTF-8 as raw strings (unfortunately without a\n # hdrcharset=BINARY header).\n # We first try the strict standard encoding, and if that fails we\n # fall back on the user's encoding and error handler.\n keyword = self._decode_pax_field(keyword, \"utf8\", \"utf8\",\n tarfile.errors)\n if keyword in PAX_NAME_FIELDS:\n value = self._decode_pax_field(value, encoding, tarfile.encoding,\n tarfile.errors)\n else:\n value = self._decode_pax_field(value, \"utf8\", \"utf8\",\n tarfile.errors)\n\n pax_headers[keyword] = value\n pos += length\n\n # Fetch the next header.\n try:\n next = self.fromtarfile(tarfile)\n except HeaderError:\n raise SubsequentHeaderError(\"missing or bad subsequent header\")\n\n # Process GNU sparse information.\n if \"GNU.sparse.map\" in pax_headers:\n # GNU extended sparse format version 0.1.\n self._proc_gnusparse_01(next, pax_headers)\n\n elif \"GNU.sparse.size\" in pax_headers:\n # GNU extended sparse format version 0.0.\n self._proc_gnusparse_00(next, pax_headers, buf)\n\n elif pax_headers.get(\"GNU.sparse.major\") == \"1\" and pax_headers.get(\"GNU.sparse.minor\") == \"0\":\n # GNU extended sparse format version 1.0.\n self._proc_gnusparse_10(next, pax_headers, tarfile)\n\n if self.type in (XHDTYPE, SOLARIS_XHDTYPE):\n # Patch the TarInfo object with the extended header info.\n next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)\n next.offset = self.offset\n\n if \"size\" in pax_headers:\n # If the extended header replaces the size field,\n # we need to recalculate the offset where the next\n # header starts.\n offset = next.offset_data\n if next.isreg() or next.type not in SUPPORTED_TYPES:\n offset += next._block(next.size)\n tarfile.offset = offset\n\n return next\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1320, "n_words": 468, "vocab_size": 249, "complexity": 16, "nloc": 52, "token_counts": 387, "n_ast_nodes": 669, "n_identifiers": 47, "random_cut": "def _proc_pax(self, tarfile):\n \n # Read the header information.\n buf = tarfile.fileobj.read(self._block(self.size))\n\n # A pax header stores supplemental information for either\n # the following file (extended) or all following files\n # (global).\n if self.type == XGLTYPE:\n pax_headers = tarfile.pax_headers\n else:\n pax_headers = tarfile.pax_headers.copy()\n\n # Check if the pax header contains a hdrcharset field. This tells us\n # the encoding of the path, linkpath, uname and gname fields. 
Normally,\n # these fields are UTF-8 encoded but since POSIX.1-2008 tar\n # implementations are allowed to store them as raw binary strings if\n # the translation to UTF-8 fails.\n match = re.search(br\"\\d+ hdrcharset=([^\\n]+)\\n\", buf)\n if match is not None:\n pax_headers[\"hdrcharset\"] = match.group(1).decode(\"utf8\")\n\n # For the time being, we don't care about anything other than \"BINARY\".\n # The only other value that is currently allowed by the standard is\n # \"ISO-IR 10646 2000 UTF-8\" in other words UTF-8.\n hdrcharset = pax_headers.get(\"hdrcharset\")\n if hdrcharset == \"BINARY\":\n encoding = tarfile.encoding\n else:\n encoding = \"utf8\"\n\n # Parse pax header information. A record looks like that:\n # \"%d %s=%s\\n\" % (length, keyword, value). length is the size\n # of the complete record including the length field itself and\n # the newline. keyword and value are both UTF-8 encoded strings.\n regex = re.compile(br\"(\\d+) ([^=]+)=\")\n pos = 0\n while True:\n match = regex.match(buf, pos)\n if not match:\n break\n\n length, keyword = match.groups()\n length = int(length)\n value = buf[match.end(2)", "d_id": 3880, "documentation": { "docstring": "Process an extended or global header as described in\n POSIX.1-2008.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 27, "language": "en" } }, { "id": 23379, "commit_id": "6e607a0fa1cefbf0388dac86c84debf4781cec48", "repo": "PaddleOCR", "path": "ppocr/modeling/backbones/rec_efficientb3_pren.py", "file_name": "rec_efficientb3_pren.py", "fun_name": "get_global_params", "commit_message": "[Feature] Add PREN Scene Text Recognition Model(Accepted in CVPR2021) (#5563)\n\n* [Feature] add PREN scene text recognition model\r\n\r\n* [Patch] Optimize yml File\r\n\r\n* [Patch] Save Label/Pred Preprocess Time Cost\r\n\r\n* [BugFix] Modify Shape Conversion to Fit for Inference Model Exportion\r\n\r\n* [Patch] ?\r\n\r\n* [Patch] ?\r\n\r\n* 啥情况...", "code": "def get_global_params():\n \n GlobalParams = namedtuple('GlobalParams', [\n 'drop_connect_rate', 'width_coefficient', 'depth_coefficient',\n 'depth_divisor', 'image_size'\n ])\n global_params = GlobalParams(\n drop_connect_rate=0.3,\n width_coefficient=1.2,\n depth_coefficient=1.4,\n depth_divisor=8,\n image_size=64)\n return global_params\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 134, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 12, "token_counts": 55, "n_ast_nodes": 83, "n_identifiers": 9, "random_cut": "def get_global_params():\n \n GlobalParams = namedtuple('GlobalParams', [\n 'drop_connect_rate', 'width_coefficient', 'depth_coefficient',\n 'depth_divisor', 'image_size'\n ])\n global_params = GlobalParams(\n drop_connect_rate=0.3,\n width_coefficient=1.2,\n depth_coefficient=1.4,\n depth_divisor=8,\n ", "d_id": 4585, "documentation": { "docstring": "\n The fllowing are efficientnetb3's arch superparams, but to fit for scene \n text recognition task, the resolution(image_size) here is changed \n from 300 to 64.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 54, "language": "en" } }, { "id": 108809, "commit_id": "e994b58e49bcd98334b220d74540005f62af918d", "repo": "matplotlib", "path": "lib/matplotlib/path.py", "file_name": "path.py", "fun_name": "_create_closed", "commit_message": "Add a helper to generate closed paths.\n\nInstead of having to manually append an unused vertex that corresponds\nto the CLOSEPATH code, add a _make_closed helper 
(private for now) which\ndoes that for us.", "code": "def _create_closed(cls, vertices):\n \n v = _to_unmasked_float_array(vertices)\n return cls(np.concatenate([v, v[:1]]), closed=True)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 57, "n_identifiers": 8, "random_cut": "def _create_closed(cls, vertices):\n \n v = _to_unmasked_float_array(vertices)\n ", "d_id": 23349, "documentation": { "docstring": "\n Create a closed polygonal path going through *vertices*.\n\n Unlike ``Path(..., closed=True)``, *vertices* should **not** end with\n an entry for the CLOSEPATH; this entry is added by `._create_closed`.\n ", "n_words": 27, "vocab_size": 26, "n_whitespaces": 56, "language": "en" } }, { "id": 259259, "commit_id": "751c5cd05ff545c20ad0b09ac491c07f31e4cd56", "repo": "scikit-learn", "path": "sklearn/_loss/tests/test_loss.py", "file_name": "test_loss.py", "fun_name": "test_predict_proba", "commit_message": "TST ensure that sklearn/_loss/tests/test_loss.py is seed insensitive (#22847)\n\nCo-authored-by: Christian Lorentzen ", "code": "def test_predict_proba(loss, global_random_seed):\n \n n_samples = 20\n y_true, raw_prediction = random_y_true_raw_prediction(\n loss=loss,\n n_samples=n_samples,\n y_bound=(-100, 100),\n raw_bound=(-5, 5),\n seed=global_random_seed,\n )\n\n if hasattr(loss, \"predict_proba\"):\n proba = loss.predict_proba(raw_prediction)\n assert proba.shape == (n_samples, loss.n_classes)\n assert np.sum(proba, axis=1) == approx(1, rel=1e-11)\n\n if hasattr(loss, \"gradient_proba\"):\n for grad, proba in (\n (None, None),\n (None, np.empty_like(raw_prediction)),\n (np.empty_like(raw_prediction), None),\n (np.empty_like(raw_prediction), np.empty_like(raw_prediction)),\n ):\n grad, proba = loss.gradient_proba(\n y_true=y_true,\n raw_prediction=raw_prediction,\n sample_weight=None,\n gradient_out=grad,\n proba_out=proba,\n )\n assert proba.shape == (n_samples, loss.n_classes)\n assert np.sum(proba, axis=1) == approx(1, rel=1e-11)\n assert_allclose(\n grad,\n loss.gradient(\n y_true=y_true,\n raw_prediction=raw_prediction,\n sample_weight=None,\n gradient_out=None,\n ),\n )\n\n\n@pytest.mark.parametrize(\"loss\", ALL_LOSSES)\n@pytest.mark.parametrize(\"sample_weight\", [None, \"range\"])\n@pytest.mark.parametrize(\"dtype\", (np.float32, np.float64))\n@pytest.mark.parametrize(\"order\", (\"C\", \"F\"))", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"loss\", ALL_LOSSES)\n@pytest.mark.parametrize(\"sample_weight\", [None, \"range\"])\n@pytest.mark.parametrize(\"dtype\", (np.float32, np.float64))\n@pytest.mark.parametrize(\"order\", (\"C\", \"F\"))", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 483, "n_words": 93, "vocab_size": 62, "complexity": 4, "nloc": 38, "token_counts": 248, "n_ast_nodes": 453, "n_identifiers": 34, "random_cut": "def test_predict_proba(loss, global_random_seed):\n \n n_samples = 20\n y_true, raw_prediction = random_y_true_raw_prediction(\n loss=loss,\n n_samples=n_samples,\n y_bound=(-100, 100),\n raw_bound=(-5, 5),\n seed=global_random_seed,\n )\n\n if hasattr(loss, \"predict_proba\"):\n proba = loss.predict_proba(raw_prediction)\n assert proba.shape == (n_samples, loss.n_classes)\n assert np.sum(proba, axis=1) == approx(1, rel=1e-11)\n\n ", "d_id": 75678, "documentation": { 
"docstring": "Test that predict_proba and gradient_proba work as expected.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 65555, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py", "file_name": "supplier_scorecard_variable.py", "fun_name": "get_late_shipments", "commit_message": "style: format code with black", "code": "def get_late_shipments(scorecard):\n\t\n\treturn get_total_shipments(scorecard) - get_on_time_shipments(scorecard)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 4, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 29, "n_identifiers": 4, "random_cut": "def get_late_shipments(scorecard):\n\t\n\treturn get", "d_id": 13932, "documentation": { "docstring": "Gets the number of late shipments (counting each item) in the period (based on Purchase Receipts vs POs)", "n_words": 18, "vocab_size": 17, "n_whitespaces": 17, "language": "en" } }, { "id": 7172, "commit_id": "aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173", "repo": "ludwig", "path": "ludwig/models/base.py", "file_name": "base.py", "fun_name": "eval_loss", "commit_message": "feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027)", "code": "def eval_loss(self, targets, predictions):\n \n eval_loss = 0\n for of_name, of_obj in self.output_features.items():\n of_eval_loss = of_obj.eval_loss(targets[of_name], predictions[of_name])\n eval_loss += of_obj.loss[\"weight\"] * of_eval_loss\n\n additional_loss = 0\n additional_losses = self.losses()\n if additional_losses:\n additional_loss = torch.sum(torch.stack(additional_losses)) # other losses\n\n return eval_loss, additional_loss\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 121, "n_words": 38, "vocab_size": 29, "complexity": 3, "nloc": 10, "token_counts": 82, "n_ast_nodes": 132, "n_identifiers": 16, "random_cut": "def eval_loss(self, targets, predictions):\n \n eval_loss = 0\n for of_name, of_obj in self.outp", "d_id": 1150, "documentation": { "docstring": "Computes all evaluation losses for the model given targets and predictions.\n\n Args:\n targets: A dictionary of target names to target tensors.\n predictions: A dictionary of output names to output tensors.\n\n Returns:\n A tuple of loss values for eval losses and additional losses.\n ", "n_words": 42, "vocab_size": 29, "n_whitespaces": 96, "language": "en" } }, { "id": 153455, "commit_id": "2d40797b2b700d81d4db4a4cd023d563edf6431f", "repo": "modin", "path": "modin/db_conn.py", "file_name": "db_conn.py", "fun_name": "partition_query", "commit_message": "FEAT-#979: Enable reading from SQL server. 
(#4279)\n\nCo-authored-by: eavidan \r\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: mvashishtha ", "code": "def partition_query(self, query, limit, offset):\n \n return (\n (\n f\"SELECT * FROM ({query}) AS _ ORDER BY(SELECT NULL)\"\n + f\" OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY\"\n )\n if self._dialect_is_microsoft_sql()\n else f\"SELECT * FROM ({query}) LIMIT {limit} OFFSET {offset}\"\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 135, "n_words": 40, "vocab_size": 31, "complexity": 2, "nloc": 9, "token_counts": 31, "n_ast_nodes": 73, "n_identifiers": 6, "random_cut": "def partition_query(self, query, limit, offset):\n \n return (\n (\n f\"SELECT * FROM ({query}) AS _ ORDER BY(SELECT NULL)\"\n + f\" OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY\"\n )\n if self._dialect_is_microsoft_sql()\n else f\"SELECT * FROM", "d_id": 35404, "documentation": { "docstring": "\n Get a query that partitions the original `query`.\n\n Parameters\n ----------\n query : str\n The SQL query to get a partition.\n limit : int\n The size of the partition.\n offset : int\n Where the partition begins.\n\n Returns\n -------\n str\n ", "n_words": 38, "vocab_size": 27, "n_whitespaces": 142, "language": "en" } }, { "id": 207118, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_filters/tests.py", "file_name": "tests.py", "fun_name": "test_parameter_ends_with__in__or__isnull", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_parameter_ends_with__in__or__isnull(self):\n \n # When it ends with '__in' -----------------------------------------\n modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)\n request = self.request_factory.get(\"/\", {\"decade__in\": \"the 90s\"})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n\n # Make sure the correct queryset is returned\n queryset = changelist.get_queryset(request)\n self.assertEqual(list(queryset), [self.bio_book])\n\n # Make sure the correct choice is selected\n filterspec = changelist.get_filters(request)[0][0]\n self.assertEqual(filterspec.title, \"publication decade\")\n choices = list(filterspec.choices(changelist))\n self.assertEqual(choices[2][\"display\"], \"the 1990's\")\n self.assertIs(choices[2][\"selected\"], True)\n self.assertEqual(choices[2][\"query_string\"], \"?decade__in=the+90s\")\n\n # When it ends with '__isnull' ---------------------------------------\n modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)\n request = self.request_factory.get(\"/\", {\"decade__isnull\": \"the 90s\"})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n\n # Make sure the correct queryset is returned\n queryset = changelist.get_queryset(request)\n self.assertEqual(list(queryset), [self.bio_book])\n\n # Make sure the correct choice is selected\n filterspec = changelist.get_filters(request)[0][0]\n self.assertEqual(filterspec.title, \"publication decade\")\n choices = list(filterspec.choices(changelist))\n self.assertEqual(choices[2][\"display\"], \"the 1990's\")\n self.assertIs(choices[2][\"selected\"], True)\n self.assertEqual(choices[2][\"query_string\"], \"?decade__isnull=the+90s\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 339, "n_words": 122, "vocab_size": 
52, "complexity": 1, "nloc": 25, "token_counts": 284, "n_ast_nodes": 475, "n_identifiers": 24, "random_cut": "def test_parameter_ends_with__in__or__isnull(self):\n \n # When it ends with '__in' -----------------------------------------\n modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)\n request = self.request_factory.get(\"/\", {\"decade__in\": \"the 90s\"})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n\n # Make sure the correct queryset is returned\n queryset = changelist.get_queryset(request)\n self.assertEqual(list(queryset), [self.bio_book])\n\n # Make sure the correct choice is selected\n filterspec = changelist.get_filters(request)[0][0]\n self.assertEqual(filterspec.title, \"publication decade\")\n choices = list(filterspec.choices(changelist))\n self.assertEqual(choices[2][\"display\"], \"the 1990's\")\n self.assertIs(choices[2][\"selected\"], True)\n self.assertEqual(choices[2][\"query_string\"], \"?decade__in=the+90s\")\n\n # When it ends with '__isnull' ----------", "d_id": 51869, "documentation": { "docstring": "\n A SimpleListFilter's parameter name is not mistaken for a model field\n if it ends with '__isnull' or '__in' (#17091).\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 218553, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "v4_int_to_packed", "commit_message": "add python 3.10.4 for windows", "code": "def v4_int_to_packed(address):\n \n try:\n return address.to_bytes(4, 'big')\n except OverflowError:\n raise ValueError(\"Address negative or too large for IPv4\")\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 5, "token_counts": 25, "n_ast_nodes": 47, "n_identifiers": 5, "random_cut": "def v4_int_to_packed(address):\n \n try:\n return address.to_bytes(4, 'big')\n except OverflowError:\n raise ValueError(\"Address negative or too large for I", "d_id": 55384, "documentation": { "docstring": "Represent an address as 4 packed bytes in network (big-endian) order.\n\n Args:\n address: An integer representation of an IPv4 IP address.\n\n Returns:\n The integer address packed as 4 bytes in network (big-endian) order.\n\n Raises:\n ValueError: If the integer is negative or too large to be an\n IPv4 IP address.\n\n ", "n_words": 49, "vocab_size": 33, "n_whitespaces": 91, "language": "en" } }, { "id": 247600, "commit_id": "5dd949bee6158a8b651db9f2ae417a62c8184bfd", "repo": "synapse", "path": "tests/handlers/test_directory.py", "file_name": "test_directory.py", "fun_name": "test_delete_alias_not_allowed", "commit_message": "Add type hints to some tests/handlers files. 
(#12224)", "code": "def test_delete_alias_not_allowed(self) -> None:\n \n self._create_alias(self.admin_user)\n self.get_failure(\n self.handler.delete_association(\n create_requester(self.test_user), self.room_alias\n ),\n synapse.api.errors.AuthError,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 88, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 9, "token_counts": 47, "n_ast_nodes": 75, "n_identifiers": 14, "random_cut": "def test_delete_alias_not_allowed(self) -> None:\n \n self._create_alias(self.admin_user)\n self.get_failure(\n self.handler.delete_association(\n create_requester(self.test_us", "d_id": 71768, "documentation": { "docstring": "A user that doesn't meet the expected guidelines cannot delete an alias.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 198414, "commit_id": "e94a7b45d7b033ccbd57395dca28b654f875c54c", "repo": "sympy", "path": "sympy/integrals/trigonometry.py", "file_name": "trigonometry.py", "fun_name": "trigintegrate", "commit_message": "Improve loop performance", "code": "def trigintegrate(f, x, conds='piecewise'):\n \n pat, a, n, m = _pat_sincos(x)\n\n f = f.rewrite('sincos')\n M = f.match(pat)\n\n if M is None:\n return\n\n n, m = M[n], M[m]\n if n.is_zero and m.is_zero:\n return x\n zz = x if n.is_zero else S.Zero\n\n a = M[a]\n\n if n.is_odd or m.is_odd:\n u = _u\n n_, m_ = n.is_odd, m.is_odd\n\n # take smallest n or m -- to choose simplest substitution\n if n_ and m_:\n\n # Make sure to choose the positive one\n # otherwise an incorrect integral can occur.\n if n < 0 and m > 0:\n m_ = True\n n_ = False\n elif m < 0 and n > 0:\n n_ = True\n m_ = False\n # Both are negative so choose the smallest n or m\n # in absolute value for simplest substitution.\n elif (n < 0 and m < 0):\n n_ = n > m\n m_ = not (n > m)\n\n # Both n and m are odd and positive\n else:\n n_ = (n < m) # NB: careful here, one of the\n m_ = not (n < m) # conditions *must* be true\n\n # n m u=C (n-1)/2 m\n # S(x) * C(x) dx --> -(1-u^2) * u du\n if n_:\n ff = -(1 - u**2)**((n - 1)/2) * u**m\n uu = cos(a*x)\n\n # n m u=S n (m-1)/2\n # S(x) * C(x) dx --> u * (1-u^2) du\n elif m_:\n ff = u**n * (1 - u**2)**((m - 1)/2)\n uu = sin(a*x)\n\n fi = integrate(ff, u) # XXX cyclic deps\n fx = fi.subs(u, uu)\n if conds == 'piecewise':\n return Piecewise((fx / a, Ne(a, 0)), (zz, True))\n return fx / a\n\n # n & m are both even\n #\n # 2k 2m 2l 2l\n # we transform S (x) * C (x) into terms with only S (x) or C (x)\n #\n # example:\n # 100 4 100 2 2 100 4 2\n # S (x) * C (x) = S (x) * (1-S (x)) = S (x) * (1 + S (x) - 2*S (x))\n #\n # 104 102 100\n # = S (x) - 2*S (x) + S (x)\n # 2k\n # then S is integrated with recursive formula\n\n # take largest n or m -- to choose simplest substitution\n n_ = (Abs(n) > Abs(m))\n m_ = (Abs(m) > Abs(n))\n res = S.Zero\n\n if n_:\n # 2k 2 k i 2i\n # C = (1 - S ) = sum(i, (-) * B(k, i) * S )\n if m > 0:\n for i in range(0, m//2 + 1):\n res += (S.NegativeOne**i * binomial(m//2, i) *\n _sin_pow_integrate(n + 2*i, x))\n\n elif m == 0:\n res = _sin_pow_integrate(n, x)\n else:\n\n # m < 0 , |n| > |m|\n # /\n # |\n # | m n\n # | cos (x) sin (x) dx =\n # |\n # |\n #/\n # /\n # |\n # -1 m+1 n-1 n - 1 | m+2 n-2\n # ________ cos (x) sin (x) + _______ | cos (x) sin (x) dx\n # |\n # m + 1 m + 1 |\n # /\n\n res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) +\n Rational(n - 1, m + 1) *\n trigintegrate(cos(x)**(m + 
2)*sin(x)**(n - 2), x))\n\n elif m_:\n # 2k 2 k i 2i\n # S = (1 - C ) = sum(i, (-) * B(k, i) * C )\n if n > 0:\n\n # / /\n # | |\n # | m n | -m n\n # | cos (x)*sin (x) dx or | cos (x) * sin (x) dx\n # | |\n # / /\n #\n # |m| > |n| ; m, n >0 ; m, n belong to Z - {0}\n # n 2\n # sin (x) term is expanded here in terms of cos (x),\n # and then integrated.\n #\n\n for i in range(0, n//2 + 1):\n res += (S.NegativeOne**i * binomial(n//2, i) *\n _cos_pow_integrate(m + 2*i, x))\n\n elif n == 0:\n\n # /\n # |\n # | 1\n # | _ _ _\n # | m\n # | cos (x)\n # /\n #\n\n res = _cos_pow_integrate(m, x)\n else:\n\n # n < 0 , |m| > |n|\n # /\n # |\n # | m n\n # | cos (x) sin (x) dx =\n # |\n # |\n #/\n # /\n # |\n # 1 m-1 n+1 m - 1 | m-2 n+2\n # _______ cos (x) sin (x) + _______ | cos (x) sin (x) dx\n # |\n # n + 1 n + 1 |\n # /\n\n res = (Rational(1, n + 1) * cos(x)**(m - 1)*sin(x)**(n + 1) +\n Rational(m - 1, n + 1) *\n trigintegrate(cos(x)**(m - 2)*sin(x)**(n + 2), x))\n\n else:\n if m == n:\n ##Substitute sin(2x)/2 for sin(x)cos(x) and then Integrate.\n res = integrate((sin(2*x)*S.Half)**m, x)\n elif (m == -n):\n if n < 0:\n # Same as the scheme described above.\n # the function argument to integrate in the end will\n # be 1, this cannot be integrated by trigintegrate.\n # Hence use sympy.integrals.integrate.\n res = (Rational(1, n + 1) * cos(x)**(m - 1) * sin(x)**(n + 1) +\n Rational(m - 1, n + 1) *\n integrate(cos(x)**(m - 2) * sin(x)**(n + 2), x))\n else:\n res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) +\n Rational(n - 1, m + 1) *\n integrate(cos(x)**(m + 2)*sin(x)**(n - 2), x))\n if conds == 'piecewise':\n return Piecewise((res.subs(x, a*x) / a, Ne(a, 0)), (zz, True))\n return res.subs(x, a*x) / a\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 3401, "n_words": 909, "vocab_size": 266, "complexity": 30, "nloc": 78, "token_counts": 829, "n_ast_nodes": 1394, "n_identifiers": 41, "random_cut": "def trigintegrate(f, x, conds='piecewise'):\n \n pat, a, n, m = _pat_sincos(x)\n\n f = f.rewrite('sincos')\n M = f.match(pat)\n\n if M is None:\n return\n\n n, m = M[n], M[m]\n if n.is_zero and m.is_zero:\n return x\n zz = x if n.is_zero else S.Zero\n\n a = M[a]\n\n if n.is_odd or m.is_odd:\n u = _u\n n_, m_ = n.is_odd, m.is_odd\n\n # take smallest n or m -- to choose simplest substitution\n if n_ and m_:\n\n # Make sure to choose the positive one\n # otherwise an incorrect integral can occur.\n if n < 0 and m > 0:\n m_ = True\n n_ = False\n elif m < 0 and n > 0:\n n_ = True\n m_ = False\n # Both are negative so choose the smallest n or m\n # in absolute value for simplest substitution.\n elif (n < 0 and m < 0):\n n_ = n > m\n m_ = not (n > m)\n\n # Both n and m are odd and positive\n else:\n n_ = (n < m) # NB: careful here, one of the\n m_ = not (n < m) # conditions *must* be true\n\n # n m u=C (n-1)/2 m\n # S(x) * C(x) dx --> -(1-u^2) * u du\n if n_:\n ff = -(1 - u**2)**((n - 1)/2) * u**m\n uu = cos(a*x)\n\n # n m u=S n (m-1)/2\n # S(x) * C(x) dx --> u * (1-u^2) du\n elif m_:\n ff = u**n * (1 - u**2)**((m - 1)/2)\n uu = sin(a*x)\n\n fi = integrate(ff, u) # XXX cyclic deps\n fx = fi.subs(u, uu)\n if conds == 'piecewise':\n return Piecewise((fx / a, Ne(a, 0)), (zz, True))\n return fx / a\n\n # n & m are both even\n #\n # 2k 2m 2l 2l\n # we transform S (x) * C (x) into terms with only S (x) or C (x)\n #\n # example:\n # 100 4 100 2 2 100 4 2\n # S (x) * C (x) = S (x) * (1-S (x)) = S (x) * (1 + S (x) - 
2*S (x))\n #\n # 104 102 100\n # = S (x) - 2*S (x) + S (x)\n # 2k\n # then S is integrated with recursive formula\n\n # take largest n or m -- to choose simplest substitution\n n_ = (Abs(n) > Abs(m))\n m_ = (Abs(m) > Abs(n))\n res = S.Zero\n\n if n_:\n # 2k 2 k i 2i\n # C = (1 - S ) = sum(i, (-) * B(k, i) * S )\n if m > 0:\n for i in range(0, m//2 + 1):\n res += (S.NegativeOne**i * binomial(m//2, i) *\n _sin_pow_integrate(n + 2*i, x))\n\n elif m == 0:\n res = _sin_pow_integrate(n, x)\n else:\n\n # m < 0 , |n| > |m|\n # /\n # |\n # | m n\n # | cos (x) sin (x) dx =\n # |\n # |\n #/\n # /\n # |\n # -1 m+1 n-1 n - 1 | m+2 n-2\n # ________ cos (x) sin (x) + _______ | cos (x) sin (x) dx\n # |\n # m + 1 m + 1 |\n # /\n\n res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) +\n Rational(n - 1, m + 1) *\n trigintegrate(cos(x)**(m + 2)*sin(x)**(n - 2), x))\n\n elif m_:\n # 2k 2 k i 2i\n # S = (1 - C ) = sum(i, (-) * B(k, i) * C )\n if n > 0:\n\n # / /\n # | |\n # | m n | -m n\n # | cos (x)*sin (x) dx or | cos (x) * sin (x) dx\n # | |\n # / /\n #\n # |m| > |n| ; m, n >0 ; m, n belong to Z - {0}\n # n ", "d_id": 48923, "documentation": { "docstring": "\n Integrate f = Mul(trig) over x.\n\n Examples\n ========\n\n >>> from sympy import sin, cos, tan, sec\n >>> from sympy.integrals.trigonometry import trigintegrate\n >>> from sympy.abc import x\n\n >>> trigintegrate(sin(x)*cos(x), x)\n sin(x)**2/2\n\n >>> trigintegrate(sin(x)**2, x)\n x/2 - sin(x)*cos(x)/2\n\n >>> trigintegrate(tan(x)*sec(x), x)\n 1/cos(x)\n\n >>> trigintegrate(sin(x)*tan(x), x)\n -log(sin(x) - 1)/2 + log(sin(x) + 1)/2 - sin(x)\n\n References\n ==========\n\n .. [1] http://en.wikibooks.org/wiki/Calculus/Integration_techniques\n\n See Also\n ========\n\n sympy.integrals.integrals.Integral.doit\n sympy.integrals.integrals.Integral\n ", "n_words": 62, "vocab_size": 44, "n_whitespaces": 129, "language": "en" } }, { "id": 264064, "commit_id": "b87832b35dc1866c81ecaf7e502afe48a4e50a82", "repo": "pyinstaller", "path": "PyInstaller/utils/hooks/win32.py", "file_name": "win32.py", "fun_name": "get_pywin32_module_file_attribute", "commit_message": "hookutils: win32: port to PyInstaller.isolated framework", "code": "def get_pywin32_module_file_attribute(module_name):\n \n from PyInstaller.utils.win32 import winutils\n module = winutils.import_pywin32_module(module_name)\n return module.__file__\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 26, "n_ast_nodes": 43, "n_identifiers": 9, "random_cut": "def get_pywin32_module_file_attribute(module_name):\n \n from PyInstaller.utils.win32 import winutils\n module = w", "d_id": 77585, "documentation": { "docstring": "\n Get the absolute path of the PyWin32 DLL specific to the PyWin32 module with the passed name.\n\n On import, each PyWin32 module:\n\n * Imports a DLL specific to that module.\n * Overwrites the values of all module attributes with values specific to that DLL. 
This includes that module's\n `__file__` attribute, which then provides the absolute path of that DLL.\n\n This function safely imports that module in a PyWin32-aware subprocess and returns the value of that module's\n `__file__` attribute.\n\n Parameters\n ----------\n module_name : str\n Fully-qualified name of that module.\n\n Returns\n ----------\n str\n Absolute path of that DLL.\n\n See Also\n ----------\n `PyInstaller.utils.win32.winutils.import_pywin32_module()`\n For further details.\n ", "n_words": 103, "vocab_size": 60, "n_whitespaces": 178, "language": "en" } }, { "id": 97856, "commit_id": "d246d2b6d3e014270941209e54f2f12e09ad9a81", "repo": "sentry", "path": "src/sentry/pipeline/base.py", "file_name": "base.py", "fun_name": "render_warning", "commit_message": "ref(py): Split up large file (#32862)\n\nCo-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>", "code": "def render_warning(self, message):\n \n context = {\"error\": message}\n return render_to_response(\"sentry/pipeline-provider-error.html\", context, self.request)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 45, "n_identifiers": 6, "random_cut": "def render_warning(self, message):\n \n context = {\"error\": message}\n return render_to_response(\"sentry/pipeline-provider-error.html\", context, self.request)\n", "d_id": 19490, "documentation": { "docstring": "For situations when we want to display an error without triggering an issue", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 247283, "commit_id": "2ffaf30803f93273a4d8a65c9e6c3110c8433488", "repo": "synapse", "path": "tests/rest/client/test_rooms.py", "file_name": "test_rooms.py", "fun_name": "test_context_filter_not_labels", "commit_message": "Add type hints to `tests/rest/client` (#12108)\n\n* Add type hints to `tests/rest/client`\r\n\r\n* newsfile\r\n\r\n* fix imports\r\n\r\n* add `test_account.py`\r\n\r\n* Remove one type hint in `test_report_event.py`\r\n\r\n* change `on_create_room` to `async`\r\n\r\n* update new functions in `test_third_party_rules.py`\r\n\r\n* Add `test_filter.py`\r\n\r\n* add `test_rooms.py`\r\n\r\n* change to `assertEquals` to `assertEqual`\r\n\r\n* lint", "code": "def test_context_filter_not_labels(self) -> None:\n \n event_id = self._send_labelled_messages_in_room()\n\n channel = self.make_request(\n \"GET\",\n \"/rooms/%s/context/%s?filter=%s\"\n % (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)),\n access_token=self.tok,\n )\n self.assertEqual(channel.code, 200, channel.result)\n\n events_before = channel.json_body[\"events_before\"]\n\n self.assertEqual(\n len(events_before), 1, [event[\"content\"] for event in events_before]\n )\n self.assertEqual(\n events_before[0][\"content\"][\"body\"], \"without label\", events_before[0]\n )\n\n events_after = channel.json_body[\"events_after\"]\n\n self.assertEqual(\n len(events_after), 2, [event[\"content\"] for event in events_after]\n )\n self.assertEqual(\n events_after[0][\"content\"][\"body\"], \"with wrong label\", events_after[0]\n )\n self.assertEqual(\n events_after[1][\"content\"][\"body\"], \"with two wrong labels\", events_after[1]\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 284, "n_words": 66, 
"vocab_size": 47, "complexity": 3, "nloc": 27, "token_counts": 189, "n_ast_nodes": 302, "n_identifiers": 20, "random_cut": "def test_context_filter_not_labels(self) -> None:\n \n event_id = self._send_labelled_messages_in_room()\n\n channel = self.make_request(\n \"GET\",\n \"/rooms/%s/context/%s?filter=%s\"\n % (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)),\n access_token=self.tok,\n )\n self.assertEqual(channel.code, 200, channel.result)\n\n events_before = channel.json_body[\"events_before\"]\n\n self.assertEqual(\n len(events_before), 1, [event[\"content\"] for event in events_before]\n )\n self.assertEqual(\n events_before[0][\"content\"][\"body\"], \"without label\", events_before[0]\n )\n\n events_after = channel.json_body[\"events_after\"]\n\n self.assertEqual(\n len(events_after), 2, [event[\"content\"] for event in events_after]\n )\n self.assertEqual(\n events_after[0][\"content\"][\"body\"], \"with wrong label\", events_after[0]\n )\n self.assertEqual(\n events_after[1][\"content\"][\"body\"], \"with two wrong labels\", events_after[1]\n )\n", "d_id": 71570, "documentation": { "docstring": "Test that we can filter by the absence of a label on a /context request.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 177245, "commit_id": "bffcd74649fb95a57fb834846eb3c7d9693c55b8", "repo": "networkx", "path": "networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py", "file_name": "feasibility.py", "fun_name": "_consistent_PT", "commit_message": "Preliminary VF2++ Implementation (#5788)\n\n* Preliminary implementation of the candidate node pair ordering of VF2++\r\n\r\n* Removed unused lines of code\r\n\r\n* Added todos\r\n\r\n* Added demo and pseudocode for VF2++\r\n\r\n* Pointed out a problem with the pseudocode\r\n\r\n* Initialisation of the VF2++ basis structure\r\n\r\n* Initialise the GraphMatcher\r\n\r\n* Remove useless changes\r\n\r\n* Check labels for the node ordering + demo\r\n\r\n* Code to verify the ordering\r\n\r\n* Implement the ISO feasibility check\r\n\r\n* Implement the IND feasibility\r\n\r\n* Create State class\r\n\r\n* Fixed Dan's code for the ordering\r\n\r\n* Preliminary form of the node ordering\r\n\r\n* Add visualisation\r\n\r\n* Use list comprehension for the Ti computation\r\n\r\n* Remove function\r\n\r\n* Create Unit Tests\r\n\r\n* Add labels check + update unit tests\r\n\r\n* Add pre-computation of G-labels\r\n\r\n* Remove todo\r\n\r\n* First implementation of the candidate selection\r\n\r\n* Initial version of candidate selection\r\n\r\n* Remove unnecessary files\r\n\r\n* Merge candidate selection cases into one\r\n\r\n* Create a function to incrementally update Ti and Ti_out\r\n\r\n* Unit Test for the Ti updating\r\n\r\n* Implement the Ti/Ti_out restoring\r\n\r\n* Finish the restoring of Ti and create unit test\r\n\r\n* Update test file names\r\n\r\n* Uncommented test section\r\n\r\n* Replace redundant loop with for-any\r\n\r\n* Create unit test for candidate selection using the same label for all nodes\r\n\r\n* Create unit test for candidate selection using different labels for the nodes\r\n\r\n* Update feasibility tests without the use of the state class\r\n\r\n* Create more unit tests for the feasibility checking\r\n\r\n* Provide explanation for the unit tests\r\n\r\n* First successful test of the complete ISO VF2++ algorithm (except from the buggy ordering)\r\n\r\n* Fix bug: when popping a node to climb up the DFS tree we need the previous node ordering (containing the node that we just 
popped)\r\n\r\n* Create a separate file for the VF2++ ISO algorithm\r\n\r\n* Delete file\r\n\r\n* Remove redundant iteration and memory use\r\n\r\n* Demo for different labels\r\n\r\n* Add benchmark for the incremental Ti updating\r\n\r\n* Remove unnecessary class\r\n\r\n* Fix bug with the ordering WOOOHOOOOO\r\n\r\n* Unit tests for the node ordering\r\n\r\n* Add unit tests for the VF2++ ISO\r\n\r\n* Fix ordering\r\n\r\n* Probablly fix logic error in ordering\r\n\r\n* Reformatted with black\r\n\r\n* Test precommit\r\n\r\n* Test precommit\r\n\r\n* Test pre commit\r\n\r\n* Testing pre commit\r\n\r\n* Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Add unit tests for vf2++\r\n\r\n* Added vf2++ unit test\r\n\r\n* Added precheck for VF2++\r\n\r\n* Add unit tests for the precheck\r\n\r\n* Updated the benchmarking\r\n\r\n* Updated the benchmark\r\n\r\n* Apply hooks\r\n\r\n* Add documentation for the ordering\r\n\r\n* Add documentation for the candidate selection\r\n\r\n* Added documentation for the feasibility\r\n\r\n* Added documentation for vf2++\r\n\r\n* Separate functions for ISO feasibility\r\n\r\n* Refine unit tests\r\n\r\n* Apply hooks\r\n\r\n* Force reformat all files\r\n\r\n* Remove redundant return statements from VF2__\r\n\r\n* Apply hooks\r\n\r\n* Apply hooks\r\n\r\n* Format\r\n\r\n* Minor changes\r\n\r\n* Add unit tests\r\n\r\n* Adjusted benchmark\r\n\r\n* Fix benchmark\r\n\r\n* Isort\r\n\r\n* Isort benchmark\r\n\r\n* Apply optimization in the candidate selection\r\n\r\n* Track matched node with pointer\r\n\r\n* Adjust benchmark\r\n\r\n* Restructure in VF2 function\r\n\r\n* Make VF2++ EXTREMELY PRETTY\r\n\r\n* Removed sorting in feasibility rules\r\n\r\n* Get rid of visited set, check mapping instead\r\n\r\n* Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Made color assignement deterministic in VF2++ unit tests\r\n\r\n* Add keyword argument in unit tests\r\n\r\n* Hoepfully fix pipeline errors\r\n\r\n* Add vf2++ unit tests for multigraphs\r\n\r\n* Add Unit tests for Feasibility\r\n\r\n* Add unit tests for feasibility on multi graphs\r\n\r\n* Finalize feasibility tests for multigraph settings\r\n\r\n* Update documentation\r\n\r\n* Remove list comprehension and boost performance\r\n\r\n* Add unit tests for both graphs and multi graphs, using same labels\r\n\r\n* Isort\r\n\r\n* Optimized precheck\r\n\r\n* Replace loop with any\r\n\r\n* Optimize multigraph chceck\r\n\r\n* Transfer except statement\r\n\r\n* Check order consistency\r\n\r\n* Cache degrees and labels from the beginning\r\n\r\n* Delete benchmark to create new\r\n\r\n* Fix precheck bug\r\n\r\n* Adjust unit tests\r\n\r\n* Add benchmark for perofmance comparison between VF2 and VF2++\r\n\r\n* Fix Ti computing tests\r\n\r\n* Hopefully fix isort\r\n\r\n* Add benchmark for the candidate selection methods\r\n\r\n* Rename modules: lower case, remove +\r\n\r\n* Refactor VF2++ arguments\r\n\r\n* Adjust VF2++ to work with multiple node labels\r\n\r\n* Add unit tests for multiple labels\r\n\r\n* Adjust for different number of labels per node\r\n\r\n* Finish arguments of VF2++\r\n\r\n* Add user functions\r\n\r\n* Exported the two vf2++ functions\r\n\r\n* Added underscore prefix to private functions and fixed tests\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/isomorphism/demo.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update 
networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Apply suggested changes\r\n\r\n* Refactor rst files\r\n\r\n* Rm unnecessary toctree from isomorphism page.\r\n\r\n* Autodoc vf2pp module + public functions.\r\n\r\n* Rm dedicated vf2pp reference article.\r\n\r\n* Rm extra vf2pp listing from autosummaries.\r\n\r\n* Add summary of three functions to module docstring.\r\n\r\n* Make sure docstrings match their functions.\r\n\r\n* Refactor everything\r\n\r\n* Format code\r\n\r\n* Add unit test\r\n\r\n* Inline process level function in node ordering\r\n\r\n* Perform intersection first rather than last\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp_helpers/candidates.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Replace return statement with multiple operations and make it more readable\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Fix multigraph bug in update_Tinout\r\n\r\n* Abstract the argmax function\r\n\r\n* Add unit test for first case of candidate selection\r\n\r\n* Create unit test for all candidate selection cases\r\n\r\n* Remove re-definition of namedtuple parameters\r\n\r\n* Update doc/reference/algorithms/isomorphism.rst\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/__init__.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Delete benchmark file\r\n\r\n* Add demo file\r\n\r\n* Create util file containing the helper functions, common across all unit tests\r\n\r\n* Fix CI/CD\r\n\r\n* Make unit tests for Ti updating specific\r\n\r\n* Remove util functions from vf2pp tests\r\n\r\n* Remove utils functions from multivf2pp tests\r\n\r\n* Remove utils functions from candidate tests\r\n\r\n* Remove utils functions from ordering checks\r\n\r\n* Remove utils functions from Ti tests\r\n\r\n* Add example in docstring\r\n\r\n* Remove unused utils functions\r\n\r\n* Separate initialization of vf2pp\r\n\r\n* Inline functions and add new abstract function for pushing to stack\r\n\r\n* Inline push to stack\r\n\r\n* Add commentsa\r\n\r\n* Separate precheck functions\r\n\r\n* Replace method with existing networkx function\r\n\r\n* Include label initialization inside parameter initializer function\r\n\r\n* Rename Tiout to Titilde\r\n\r\n* Update networkx/algorithms/isomorphism/tests/vf2pp/test_Ti_computing.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Use canonical setitem for dictionary insertions\r\n\r\n* Update networkx/algorithms/isomorphism/tests/vf2pp/test_precheck.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Remove variable assignement\r\n\r\n* Merge unit tests of vf2pp for graphs and multigraphs into the same file\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Change variable name\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Re-write ordering unit tests\r\n\r\n* Rename vf2pp solver\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Replace abstractified argmax function with two loops for readability\r\n\r\n* Apply final changes\r\n\r\n* Fix mistake\r\n\r\n* Update ref guide to reflect new fn names.\r\n\r\n* Update docstrings\r\n * Fix line length in module docstring\r\n 
* Copy updated parameter section to all 3 public fns.\r\n * Add Yields section to all_isomorphisms fn.\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult ", "code": "def _consistent_PT(u, v, graph_params, state_params):\n \n G1, G2 = graph_params.G1, graph_params.G2\n mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping\n\n for neighbor in G1[u]:\n if neighbor in mapping:\n if G1.number_of_edges(u, neighbor) != G2.number_of_edges(\n v, mapping[neighbor]\n ):\n return False\n\n for neighbor in G2[v]:\n if neighbor in reverse_mapping:\n if G1.number_of_edges(u, reverse_mapping[neighbor]) != G2.number_of_edges(\n v, neighbor\n ):\n return False\n return True\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 189, "n_words": 53, "vocab_size": 32, "complexity": 7, "nloc": 16, "token_counts": 110, "n_ast_nodes": 164, "n_identifiers": 11, "random_cut": "def _consistent_PT(u, v, graph_params, state_params):\n \n G1, G2 = graph_params.G1, graph_params.G2\n mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping\n\n for neighbor in G1[u]:\n if neighbor in mapping:\n if G1.number_of_edges(u, neighbor) != G2.number_of_edges(\n v, mapping[neighbor]\n ):\n return False\n\n for neighbor in G2[v]:\n ", "d_id": 42311, "documentation": { "docstring": "Checks the consistency of extending the mapping using the current node pair.\n\n Parameters\n ----------\n u, v: Graph node\n The two candidate nodes being examined.\n\n graph_params: namedtuple\n Contains all the Graph-related parameters:\n\n G1,G2: NetworkX Graph or MultiGraph instances.\n The two graphs to check for isomorphism or monomorphism\n\n G1_labels,G2_labels: dict\n The label of every node in G1 and G2 respectively\n\n state_params: namedtuple\n Contains all the State-related parameters:\n\n mapping: dict\n The mapping as extended so far. Maps nodes of G1 to nodes of G2\n\n reverse_mapping: dict\n The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically \"mapping\" reversed\n\n T1, T2: set\n Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are\n neighbors of nodes that are.\n\n T1_out, T2_out: set\n Ti_out contains all the nodes from Gi, that are neither in the mapping nor in Ti\n\n Returns\n -------\n True if the pair passes all the consistency checks successfully. 
False otherwise.\n ", "n_words": 162, "vocab_size": 94, "n_whitespaces": 329, "language": "en" } }, { "id": 67919, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/report/stock_balance/stock_balance.py", "file_name": "stock_balance.py", "fun_name": "get_variant_values_for", "commit_message": "style: format code with black", "code": "def get_variant_values_for(items):\n\t\n\tattribute_map = {}\n\tfor attr in frappe.db.sql(\n\t\t\n\t\t% \", \".join([\"%s\"] * len(items)),\n\t\ttuple(items),\n\t\tas_dict=1,\n\t):\n\t\tattribute_map.setdefault(attr[\"parent\"], {})\n\t\tattribute_map[attr[\"parent\"]].update({attr[\"attribute\"]: attr[\"attribute_value\"]})\n\n\treturn attribute_map\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 13, "n_words": 23, "vocab_size": 22, "complexity": 2, "nloc": 13, "token_counts": 82, "n_ast_nodes": 140, "n_identifiers": 13, "random_cut": "def get_variant_values_for(items):\n\t\n\tattribute_map = {}\n\tfor attr in frappe.db.sql(\n\t\t\n\t\t% \", \".join([\"%s\"] * len(items)),\n\t\ttuple(items),\n\t\tas_dict=1,\n\t):\n\t\tattribute_map.setdefault(attr[\"parent\"], {})\n\t\tattribute_map[attr[\"parent\"]].", "d_id": 14662, "documentation": { "docstring": "Returns variant values for items.select parent, attribute, attribute_value\n\t\tfrom `tabItem Variant Attribute` where parent in (%s)\n\t\t", "n_words": 16, "vocab_size": 16, "n_whitespaces": 14, "language": "en" } }, { "id": 80233, "commit_id": "10dbbddaf35607e4257f50dd960520a1268dd225", "repo": "wagtail", "path": "wagtail/snippets/tests/test_locking.py", "file_name": "test_locking.py", "fun_name": "test_edit_post_locked_by_self", "commit_message": "Add tests for locking snippets", "code": "def test_edit_post_locked_by_self(self):\n \n # Lock the snippet\n self.lock_snippet(self.user)\n\n # Try to edit the snippet\n response = self.client.post(\n self.get_url(\"edit\"),\n {\"text\": \"Edited while locked\"},\n follow=True,\n )\n self.refresh_snippet()\n\n # Should not show error message\n self.assertNotContains(\n response,\n f\"The {self.model_name} could not be saved as it is locked\",\n )\n\n # Check that the snippet is still locked\n self.assertTrue(self.snippet.locked)\n\n # Check that the snippet is edited\n self.assertEqual(self.snippet.text, \"Edited while locked\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 216, "n_words": 63, "vocab_size": 45, "complexity": 1, "nloc": 14, "token_counts": 77, "n_ast_nodes": 142, "n_identifiers": 17, "random_cut": "def test_edit_post_locked_by_self(self):\n \n # Lock the snippet\n self.lock_snippet(self.user)\n\n # Try to edit the snippet\n response = self.client.post(\n self.get_url(\"edit\"),\n {\"text\": \"Edited while locked\"},\n follow=True,\n )\n", "d_id": 17037, "documentation": { "docstring": "A user can edit a snippet that is locked by themselves.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 7165, "commit_id": "aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173", "repo": "ludwig", "path": "ludwig/models/base.py", "file_name": "base.py", "fun_name": "update_metrics", "commit_message": "feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027)", "code": "def update_metrics(self, targets, predictions):\n \n for of_name, of_obj in self.output_features.items():\n 
of_obj.update_metrics(targets[of_name], predictions[of_name])\n\n eval_loss, additional_losses = self.eval_loss(targets, predictions)\n self.eval_loss_metric.update(eval_loss)\n self.eval_additional_losses_metrics.update(additional_losses)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 64, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 65, "n_ast_nodes": 101, "n_identifiers": 13, "random_cut": "def update_metrics(self, targets, predictions):\n \n ", "d_id": 1143, "documentation": { "docstring": "Updates the model's metrics given targets and predictions.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 107483, "commit_id": "f156db08eee54d285ab0fb4e031e48d078ba6aa3", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "tick_right", "commit_message": "DOC: More cleanup axes -> Axes", "code": "def tick_right(self):\n \n label = True\n if 'label1On' in self._major_tick_kw:\n label = (self._major_tick_kw['label1On']\n or self._major_tick_kw['label2On'])\n self.set_ticks_position('right')\n # if labels were turned off before this was called\n # leave them off\n self.set_tick_params(which='both', labelright=label)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 111, "n_words": 31, "vocab_size": 26, "complexity": 3, "nloc": 7, "token_counts": 51, "n_ast_nodes": 94, "n_identifiers": 8, "random_cut": "def tick_right(self):\n \n label = True\n if 'label1On' in self._m", "d_id": 22772, "documentation": { "docstring": "\n Move ticks and ticklabels (if present) to the right of the Axes.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 320942, "commit_id": "676e01677183825d19107d3b2fbf1bb2c0684ede", "repo": "qutebrowser", "path": "tests/unit/mainwindow/test_messageview.py", "file_name": "test_messageview.py", "fun_name": "test_show_message_twice", "commit_message": "Only replace the exact same message\n\nIf we have a error message followed by an info message with the same text, they\nshould both be shown, not replaced automatically.", "code": "def test_show_message_twice(view, info1, info2, count):\n \n view.show_message(info1)\n view.show_message(info2)\n assert len(view._messages) == count\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 53, "n_identifiers": 8, "random_cut": "def test_show_message_twice(view, info1, info2, count):\n \n view.show_message(info1)\n view.show_message(info2)\n assert len(view._messages) == count\n\n", "d_id": 117451, "documentation": { "docstring": "Show the exact same message twice -> only one should be shown.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 245712, "commit_id": "d915740fa8228cf57741b27d9e5d66e358456b8e", "repo": "mmdetection", "path": "mmdet/models/task_modules/assigners/iou2d_calculator.py", "file_name": "iou2d_calculator.py", "fun_name": "__call__", "commit_message": "[Refactor] Refactor anchor head and base head with boxlist (#8625)\n\n* Refactor anchor head\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Add a series of boxes tools\r\n\r\n* Fix box type to 
support n x box_dim boxes\r\n\r\n* revert box type changes\r\n\r\n* Add docstring\r\n\r\n* refactor retina_head\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Fix comments\r\n\r\n* modify docstring of coder and ioucalculator\r\n\r\n* Replace with_boxlist with use_box_type", "code": "def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):\n \n bboxes1 = get_box_tensor(bboxes1)\n bboxes2 = get_box_tensor(bboxes2)\n assert bboxes1.size(-1) in [0, 4, 5]\n assert bboxes2.size(-1) in [0, 4, 5]\n if bboxes2.size(-1) == 5:\n bboxes2 = bboxes2[..., :4]\n if bboxes1.size(-1) == 5:\n bboxes1 = bboxes1[..., :4]\n\n if self.dtype == 'fp16':\n # change tensor type to save cpu and cuda memory and keep speed\n bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)\n bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)\n overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)\n if not overlaps.is_cuda and overlaps.dtype == torch.float16:\n # resume cpu float32\n overlaps = overlaps.float()\n return overlaps\n\n return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 275, "n_words": 94, "vocab_size": 54, "complexity": 6, "nloc": 17, "token_counts": 183, "n_ast_nodes": 279, "n_identifiers": 17, "random_cut": "def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):\n \n bboxes1 = get_box_tensor(bboxes1)\n bboxes2 = get_box_tensor(bboxes2)\n assert bboxes1.size(-1) in [0, 4, 5]\n assert bboxes2.size(-1) in [0, 4, 5]\n if bboxes2.size(-1) == 5:\n bboxes2 = bboxes2[..., :4]\n if bboxes1.size(-1) == 5:\n bboxes1 = bboxes1[..., :4]\n\n if self.dtype == 'fp16':\n # change tensor type to save cpu and cuda memory and keep speed\n bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)\n bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)\n overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)\n if not overlaps.is_cuda and overlaps.dtype == torch.float16:\n # resume cpu float32\n ", "d_id": 70858, "documentation": { "docstring": "Calculate IoU between 2D bboxes.\n\n Args:\n bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n in format, or shape (m, 5) in format.\n bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n in format, shape (m, 5) in format, or be empty. 
If ``is_aligned `` is ``True``,\n then m and n must be equal.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n over foreground), or \"giou\" (generalized intersection over\n union).\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)\n ", "n_words": 115, "vocab_size": 64, "n_whitespaces": 311, "language": "en" } }, { "id": 101629, "commit_id": "98d01760e469fd2108eed8d0b0a1ba6297c3177c", "repo": "faceswap", "path": "tools/sort/sort_methods_aligned.py", "file_name": "sort_methods_aligned.py", "fun_name": "binning", "commit_message": "Overhaul sort:\n - Standardize image data reading and writing\n - Optimize loading (just one pass required)\n - Make all sort groups binnable (to greater or lesser results)\n - Add sort by pitch\n - Deprecate multiple options\n - linting, docs + locales", "code": "def binning(self) -> List[List[str]]:\n \n return self._binning_linear_threshold(multiplier=100)\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 9, "token_counts": 23, "n_ast_nodes": 38, "n_identifiers": 6, "random_cut": "def binning(self) -> List[List[str]]:\n \n return self._binning_linear_threshold(multiplier=100)\n\n", "d_id": 21037, "documentation": { "docstring": " Create bins to split linearly from the lowest to the highest sample value\n\n Returns\n -------\n list\n List of bins of filenames\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 61, "language": "en" } }, { "id": 215636, "commit_id": "8683fed190f0ac807ab3f87e0e66808f7dbc130c", "repo": "salt", "path": "salt/transport/base.py", "file_name": "base.py", "fun_name": "connect", "commit_message": "Add NotImplimentedError to stubs", "code": "def connect(self, publish_port, connect_callback=None, disconnect_callback=None):\n \n raise NotImplementedError\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 28, "n_identifiers": 6, "random_cut": "def connect(self, publish_port, connect_callback=None, disconnect_callback=None):\n \n raise NotImpleme", "d_id": 54062, "documentation": { "docstring": "\n Create a network connection to the the PublishServer or broker.\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 196086, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/free_groups.py", "file_name": "free_groups.py", "fun_name": "contains", "commit_message": "Updated import locations", "code": "def contains(self, g):\n \n if not isinstance(g, FreeGroupElement):\n return False\n elif self != g.group:\n return False\n else:\n return True\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 79, "n_words": 18, "vocab_size": 15, "complexity": 3, "nloc": 7, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 6, "random_cut": "def contains(self, g):\n \n if not isinstance(g, FreeGroupElement):\n return False\n elif self != g.group:\n return False\n else:\n ret", "d_id": 47586, "documentation": { "docstring": "Tests if Free Group element ``g`` belong to self, ``G``.\n\n In mathematical 
terms any linear combination of generators\n of a Free Group is contained in it.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> f, x, y, z = free_group(\"x y z\")\n >>> f.contains(x**3*y**2)\n True\n\n ", "n_words": 45, "vocab_size": 40, "n_whitespaces": 108, "language": "en" } }, { "id": 66339, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/loan_management/doctype/loan_security_unpledge/loan_security_unpledge.py", "file_name": "loan_security_unpledge.py", "fun_name": "get_pledged_security_qty", "commit_message": "style: format code with black", "code": "def get_pledged_security_qty(loan):\n\n\tcurrent_pledges = {}\n\n\tunpledges = frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\t(loan),\n\t\t)\n\t)\n\n\tpledges = frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\t(loan),\n\t\t)\n\t)\n\n\tfor security, qty in pledges.items():\n\t\tcurrent_pledges.setdefault(security, qty)\n\t\tcurrent_pledges[security] -= unpledges.get(security, 0.0)\n\n\treturn current_pledges\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 16, "n_words": 34, "vocab_size": 24, "complexity": 2, "nloc": 32, "token_counts": 85, "n_ast_nodes": 131, "n_identifiers": 14, "random_cut": "def get_pledged_security_qty(loan):\n\n\tcurrent_pledges = {}\n\n\tunpledges = frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\t(loan),\n\t\t)\n\t)\n\n\tpledges = frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\t(loan),\n\t\t)\n\t)\n\n\tfor security, qt", "d_id": 14170, "documentation": { "docstring": "\n\t\tSELECT u.loan_security, sum(u.qty) as qty\n\t\tFROM `tabLoan Security Unpledge` up, `tabUnpledge` u\n\t\tWHERE up.loan = %s\n\t\tAND u.parent = up.name\n\t\tAND up.status = 'Approved'\n\t\tGROUP BY u.loan_security\n\t\n\t\tSELECT p.loan_security, sum(p.qty) as qty\n\t\tFROM `tabLoan Security Pledge` lp, `tabPledge`p\n\t\tWHERE lp.loan = %s\n\t\tAND p.parent = lp.name\n\t\tAND lp.status = 'Pledged'\n\t\tGROUP BY p.loan_security\n\t", "n_words": 53, "vocab_size": 35, "n_whitespaces": 41, "language": "en" } }, { "id": 205838, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/compiler.py", "file_name": "compiler.py", "fun_name": "get_select", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_select(self):\n \n select = []\n klass_info = None\n annotations = {}\n select_idx = 0\n for alias, (sql, params) in self.query.extra_select.items():\n annotations[alias] = select_idx\n select.append((RawSQL(sql, params), alias))\n select_idx += 1\n assert not (self.query.select and self.query.default_cols)\n if self.query.default_cols:\n cols = self.get_default_columns()\n else:\n # self.query.select is a special case. 
These columns never go to\n # any model.\n cols = self.query.select\n if cols:\n select_list = []\n for col in cols:\n select_list.append(select_idx)\n select.append((col, None))\n select_idx += 1\n klass_info = {\n \"model\": self.query.model,\n \"select_fields\": select_list,\n }\n for alias, annotation in self.query.annotation_select.items():\n annotations[alias] = select_idx\n select.append((annotation, alias))\n select_idx += 1\n\n if self.query.select_related:\n related_klass_infos = self.get_related_selections(select)\n klass_info[\"related_klass_infos\"] = related_klass_infos\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 436, "n_words": 101, "vocab_size": 65, "complexity": 11, "nloc": 49, "token_counts": 311, "n_ast_nodes": 327, "n_identifiers": 25, "random_cut": "def get_select(self):\n \n select = []\n klass_info = None\n annotations = {}\n select_idx = 0\n for alias, (sql, params) in self.query.extra_select.items():\n annotations[alias] = select_idx\n select.append((RawSQL(sql, params), alias))\n select_idx += 1\n assert not (self.query.select and self.query.default_cols)\n if self.query.default_cols:\n cols = self.get_default_columns()\n else:\n # self.query.select is a special case. These columns never go to\n # any model.\n cols = self.query.select\n if cols:\n select_list = []\n for col in cols:\n select_list.append(select_idx)\n select.append((col, None))\n select_idx += 1\n klass_info = {\n \"model\": self.query.model,\n \"select_fields\": select_list,\n }\n for alias, annotation in self.query.annotation_select.items():\n annotations[alias] = select_idx\n select.appe", "d_id": 51234, "documentation": { "docstring": "\n Return three values:\n - a list of 3-tuples of (expression, (sql, params), alias)\n - a klass_info structure,\n - a dictionary of annotations\n\n The (sql, params) is what the expression will produce, and alias is the\n \"AS alias\" for the column (possibly None).\n\n The klass_info structure contains the following information:\n - The base model of the query.\n - Which columns for that model are present in the query (by\n position of the select clause).\n - related_klass_infos: [f, klass_info] to descent into\n\n The annotations is a dictionary of {'attname': column position} values.\n ", "n_words": 90, "vocab_size": 59, "n_whitespaces": 184, "language": "en" } }, { "id": 263850, "commit_id": "5b2ab7067ba954bd7950a79ed31e5ee177ff3f43", "repo": "pyinstaller", "path": "PyInstaller/depend/imphookapi.py", "file_name": "imphookapi.py", "fun_name": "set_module_collection_mode", "commit_message": "building & hooks: implement module collection mode setting\n\nImplement a mechanism for controlling the collection mode of\nmodules and packages, with granularity ranging from top-level\npackages to individual sub-modules. 
Therefore, the hooks can\nnow specify whether the hooked package should be collected as\nbyte-compiled .pyc modules into embedded PYZ archive (the\ndefault behavior), or as source .py files collected as external\ndata files (without corresponding modules in the PYZ archive).\n\nThe latter option should let us avoid unnecessary .pyc module\ncollection when the source files are required by the code, or\nwork around the situations where having a .pyc module in\nPYZ archive causes issues due to FrozenImporter's incompatibility\nwith sys.path manipulation that some packages attempt to perform.\n\nThis feature adds a new optional global hook variable, called\n`module_collection_mode`. The value can be either a string\n(\"py\" or \"pyc\") or a dictionary of module names and setting\nstrings.\n\nIn the case of a string, the setting affects the hooked module\nor a package, and is applied recursively to all sub-packages and\nsub-modules, unless another hook overrides it.\n\nThe dictionary setting allows a hook to specify different settings\nfor the package and it subpackages, or even different settings\nfor other packages.\n\nA corresponding `set_module_collection_mode` method has been\nadded to the `hook_api` object for adjusting the collection\nmode from within the `hook()` function.\n\nThe `Analysis` object can now also be passed a dictionary via\nan optional `module_collection_mode` argument; the corresponding\nsettings are applied last, which allows advanced users to both\nsupplement and override the settings made by the hooks.", "code": "def set_module_collection_mode(self, name, mode):\n \n if name is None:\n name = self.__name__\n if mode is None:\n self._module_collection_mode.pop(name)\n else:\n self._module_collection_mode[name] = mode\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 81, "n_words": 20, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 43, "n_ast_nodes": 70, "n_identifiers": 7, "random_cut": "def set_module_collection_mode(self, name, mode):\n \n if name is None:\n name = self.__name__\n if mode is None:\n self._module_collection_mode.pop(name)\n else:\n ", "d_id": 77467, "documentation": { "docstring": "\"\n Set the package/module collection mode for the specified module\n name. 
If `name` is `None`, the hooked module/package name is used.\n Valid values for `mode` are: `'pyc'`, `'py'`, and `None`.\n ", "n_words": 30, "vocab_size": 26, "n_whitespaces": 58, "language": "en" } }, { "id": 222933, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/file_util.py", "file_name": "file_util.py", "fun_name": "write_file", "commit_message": "add python 3.10.4 for windows", "code": "def write_file (filename, contents):\n \n f = open(filename, \"w\")\n try:\n for line in contents:\n f.write(line + \"\\n\")\n finally:\n f.close()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 55, "n_words": 18, "vocab_size": 18, "complexity": 3, "nloc": 7, "token_counts": 38, "n_ast_nodes": 69, "n_identifiers": 8, "random_cut": "def write_file (filename, contents):\n \n f = open(filename, \"w\")\n try:\n for line in contents:\n f.writ", "d_id": 56817, "documentation": { "docstring": "Create a file with the specified name and write 'contents' (a\n sequence of strings without line terminators) to it.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 25, "language": "en" } }, { "id": 58650, "commit_id": "82c78fe8b65117dc5fe89365acb62e7aa902f8ba", "repo": "prefect", "path": "tests/orion/schemas/test_core.py", "file_name": "test_core.py", "fun_name": "test_flow_run_policy_is_backwards_compatible", "commit_message": "Revert breaking schema change (#6521)\n\n* Revert breaking schema change\r\n\r\n* Add new retry properties on policies; deprecate prior ones\r\n\r\n* Add tests for schema compat\r\n\r\n* Use root_validator to populate properties from deprecated", "code": "async def test_flow_run_policy_is_backwards_compatible(self):\n \n\n empty_new_policy = schemas.core.FlowRunPolicy()\n\n # should not raise an error\n self.OldFlowRunPolicy(**empty_new_policy.dict())\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "async def test_flow_run_policy_is_backwards_compatible(self):\n \n\n empty_new_policy = schemas.core.FlowRunPolicy()\n\n # should not raise an error\n self.OldFlowRu", "d_id": 11792, "documentation": { "docstring": "\n In version 2.1.1 and prior, the FlowRunPolicy schema required two properties,\n `max_retries` and `retry_delay_seconds`. These properties are deprecated.\n\n This test ensures old clients can load new FlowRunPolicySchemas. 
It can be removed\n when the corresponding properties are removed.\n ", "n_words": 37, "vocab_size": 32, "n_whitespaces": 73, "language": "en" } }, { "id": 183062, "commit_id": "91783b7c1e06a45e93fd89dbdb6aa3d1a9c2e990", "repo": "textual", "path": "tests/css/test_help_text.py", "file_name": "test_help_text.py", "fun_name": "test_help_text_examples_are_contextualized", "commit_message": "Testing for help text", "code": "def test_help_text_examples_are_contextualized():\n \n rendered_inline = render(spacing_invalid_value(\"padding\", \"inline\"))\n assert \"widget.styles.padding\" in rendered_inline\n\n rendered_css = render(spacing_invalid_value(\"padding\", \"css\"))\n assert \"padding:\" in rendered_css\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 33, "n_words": 18, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 35, "n_ast_nodes": 70, "n_identifiers": 5, "random_cut": "def test_help_text_examples_are_contextualized():\n \n rendered_inline = render(spacing_invalid_value(\"padding\", \"inline\"))\n assert \"widget.styles.padding\" in rendered_inline\n\n rendered_css = render(spacing_invalid_value(\"padding\", \"css\"))\n assert \"padding:\" in rendered_css\n\n", "d_id": 44041, "documentation": { "docstring": "Ensure that if the user is using CSS, they see CSS-specific examples\n and if they're using inline styles they see inline-specific examples.", "n_words": 22, "vocab_size": 18, "n_whitespaces": 24, "language": "en" } }, { "id": 251912, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/test_modes.py", "file_name": "test_modes.py", "fun_name": "test_reverse_proxy", "commit_message": "make it black!", "code": "def test_reverse_proxy(tctx, keep_host_header):\n \n server = Placeholder(Server)\n tctx.options.mode = \"reverse:http://localhost:8000\"\n tctx.options.connection_strategy = \"lazy\"\n tctx.options.keep_host_header = keep_host_header\n assert (\n Playbook(modes.ReverseProxy(tctx), hooks=False)\n >> DataReceived(\n tctx.client, b\"GET /foo HTTP/1.1\\r\\n\" b\"Host: example.com\\r\\n\\r\\n\"\n )\n << NextLayerHook(Placeholder(NextLayer))\n >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent))\n << OpenConnection(server)\n >> reply(None)\n << SendData(\n server,\n b\"GET /foo HTTP/1.1\\r\\n\"\n b\"Host: \"\n + (b\"example.com\" if keep_host_header else b\"localhost:8000\")\n + b\"\\r\\n\\r\\n\",\n )\n >> DataReceived(server, b\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\\r\\n\")\n << SendData(tctx.client, b\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\\r\\n\")\n )\n assert server().address == (\"localhost\", 8000)\n\n\n@pytest.mark.parametrize(\"patch\", [True, False])\n@pytest.mark.parametrize(\"connection_strategy\", [\"eager\", \"lazy\"])", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"patch\", [True, False])\n@pytest.mark.parametrize(\"connection_strategy\", [\"eager\", \"lazy\"])", "n_ast_errors": 1, "ast_levels": 18, "n_whitespaces": 245, "n_words": 80, "vocab_size": 58, "complexity": 2, "nloc": 25, "token_counts": 160, "n_ast_nodes": 319, "n_identifiers": 30, "random_cut": "def test_reverse_proxy(tctx, keep_host_header):\n \n server = Placeholder(Server)\n tctx.options.mode = \"reverse:http://localhost:8000\"\n tctx.options.connection_strategy = \"lazy\"\n tctx.options.keep_host_header = 
keep_host_header\n assert (\n Playbook(modes.ReverseProxy(tctx), hooks=False)\n >> DataReceived(\n tctx.client, b\"GET /foo HTTP/1.1\\r\\n\" b\"Host: example.com\\r\\n\\r\\n\"\n )\n << NextLayerHook(Placeholder(NextLayer))\n >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent))\n << OpenConnection(server)\n >> reply(None)\n << SendData(\n server,\n b\"GET /foo HTTP/1.1\\r\\n\"\n b\"Host: \"\n + (b\"example.com\" if keep_host_header else b\"localhost:8000\")\n + b\"\\r\\n\\r\\n\",\n )\n >> DataReceived(server, b\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\\r\\n\")\n << SendData(tctx.client, b\"HTTP/1.1 200 OK\\r\\nContent-Length: 0\\r\\n\\r\\n\")\n )\n assert server().address == (\"localhost\", 8000)\n\n\n@pytest.mark.parametrize(\"patch\", [True, False])\n@pytest.mark.parametrize(\"connection_strategy\", [\"eager\", \"lazy\"])", "d_id": 73885, "documentation": { "docstring": "Test mitmproxy in reverse proxy mode.\n\n - make sure that we connect to the right host\n - make sure that we respect keep_host_header\n - make sure that we include non-standard ports in the host header (#4280)\n ", "n_words": 36, "vocab_size": 23, "n_whitespaces": 48, "language": "en" } }, { "id": 203366, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/checks.py", "file_name": "checks.py", "fun_name": "_check_filter_horizontal", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _check_filter_horizontal(self, obj):\n \n if not isinstance(obj.filter_horizontal, (list, tuple)):\n return must_be(\n \"a list or tuple\", option=\"filter_horizontal\", obj=obj, id=\"admin.E018\"\n )\n else:\n return list(\n chain.from_iterable(\n self._check_filter_item(\n obj, field_name, \"filter_horizontal[%d]\" % index\n )\n for index, field_name in enumerate(obj.filter_horizontal)\n )\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 226, "n_words": 36, "vocab_size": 32, "complexity": 3, "nloc": 14, "token_counts": 74, "n_ast_nodes": 119, "n_identifiers": 16, "random_cut": "def _check_filter_horizontal(self, obj):\n \n if not isinstance(obj.filter_horizontal, (list, tuple)):\n return must_be(\n \"a list or tuple\", option=\"filter_horizontal\", obj=obj, id=\"admin.E018\"\n )\n else:\n return list(\n chain.from_iterable(\n self._check_filter_item(\n obj, field_name, \"filter_horizontal[%d]\" % index\n ", "d_id": 50337, "documentation": { "docstring": "Check that filter_horizontal is a sequence of field names.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 23163, "commit_id": "9f62b610dea6161627200ed85d92e19b1923279a", "repo": "PaddleOCR", "path": "ppocr/data/imaug/fce_aug.py", "file_name": "fce_aug.py", "fun_name": "poly_intersection", "commit_message": "add fcenet", "code": "def poly_intersection(poly_det, poly_gt):\n \n assert isinstance(poly_det, plg.Polygon)\n assert isinstance(poly_gt, plg.Polygon)\n\n poly_inter = poly_det & poly_gt\n if len(poly_inter) == 0:\n return 0, poly_inter\n return poly_inter.area(), poly_inter\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 24, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 51, "n_ast_nodes": 81, "n_identifiers": 9, "random_cut": "def poly_intersection(poly_det, poly_gt):\n \n assert isinstance(poly_det, 
plg.Polygon)\n assert isinstance(poly_gt, plg.Polygon)\n\n poly_inter = poly_det & poly_gt\n if len(poly_inter) == 0:\n return 0, poly_in", "d_id": 4523, "documentation": { "docstring": "Calculate the intersection area between two polygon.\n\n Args:\n poly_det (Polygon): A polygon predicted by detector.\n poly_gt (Polygon): A gt polygon.\n\n Returns:\n intersection_area (float): The intersection area between two polygons.\n ", "n_words": 29, "vocab_size": 22, "n_whitespaces": 59, "language": "en" } }, { "id": 250781, "commit_id": "8c700ec6e45fc69379eec230da1bd840854ac20e", "repo": "mitmproxy", "path": "mitmproxy/dns.py", "file_name": "dns.py", "fun_name": "size", "commit_message": "[dns] first commit", "code": "def size(self) -> int:\n \n return sum(len(x.data) for x in [*self.answers, *self.authorities, *self.additionals])\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 26, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 3, "token_counts": 37, "n_ast_nodes": 59, "n_identifiers": 10, "random_cut": "def size(self) -> int:\n \n return sum(len(x.data) for x in [*self.answers, *self.authorities, *self.additionals])\n", "d_id": 73544, "documentation": { "docstring": "Returns the cumulative data size of all resource record sections.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 126691, "commit_id": "326b5bd1acc6d3d00ab0546e4ae45da6bed501f7", "repo": "ray", "path": "dashboard/modules/snapshot/snapshot_head.py", "file_name": "snapshot_head.py", "fun_name": "get_job_submission_info", "commit_message": "Convert job_manager to be async (#27123)\n\nUpdates jobs api\r\nUpdates snapshot api\r\nUpdates state api\r\n\r\nIncreases jobs api version to 2\r\n\r\nSigned-off-by: Alan Guo aguo@anyscale.com\r\n\r\nWhy are these changes needed?\r\nfollow-up for #25902 (comment)", "code": "async def get_job_submission_info(self):\n \n\n jobs = {}\n fetched_jobs = await self._job_info_client.get_all_jobs()\n for (\n job_submission_id,\n job_info,\n ) in fetched_jobs.items():\n if job_info is not None:\n entry = {\n \"job_submission_id\": job_submission_id,\n \"status\": job_info.status,\n \"message\": job_info.message,\n \"error_type\": job_info.error_type,\n \"start_time\": job_info.start_time,\n \"end_time\": job_info.end_time,\n \"metadata\": job_info.metadata,\n \"runtime_env\": job_info.runtime_env,\n \"entrypoint\": job_info.entrypoint,\n }\n jobs[job_submission_id] = entry\n return jobs\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 340, "n_words": 49, "vocab_size": 43, "complexity": 3, "nloc": 21, "token_counts": 104, "n_ast_nodes": 173, "n_identifiers": 18, "random_cut": "async def get_job_submission_info(self):\n \n\n jobs = {}\n fetched_jobs = await self._job_info_client.get_all_jobs()\n for (\n job_submission_id,\n job_info,\n ) in fetched_jobs.items():\n if job_info is not None:\n entry = {\n \"jo", "d_id": 28234, "documentation": { "docstring": "Info for Ray job submission. 
Here a job can have 0 or many drivers.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 14, "language": "en" } }, { "id": 197330, "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", "repo": "sympy", "path": "sympy/ntheory/qs.py", "file_name": "qs.py", "fun_name": "_gen_sieve_array", "commit_message": "Remove abbreviations in documentation", "code": "def _gen_sieve_array(M, factor_base):\n \n sieve_array = [0]*(2*M + 1)\n for factor in factor_base:\n if factor.soln1 is None: #The prime does not divides a\n continue\n for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime):\n sieve_array[idx] += factor.log_p\n if factor.prime == 2:\n continue\n #if prime is 2 then sieve only with soln_1_p\n for idx in range((M + factor.soln2) % factor.prime, 2*M, factor.prime):\n sieve_array[idx] += factor.log_p\n return sieve_array\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 156, "n_words": 65, "vocab_size": 45, "complexity": 6, "nloc": 12, "token_counts": 112, "n_ast_nodes": 171, "n_identifiers": 11, "random_cut": "def _gen_sieve_array(M, factor_base):\n \n sieve_array =", "d_id": 48473, "documentation": { "docstring": "Sieve Stage of the Quadratic Sieve. For every prime in the factor_base\n that does not divide the coefficient `a` we add log_p over the sieve_array\n such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where `i`\n is an integer. When p = 2 then log_p is only added using\n ``-M <= soln1 + i*p <= M``.\n\n Parameters\n ==========\n\n M : sieve interval\n factor_base : factor_base primes\n ", "n_words": 74, "vocab_size": 52, "n_whitespaces": 104, "language": "en" } }, { "id": 10748, "commit_id": "1f2c86359246e00eae7cba081d9e952cb64c9aea", "repo": "jina", "path": "setup.py", "file_name": "setup.py", "fun_name": "rescue_docarray", "commit_message": "fix: rescue docarray in setup (#4203)", "code": "def rescue_docarray():\n \n try:\n import docarray as docarray\n\n __docarray_version__ = docarray.__version__\n\n except AttributeError:\n # Being here means docarray is not installed correctly, attempt to reinstall it\n # as recommended by pip https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program\n import subprocess\n\n subprocess.check_call(\n [sys.executable, '-m', 'pip', 'uninstall', '--yes', 'docarray']\n )\n subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'docarray'])\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 122, "n_words": 46, "vocab_size": 39, "complexity": 2, "nloc": 10, "token_counts": 59, "n_ast_nodes": 110, "n_identifiers": 9, "random_cut": "def rescue_docarray():\n \n try:\n import docarray as docarray\n\n __docarray_version__ = docarray.__version__\n\n except AttributeError:\n # Being here means docarray is not installed correctly, attempt to reinstall it\n # as recommended by pip https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program\n import subprocess\n\n subprocess.check_call(\n [sys.executable, '-m', 'pip', 'uninstall', '--yes', 'docarray']\n ", "d_id": 1906, "documentation": { "docstring": "Upgrading from 2.x to 3.x is broken (https://github.com/jina-ai/jina/issues/4194)\n This function checks if docarray is broken and if so attempts to rescue it\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 28, "language": "en" } }, { "id": 258597, "commit_id": "ff85a34c95a9d8de13805be55f1a72f1b7ee2a42", 
"repo": "scikit-learn", "path": "sklearn/kernel_approximation.py", "file_name": "kernel_approximation.py", "fun_name": "transform", "commit_message": "DOC Fix docstring for AdditiveChi2Sampler (#22138)", "code": "def transform(self, X):\n \n msg = (\n \"%(name)s is not fitted. Call fit to set the parameters before\"\n \" calling transform\"\n )\n check_is_fitted(self, msg=msg)\n\n X = self._validate_data(X, accept_sparse=\"csr\", reset=False)\n check_non_negative(X, \"X in AdditiveChi2Sampler.transform\")\n sparse = sp.issparse(X)\n\n # zeroth component\n # 1/cosh = sech\n # cosh(0) = 1.0\n\n transf = self._transform_sparse if sparse else self._transform_dense\n return transf(X)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 161, "n_words": 55, "vocab_size": 47, "complexity": 2, "nloc": 11, "token_counts": 68, "n_ast_nodes": 118, "n_identifiers": 15, "random_cut": "def transform(self, X):\n \n msg = (\n \"%(name)s is not fitted. Call f", "d_id": 75313, "documentation": { "docstring": "Apply approximate feature map to X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : {ndarray, sparse matrix}, \\\n shape = (n_samples, n_features * (2*sample_steps - 1))\n Whether the return value is an array or sparse matrix depends on\n the type of the input X.\n ", "n_words": 66, "vocab_size": 50, "n_whitespaces": 173, "language": "en" } }, { "id": 61417, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py", "file_name": "versioncontrol.py", "fun_name": "get_repository_root", "commit_message": "upd; format", "code": "def get_repository_root(cls, location):\n # type: (str) -> Optional[str]\n \n if cls.is_repository_directory(location):\n return location\n return None\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 53, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 4, "random_cut": "def get_repository_root(cls, location):\n # type: (str) -> Optional[str]\n \n if cls.is_repository_directory(location):\n return location\n return None\n", "d_id": 12561, "documentation": { "docstring": "\n Return the \"root\" (top-level) directory controlled by the vcs,\n or `None` if the directory is not in any.\n\n It is meant to be overridden to implement smarter detection\n mechanisms for specific vcs.\n\n This can do more than is_repository_directory() alone. 
For\n example, the Git override checks that Git is actually available.\n ", "n_words": 50, "vocab_size": 42, "n_whitespaces": 100, "language": "en" } }, { "id": 3695, "commit_id": "b22efc03a18c5545c12cf8a0462dea7505aec410", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-hubspot/unit_tests/test_client.py", "file_name": "test_client.py", "fun_name": "test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client", "commit_message": "Source Hubspot: fix \"quotes\" key error exception (#10055)\n\n* check if stream exists in source\r\n\r\n* check if stream exists in source, added comment\r\n\r\n* test skipping reading quotes stream\r\n\r\n* format code\r\n\r\n* airbyte-cdk version\r\n\r\n* added __init__.py to unit_tests\r\n\r\n* fix importing airbyte models\r\n\r\n* bump the version\r\n\r\n* update spec and def yamls\r\n\r\nCo-authored-by: auganbay ", "code": "def test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client(oauth_config, configured_catalog):\n \n source = SourceHubspot()\n\n all_records = list(source.read(logger, config=oauth_config, catalog=configured_catalog, state=None))\n records = [record for record in all_records if record.type == Type.RECORD]\n assert not records\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 41, "n_words": 26, "vocab_size": 22, "complexity": 3, "nloc": 5, "token_counts": 56, "n_ast_nodes": 87, "n_identifiers": 17, "random_cut": "def test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client(oauth_config, configured_catalog):\n \n source = SourceHubspot()\n\n all_records = list(source.read(logger, config=oauth_config, catalog=configured_catalog, state=None))\n records = [record for record in all_records if reco", "d_id": 517, "documentation": { "docstring": "\n If 'quotes' stream is not in the client, it should skip it.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 130300, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/utils.py", "file_name": "utils.py", "fun_name": "get_conda_env_dir", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_conda_env_dir(env_name):\n \n conda_prefix = os.environ.get(\"CONDA_PREFIX\")\n if conda_prefix is None:\n # The caller is neither in a conda env or in (base) env. This is rare\n # because by default, new terminals start in (base), but we can still\n # support this case.\n conda_exe = os.environ.get(\"CONDA_EXE\")\n if conda_exe is None:\n raise ValueError(\n \"Cannot find environment variables set by conda. \"\n \"Please verify conda is installed.\"\n )\n # Example: CONDA_EXE=$HOME/anaconda3/bin/python\n # Strip out /bin/python by going up two parent directories.\n conda_prefix = str(Path(conda_exe).parent.parent)\n\n # There are two cases:\n # 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and\n # CONDA_PREFIX=$HOME/anaconda3\n # 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and\n # CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name\n if os.environ.get(\"CONDA_DEFAULT_ENV\") == \"base\":\n # Caller's curent environment is (base).\n # Not recommended by conda, but we can still support it.\n if env_name == \"base\":\n # Desired environment is (base), located at e.g. 
$HOME/anaconda3\n env_dir = conda_prefix\n else:\n # Desired environment is user-created, e.g.\n # $HOME/anaconda3/envs/$env_name\n env_dir = os.path.join(conda_prefix, \"envs\", env_name)\n else:\n # Now `conda_prefix` should be something like\n # $HOME/anaconda3/envs/$current_env_name\n # We want to replace the last component with the desired env name.\n conda_envs_dir = os.path.split(conda_prefix)[0]\n env_dir = os.path.join(conda_envs_dir, env_name)\n if not os.path.isdir(env_dir):\n raise ValueError(\n \"conda env \"\n + env_name\n + \" not found in conda envs directory. Run `conda env list` to \"\n + \"verify the name is correct.\"\n )\n return env_dir\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 553, "n_words": 226, "vocab_size": 130, "complexity": 6, "nloc": 26, "token_counts": 142, "n_ast_nodes": 268, "n_identifiers": 17, "random_cut": "def get_conda_env_dir(env_name):\n \n conda_prefix = os.environ.get(\"CONDA_PREFIX\")\n if conda_prefix is None:\n # The caller is neither in a conda env or in (base) env. This is rare\n # because by default, new terminals start in (base), but we can still\n # support this case.\n conda_exe = os.environ.get(\"CONDA_EXE\")\n if conda_exe is None:\n raise ValueError(\n \"Cannot find environment variables set by conda. \"\n \"Please verify conda is installed.\"\n )\n # Example: CONDA_EXE=$HOME/anaconda3/bin/python\n # Strip out /bin/python by going up two parent directories.\n conda_prefix = str(Path(conda_exe).parent.parent)\n\n # There are two cases:\n # 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and\n # CONDA_PREFIX=$HOME/anaconda3\n # 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and\n # CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name\n if os.environ.get(\"CONDA_DEFAULT_ENV\") == \"base\":\n # Caller's curent environment is (base).\n # Not recommended by conda, but we can still support it.\n if env_name == \"base\":\n # Desired environment is (base), located at e.g. 
$HOME/anaconda3\n env_dir = conda_prefix\n else:\n # Des", "d_id": 29221, "documentation": { "docstring": "Find and validate the conda directory for a given conda environment.\n\n For example, given the environment name `tf1`, this function checks\n the existence of the corresponding conda directory, e.g.\n `/Users/scaly/anaconda3/envs/tf1`, and returns it.\n ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 45, "language": "en" } }, { "id": 311550, "commit_id": "58b8c30221a6f6e5acbbe98b7e3298b03fb741f5", "repo": "core", "path": "tests/components/homekit_controller/test_switch.py", "file_name": "test_switch.py", "fun_name": "test_switch_change_outlet_state", "commit_message": "Improve homekit_controller tests (#65266)", "code": "async def test_switch_change_outlet_state(hass, utcnow):\n \n helper = await setup_test_component(hass, create_switch_service)\n\n await hass.services.async_call(\n \"switch\", \"turn_on\", {\"entity_id\": \"switch.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.OUTLET,\n {\n CharacteristicsTypes.ON: 1,\n },\n )\n\n await hass.services.async_call(\n \"switch\", \"turn_off\", {\"entity_id\": \"switch.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.OUTLET,\n {\n CharacteristicsTypes.ON: 0,\n },\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 147, "n_words": 39, "vocab_size": 24, "complexity": 1, "nloc": 20, "token_counts": 95, "n_ast_nodes": 158, "n_identifiers": 14, "random_cut": "async def test_switch_change_outlet_state(hass, utcnow):\n \n helper = await setup_test_component(hass, create_switch_service)\n\n await hass.services.async_call(\n \"switch\", \"turn_on\", {\"entity_id\": \"switch.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.OUTLET,\n {\n CharacteristicsTypes.ON: 1,\n },\n )\n\n await hass.services.async_call(\n \"switch\", \"turn_off\", {\"entity_id\": \"switch.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.OUTLET,\n {\n ", "d_id": 110215, "documentation": { "docstring": "Test that we can turn a HomeKit outlet on and off again.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 107428, "commit_id": "c0a78bdff86d7b02b8a23d373b64c72297f935d5", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "_update_label_position", "commit_message": "FIX: use window_extent instead", "code": "def _update_label_position(self, renderer):\n \n if not self._autolabelpos:\n return\n\n # get bounding boxes for this axis and any siblings\n # that have been set by `fig.align_xlabels()`\n bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)\n\n x, y = self.label.get_position()\n if self.label_position == 'bottom':\n try:\n spine = self.axes.spines['bottom']\n spinebbox = spine.get_window_extent()\n except KeyError:\n # use axes if spine doesn't exist\n spinebbox = self.axes.bbox\n bbox = mtransforms.Bbox.union(bboxes + [spinebbox])\n bottom = bbox.y0\n\n self.label.set_position(\n (x, bottom - self.labelpad * self.figure.dpi / 72)\n )\n else:\n try:\n spine = self.axes.spines['top']\n spinebbox = spine.get_window_extent()\n except KeyError:\n # use axes if spine doesn't exist\n spinebbox = self.axes.bbox\n bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])\n top = bbox.y1\n\n self.label.set_position(\n (x, top + 
self.labelpad * self.figure.dpi / 72)\n )\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 460, "n_words": 111, "vocab_size": 65, "complexity": 5, "nloc": 27, "token_counts": 191, "n_ast_nodes": 315, "n_identifiers": 30, "random_cut": "def _update_label_position(self, renderer):\n \n if not self._autolabelpos:\n return\n\n # get bounding boxes for this axis and any siblings\n # that have been set by `fig.align_xlabels()`\n bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)\n\n x, y = self.label.get_position()\n if self.label_position == 'bottom':\n try:\n spine = self.axes.spines['bottom']\n spinebbox = spine.get_window_extent()\n except KeyError:\n # use axes if spine doesn't", "d_id": 22743, "documentation": { "docstring": "\n Update the label position based on the bounding box enclosing\n all the ticklabels and axis spine\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 38, "language": "en" } }, { "id": 23225, "commit_id": "9f62b610dea6161627200ed85d92e19b1923279a", "repo": "PaddleOCR", "path": "ppocr/postprocess/fce_postprocess.py", "file_name": "fce_postprocess.py", "fun_name": "poly_union", "commit_message": "add fcenet", "code": "def poly_union(poly_det, poly_gt):\n \n assert isinstance(poly_det, plg.Polygon)\n assert isinstance(poly_gt, plg.Polygon)\n\n area_det = poly_det.area()\n area_gt = poly_gt.area()\n area_inters, _ = poly_intersection(poly_det, poly_gt)\n return area_det + area_gt - area_inters\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 26, "vocab_size": 20, "complexity": 1, "nloc": 7, "token_counts": 56, "n_ast_nodes": 89, "n_identifiers": 12, "random_cut": "def poly_union(poly_det, poly_gt):\n \n assert isinstance(poly_det, plg.Polygon)\n assert isinstance(poly_gt, plg.Polygon)\n\n area_det = poly_det.area()\n area_gt = poly_gt.area()\n area_inters, _ = poly_intersection(poly_det, poly_gt)\n return area_det + area_gt - area_inters\n\n", "d_id": 4547, "documentation": { "docstring": "Calculate the union area between two polygon.\n\n Args:\n poly_det (Polygon): A polygon predicted by detector.\n poly_gt (Polygon): A gt polygon.\n\n Returns:\n union_area (float): The union area between two polygons.\n ", "n_words": 29, "vocab_size": 22, "n_whitespaces": 59, "language": "en" } }, { "id": 250506, "commit_id": "3aeca2588b79111a48a6083c88efc4d68a2cea19", "repo": "synapse", "path": "tests/config/test_tls.py", "file_name": "test_tls.py", "fun_name": "test_whitelist_idna_result", "commit_message": "Add missing type hints to tests.config. 
(#14681)", "code": "def test_whitelist_idna_result(self) -> None:\n \n config: JsonDict = {\n \"federation_certificate_verification_whitelist\": [\n \"example.com\",\n \"*.xn--eckwd4c7c.xn--zckzah\",\n ]\n }\n t = TestConfig()\n t.tls.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n cf = FederationPolicyForHTTPS(cast(HomeServerConfig, t))\n\n # Not in the whitelist\n opts = cf.get_options(b\"notexample.com\")\n assert isinstance(opts, SSLClientConnectionCreator)\n self.assertTrue(opts._verifier._verify_certs)\n\n # Caught by the wildcard\n opts = cf.get_options(idna.encode(\"テスト.ドメイン.テスト\"))\n assert isinstance(opts, SSLClientConnectionCreator)\n self.assertFalse(opts._verifier._verify_certs)\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 198, "n_words": 48, "vocab_size": 38, "complexity": 1, "nloc": 19, "token_counts": 110, "n_ast_nodes": 187, "n_identifiers": 24, "random_cut": "def test_whitelist_idna_result(self) -> None:\n \n config: JsonDict = {\n \"federation_certificate_verification_whitelist\": [\n \"example.com\",\n \"*.xn--eckwd4c7c.xn--zckzah\",\n ]\n }\n t = TestConfig()\n t.tls.read_", "d_id": 73483, "documentation": { "docstring": "\n The federation certificate whitelist will match on IDNA encoded names.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 244348, "commit_id": "9a3bf7660e6ced54672741095f96df07919f9ba7", "repo": "mmdetection", "path": "mmdet/models/dense_heads/dense_test_mixins.py", "file_name": "dense_test_mixins.py", "fun_name": "simple_test_rpn", "commit_message": "[Refactor] Refactor dense head outputs to InstanceResults.", "code": "def simple_test_rpn(self, x, img_metas):\n \n rpn_outs = self(x)\n proposal_list = self.get_results(*rpn_outs, img_metas=img_metas)\n return proposal_list\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 13, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "def simple_test_rpn(self, x, img_metas):\n \n rpn_outs = self(x)\n proposal_list = self.get_results(*rpn_outs, img_metas=img_metas)\n r", "d_id": 70340, "documentation": { "docstring": "Test without augmentation, only for ``RPNHead`` and its variants,\n e.g., ``GARPNHead``, etc.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n img_metas (list[dict]): Meta info of each image.\n\n Returns:\n list[Tensor]: Proposals of each image, each item has shape (n, 5),\n where 5 represent (tl_x, tl_y, br_x, br_y, score).\n ", "n_words": 51, "vocab_size": 47, "n_whitespaces": 142, "language": "en" } }, { "id": 107486, "commit_id": "f156db08eee54d285ab0fb4e031e48d078ba6aa3", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "_get_tick_boxes_siblings", "commit_message": "DOC: More cleanup axes -> Axes", "code": "def _get_tick_boxes_siblings(self, renderer):\n \n # Get the Grouper keeping track of x or y label groups for this figure.\n axis_names = [\n name for name, axis in self.axes._get_axis_map().items()\n if name in self.figure._align_label_groups and axis is self]\n if len(axis_names) != 1:\n return [], []\n axis_name, = axis_names\n grouper = self.figure._align_label_groups[axis_name]\n bboxes = []\n bboxes2 = []\n # If we want to align labels from other Axes:\n 
for ax in grouper.get_siblings(self.axes):\n axis = getattr(ax, f\"{axis_name}axis\")\n ticks_to_draw = axis._update_ticks()\n tlb, tlb2 = axis._get_ticklabel_bboxes(ticks_to_draw, renderer)\n bboxes.extend(tlb)\n bboxes2.extend(tlb2)\n return bboxes, bboxes2\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 250, "n_words": 85, "vocab_size": 64, "complexity": 6, "nloc": 17, "token_counts": 133, "n_ast_nodes": 219, "n_identifiers": 25, "random_cut": "def _get_tick_boxes_siblings(self, renderer):\n \n # Get the Grouper keeping track of x or y label groups for this figure.\n axis_names = [\n name for name, axis in self.axes._get_axis_map().items()\n if name in self.figure._align_label_groups and axis is self]\n if len(axis_names) != 1:\n return", "d_id": 22775, "documentation": { "docstring": "\n Get the bounding boxes for this `.axis` and its siblings\n as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`.\n\n By default it just gets bboxes for self.\n ", "n_words": 24, "vocab_size": 23, "n_whitespaces": 54, "language": "en" } }, { "id": 276832, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/generic_utils.py", "file_name": "generic_utils.py", "fun_name": "default", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def default(method):\n \n method._is_default = True # pylint: disable=protected-access\n return method\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 3, "random_cut": "def default(method):\n \n method._is_de", "d_id": 81746, "documentation": { "docstring": "Decorates a method to detect overrides in subclasses.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 118528, "commit_id": "33855278eaf8599b2bec1ddefa5eebb592e55e25", "repo": "streamlit", "path": "lib/tests/streamlit/camera_input_test.py", "file_name": "camera_input_test.py", "fun_name": "test_help_tooltip", "commit_message": "Feature/camera image input (#4038)\n\n* Camera_input widget\r\nCo-authored-by: willhuang1997 \r\nCo-authored-by: Henrikh Kantuni \r\nCo-authored-by: William Huang \r\nCo-authored-by: Vincent Donato ", "code": "def test_help_tooltip(self):\n \n st.camera_input(\"the label\", help=\"help_label\")\n\n c = self.get_delta_from_queue().new_element.camera_input\n self.assertEqual(c.help, \"help_label\")\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 38, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 67, "n_identifiers": 9, "random_cut": "def test_help_tooltip(self):\n \n st.camera_input(\"the label\", help=\"help_label\")\n\n c = self.get_delta_from_queue().new_element.camera_input\n self.assert", "d_id": 26270, "documentation": { "docstring": "Test that it can be called using a string for type parameter.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 308917, "commit_id": "4203e1b0640e16fbda55672c5be089431face880", "repo": "core", "path": "tests/components/nest/test_camera_sdm.py", "file_name": "test_camera_sdm.py", "fun_name": "test_multiple_event_images", 
"commit_message": "Delete nest event image fetching and use same APIs as media player (#62789)", "code": "async def test_multiple_event_images(hass, auth):\n \n subscriber = await async_setup_camera(hass, DEVICE_TRAITS, auth=auth)\n assert len(hass.states.async_all()) == 1\n assert hass.states.get(\"camera.my_camera\")\n\n event_timestamp = utcnow()\n await subscriber.async_receive_event(\n make_motion_event(event_session_id=\"event-session-1\", timestamp=event_timestamp)\n )\n await hass.async_block_till_done()\n\n auth.responses = [\n # Fake response from API that returns url image\n aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE),\n # Fake response for the image content fetch\n aiohttp.web.Response(body=IMAGE_BYTES_FROM_EVENT),\n # Image is refetched after being cleared by expiration alarm\n aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE),\n aiohttp.web.Response(body=b\"updated image bytes\"),\n ]\n\n image = await async_get_image(hass)\n assert image.content == IMAGE_BYTES_FROM_EVENT\n\n next_event_timestamp = event_timestamp + datetime.timedelta(seconds=25)\n await subscriber.async_receive_event(\n make_motion_event(\n event_id=\"updated-event-id\",\n event_session_id=\"event-session-2\",\n timestamp=next_event_timestamp,\n )\n )\n await hass.async_block_till_done()\n\n image = await async_get_image(hass)\n assert image.content == b\"updated image bytes\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 253, "n_words": 96, "vocab_size": 63, "complexity": 1, "nloc": 28, "token_counts": 183, "n_ast_nodes": 301, "n_identifiers": 33, "random_cut": "async def test_multiple_event_images(hass, auth):\n \n subscriber = await async_setup_camera(hass, DEVICE_TRAITS, auth=auth)\n assert len(hass.states.async_all()) == 1\n assert hass.states.get(\"camera.my_camera\")\n\n event_timestamp = utcnow()\n await subscriber.async_receive_event(\n make_motion_event(event_session_id=\"event-session-1\", timestamp=event_timestamp)\n )\n await hass.async_block_till_done()\n\n auth.responses = [\n # Fake response from API that returns url image\n aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE),\n # Fake response for the image content fetch\n aiohttp.web.Response(body=IMAGE_BYTES_FROM_EVENT),\n # Image is refetched after being cleared by expiration alarm\n aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE),\n aiohttp.web.Response(body=b\"updated image bytes\"),\n ]\n\n image = await async_get_image(hass)\n assert image.content == IMAGE_BYTES_FROM_EVENT\n\n next_event_timestamp = event_timestamp + datetime.timedelta(seconds=25)\n await subscriber.async_receive_ev", "d_id": 107643, "documentation": { "docstring": "Test fallback for an event event image that has been cleaned up on expiration.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 249161, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_room.py", "file_name": "test_room.py", "fun_name": "test_delete_same_room_twice", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_delete_same_room_twice(self) -> None:\n \n\n body = {\"new_room_user_id\": self.admin_user}\n\n # first call to delete room\n # and do not wait for finish the task\n first_channel = self.make_request(\n \"DELETE\",\n self.url.encode(\"ascii\"),\n content=body,\n access_token=self.admin_user_tok,\n 
await_result=False,\n )\n\n # second call to delete room\n second_channel = self.make_request(\n \"DELETE\",\n self.url.encode(\"ascii\"),\n content=body,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(\n HTTPStatus.BAD_REQUEST, second_channel.code, msg=second_channel.json_body\n )\n self.assertEqual(Codes.UNKNOWN, second_channel.json_body[\"errcode\"])\n self.assertEqual(\n f\"History purge already in progress for {self.room_id}\",\n second_channel.json_body[\"error\"],\n )\n\n # get result of first call\n first_channel.await_result()\n self.assertEqual(200, first_channel.code, msg=first_channel.json_body)\n self.assertIn(\"delete_id\", first_channel.json_body)\n\n # check status after finish the task\n self._test_result(\n first_channel.json_body[\"delete_id\"],\n self.other_user,\n expect_new_room=True,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 399, "n_words": 87, "vocab_size": 61, "complexity": 1, "nloc": 32, "token_counts": 176, "n_ast_nodes": 290, "n_identifiers": 26, "random_cut": "def test_delete_same_room_twice(self) -> None:\n \n\n body = {\"new_room_user_id\": self.admin_user}\n\n # first call to delete room\n # and do not wait for finish the task\n first_channel = self.make_request(\n \"DELETE\",\n self.url.encode(\"ascii\"),\n content=body,\n access_token=self.admin_user_tok,\n await_result=False,\n )\n\n # second call to delete room\n second_channel = self.make_request(\n \"DELETE\",\n self.url.encode(\"ascii\"),\n content=body,\n access_token=s", "d_id": 72668, "documentation": { "docstring": "Test that the call for delete a room at second time gives an exception.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 311654, "commit_id": "fab9c4aa20b4c2549691d0aa5066798a0259e803", "repo": "core", "path": "homeassistant/components/august/__init__.py", "file_name": "__init__.py", "fun_name": "_async_refresh_device_detail_by_ids", "commit_message": "Improve reliability of august setup with recent api changes (#65314)", "code": "async def _async_refresh_device_detail_by_ids(self, device_ids_list):\n \n for device_id in device_ids_list:\n try:\n await self._async_refresh_device_detail_by_id(device_id)\n except asyncio.TimeoutError:\n _LOGGER.warning(\n \"Timed out calling august api during refresh of device: %s\",\n device_id,\n )\n except (ClientResponseError, CannotConnect) as err:\n _LOGGER.warning(\n \"Error from august api during refresh of device: %s\",\n device_id,\n exc_info=err,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 261, "n_words": 44, "vocab_size": 33, "complexity": 4, "nloc": 15, "token_counts": 58, "n_ast_nodes": 96, "n_identifiers": 13, "random_cut": "async def _async_refresh_device_detail_by_ids(self, device_ids_list):\n \n for device_id in device_ids_list:\n try:\n await self._async_refresh_device_detail_by_id(device_id)\n except asyncio.TimeoutError:\n _LOGGER.warning(\n \"Timed out calling august api during refresh of device: %s\",\n device_id,\n )\n except (ClientResponseError, CannotConnect) as err:\n _LOGGER.warning(\n \"Error from august api during refres", "d_id": 110313, "documentation": { "docstring": "Refresh each device in sequence.\n\n This used to be a gather but it was less reliable with august's\n recent api changes.\n\n The august api has been timing out for some devices so\n we 
want the ones that it isn't timing out for to keep working.\n ", "n_words": 45, "vocab_size": 39, "n_whitespaces": 80, "language": "en" } }, { "id": 37631, "commit_id": "1ac698744c4dbdf1495d303246d08ffacdf4f5b8", "repo": "transformers", "path": "src/transformers/models/yolos/feature_extraction_yolos.py", "file_name": "feature_extraction_yolos.py", "fun_name": "masks_to_boxes", "commit_message": "Add YOLOS (#16848)\n\n* First draft\r\n\r\n* Add YolosForObjectDetection\r\n\r\n* Make forward pass work\r\n\r\n* Add mid position embeddings\r\n\r\n* Add interpolation of position encodings\r\n\r\n* Add expected values\r\n\r\n* Add YOLOS to tests\r\n\r\n* Add integration test\r\n\r\n* Support tiny model as well\r\n\r\n* Support all models in conversion script\r\n\r\n* Remove mid_pe_size attribute\r\n\r\n* Make more tests pass\r\n\r\n* Add model to README and fix config\r\n\r\n* Add copied from statements\r\n\r\n* Rename base_model_prefix to vit\r\n\r\n* Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP\r\n\r\n* Apply suggestions from code review\r\n\r\n* Apply more suggestions from code review\r\n\r\n* Convert remaining checkpoints\r\n\r\n* Improve docstrings\r\n\r\n* Add YolosFeatureExtractor\r\n\r\n* Add feature extractor to docs\r\n\r\n* Add corresponding tests\r\n\r\n* Fix style\r\n\r\n* Fix docs\r\n\r\n* Apply suggestion from code review\r\n\r\n* Fix bad rebase\r\n\r\n* Fix some more bad rebase\r\n\r\n* Fix missing character\r\n\r\n* Improve docs and variable names\r\n\r\nCo-authored-by: Niels Rogge ", "code": "def masks_to_boxes(masks):\n \n if masks.size == 0:\n return np.zeros((0, 4))\n\n h, w = masks.shape[-2:]\n\n y = np.arange(0, h, dtype=np.float32)\n x = np.arange(0, w, dtype=np.float32)\n # see https://github.com/pytorch/pytorch/issues/50276\n y, x = np.meshgrid(y, x, indexing=\"ij\")\n\n x_mask = masks * np.expand_dims(x, axis=0)\n x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)\n x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))\n x_min = x.filled(fill_value=1e8)\n x_min = x_min.reshape(x_min.shape[0], -1).min(-1)\n\n y_mask = masks * np.expand_dims(y, axis=0)\n y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)\n y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))\n y_min = y.filled(fill_value=1e8)\n y_min = y_min.reshape(y_min.shape[0], -1).min(-1)\n\n return np.stack([x_min, y_min, x_max, y_max], 1)\n\n\n# Copied from transformers.models.detr.feature_extraction_detr.rgb_to_id", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 146, "n_words": 86, "vocab_size": 56, "complexity": 2, "nloc": 18, "token_counts": 289, "n_ast_nodes": 442, "n_identifiers": 33, "random_cut": "def masks_to_boxes(masks):\n \n if masks.size == 0:\n return np.zeros((0, 4))\n\n h, w = masks.shape[-2:]\n\n y = np.arange(0, h, dtype=np.float32)\n x = np.arange(0, w, dtype=np.float32)\n # see https://github.com/pytorch/pytorch/issues/50276\n y, x = np.meshgrid(y, x, indexing=\"ij\")\n\n x_mask = masks * np.expand_dims(x, axis=0)\n x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)\n x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))\n x_min = x.filled(fill_value=1e8)\n x_min = x_min.reshape(x_min.shape[0], -1).min(-1)\n\n y_mask = masks * np.expand_dims(y, axis=0)\n y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)\n y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))\n y_min = y.filled(fill_value=1e8)\n y_min = y_min.reshape(y_min.shape[0], -1).min(-1)", "d_id": 
6841, "documentation": { "docstring": "\n Compute the bounding boxes around the provided panoptic segmentation masks.\n\n The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.\n\n Returns a [N, 4] tensor, with the boxes in corner (xyxy) format.\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 57, "language": "en" } }, { "id": 121009, "commit_id": "f6476f7a03f8390627c1a8e2a2ec8702d8a320e5", "repo": "jax", "path": "jax/_src/numpy/polynomial.py", "file_name": "polynomial.py", "fun_name": "_roots_with_zeros", "commit_message": "jnp.roots: better support for computation under JIT", "code": "def _roots_with_zeros(p, num_leading_zeros):\n # Avoid lapack errors when p is all zero\n p = _where(len(p) == num_leading_zeros, 1.0, p)\n # Roll any leading zeros to the end & compute the roots\n roots = _roots_no_zeros(roll(p, -num_leading_zeros))\n # Sort zero roots to the end.\n roots = lax.sort_key_val(roots == 0, roots)[1]\n # Set roots associated with num_leading_zeros to NaN\n return _where(arange(roots.size) < roots.size - num_leading_zeros, roots, complex(np.nan, np.nan))\n\n\n@_wraps(np.roots, lax_description=,\nextra_params=)", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_wraps(np.roots, lax_description=\"\"\"\\\nUnlike the numpy version of this function, the JAX version returns the roots in\na complex array regardless of the values of the roots. Additionally, the jax\nversion of this function adds the ``strip_zeros`` function which must be set to\nFalse for the function to be compatible with JIT and other JAX transformations.\nWith ``strip_zeros=False``, if your coefficients have leading zeros, the\nroots will be padded with NaN values:\n\n>>> coeffs = jnp.array([0, 1, 2])\n\n# The default behavior matches numpy and strips leading zeros:\n>>> jnp.roots(coeffs)\nDeviceArray([-2.+0.j], dtype=complex64)\n\n# With strip_zeros=False, extra roots are set to NaN:\n>>> jnp.roots(coeffs, strip_zeros=False)\nDeviceArray([-2. +0.j, nan+nanj], dtype=complex64)\n\"\"\",\nextra_params=\"\"\"\nstrip_zeros : bool, default=True\n If set to True, then leading zeros in the coefficients will be stripped, similar\n to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and\n undefined roots will be represented by NaN values in the function output.\n ``strip_zeros`` must be set to ``False`` for the function to be compatible with\n :func:`jax.jit` and other JAX transformations.\n\"\"\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 73, "n_words": 68, "vocab_size": 51, "complexity": 1, "nloc": 5, "token_counts": 80, "n_ast_nodes": 147, "n_identifiers": 18, "random_cut": "def _roots_with_zeros(p, num_leading_zeros):\n # Avoid lapack errors when p is all zero\n p = _where(len(p) == num_leading_zeros, 1.0, p)\n # Roll any leading zeros to the end & compute the roots\n roots = _roots_no_zeros(roll(p, -num_leading_zeros))\n # Sort zero roots to the end.\n roots = lax.sort_key_val(roots == 0, roots)[1]\n # Set roots associated with num_leading_zeros to NaN\n", "d_id": 27011, "documentation": { "docstring": "\\\nUnlike the numpy version of this function, the JAX version returns the roots in\na complex array regardless of the values of the roots. 
Additionally, the jax\nversion of this function adds the ``strip_zeros`` function which must be set to\nFalse for the function to be compatible with JIT and other JAX transformations.\nWith ``strip_zeros=False``, if your coefficients have leading zeros, the\nroots will be padded with NaN values:\n\n>>> coeffs = jnp.array([0, 1, 2])\n\n# The default behavior matches numpy and strips leading zeros:\n>>> jnp.roots(coeffs)\nDeviceArray([-2.+0.j], dtype=complex64)\n\n# With strip_zeros=False, extra roots are set to NaN:\n>>> jnp.roots(coeffs, strip_zeros=False)\nDeviceArray([-2. +0.j, nan+nanj], dtype=complex64)\n\nstrip_zeros : bool, default=True\n If set to True, then leading zeros in the coefficients will be stripped, similar\n to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and\n undefined roots will be represented by NaN values in the function output.\n ``strip_zeros`` must be set to ``False`` for the function to be compatible with\n :func:`jax.jit` and other JAX transformations.\n", "n_words": 167, "vocab_size": 92, "n_whitespaces": 167, "language": "en" } }, { "id": 12072, "commit_id": "8dc2999a588c46deca60b3f0d5c1b6278a6e165c", "repo": "jina", "path": "jina/orchestrate/flow/base.py", "file_name": "base.py", "fun_name": "port_monitoring", "commit_message": "feat: expose prometheus metrics (#4526)", "code": "def port_monitoring(self) -> int:\n \n if GATEWAY_NAME in self._deployment_nodes:\n return self[GATEWAY_NAME].args.port_monitoring\n else:\n return self._common_kwargs.get(\n 'port_monitoring', __default_port_monitoring__\n )\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 85, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 10, "token_counts": 37, "n_ast_nodes": 62, "n_identifiers": 9, "random_cut": "def port_monitoring(self) -> int:\n \n if GATEWAY_NAME in self._deployment_nodes:\n return self[GATEWAY_NAME].args.port_monitoring\n else:\n return self._common_kwargs.get(\n 'port_monitoring', __default_port_monitoring__\n )\n", "d_id": 2178, "documentation": { "docstring": "Return if the monitoring is enabled\n .. # noqa: DAR201\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 24, "language": "en" } }, { "id": 206720, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/module_loading.py", "file_name": "module_loading.py", "fun_name": "autodiscover_modules", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def autodiscover_modules(*args, **kwargs):\n \n from django.apps import apps\n\n register_to = kwargs.get(\"register_to\")\n for app_config in apps.get_app_configs():\n for module_to_search in args:\n # Attempt to import the app's module.\n try:\n if register_to:\n before_import_registry = copy.copy(register_to._registry)\n\n import_module(\"%s.%s\" % (app_config.name, module_to_search))\n except Exception:\n # Reset the registry to the state before the last import\n # as this import will have to reoccur on the next request and\n # this could raise NotRegistered and AlreadyRegistered\n # exceptions (see #8245).\n if register_to:\n register_to._registry = before_import_registry\n\n # Decide whether to bubble up this error. 
If the app just\n # doesn't have the module in question, we can ignore the error\n # attempting to import it, otherwise we want it to bubble up.\n if module_has_submodule(app_config.module, module_to_search):\n raise\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 391, "n_words": 117, "vocab_size": 78, "complexity": 7, "nloc": 14, "token_counts": 87, "n_ast_nodes": 152, "n_identifiers": 18, "random_cut": "def autodiscover_modules(*args, **kwargs):\n \n from django.apps import apps\n\n register_to = kwargs.get(\"register_to\")\n for app_config in apps.get_app_configs():\n for module_to_search in args:\n # Attempt to import the app's module.\n try:\n if register_to:\n before_import_registry = copy.copy(register_to._registry)\n\n import_module(\"%s.%s\" % (app_config.name, module_to_search))\n except Exception:\n # Reset the registry to the state before the last import\n # as this import will have to reoccur on the next request and\n # this could raise NotRegistered and AlreadyRegistered\n # exceptions (see #8245).\n if register_to:\n register_to._registry = before_import_registry\n\n # Decide whether to bubble up this error. If the app just\n # doesn't have the module in", "d_id": 51655, "documentation": { "docstring": "\n Auto-discover INSTALLED_APPS modules and fail silently when\n not present. This forces an import on them to register any admin bits they\n may want.\n\n You may provide a register_to keyword parameter as a way to access a\n registry. This register_to object must have a _registry instance variable\n to access it.\n ", "n_words": 49, "vocab_size": 40, "n_whitespaces": 71, "language": "en" } }, { "id": 220829, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/tasks.py", "file_name": "tasks.py", "fun_name": "ensure_future", "commit_message": "add python 3.10.4 for windows", "code": "def ensure_future(coro_or_future, *, loop=None):\n \n return _ensure_future(coro_or_future, loop=loop)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 4, "random_cut": "def ensure_future(coro_or_future, *, loop=None):\n \n return _ensure_future(coro_or_future, loop=loop)\n\n", "d_id": 56137, "documentation": { "docstring": "Wrap a coroutine or an awaitable in a future.\n\n If the argument is a Future, it is returned directly.\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 25, "language": "en" } }, { "id": 242362, "commit_id": "6be87277f71948bc7e4b945c46660cac3e5ce919", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "getpalette", "commit_message": "Allow rawmode None to return the palette in the current mode", "code": "def getpalette(self, rawmode=\"RGB\"):\n \n\n self.load()\n try:\n mode = self.im.getpalettemode()\n except ValueError:\n return None # no palette\n if rawmode is None:\n rawmode = mode\n return list(self.im.getpalette(mode, rawmode))\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 101, "n_words": 25, "vocab_size": 21, "complexity": 3, "nloc": 9, "token_counts": 53, "n_ast_nodes": 91, "n_identifiers": 9, "random_cut": "def getpalette(self, rawmode=\"RGB\"):\n \n\n 
self.load()\n try:\n mode = self.im.getpalettemode()\n except ValueError:\n return None # no palette\n if rawmode is None:\n rawmode = mode\n ", "d_id": 69846, "documentation": { "docstring": "\n Returns the image palette as a list.\n\n :param rawmode: The mode in which to return the palette. ``None`` will\n return the palette in its current mode.\n :returns: A list of color values [r, g, b, ...], or None if the\n image has no palette.\n ", "n_words": 44, "vocab_size": 36, "n_whitespaces": 93, "language": "en" } }, { "id": 13404, "commit_id": "ad96553b064b9c17d626f6fcb78e4a45987be2c3", "repo": "jina", "path": "jina/types/request/data.py", "file_name": "data.py", "fun_name": "last_executor", "commit_message": "feat: pass `docs_map` to Executor (#5366)", "code": "def last_executor(self):\n \n if len(self.proto_wo_data.routes) > 0:\n return self.proto_wo_data.routes[-1].executor\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 33, "n_words": 8, "vocab_size": 8, "complexity": 2, "nloc": 3, "token_counts": 30, "n_ast_nodes": 50, "n_identifiers": 6, "random_cut": "def last_executor(self):\n \n if len(self.proto_wo", "d_id": 2630, "documentation": { "docstring": "\n Returns the name of the last Executor that has processed this Request\n\n :return: the name of the last Executor that processed this Request\n ", "n_words": 23, "vocab_size": 12, "n_whitespaces": 45, "language": "en" } }, { "id": 276584, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/tests/temporal_sample_weights_correctness_test.py", "file_name": "temporal_sample_weights_correctness_test.py", "fun_name": "custom_generator_multi_io_temporal", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def custom_generator_multi_io_temporal(self, sample_weights=None):\n \n batch_size = 3\n num_samples = 3\n iteration = 0\n while True:\n batch_index = iteration * batch_size % num_samples\n iteration += 1\n start = batch_index\n end = start + batch_size\n x = [self.x[start:end], self.x[start:end]]\n y = [self.y1[start:end], self.y2[start:end]]\n if sample_weights:\n sw = tf.nest.map_structure(\n lambda w: w[start:end], sample_weights\n )\n else:\n sw = None\n yield x, y, sw\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 256, "n_words": 58, "vocab_size": 39, "complexity": 3, "nloc": 18, "token_counts": 116, "n_ast_nodes": 180, "n_identifiers": 18, "random_cut": "def custom_generator_multi_io_temporal(self, sample_weights=None):\n \n batch_size = 3\n num_samples = 3\n iteration = 0\n while True:\n batch_index = iteration * batch_size % num_samples\n iteration += 1\n start = batch_index\n end = start + batch_size\n x = [self.x[start:end], self.x[start:end]]\n y = [self.y1[start:end], self.y2[start:end]]\n if sample_weights:\n sw = tf.nest.map_structure(\n lambda w: w[start:end], sample_weights\n )\n else:\n ", "d_id": 81689, "documentation": { "docstring": "Generator for getting data for temporal multi io model.\n\n Args:\n sample_weights: List of sample_weights.\n\n Yields:\n Tuple of inputs, label, sample weights data.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 61, "language": "en" } }, { "id": 267484, "commit_id": "5b3557f8ba5c176eb7d2de21b3a4da3dcab3bada", "repo": "ansible", "path": 
"test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py", "file_name": "main.py", "fun_name": "_just_docs", "commit_message": "ansible-test - Allow docstring in docs-only module", "code": "def _just_docs(self):\n \n try:\n for child in self.ast.body:\n if not isinstance(child, ast.Assign):\n # allow string constant expressions (these are docstrings)\n if isinstance(child, ast.Expr) and isinstance(child.value, ast.Constant) and isinstance(child.value.value, str):\n continue\n\n # allowed from __future__ imports\n if isinstance(child, ast.ImportFrom) and child.module == '__future__':\n for future_import in child.names:\n if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS:\n break\n else:\n continue\n return False\n return True\n except AttributeError:\n return False\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 379, "n_words": 61, "vocab_size": 45, "complexity": 11, "nloc": 16, "token_counts": 107, "n_ast_nodes": 171, "n_identifiers": 18, "random_cut": "def _just_docs(self):\n \n try:\n for child in self.ast.body:\n if not isinstance(child, as", "d_id": 78920, "documentation": { "docstring": "Module can contain just docs and from __future__ boilerplate\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 83043, "commit_id": "031f4596ab1737a237b1c099e792fe627a937ff7", "repo": "zulip", "path": "zerver/openapi/openapi.py", "file_name": "openapi.py", "fun_name": "response_validator", "commit_message": "openapi: Use openapi_core ResponseValidator to validate responses.\n\nSigned-off-by: Anders Kaseorg ", "code": "def response_validator(self) -> RequestValidator:\n \n self.check_reload()\n assert self._response_validator is not None\n return self._response_validator\n\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 8, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 5, "random_cut": "def response_validator(self) -> RequestValidator:\n \n self.check_reload()\n assert s", "d_id": 17587, "documentation": { "docstring": "Reload the OpenAPI file if it has been modified after the last time\n it was read, and then return the openapi_core validator object. Similar\n to preceding functions. 
Used for proper access to OpenAPI objects.\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 55, "language": "en" } }, { "id": 119828, "commit_id": "603bb3c5ca288674579211e64fa47c6b2b0fb7a6", "repo": "jax", "path": "jax/_src/numpy/polynomial.py", "file_name": "polynomial.py", "fun_name": "roots", "commit_message": "lax_numpy: move poly functions into numpy.polynomial", "code": "def roots(p, *, strip_zeros=True):\n # ported from https://github.com/numpy/numpy/blob/v1.17.0/numpy/lib/polynomial.py#L168-L251\n p = atleast_1d(p)\n if p.ndim != 1:\n raise ValueError(\"Input must be a rank-1 array.\")\n\n # strip_zeros=False is unsafe because leading zeros aren't removed\n if not strip_zeros:\n if p.size > 1:\n return _roots_no_zeros(p)\n else:\n return array([])\n\n if all(p == 0):\n return array([])\n\n # factor out trivial roots\n start, end = _nonzero_range(p)\n # number of trailing zeros = number of roots at 0\n trailing_zeros = p.size - end\n\n # strip leading and trailing zeros\n p = p[start:end]\n\n if p.size < 2:\n return zeros(trailing_zeros, p.dtype)\n else:\n roots = _roots_no_zeros(p)\n # combine roots and zero roots\n roots = hstack((roots, zeros(trailing_zeros, p.dtype)))\n return roots\n\n\n_POLYFIT_DOC = \n@_wraps(np.polyfit, lax_description=_POLYFIT_DOC)\n@partial(jit, static_argnames=('deg', 'rcond', 'full', 'cov'))", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_wraps(np.polyfit, lax_description=_POLYFIT_DOC)\n@partial(jit, static_argnames=('deg', 'rcond', 'full', 'cov'))", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 164, "n_words": 116, "vocab_size": 74, "complexity": 6, "nloc": 20, "token_counts": 133, "n_ast_nodes": 274, "n_identifiers": 25, "random_cut": "def roots(p, *, strip_zeros=True):\n # ported from https://github.com/numpy/numpy/blob/v1.17.0/numpy/lib/polynomial.py#L168-L251\n p = atleast_1d(p)\n if p.ndim != 1:\n raise Valu", "d_id": 26694, "documentation": { "docstring": "\\\nUnlike NumPy's implementation of polyfit, :py:func:`jax.numpy.polyfit` will not warn on rank reduction, which indicates an ill conditioned matrix\nAlso, it works best on rcond <= 10e-3 values.\n", "n_words": 28, "vocab_size": 27, "n_whitespaces": 25, "language": "en" } }, { "id": 129465, "commit_id": "0abcd5eea529fc84c4398620f2808087e4d8c6b6", "repo": "ray", "path": "python/ray/tune/trainable.py", "file_name": "trainable.py", "fun_name": "_storage_path", "commit_message": "[tune] only sync up and sync down checkpoint folder for cloud checkpoint. 
(#21658)\n\nBy default, ~/ray_results/exp_name/trial_name/checkpoint_name.\r\nInstead of the whole trial checkpoint (~/ray_results/exp_name/trial_name/) directory.\r\nStuff like progress.csv, result.json, params.pkl, params.json, events.out etc are coming from driver process.\r\nThis could also enable us to de-couple sync up and delete - they don't have to wait for each other to finish.", "code": "def _storage_path(self, local_path):\n \n rel_local_path = os.path.relpath(local_path, self.logdir)\n return os.path.join(self.remote_checkpoint_dir, rel_local_path)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 35, "n_ast_nodes": 55, "n_identifiers": 10, "random_cut": "def _storage_path(self, local_path):\n \n rel_local_path =", "d_id": 28957, "documentation": { "docstring": "Converts a `local_path` to be based off of\n `self.remote_checkpoint_dir`.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 15, "language": "en" } }, { "id": 9373, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/dnnlib/submission/run_context.py", "file_name": "run_context.py", "fun_name": "get_time_since_last_update", "commit_message": "initialize ostec", "code": "def get_time_since_last_update(self) -> float:\n \n return time.time() - self.last_update_time\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 5, "random_cut": "def get_time_since_last_update(self) -> float:\n \n return time.time() - self.last_upda", "d_id": 1588, "documentation": { "docstring": "How much time has passed since the last call to update.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 9622, "commit_id": "2e5d23ee0e7fc1fdd7ad2e615fd651655aeb0f5b", "repo": "insightface", "path": "reconstruction/ostec/external/graphonomy/FaceHairMask/deeplab_xception.py", "file_name": "deeplab_xception.py", "fun_name": "train_fixbn", "commit_message": "Graphonomy Face/Hair Segmentation added", "code": "def train_fixbn(self, mode=True, freeze_bn=True, freeze_bn_affine=False):\n r\n super(DeepLabv3_plus, self).train(mode)\n if freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if freeze_bn_affine:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if freeze_bn:\n for m in self.xception_features.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if freeze_bn_affine:\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n # for m in self.aspp1.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for m in self.aspp2.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for m in self.aspp3.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for m in self.aspp4.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for 
m in self.global_avg_pool.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for m in self.concat_projection_bn1.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for m in self.feature_projection_bn1.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1136, "n_words": 192, "vocab_size": 35, "complexity": 7, "nloc": 23, "token_counts": 90, "n_ast_nodes": 188, "n_identifiers": 19, "random_cut": "def train_fixbn(self, mode=True, freeze_bn=True, freeze_bn_affine=False):\n r\n super(DeepLabv3_plus, self).train(mode)\n if freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if freeze_bn_affine:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if freeze_bn:\n for m in self.xception_features.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if freeze_bn_affine:\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n # for m in self.aspp1.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for m in self.aspp2.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for m in self.aspp3.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for m in self.aspp4.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n # for m in self.global_avg_pool.modules():\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n # if freeze_bn_affine:\n # m.weight.requires_grad = False\n # m.", "d_id": 1643, "documentation": { "docstring": "Sets the module in training mode.\n\n This has any effect only on certain modules. See documentations of\n particular modules for details of their behaviors in training/evaluation\n mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,\n etc.\n\n Returns:\n Module: self\n ", "n_words": 38, "vocab_size": 36, "n_whitespaces": 91, "language": "en" } }, { "id": 245586, "commit_id": "73a12e6508d4ba0331b84b1313027a511ba26fe3", "repo": "mmdetection", "path": "tests/test_models/test_backbones/test_resnet.py", "file_name": "test_resnet.py", "fun_name": "assert_params_all_zeros", "commit_message": "[Fix] Fix UT and remove delete mmcv ops. 
(#8623)\n\n* Remove get_root_logger\r\n\r\n* Fix UT\r\n\r\n* Update", "code": "def assert_params_all_zeros(module) -> bool:\n \n weight_data = module.weight.data\n is_weight_zero = weight_data.allclose(\n weight_data.new_zeros(weight_data.size()))\n\n if hasattr(module, 'bias') and module.bias is not None:\n bias_data = module.bias.data\n is_bias_zero = bias_data.allclose(\n bias_data.new_zeros(bias_data.size()))\n else:\n is_bias_zero = True\n\n return is_weight_zero and is_bias_zero\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 91, "n_words": 34, "vocab_size": 26, "complexity": 4, "nloc": 19, "token_counts": 80, "n_ast_nodes": 133, "n_identifiers": 14, "random_cut": "def assert_params_all_zeros(module) -> bool:\n \n weight_data = module.weight.data\n is_weight_zero = weight_data.allclose(\n weight_data.new_zeros(weight_data.size()))\n\n if hasattr(module, 'bias') and module.bias is not None:\n bias_data = module.bias.data\n is_bias_zero = bias_data.allclose(\n bias_data.new_zeros(bias_data.size()))\n else:\n is_bias_zero = True\n\n ret", "d_id": 70831, "documentation": { "docstring": "Check if the parameters of the module is all zeros.\n\n Args:\n module (nn.Module): The module to be checked.\n\n Returns:\n bool: Whether the parameters of the module is all zeros.\n ", "n_words": 29, "vocab_size": 18, "n_whitespaces": 52, "language": "en" } }, { "id": 209587, "commit_id": "495b21f2867e48286767085c8cf2918e4092e9dc", "repo": "scapy", "path": "scapy/contrib/automotive/scanner/executor.py", "file_name": "executor.py", "fun_name": "cleanup_state", "commit_message": "Add Automotive Logger for all debug outputs of the automotive layer", "code": "def cleanup_state(self):\n # type: () -> None\n \n for f in self.cleanup_functions:\n if not callable(f):\n continue\n try:\n if not f(self.socket, self.configuration):\n log_automotive.info(\n \"Cleanup function %s failed\", repr(f))\n except (OSError, ValueError, Scapy_Exception) as e:\n log_automotive.critical(\"Exception during cleanup: %s\", e)\n\n self.cleanup_functions = list()\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 188, "n_words": 40, "vocab_size": 38, "complexity": 5, "nloc": 11, "token_counts": 73, "n_ast_nodes": 123, "n_identifiers": 16, "random_cut": "def cleanup_state(self):\n # type: () -> None\n \n for f in self.cleanup_functions:\n if not callable(f):\n continue\n try:\n if not f(self.socket, self.configuration):\n log_automotive.info(\n ", "d_id": 52741, "documentation": { "docstring": "\n Executes all collected cleanup functions from a traversed path\n :return: None\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 73261, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/tests/test_simple_modeladmin.py", "file_name": "test_simple_modeladmin.py", "fun_name": "test_model_with_two_tabbed_panels_only", "commit_message": "Reformat with black", "code": "def test_model_with_two_tabbed_panels_only(self):\n Publisher.settings_panels = [FieldPanel(\"name\")]\n Publisher.promote_panels = [FieldPanel(\"headquartered_in\")]\n\n warning_1 = checks.Warning(\n \"Publisher.promote_panels will have no effect on modeladmin editing\",\n hint=,\n obj=Publisher,\n id=\"wagtailadmin.W002\",\n )\n\n warning_2 = checks.Warning(\n 
\"Publisher.settings_panels will have no effect on modeladmin editing\",\n hint=,\n obj=Publisher,\n id=\"wagtailadmin.W002\",\n )\n\n checks_results = self.get_checks_result()\n\n self.assertIn(warning_1, checks_results)\n self.assertIn(warning_2, checks_results)\n\n # clean up for future checks\n delattr(Publisher, \"settings_panels\")\n delattr(Publisher, \"promote_panels\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 226, "n_words": 55, "vocab_size": 37, "complexity": 1, "nloc": 26, "token_counts": 102, "n_ast_nodes": 178, "n_identifiers": 17, "random_cut": "def test_model_with_two_tabbed_panels_only(self):\n Publisher.settings_panels = [FieldPanel(\"name\")]\n Publisher.promote_panels = [FieldPanel(\"headquartered_in\")]\n\n warning_1 = checks.Warning(\n \"Publisher.promo", "d_id": 15998, "documentation": { "docstring": "Ensure that Publisher uses `panels` instead of `promote_panels`\\\nor set up an `edit_handler` if you want a tabbed editing interface.\nThere are no default tabs on non-Page models so there will be no\\\n Promote tab for the promote_panels to render in.Ensure that Publisher uses `panels` instead of `settings_panels`\\\nor set up an `edit_handler` if you want a tabbed editing interface.\nThere are no default tabs on non-Page models so there will be no\\\n Settings tab for the settings_panels to render in.", "n_words": 81, "vocab_size": 45, "n_whitespaces": 76, "language": "en" } }, { "id": 274158, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/serialization.py", "file_name": "serialization.py", "fun_name": "get_builtin_layer", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_builtin_layer(class_name):\n \n if not hasattr(LOCAL, \"ALL_OBJECTS\"):\n populate_deserializable_objects()\n return LOCAL.ALL_OBJECTS.get(class_name)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 49, "n_identifiers": 7, "random_cut": "def get_builtin_layer(class_name):\n \n if not hasattr(LOCAL, \"ALL_OBJECTS\"):\n populate_deserializable_objects()\n return L", "d_id": 81181, "documentation": { "docstring": "Returns class if `class_name` is registered, else returns None.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 196117, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/perm_groups.py", "file_name": "perm_groups.py", "fun_name": "abelian_invariants", "commit_message": "Updated import locations", "code": "def abelian_invariants(self):\n \n if self.is_trivial:\n return []\n gns = self.generators\n inv = []\n G = self\n H = G.derived_subgroup()\n Hgens = H.generators\n for p in primefactors(G.order()):\n ranks = []\n while True:\n pows = []\n for g in gns:\n elm = g**p\n if not H.contains(elm):\n pows.append(elm)\n K = PermutationGroup(Hgens + pows) if pows else H\n r = G.order()//K.order()\n G = K\n gns = pows\n if r == 1:\n break\n ranks.append(multiplicity(p, r))\n\n if ranks:\n pows = [1]*ranks[0]\n for i in ranks:\n for j in range(0, i):\n pows[j] = pows[j]*p\n inv.extend(pows)\n inv.sort()\n return inv\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 491, "n_words": 90, "vocab_size": 53, "complexity": 11, "nloc": 31, "token_counts": 181, "n_ast_nodes": 300, "n_identifiers": 28, "random_cut": "def abelian_invariants(self):\n \n if self.is_trivial:\n return []\n gns = self.generators\n inv = []\n G = self\n H = G.derived_subgroup()\n Hgens = H.generators\n for p in primefactors(G.order()):\n ranks = []\n while True:\n pows = []\n for g in gns:\n ", "d_id": 47617, "documentation": { "docstring": "\n Returns the abelian invariants for the given group.\n Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to\n the direct product of finitely many nontrivial cyclic groups of\n prime-power order.\n\n Explanation\n ===========\n\n The prime-powers that occur as the orders of the factors are uniquely\n determined by G. More precisely, the primes that occur in the orders of the\n factors in any such decomposition of ``G`` are exactly the primes that divide\n ``|G|`` and for any such prime ``p``, if the orders of the factors that are\n p-groups in one such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``,\n then the orders of the factors that are p-groups in any such decomposition of ``G``\n are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``.\n\n The uniquely determined integers ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, taken\n for all primes that divide ``|G|`` are called the invariants of the nontrivial\n group ``G`` as suggested in ([14], p. 542).\n\n Notes\n =====\n\n We adopt the convention that the invariants of a trivial group are [].\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> a = Permutation([0, 2, 1])\n >>> b = Permutation([1, 0, 2])\n >>> G = PermutationGroup([a, b])\n >>> G.abelian_invariants()\n [2]\n >>> from sympy.combinatorics import CyclicGroup\n >>> G = CyclicGroup(7)\n >>> G.abelian_invariants()\n [7]\n\n ", "n_words": 212, "vocab_size": 103, "n_whitespaces": 437, "language": "en" } }, { "id": 271364, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/functional_utils.py", "file_name": "functional_utils.py", "fun_name": "clone_graph_nodes", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def clone_graph_nodes(inputs, outputs):\n \n nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs)\n cloned_inputs = []\n cloned_outputs = []\n # We not only need to create copies of Nodes (mimic the calls), also need to\n # clone keras_tensors to avoid the override of _keras_history attached on the\n # keras_tensor. The following dict is used to track any keras tensor we cloned\n # The key is the string ID of the original keras tensor, and value is the\n # cloned keras_tensor instance.\n kt_id_mapping = {}\n\n for kt_input in tf.nest.flatten(inputs):\n if kt_input.node.is_input:\n # For any existing keras_tensor from tf.keras.Input, we leave them as is.\n cloned_inputs.append(kt_input)\n kt_id_mapping[id(kt_input)] = kt_input\n else:\n # We need to create a new tf.keras.Input for any intermediate keras_tensor\n cpy = _clone_keras_tensor(kt_input)\n cloned_input = input_layer_module.Input(tensor=cpy)\n cloned_inputs.append(cloned_input)\n kt_id_mapping[id(kt_input)] = cloned_input\n cloned_inputs = tf.nest.pack_sequence_as(inputs, cloned_inputs)\n\n for kt_output in tf.nest.flatten(outputs):\n cpy = _clone_keras_tensor(kt_output)\n # We reuse the _keras_history here, which contains the old information. 
It\n # is used in the Node constructor to check if the tensor \"is_keras_tensor()\"\n # The history will be override by the Node constructor anyway for the\n # corresponding layer output anyway.\n cpy._keras_history = (\n kt_output._keras_history\n ) # pylint: disable=protected-access\n cloned_outputs.append(cpy)\n kt_id_mapping[id(kt_output)] = cpy\n cloned_outputs = tf.nest.pack_sequence_as(outputs, cloned_outputs)\n\n for node in nodes_to_clone:\n # Clone any keras_tensors to avoid override of _keras_history\n # Or reuse an existing keras_tensor if it has already been cloned.\n output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping)\n call_args_copy = clone_keras_tensors(node.call_args, kt_id_mapping)\n call_kwargs_copy = clone_keras_tensors(node.call_kwargs, kt_id_mapping)\n # Creating new nodes based on the existing node information.\n # Node wires itself to inbound and outbound layers.\n # The Node constructor actually updates this layer's self._inbound_nodes,\n # sets _keras_history on the outputs, and adds itself to the\n # `_outbound_nodes` of the layers that produced the inputs to this\n # layer call.\n node_module.Node(\n node.layer,\n call_args=call_args_copy,\n call_kwargs=call_kwargs_copy,\n outputs=output_copy,\n )\n return cloned_inputs, cloned_outputs\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 652, "n_words": 292, "vocab_size": 161, "complexity": 5, "nloc": 34, "token_counts": 221, "n_ast_nodes": 364, "n_identifiers": 35, "random_cut": "def clone_graph_nodes(inputs, outputs):\n \n nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs)\n cloned_inputs = []\n cloned_outputs = []\n # We not only need to create copies of Nodes (mimic the calls), also need to\n # clone keras_tensors to avoid the override of _keras_history attached on the\n # keras_tensor. The following dict is used to track any keras tensor we cloned\n # The key is the string ID of the original keras tensor, and value is the\n # cloned keras_tensor instance.\n kt_id_mapping = {}\n\n for kt_input in tf.nest.flatten(inputs):\n if kt_input.", "d_id": 80751, "documentation": { "docstring": "Clone the `Node` between the inputs and output tensors.\n\n This function is used to create a new functional model from any intermediate\n keras tensors. The clone of the nodes mimic the behavior of reconstructing the\n functional graph network by re-executing all the __call__ methods. The cloned\n nodes will be appended to the layers.\n\n Note that a new tf.keras.Inputs will be created for any items in the `inputs`\n\n Args:\n inputs: A nested structure of keras_tensors.\n outputs: A nested structure of keras_tensors.\n\n Returns:\n A pair of inputs and outputs, with cloned keras_tensors. 
They can be used to\n create a new functional model.\n ", "n_words": 100, "vocab_size": 63, "n_whitespaces": 144, "language": "en" } }, { "id": 199471, "commit_id": "801e149d69d5f88919a735f8b55b6024f97c6950", "repo": "sympy", "path": "sympy/physics/mechanics/rigidbody.py", "file_name": "rigidbody.py", "fun_name": "parallel_axis", "commit_message": "Add optional frame argument to parallel axis method", "code": "def parallel_axis(self, point, frame=None):\n \n # circular import issue\n from sympy.physics.mechanics.functions import inertia_of_point_mass\n if frame is None:\n frame = self.frame\n return self.central_inertia.express(frame) + inertia_of_point_mass(\n self.mass, self.masscenter.pos_from(point), frame)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 83, "n_words": 26, "vocab_size": 24, "complexity": 2, "nloc": 6, "token_counts": 59, "n_ast_nodes": 90, "n_identifiers": 14, "random_cut": "def parallel_axis(self, point, frame=None):\n \n # circular import issue\n from sympy.physics.mechanics.functions import inertia_of_point_mass\n if frame is None:\n frame = self.frame\n return self.central_inertia.express(frame) + inertia_of_point_mass(\n sel", "d_id": 49276, "documentation": { "docstring": "Returns the inertia dyadic of the body with respect to another\n point.\n\n Parameters\n ==========\n\n point : sympy.physics.vector.Point\n The point to express the inertia dyadic about.\n frame : sympy.physics.vector.ReferenceFrame\n The reference frame used to construct the dyadic.\n\n Returns\n =======\n\n inertia : sympy.physics.vector.Dyadic\n The inertia dyadic of the rigid body expressed about the provided\n point.\n\n ", "n_words": 53, "vocab_size": 31, "n_whitespaces": 160, "language": "en" } }, { "id": 259103, "commit_id": "3605c140af992b6ac52f04f1689c58509cc0b5b2", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_class_weight.py", "file_name": "test_class_weight.py", "fun_name": "test_class_weight_does_not_contains_more_classses", "commit_message": "FIX Support extra class_weights in compute_class_weight (#22595)", "code": "def test_class_weight_does_not_contains_more_classses():\n \n tree = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20})\n\n # Does not raise\n tree.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1])\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 38, "n_words": 26, "vocab_size": 22, "complexity": 1, "nloc": 3, "token_counts": 63, "n_ast_nodes": 89, "n_identifiers": 5, "random_cut": "def test_class_weight_does_not_contains_more_classses():\n \n tree = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20})\n\n # Does not raise\n tree.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1])\n", "d_id": 75568, "documentation": { "docstring": "Check that class_weight can contain more labels than in y.\n\n Non-regression test for #22413\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 20, "language": "en" } }, { "id": 266720, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/cli/argparsing/parsers.py", "file_name": "parsers.py", "fun_name": "parse", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def parse(self, state): # type: (ParserState) -> str\n \n if state.mode == ParserMode.PARSE:\n path = AnyParser().parse(state)\n\n if not os.path.isfile(path):\n raise ParserError(f'Not a file: {path}')\n else:\n path = ''\n\n with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary\n while boundary.ready:\n directory = path or '.'\n\n try:\n with os.scandir(directory) as scan: # type: t.Iterator[os.DirEntry]\n choices = [f'{item.name}{PATH_DELIMITER}' if item.is_dir() else item.name for item in scan]\n except OSError:\n choices = []\n\n if not path:\n choices.append(PATH_DELIMITER) # allow absolute paths\n choices.append('../') # suggest relative paths\n\n part = RelativePathNameParser(choices).parse(state)\n path += f'{part}{boundary.match or \"\"}'\n\n return path\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 429, "n_words": 89, "vocab_size": 65, "complexity": 9, "nloc": 21, "token_counts": 145, "n_ast_nodes": 285, "n_identifiers": 28, "random_cut": "def parse(self, state): # type: (ParserState) -> str\n \n if state.mode == ParserMode.PARSE:\n path = AnyParser().", "d_id": 78532, "documentation": { "docstring": "Parse the input from the given state and return the result.", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 182133, "commit_id": "c90cdd4ec8a10c689fee83a6a71e025393dcb38d", "repo": "textual", "path": "src/textual/view.py", "file_name": "view.py", "fun_name": "layout", "commit_message": "implement inline styles", "code": "def layout(self) -> Layout:\n \n # self.log(\"I\", self._inline_styles)\n # self.log(\"C\", self._css_styles)\n # self.log(\"S\", self.styles)\n assert self.styles.layout\n return self.styles.layout\n\n # @layout.setter\n # def layout(self, new_value: Layout) -> None:\n # \n # self.styles.layout = new_value\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 94, "n_words": 31, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 20, "n_ast_nodes": 46, "n_identifiers": 4, "random_cut": "def layout(self) -> Layout:\n \n # self.log(\"I\", self._inline_styles)\n # self.log(\"C\", self._css_styles)\n # self.log(\"S\", self.styles)\n assert self.s", "d_id": 43751, "documentation": { "docstring": "Convenience property for accessing ``self.styles.layout``.\n\n Returns: The Layout associated with this view\n Convenience property setter for setting ``view.styles.layout``.\n # Args:\n # new_value:\n\n # Returns:\n # None\n # ", "n_words": 27, "vocab_size": 19, "n_whitespaces": 84, "language": "en" } }, { "id": 215425, "commit_id": "ab4803984bce4a4de7cc10910e7310c4babf557e", "repo": "salt", "path": "salt/transport/rabbitmq.py", "file_name": "rabbitmq.py", "fun_name": "timeout_message", 
"commit_message": "Start to add base class defs", "code": "def timeout_message(self, message):\n \n future = self.send_future_map.pop(message, None)\n # In a race condition the message might have been sent by the time\n # we're timing it out. Make sure the future is not None\n if future is not None:\n del self.send_timeout_map[message]\n if future.attempts < future.tries:\n future.attempts += 1\n log.info(\n \"SaltReqTimeoutError, retrying. (%s/%s)\",\n future.attempts,\n future.tries,\n )\n self.send(\n message,\n timeout=future.timeout,\n tries=future.tries,\n future=future,\n )\n\n else:\n future.set_exception(SaltReqTimeoutError(\"Message timed out\"))\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 355, "n_words": 64, "vocab_size": 54, "complexity": 3, "nloc": 19, "token_counts": 96, "n_ast_nodes": 152, "n_identifiers": 15, "random_cut": "def timeout_message(self, message):\n \n future = self.send_future_map.pop(message, None)\n # In a race condition the message might have been sent by the time\n # we're timing it out. Make sure the future is not None\n if future is not None:\n del self.send_timeout_map[message]\n if future.attempts < future.tries:\n future.attempts += 1\n log.info(\n \"SaltReqTimeoutError, retrying. (%s/%s)\",\n future.attempts,\n future.tries,\n )\n self.send(\n message,\n timeout=future.timeout,\n tries=future.tries,\n ", "d_id": 53965, "documentation": { "docstring": "\n Handle a message timeout by removing it from the sending queue\n and informing the caller\n\n :raises: SaltReqTimeoutError\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 46, "language": "en" } }, { "id": 5410, "commit_id": "9d1cd42ff9f3118e2312ea9c94ad647f1baaad73", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-freshdesk/unit_tests/test_300_page.py", "file_name": "test_300_page.py", "fun_name": "test_not_all_records", "commit_message": "🎉 Source Freshdesk: Migrated to latest CDK (#12334)", "code": "def test_not_all_records(self, requests_mock, authenticator, config, responses):\n \n\n expected_output = [\n {\"id\": 1, \"updated_at\": \"2018-01-02T00:00:00Z\"},\n {\"id\": 2, \"updated_at\": \"2018-02-02T00:00:00Z\"},\n {\"id\": 2, \"updated_at\": \"2018-02-02T00:00:00Z\"}, # duplicate\n {\"id\": 3, \"updated_at\": \"2018-03-02T00:00:00Z\"},\n {\"id\": 3, \"updated_at\": \"2018-03-02T00:00:00Z\"}, # duplicate\n {\"id\": 4, \"updated_at\": \"2019-01-03T00:00:00Z\"},\n {\"id\": 4, \"updated_at\": \"2019-01-03T00:00:00Z\"}, # duplicate\n {\"id\": 5, \"updated_at\": \"2019-02-03T00:00:00Z\"},\n {\"id\": 5, \"updated_at\": \"2019-02-03T00:00:00Z\"}, # duplicate\n {\"id\": 6, \"updated_at\": \"2019-03-03T00:00:00Z\"},\n ]\n\n # INT value of page number where the switch state should be triggered.\n # in this test case values from: 1 - 4, assuming we want to switch state on this page.\n ticket_paginate_limit = 2\n # This parameter mocks the \"per_page\" parameter in the API Call\n result_return_limit = 1\n\n # Create test_stream instance.\n test_stream = Tickets(authenticator=authenticator, config=config)\n test_stream.ticket_paginate_limit = ticket_paginate_limit\n test_stream.result_return_limit = result_return_limit\n\n # Mocking Request\n for response in responses:\n requests_mock.register_uri(\n \"GET\",\n response[\"url\"],\n json=response[\"json\"],\n headers=response.get(\"headers\", {}),\n )\n\n records = list(test_stream.read_records(sync_mode=SyncMode.full_refresh))\n\n # 
We're expecting 6 records to return from the tickets_stream\n assert records == expected_output\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 467, "n_words": 152, "vocab_size": 90, "complexity": 2, "nloc": 27, "token_counts": 201, "n_ast_nodes": 360, "n_identifiers": 22, "random_cut": "def test_not_all_records(self, requests_mock, authenticator, config, responses):\n \n\n expected_output = [\n {\"id\": 1, \"updated_at\": \"2018-01-02T00:00:00Z\"},\n {\"id\": 2, \"updated_at\": \"2018-02-02T00:00:00Z\"},\n {\"id\": 2, \"updated_at\": \"2018-02-02T00:00:00Z\"}, # duplicate\n {\"id\": 3, \"updated_at\": \"2018-03-02T00:00:00Z\"},\n {\"id\": 3, \"updated_at\": \"2018-03-02T00:00:00Z\"}, # duplicate\n {\"id\": 4, \"updated_at\": \"2019-01-03T00:00:00Z\"},\n {\"id\": 4, \"updated_at\": \"2019-01-03T00:00:00Z\"}, # duplicate\n {\"id\": 5, \"updated_at\": \"2019-02-03T00:00:00Z\"},\n {\"id\": 5, \"updated_at\": \"2019-02-03T00:00:00Z\"}, # duplicate\n {\"id\": 6, \"updated_at\": \"2019-03-03T00:00:00Z\"},\n ]\n\n # INT value of page number where the switch state should be triggered.\n # in this test case values from: 1 - 4, assuming we want to switch state on this page.\n ticket_paginate_limit = 2\n # This parameter mocks the \"per_page\" parameter in the API Ca", "d_id": 768, "documentation": { "docstring": "\n TEST 1 - not all records are retrieved\n\n During test1 the tickets_stream changes the state of parameters on page: 2,\n by updating the params:\n `params[\"order_by\"] = \"updated_at\"`\n `params[\"updated_since\"] = last_record`\n continues to fetch records from the source, using new cycle, and so on.\n\n NOTE:\n After switch of the state on ticket_paginate_limit = 2, is this example, we will experience the\n records duplication, because of the last_record state, starting at the point\n where we stoped causes the duplication of the output. The solution for this is to add at least 1 second to the\n last_record state. The DBT normalization should handle this for the end user, so the duplication issue is not a\n blocker in such cases.\n Main pricipal here is: airbyte is at-least-once delivery, but skipping records is data loss.\n ", "n_words": 130, "vocab_size": 90, "n_whitespaces": 229, "language": "en" } }, { "id": 204704, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/commands/test.py", "file_name": "test.py", "fun_name": "run_from_argv", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def run_from_argv(self, argv):\n \n self.test_runner = get_command_line_option(argv, \"--testrunner\")\n super().run_from_argv(argv)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 46, "n_identifiers": 6, "random_cut": "def run_from_argv(self, argv):\n \n s", "d_id": 50844, "documentation": { "docstring": "\n Pre-parse the command line to extract the value of the --testrunner\n option. 
This allows a test runner to define additional command line\n arguments.\n ", "n_words": 23, "vocab_size": 18, "n_whitespaces": 52, "language": "en" } }, { "id": 93612, "commit_id": "e4c6ad69c22692e2999baa26d8bf8f44947cd1c1", "repo": "sentry", "path": "tests/sentry/integrations/slack/notifications/test_new_processing_issues.py", "file_name": "test_new_processing_issues.py", "fun_name": "test_new_processing_issue", "commit_message": "fix(slack): Fix broken url formatting (#36976)\n\nFix the URL format, it should be ``.", "code": "def test_new_processing_issue(self, mock_func):\n \n\n notification = NewProcessingIssuesActivityNotification(\n Activity(\n project=self.project,\n user=self.user,\n type=ActivityType.NEW_PROCESSING_ISSUES,\n data={\n \"issues\": get_issues_data(),\n \"reprocessing_active\": True,\n },\n )\n )\n with self.tasks():\n notification.send()\n\n attachment, text = get_attachment()\n assert (\n text\n == f\"Processing issues on \"\n )\n assert (\n attachment[\"text\"]\n == f\"Some events failed to process in your project {self.project.slug}\"\n )\n assert (\n attachment[\"footer\"]\n == f\"{self.project.slug} | \"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 346, "n_words": 57, "vocab_size": 45, "complexity": 1, "nloc": 27, "token_counts": 95, "n_ast_nodes": 208, "n_identifiers": 20, "random_cut": "def test_new_processing_issue(self, mock_func):\n \n\n notification = NewProcessingIssuesActivityNotification(\n Activity(\n project=self.project,\n user=self.user,\n type=ActivityType.NEW_PROCESSING_ISSUES,\n data={\n \"issues\": get_issues_data(),\n \"reprocessing_active\": True,\n },\n", "d_id": 18999, "documentation": { "docstring": "\n Test that a Slack message is sent with the expected payload when an issue is held back in reprocessing\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 34, "language": "en" } }, { "id": 158401, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/torch.py", "file_name": "torch.py", "fun_name": "load_array", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def load_array(data_arrays, batch_size, is_train=True):\n \n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 34, "n_ast_nodes": 53, "n_identifiers": 9, "random_cut": "def load_array(data_arrays, batch_size, is_train=True):\n \n dataset = data.TensorDataset(*data_arrays)\n ", "d_id": 37541, "documentation": { "docstring": "Construct a PyTorch data iterator.\n\n Defined in :numref:`sec_linear_concise`", "n_words": 8, "vocab_size": 8, "n_whitespaces": 10, "language": "en" } }, { "id": 22092, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/models.py", "file_name": "models.py", "fun_name": "is_permanent_redirect", 
"commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def is_permanent_redirect(self):\n \n return \"location\" in self.headers and self.status_code in (\n codes.moved_permanently,\n codes.permanent_redirect,\n )\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def is_permanent_redirect(self):\n ", "d_id": 4172, "documentation": { "docstring": "True if this Response one of the permanent versions of redirect.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 278431, "commit_id": "80ee2fa4e1db2dda14370110830db82be3eb97b7", "repo": "keras", "path": "keras/utils/generic_utils.py", "file_name": "generic_utils.py", "fun_name": "_estimate_step_duration", "commit_message": "resolve line-too-long in utils", "code": "def _estimate_step_duration(self, current, now):\n \n if current:\n # there are a few special scenarios here:\n # 1) somebody is calling the progress bar without ever supplying\n # step 1\n # 2) somebody is calling the progress bar and supplies step one\n # multiple times, e.g. as part of a finalizing call\n # in these cases, we just fall back to the simple calculation\n if self._time_after_first_step is not None and current > 1:\n time_per_unit = (now - self._time_after_first_step) / (\n current - 1\n )\n else:\n time_per_unit = (now - self._start) / current\n\n if current == 1:\n self._time_after_first_step = now\n return time_per_unit\n else:\n return 0\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 329, "n_words": 102, "vocab_size": 68, "complexity": 5, "nloc": 13, "token_counts": 69, "n_ast_nodes": 116, "n_identifiers": 7, "random_cut": "def _estimate_step_duration(self, current, now):\n \n if current:\n # there are a few special scenarios here:\n # 1) somebody is calling the progress bar without ever supplying\n # step 1\n # 2) somebody is calling the p", "d_id": 82535, "documentation": { "docstring": "Estimate the duration of a single step.\n\n Given the step number `current` and the corresponding time `now` this\n function returns an estimate for how long a single step takes. If this\n is called before one step has been completed (i.e. `current == 0`) then\n zero is given as an estimate. The duration estimate ignores the duration\n of the (assumed to be non-representative) first step for estimates when\n more steps are available (i.e. 
`current>1`).\n\n Args:\n current: Index of current step.\n now: The current time.\n\n Returns: Estimate of the duration of a single step.\n ", "n_words": 92, "vocab_size": 62, "n_whitespaces": 173, "language": "en" } }, { "id": 152956, "commit_id": "3c740dbfcdd69ddc3ab45a42be996e5c61104342", "repo": "modin", "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_propagate_index_objs", "commit_message": "FEAT-#3111: Ensure relabeling Modin Frame does not lose partition shape (#3662)\n\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: Naren Krishna ", "code": "def _propagate_index_objs(self, axis=None):\n \n self._filter_empties()\n if axis is None or axis == 0:\n cum_row_lengths = np.cumsum([0] + self._row_lengths)\n if axis is None or axis == 1:\n cum_col_widths = np.cumsum([0] + self._column_widths)\n\n if axis is None:\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 34, "vocab_size": 20, "complexity": 15, "nloc": 64, "token_counts": 373, "n_ast_nodes": 107, "n_identifiers": 10, "random_cut": "def _propagate_index_objs(self, axis=None):\n \n self._filter_empties()\n if axis is None or axis == 0:\n cum_row_lengths = np.cumsum([0] + self._row_lengths)\n if axis is None or axis == 1:\n cum_col_widths = np.cumsum([0] + self._column", "d_id": 35202, "documentation": { "docstring": "\n Synchronize labels by applying the index object for specific `axis` to the `self._partitions` lazily.\n\n Adds `set_axis` function to call-queue of each partition from `self._partitions`\n to apply new axis.\n\n Parameters\n ----------\n axis : int, default: None\n The axis to apply to. If it's None applies to both axes.\n ", "n_words": 47, "vocab_size": 38, "n_whitespaces": 108, "language": "en" } }, { "id": 247618, "commit_id": "5dd949bee6158a8b651db9f2ae417a62c8184bfd", "repo": "synapse", "path": "tests/handlers/test_e2e_keys.py", "file_name": "test_e2e_keys.py", "fun_name": "test_query_devices_remote_no_sync", "commit_message": "Add type hints to some tests/handlers files. 
(#12224)", "code": "def test_query_devices_remote_no_sync(self) -> None:\n \n\n remote_user_id = \"@test:other\"\n local_user_id = \"@test:test\"\n\n remote_master_key = \"85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY\"\n remote_self_signing_key = \"QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ\"\n\n self.hs.get_federation_client().query_client_keys = mock.Mock(\n return_value=defer.succeed(\n {\n \"device_keys\": {remote_user_id: {}},\n \"master_keys\": {\n remote_user_id: {\n \"user_id\": remote_user_id,\n \"usage\": [\"master\"],\n \"keys\": {\"ed25519:\" + remote_master_key: remote_master_key},\n },\n },\n \"self_signing_keys\": {\n remote_user_id: {\n \"user_id\": remote_user_id,\n \"usage\": [\"self_signing\"],\n \"keys\": {\n \"ed25519:\"\n + remote_self_signing_key: remote_self_signing_key\n },\n }\n },\n }\n )\n )\n\n e2e_handler = self.hs.get_e2e_keys_handler()\n\n query_result = self.get_success(\n e2e_handler.query_devices(\n {\n \"device_keys\": {remote_user_id: []},\n },\n timeout=10,\n from_user_id=local_user_id,\n from_device_id=\"some_device_id\",\n )\n )\n\n self.assertEqual(query_result[\"failures\"], {})\n self.assertEqual(\n query_result[\"master_keys\"],\n {\n remote_user_id: {\n \"user_id\": remote_user_id,\n \"usage\": [\"master\"],\n \"keys\": {\"ed25519:\" + remote_master_key: remote_master_key},\n },\n },\n )\n self.assertEqual(\n query_result[\"self_signing_keys\"],\n {\n remote_user_id: {\n \"user_id\": remote_user_id,\n \"usage\": [\"self_signing\"],\n \"keys\": {\n \"ed25519:\" + remote_self_signing_key: remote_self_signing_key\n },\n }\n },\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1107, "n_words": 114, "vocab_size": 52, "complexity": 1, "nloc": 66, "token_counts": 244, "n_ast_nodes": 423, "n_identifiers": 23, "random_cut": "def test_query_devices_remote_no_sync(self) -> None:\n \n\n remote_user_id = \"@test:other\"\n local_user_id = \"@test:test\"\n\n remote_master_key = \"85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY\"\n remote_self_signing_key = \"QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ\"\n\n self.hs.get_federation_client().query_client_keys", "d_id": 71784, "documentation": { "docstring": "Tests that querying keys for a remote user that we don't share a room\n with returns the cross signing keys correctly.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 198477, "commit_id": "9d58006fc0a23afcba38f641c9472917c436428a", "repo": "sympy", "path": "sympy/core/mul.py", "file_name": "mul.py", "fun_name": "_matches_get_other_nodes", "commit_message": "Code cleanup", "code": "def _matches_get_other_nodes(dictionary, nodes, node_ind):\n \n ind_node = nodes[node_ind]\n return [ind for ind in dictionary if nodes[ind] == ind_node]\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 38, "n_words": 17, "vocab_size": 17, "complexity": 3, "nloc": 3, "token_counts": 31, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def _matches_get_other_nodes(dictionary, nodes, node_ind):\n \n ind_node = nodes[node_ind]\n return [ind for ind in dictionary if nodes[ind] == ind_node]\n", "d_id": 48956, "documentation": { "docstring": "Find other wildcards that may have already been matched.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 35025, "commit_id": "b5c6fdecf0cab6ffe22bee2ca5b8474afba0d813", "repo": 
"transformers", "path": "src/transformers/processing_utils.py", "file_name": "processing_utils.py", "fun_name": "save_pretrained", "commit_message": "PoC for a ProcessorMixin class (#15549)\n\n* PoC for a ProcessorMixin class\r\n\r\n* Documentation\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Suraj Patil \r\nCo-authored-by: Patrick von Platen \r\n\r\n* Roll out to other processors\r\n\r\n* Add base feature extractor class in init\r\n\r\n* Use args and kwargs\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Suraj Patil \r\nCo-authored-by: Patrick von Platen ", "code": "def save_pretrained(self, save_directory):\n \n for attribute_name in self.attributes:\n attribute = getattr(self, attribute_name)\n # Include the processor class in the attribute config so this processor can then be reloaded with the\n # `AutoProcessor` API.\n if hasattr(attribute, \"_set_processor_class\"):\n attribute._set_processor_class(self.__class__.__name__)\n attribute.save_pretrained(save_directory)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 121, "n_words": 37, "vocab_size": 31, "complexity": 3, "nloc": 6, "token_counts": 47, "n_ast_nodes": 79, "n_identifiers": 11, "random_cut": "def save_pretrained(self, save_directory):\n \n for attribute_name in self.attributes:\n attribute = getattr(self, attribute_name)\n # Include the processor class in the attribute config so this processor can then be reloaded with the\n # `AutoProcessor` API.\n if hasattr(attribute, \"_set_processor_class\"):\n ", "d_id": 6372, "documentation": { "docstring": "\n Saves the attributes of this processor (feature extractor, tokenizer...) in the specified directory so that it\n can be reloaded using the [`~ProcessorMixin.from_pretrained`] method.\n\n \n\n This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and\n [`~tokenization_utils_base.PreTrainedTokenizer.save_pretrained`]. 
Please refer to the docstrings of the methods\n above for more information.\n\n \n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will\n be created if it does not exist).\n ", "n_words": 74, "vocab_size": 62, "n_whitespaces": 179, "language": "en" } }, { "id": 30033, "commit_id": "d5ef58653803075849a6a13177e7a6e604aa2f60", "repo": "saleor", "path": "saleor/permission/models.py", "file_name": "models.py", "fun_name": "_user_has_module_perms", "commit_message": "Move PermissionsMixin from django auth", "code": "def _user_has_module_perms(user, app_label):\n \n for backend in auth.get_backends():\n if not hasattr(backend, \"has_module_perms\"):\n continue\n try:\n if backend.has_module_perms(user, app_label):\n return True\n except PermissionDenied:\n return False\n return False\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 102, "n_words": 24, "vocab_size": 19, "complexity": 5, "nloc": 10, "token_counts": 48, "n_ast_nodes": 80, "n_identifiers": 9, "random_cut": "def _user_has_module_perms(user, app_label):\n \n for backend in auth.get_backends():\n if not hasattr(backend, \"has_module_perms\"):\n continue\n try:\n if backend.has_module_perms(user, app_label", "d_id": 5284, "documentation": { "docstring": "Backend can raise `PermissionDenied` to short-circuit permission checking.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 67615, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/delivery_trip/delivery_trip.py", "file_name": "delivery_trip.py", "fun_name": "get_default_address", "commit_message": "style: format code with black", "code": "def get_default_address(out, name):\n\tshipping_addresses = frappe.db.sql(\n\t\t,\n\t\t(name),\n\t\tas_dict=1,\n\t)\n\n\tif shipping_addresses:\n\t\tfor out.shipping_address in shipping_addresses:\n\t\t\tif out.shipping_address.is_shipping_address:\n\t\t\t\treturn out.shipping_address\n\n\t\tout.shipping_address = shipping_addresses[0]\n\n\t\treturn out.shipping_address\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 13, "n_words": 26, "vocab_size": 19, "complexity": 4, "nloc": 21, "token_counts": 59, "n_ast_nodes": 101, "n_identifiers": 11, "random_cut": "def get_default_address(out, name):\n\tshipping_addresses = frappe.db.sql(\n\t\t,\n\t\t(name),\n\t\tas_dict=1,\n\t)\n\n\tif shipping_addresses:\n\t\tfor out.shipping_address in shipping_addresses:\n\t\t\tif out.shipping_address.is_shipping_address:\n\t\t\t\treturn out.shipping_address\n\n\t\tout.shipping_address = shipping_addresses[0]\n\n\t\treturn out.shipping_address\n\n\n@frappe.whitelis", "d_id": 14578, "documentation": { "docstring": "\n\t\t\tSELECT parent,\n\t\t\t\t(SELECT is_shipping_address FROM tabAddress a WHERE a.name=dl.parent) AS is_shipping_address\n\t\t\tFROM\n\t\t\t\t`tabDynamic Link` dl\n\t\t\tWHERE\n\t\t\t\tdl.link_doctype=\"Customer\"\n\t\t\t\tAND dl.link_name=%s\n\t\t\t\tAND dl.parenttype = \"Address\"\n\t\t", "n_words": 23, "vocab_size": 19, "n_whitespaces": 15, "language": "en" } }, { "id": 129707, "commit_id": "371fbb17e4120f569e0b6c5efde9a00a097f438e", "repo": "ray", "path": "rllib/utils/__init__.py", "file_name": "__init__.py", "fun_name": 
"force_list", "commit_message": "[RLlib] Make `policies_to_train` more flexible via callable option. (#20735)", "code": "def force_list(elements=None, to_tuple=False):\n \n ctor = list\n if to_tuple is True:\n ctor = tuple\n return ctor() if elements is None else ctor(elements) \\\n if type(elements) in [list, set, tuple] else ctor([elements])\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 30, "vocab_size": 24, "complexity": 4, "nloc": 6, "token_counts": 57, "n_ast_nodes": 87, "n_identifiers": 8, "random_cut": "def force_list(elements=None, to_tuple=False):\n \n ctor = list\n if to_tuple is True:\n ctor = tuple\n return ctor() if elements is None else ctor(elements) \\\n if type(el", "d_id": 29007, "documentation": { "docstring": "\n Makes sure `elements` is returned as a list, whether `elements` is a single\n item, already a list, or a tuple.\n\n Args:\n elements (Optional[any]): The inputs as single item, list, or tuple to\n be converted into a list/tuple. If None, returns empty list/tuple.\n to_tuple (bool): Whether to use tuple (instead of list).\n\n Returns:\n Union[list,tuple]: All given elements in a list/tuple depending on\n `to_tuple`'s value. If elements is None,\n returns an empty list/tuple.\n ", "n_words": 71, "vocab_size": 47, "n_whitespaces": 141, "language": "en" } }, { "id": 157365, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "file_name": "dpm_solver.py", "fun_name": "get_orders_and_timesteps_for_singlestep_solver", "commit_message": "release more models", "code": "def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):\n \n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [3, ] * (K - 2) + [2, 1]\n elif steps % 3 == 1:\n orders = [3, ] * (K - 1) + [1]\n else:\n orders = [3, ] * (K - 1) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [2, ] * K\n else:\n K = steps // 2 + 1\n orders = [2, ] * (K - 1) + [1]\n elif order == 1:\n K = 1\n orders = [1, ] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == 'logSNR':\n # To reproduce the results in DPM-Solver paper\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[\n torch.cumsum(torch.tensor([0, ] + orders)).to(device)]\n return timesteps_outer, orders\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 467, "n_words": 159, "vocab_size": 66, "complexity": 8, "nloc": 27, "token_counts": 228, "n_ast_nodes": 359, "n_identifiers": 17, "random_cut": "def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):\n \n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [3, ] * (K - 2) + [2, 1]\n elif steps % 3 == 1:\n orders = [3, ] * (K - 1) + [1]\n else:\n orders = [3, ] * (K - 1) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [2, ] * K\n else:\n K = steps // 2 + 1\n orders = [2, ] * (K - 1) + [1]\n elif order == 1:\n K = 1\n orders = [1, ] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == 'logSNR':\n # To reproduce the results in DPM-Solver paper\n timesteps_outer 
= self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n", "d_id": 36905, "documentation": { "docstring": "\n Get the order of each step for sampling by the singlestep DPM-Solver.\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n ", "n_words": 309, "vocab_size": 125, "n_whitespaces": 634, "language": "en" } }, { "id": 160675, "commit_id": "ac624d012cc0c3f90da4593e7bb8d9d335fa9696", "repo": "numpy", "path": "numpy/core/tests/test_multiarray.py", "file_name": "test_multiarray.py", "fun_name": "_aligned_zeros", "commit_message": "MAINT: Simplify element setting and use it for filling\n\nThis slightly modifies the behaviour of `arr.fill()` to be\n`arr.fill(scalar)`, i.e. match `arr1d[0] = scalar`, rather than\n`arr.fill(np.asarray(scalar))`, which subtely different!\n(Note that object was already special cased to have the scalar\nlogic.)\n\nOtherwise, `PyArray_Pack` is now the actual, full featured, \"scalar\"\nassignment logic. 
It is a bit strange due to that quantity/masked\narray issue, but there is nothing to be done about it.\nThe simplifications in `PyArray_AssignFromCache` should not cause\nany change in practice, because non 0-D arrays would have been\nrejected earlier on in that path.\n(Basically, it does not need the full `PyArray_Pack` logic, but that\nis fine, I intially split the two, but consolidated them again.)", "code": "def _aligned_zeros(shape, dtype=float, order=\"C\", align=None):\n \n dtype = np.dtype(dtype)\n if dtype == np.dtype(object):\n # Can't do this, fall back to standard allocation (which\n # should always be sufficiently aligned)\n if align is not None:\n raise ValueError(\"object array alignment not supported\")\n return np.zeros(shape, dtype=dtype, order=order)\n if align is None:\n align = dtype.alignment\n if not hasattr(shape, '__len__'):\n shape = (shape,)\n size = functools.reduce(operator.mul, shape) * dtype.itemsize\n buf = np.empty(size + 2*align + 1, np.uint8)\n\n ptr = buf.__array_interface__['data'][0]\n offset = ptr % align\n if offset != 0:\n offset = align - offset\n if (ptr % (2*align)) == 0:\n offset += align\n\n # Note: slices producing 0-size arrays do not necessarily change\n # data pointer --- so we use and allocate size+1\n buf = buf[offset:offset+size+1][:-1]\n buf.fill(0)\n data = np.ndarray(shape, dtype, buf, order=order)\n return data\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 247, "n_words": 129, "vocab_size": 86, "complexity": 7, "nloc": 22, "token_counts": 204, "n_ast_nodes": 323, "n_identifiers": 27, "random_cut": "def _aligned_zeros(shape, dtype=float, order=\"C\", align=None):\n \n dtype = np.dtype(dtype)\n if dtype == np.dtype(object):\n # Can't do this, fall back to standard allocation (which\n # should always be sufficiently aligned)\n if align is not None:\n raise ValueError(\"object array alignment not supported\")\n return np.zeros(shape, dtype=dtype, order=order)\n if align is None:\n align = dtype.alignment\n if not hasattr(shape, '__len__'):\n shape = (shape,)\n size = functools.reduce(operator.mul, shape) * dtype.itemsize\n buf = np.empty(size + 2*align + 1, np.uint8)\n\n ptr = buf.__array_interface__['data'][0]\n offse", "d_id": 38693, "documentation": { "docstring": "\n Allocate a new ndarray with aligned memory.\n\n The ndarray is guaranteed *not* aligned to twice the requested alignment.\n Eg, if align=4, guarantees it is not aligned to 8. 
If align=None uses\n dtype.alignment.", "n_words": 32, "vocab_size": 27, "n_whitespaces": 44, "language": "en" } }, { "id": 268942, "commit_id": "373ad97c72ed1ac4b6898e85b2cfd7b016e4b469", "repo": "keras", "path": "keras/preprocessing/image.py", "file_name": "image.py", "fun_name": "smart_resize", "commit_message": "Copy image utils from keras_preprocessing directly into core keras\n\nThis is not new code, we are just moving these utilities directly\ninto keras from keras-preprocessing.\n\nFor the library code, just fixed linting errors.\nFor the test code, had to do more major changes to port from pytest, but\nhopefully any errors have been caught by the tests themselves.\n\nPiperOrigin-RevId: 427274651", "code": "def smart_resize(x, size, interpolation='bilinear'):\n \n if len(size) != 2:\n raise ValueError('Expected `size` to be a tuple of 2 integers, '\n f'but got: {size}.')\n img = tf.convert_to_tensor(x)\n if img.shape.rank is not None:\n if img.shape.rank < 3 or img.shape.rank > 4:\n raise ValueError(\n 'Expected an image array with shape `(height, width, channels)`, '\n 'or `(batch_size, height, width, channels)`, but '\n f'got input with incorrect rank, of shape {img.shape}.')\n shape = tf.shape(img)\n height, width = shape[-3], shape[-2]\n target_height, target_width = size\n if img.shape.rank is not None:\n static_num_channels = img.shape[-1]\n else:\n static_num_channels = None\n\n crop_height = tf.cast(\n tf.cast(width * target_height, 'float32') / target_width, 'int32')\n crop_width = tf.cast(\n tf.cast(height * target_width, 'float32') / target_height, 'int32')\n\n # Set back to input height / width if crop_height / crop_width is not smaller.\n crop_height = tf.minimum(height, crop_height)\n crop_width = tf.minimum(width, crop_width)\n\n crop_box_hstart = tf.cast(\n tf.cast(height - crop_height, 'float32') / 2, 'int32')\n crop_box_wstart = tf.cast(tf.cast(width - crop_width, 'float32') / 2, 'int32')\n\n if img.shape.rank == 4:\n crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0])\n crop_box_size = tf.stack([-1, crop_height, crop_width, -1])\n else:\n crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0])\n crop_box_size = tf.stack([crop_height, crop_width, -1])\n\n img = tf.slice(img, crop_box_start, crop_box_size)\n img = tf.image.resize(images=img, size=size, method=interpolation)\n # Apparent bug in resize_images_v2 may cause shape to be lost\n if img.shape.rank is not None:\n if img.shape.rank == 4:\n img.set_shape((None, None, None, static_num_channels))\n if img.shape.rank == 3:\n img.set_shape((None, None, static_num_channels))\n if isinstance(x, np.ndarray):\n return img.numpy()\n return img\n\n\n@keras_export('keras.utils.array_to_img',\n 'keras.preprocessing.image.array_to_img')", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export('keras.utils.array_to_img',\n 'keras.preprocessing.image.array_to_img')", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 374, "n_words": 228, "vocab_size": 124, "complexity": 11, "nloc": 43, "token_counts": 404, "n_ast_nodes": 661, "n_identifiers": 36, "random_cut": "def smart_resize(x, size, interpolation='bilinear'):\n \n if len(size) != 2:\n raise ValueError('Expected `size` to be a tuple of 2 integers, '\n f'but got: {size}.')\n img = tf.convert_to_tensor(x)\n if img.shape.rank is not None:\n if img.shape.rank < 3 or img.shape.rank > 4:\n raise ValueError(\n 'Expected an image array with shape `(height, width, channels)`, '\n 'or `(batch_size, height, 
width, channels)`, but '\n f'got input with incorrect rank, of shape {img.shape}.')\n shape = tf.shape(img)\n height, width = shape[-3], shape[-2]\n target_height, target_width = size\n if img.shape.rank is not None:\n static_num_channels = img.shape[-1]\n else:\n static_num_channels = None\n\n crop_height = tf.cast(\n tf.cast(width * target_height, 'float32') / target_width, 'int32')\n crop_width = tf.cast(\n tf.cast(height * target_width, 'float32') / target_height, 'int32')\n\n # Set back to input height / width if crop_height / crop_width is not smaller.\n crop_height = tf.minimum(height, crop_height)\n crop_width = tf.minimum(width, crop_width)\n\n crop_box_hstart = tf.cast(\n tf.cast(height - crop_height, 'float32') / 2, 'int32')\n crop_box_w", "d_id": 79774, "documentation": { "docstring": "Resize images to a target size without aspect ratio distortion.\n\n TensorFlow image datasets typically yield images that have each a different\n size. However, these images need to be batched before they can be\n processed by Keras layers. To be batched, images need to share the same height\n and width.\n\n You could simply do:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: tf.image.resize(img, size))\n ```\n\n However, if you do this, you distort the aspect ratio of your images, since\n in general they do not all have the same aspect ratio as `size`. This is\n fine in many cases, but not always (e.g. for GANs this can be a problem).\n\n Note that passing the argument `preserve_aspect_ratio=True` to `resize`\n will preserve the aspect ratio, but at the cost of no longer respecting the\n provided target size. Because `tf.image.resize` doesn't crop images,\n your output images will still have different sizes.\n\n This calls for:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: smart_resize(img, size))\n ```\n\n Your output images will actually be `(200, 200)`, and will not be distorted.\n Instead, the parts of the image that do not fit within the target size\n get cropped out.\n\n The resizing process is:\n\n 1. Take the largest centered crop of the image that has the same aspect ratio\n as the target size. For instance, if `size=(200, 200)` and the input image has\n size `(340, 500)`, we take a crop of `(340, 340)` centered along the width.\n 2. Resize the cropped image to the target size. In the example above,\n we resize the `(340, 340)` crop to `(200, 200)`.\n\n Args:\n x: Input image or batch of images (as a tensor or NumPy array). Must be in\n format `(height, width, channels)` or `(batch_size, height, width,\n channels)`.\n size: Tuple of `(height, width)` integer. Target size.\n interpolation: String, interpolation to use for resizing. Defaults to\n `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`,\n `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.\n\n Returns:\n Array with shape `(size[0], size[1], channels)`. 
If the input image was a\n NumPy array, the output is a NumPy array, and if it was a TF tensor,\n the output is a TF tensor.\n ", "n_words": 348, "vocab_size": 194, "n_whitespaces": 419, "language": "en" } }, { "id": 337266, "commit_id": "bbccd2c3fbaa93ed5984e22fc8bf66eb13fdb82b", "repo": "accelerate", "path": "src/accelerate/utils.py", "file_name": "utils.py", "fun_name": "gather", "commit_message": "Basic fixes for DeepSpeed (#264)", "code": "def gather(tensor):\n \n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:\n return _gpu_gather(tensor)\n elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:\n return _cpu_gather(tensor)\n else:\n return tensor\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 25, "vocab_size": 18, "complexity": 4, "nloc": 9, "token_counts": 68, "n_ast_nodes": 113, "n_identifiers": 13, "random_cut": "def gather(tensor):\n \n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n elif AcceleratorState().distribut", "d_id": 120987, "documentation": { "docstring": "\n Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n\n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n The data to gather.\n\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n ", "n_words": 37, "vocab_size": 29, "n_whitespaces": 75, "language": "en" } }, { "id": 32946, "commit_id": "c23cbdff4c097d3f3039999827a675cf8f06a32e", "repo": "transformers", "path": "src/transformers/trainer_utils.py", "file_name": "trainer_utils.py", "fun_name": "speed_metrics", "commit_message": "Fix docstrings with last version of hf-doc-builder styler (#18581)\n\n* Fix docstrings with last version of hf-doc-builder styler\r\n\r\n* Remove empty Parameter block", "code": "def speed_metrics(split, start_time, num_samples=None, num_steps=None):\n \n runtime = time.time() - start_time\n result = {f\"{split}_runtime\": round(runtime, 4)}\n if num_samples is not None:\n samples_per_second = num_samples / runtime\n result[f\"{split}_samples_per_second\"] = round(samples_per_second, 3)\n if num_steps is not None:\n steps_per_second = num_steps / runtime\n result[f\"{split}_steps_per_second\"] = round(steps_per_second, 3)\n return result\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 91, "n_words": 45, "vocab_size": 29, "complexity": 3, "nloc": 10, "token_counts": 86, "n_ast_nodes": 145, "n_identifiers": 11, "random_cut": "def speed_metrics(split, start_time, num_samples=None, num_steps=None):\n \n runtime = time.time() - start_time\n result = {f\"{split}_runtime\": round(runtime, 4)}\n if num_samples is not None:\n samples_per_second = num_samples / runtime\n result[f\"{split}_samples_per_second\"] = round(samples_per_second, 3)\n if num_steps is not None:\n steps_per_second = num_steps / runtime\n result[f\"{split}_steps_per_second\"] = round(steps_per_second, 3)\n return result\n\n", "d_id": 6038, "documentation": { "docstring": "\n Measure and return speed performance metrics.\n\n This function requires a time snapshot `start_time` 
before the operation to be measured starts and this function\n should be run immediately after the operation to be measured has completed.\n\n Args:\n - split: name to prefix metric (like train, eval, test...)\n - start_time: operation start time\n - num_samples: number of samples processed\n ", "n_words": 57, "vocab_size": 44, "n_whitespaces": 82, "language": "en" } }, { "id": 167440, "commit_id": "e48c9c3973286e257f6da1966c91806d86b917e0", "repo": "pandas", "path": "pandas/io/date_converters.py", "file_name": "date_converters.py", "fun_name": "parse_date_fields", "commit_message": "TYP: more return annotations for io/* (#47524)\n\n* TYP: more return annotations for io/*\r\n\r\n* import future", "code": "def parse_date_fields(year_col, month_col, day_col) -> npt.NDArray[np.object_]:\n \n warnings.warn(\n , # noqa: E501\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n year_col = _maybe_cast(year_col)\n month_col = _maybe_cast(month_col)\n day_col = _maybe_cast(day_col)\n return parsing.try_parse_year_month_day(year_col, month_col, day_col)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 70, "n_words": 27, "vocab_size": 23, "complexity": 1, "nloc": 19, "token_counts": 63, "n_ast_nodes": 100, "n_identifiers": 16, "random_cut": "def parse_date_fields(year_col, month_col, day_col) -> npt.NDArray[np.object_]:\n \n warnings.warn(\n , # noqa: E501\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n year_col = _maybe_cast(year_col)\n ", "d_id": 40017, "documentation": { "docstring": "\n Parse columns with years, months and days into a single date column.\n\n .. deprecated:: 1.2\n \n Use pd.to_datetime({\"year\": year_col, \"month\": month_col, \"day\": day_col}) instead to get a Pandas Series.\n Use ser = pd.to_datetime({\"year\": year_col, \"month\": month_col, \"day\": day_col}) and\n np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array.\n", "n_words": 49, "vocab_size": 36, "n_whitespaces": 80, "language": "en" } }, { "id": 258587, "commit_id": "2b15b908c11b90a15253394b1a03bd535720d6ce", "repo": "scikit-learn", "path": "sklearn/ensemble/tests/test_forest.py", "file_name": "test_forest.py", "fun_name": "test_poisson_vs_mse", "commit_message": "FIX poisson proxy_impurity_improvement (#22191)", "code": "def test_poisson_vs_mse():\n \n rng = np.random.RandomState(42)\n n_train, n_test, n_features = 500, 500, 10\n X = datasets.make_low_rank_matrix(\n n_samples=n_train + n_test, n_features=n_features, random_state=rng\n )\n # We create a log-linear Poisson model and downscale coef as it will get\n # exponentiated.\n coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)\n y = rng.poisson(lam=np.exp(X @ coef))\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=n_test, random_state=rng\n )\n # We prevent some overfitting by setting min_samples_split=10.\n forest_poi = RandomForestRegressor(\n criterion=\"poisson\", min_samples_leaf=10, max_features=\"sqrt\", random_state=rng\n )\n forest_mse = RandomForestRegressor(\n criterion=\"squared_error\",\n min_samples_leaf=10,\n max_features=\"sqrt\",\n random_state=rng,\n )\n\n forest_poi.fit(X_train, y_train)\n forest_mse.fit(X_train, y_train)\n dummy = DummyRegressor(strategy=\"mean\").fit(X_train, y_train)\n\n for X, y, val in [(X_train, y_train, \"train\"), (X_test, y_test, \"test\")]:\n metric_poi = mean_poisson_deviance(y, forest_poi.predict(X))\n # squared_error forest might 
produce non-positive predictions => clip\n # If y = 0 for those, the poisson deviance gets too good.\n # If we drew more samples, we would eventually get y > 0 and the\n # poisson deviance would explode, i.e. be undefined. Therefore, we do\n # not clip to a tiny value like 1e-15, but to 1e-6. This acts like a\n # small penalty to the non-positive predictions.\n metric_mse = mean_poisson_deviance(\n y, np.clip(forest_mse.predict(X), 1e-6, None)\n )\n metric_dummy = mean_poisson_deviance(y, dummy.predict(X))\n # As squared_error might correctly predict 0 in train set, its train\n # score can be better than Poisson. This is no longer the case for the\n # test set. But keep the above comment for clipping in mind.\n if val == \"test\":\n assert metric_poi < metric_mse\n assert metric_poi < 0.5 * metric_dummy\n\n\n@pytest.mark.parametrize(\"criterion\", (\"poisson\", \"squared_error\"))", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"criterion\", (\"poisson\", \"squared_error\"))", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 483, "n_words": 247, "vocab_size": 163, "complexity": 3, "nloc": 32, "token_counts": 279, "n_ast_nodes": 458, "n_identifiers": 50, "random_cut": "def test_poisson_vs_mse():\n \n rng = np.random.RandomState(42)\n n_train, n_test, n_features = 500, 500, 10\n X = datasets.make_low_rank_matrix(\n n_samples=n_train + n_test, n_features=n_features, random_state=rng\n )\n # We create a log-linear Poisson model and downscale coef as it will get\n # exponentiated.\n coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)\n y = rng.poisson(lam=np.exp(X @ coef))\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=n_test, random_state=rng\n )\n # We prevent some overfitting by setting min_samples_split=10.\n forest_poi = RandomForestRegressor(\n criterion=\"poisson\", min_samples_leaf=10, max_features=\"sqrt\", random_state=rng\n )\n forest_mse = RandomForestRegressor(\n criterion=\"squared_error\",\n min_samples_leaf=10,\n max_features=\"sqrt\",\n random_state=rng,\n )\n\n forest_poi.fit(X_train, y_train)\n forest_mse.fit(X_train, y_train)\n dummy = DummyRegressor(strategy=\"mean\").fit(X_train, y_train)\n\n ", "d_id": 75307, "documentation": { "docstring": "Test that random forest with poisson criterion performs better than\n mse for a poisson target.\n\n There is a similar test for DecisionTreeRegressor.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 31, "language": "en" } }, { "id": 130419, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/cli_logger.py", "file_name": "cli_logger.py", "fun_name": "_external_caller_info", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _external_caller_info():\n \n\n frame = inspect.currentframe()\n caller = frame\n levels = 0\n while caller.f_code.co_filename == __file__:\n caller = caller.f_back\n levels += 1\n return {\n \"lineno\": caller.f_lineno,\n \"filename\": os.path.basename(caller.f_code.co_filename),\n }\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 28, "vocab_size": 22, "complexity": 2, "nloc": 11, "token_counts": 59, "n_ast_nodes": 100, "n_identifiers": 14, "random_cut": "def _external_caller_info():\n \n\n frame = 
inspect.currentframe()\n caller = frame\n levels = 0\n while caller.f_code.co_filename == __file__:\n caller = caller.f_back\n ", "d_id": 29265, "documentation": { "docstring": "Get the info from the caller frame.\n\n Used to override the logging function and line number with the correct\n ones. See the comment on _patched_makeRecord for more info.\n ", "n_words": 28, "vocab_size": 24, "n_whitespaces": 37, "language": "en" } }, { "id": 186660, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/override_centos.py", "file_name": "override_centos.py", "fun_name": "_try_restart_fedora", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def _try_restart_fedora(self) -> None:\n \n\n try:\n util.run_script(['systemctl', 'restart', 'httpd'])\n except errors.SubprocessError as err:\n raise errors.MisconfigurationError(str(err))\n\n # Finish with actual config check to see if systemctl restart helped\n super().config_test()\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 84, "n_words": 27, "vocab_size": 27, "complexity": 2, "nloc": 9, "token_counts": 46, "n_ast_nodes": 85, "n_identifiers": 11, "random_cut": "def _try_restart_fedora(self) -> None:\n \n\n try:\n util.run_script(['systemctl', 'restart', 'httpd'])\n except errors.SubprocessError as err:\n raise errors.MisconfigurationError(str(err))\n\n # Finish with actual config check to see if systemctl restart helped\n super().config_test()\n", "d_id": 45568, "documentation": { "docstring": "\n Tries to restart httpd using systemctl to generate the self signed key pair.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 220661, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/selector_events.py", "file_name": "selector_events.py", "fun_name": "sock_accept", "commit_message": "add python 3.10.4 for windows", "code": "async def sock_accept(self, sock):\n \n base_events._check_ssl_socket(sock)\n if self._debug and sock.gettimeout() != 0:\n raise ValueError(\"the socket must be non-blocking\")\n fut = self.create_future()\n self._sock_accept(fut, sock)\n return await fut\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 78, "n_words": 25, "vocab_size": 24, "complexity": 3, "nloc": 7, "token_counts": 50, "n_ast_nodes": 86, "n_identifiers": 11, "random_cut": "async def sock_accept(self, sock):\n \n base_events._check_ssl_s", "d_id": 56078, "documentation": { "docstring": "Accept a connection.\n\n The socket must be bound to an address and listening for connections.\n The return value is a pair (conn, address) where conn is a new socket\n object usable to send and receive data on the connection, and address\n is the address bound to the socket on the other end of the connection.\n ", "n_words": 55, "vocab_size": 35, "n_whitespaces": 90, "language": "en" } }, { "id": 3825, "commit_id": "a3aae8017a0a40ff2006e2567f71dccb04c997a5", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py", "file_name": "test_base_insight_streams.py", "fun_name": "test_state", "commit_message": "🎉 🎉 Source FB Marketing: performance and reliability fixes 
(#9805)\n\n* Facebook Marketing performance improvement\r\n\r\n* add comments and little refactoring\r\n\r\n* fix integration tests with the new config\r\n\r\n* improve job status handling, limit concurrency to 10\r\n\r\n* fix campaign jobs, refactor manager\r\n\r\n* big refactoring of async jobs, support random order of slices\r\n\r\n* update source _read_incremental to hook new state logic\r\n\r\n* fix issues with timeout\r\n\r\n* remove debugging and clean up, improve retry logic\r\n\r\n* merge changes from #8234\r\n\r\n* fix call super _read_increment\r\n\r\n* generalize batch execution, add use_batch flag\r\n\r\n* improve coverage, do some refactoring of spec\r\n\r\n* update test, remove overrides of source\r\n\r\n* add split by AdSet\r\n\r\n* add smaller insights\r\n\r\n* fix end_date < start_date case\r\n\r\n* add account_id to PK\r\n\r\n* add notes\r\n\r\n* fix new streams\r\n\r\n* fix reversed incremental stream\r\n\r\n* update spec.json for SAT\r\n\r\n* upgrade CDK and bump version\r\n\r\nCo-authored-by: Dmytro Rezchykov \r\nCo-authored-by: Eugene Kulak ", "code": "def test_state(self, api, state):\n \n stream = AdsInsights(\n api=api,\n start_date=datetime(2010, 1, 1),\n end_date=datetime(2011, 1, 1),\n )\n\n assert stream.state == {}\n\n stream.state = state\n actual_state = stream.state\n actual_state[\"slices\"] = sorted(actual_state.get(\"slices\", []))\n state[\"slices\"] = sorted(state.get(\"slices\", []))\n\n assert actual_state == state\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 133, "n_words": 37, "vocab_size": 24, "complexity": 1, "nloc": 12, "token_counts": 96, "n_ast_nodes": 152, "n_identifiers": 12, "random_cut": "def test_state(self, api, state):\n \n stream = AdsInsights(\n api=api,\n start_", "d_id": 572, "documentation": { "docstring": "State setter/getter should work with all combinations", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 96187, "commit_id": "cf30c11a194aa5e61d8d7c7fc506764f846fcf82", "repo": "sentry", "path": "src/sentry/search/events/builder.py", "file_name": "builder.py", "fun_name": "get_snql_query", "commit_message": "feat(MEP): Add initial framework for metric queries (#31649)\n\n- This adds a MetricsQueryBuilder, which works very similarily to our\r\n QueryBuilder, but with specific handlers for how metrics construct\r\n queries\r\n- This MetricsQueryBuilder does not yet construct snql queries, and will\r\n not because table queries will require multiple queries to construct\r\n similar table data\r\n - that is, if we want [transaction, p95, count_unique(user)], we need\r\n a query against distributions with [transaction, p95] followed by a\r\n second query for [transaction, count_unique(user)] against the sets\r\n table\r\n - This is so we can maintain a sortby", "code": "def get_snql_query(self) -> None:\n \n raise NotImplementedError(\"get_snql_query cannot be implemented for MetricsQueryBuilder\")\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 13, "n_ast_nodes": 26, "n_identifiers": 3, "random_cut": "def get_snql_query(self) -> None:\n \n raise NotImplementedError(\"get_snql_", "d_id": 19288, "documentation": { "docstring": "Because metrics table queries need to make multiple requests per metric type this function cannot 
be\n inmplemented see run_query", "n_words": 19, "vocab_size": 19, "n_whitespaces": 25, "language": "en" } }, { "id": 133391, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/torch/worker_group.py", "file_name": "worker_group.py", "fun_name": "_validate", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _validate(self, params):\n \n remote_worker_stats = [w.validate.remote(**params) for w in self.remote_workers]\n return remote_worker_stats\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 29, "n_ast_nodes": 47, "n_identifiers": 8, "random_cut": "def _validate(self, params):\n \n remote_worker_stats = [w.validate.remote(**params) for w in self.remote_workers]\n ", "d_id": 30004, "documentation": { "docstring": "Runs validation for each worker. Returns results as promises.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 108967, "commit_id": "31d13198ecf6969b1b693c28a02b0805f3f20420", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/axes3d.py", "file_name": "axes3d.py", "fun_name": "set_aspect", "commit_message": "Add equalxy, equalyz, equalxz aspect ratios\n\nUpdate docstrings", "code": "def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):\n \n _api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'),\n aspect=aspect)\n super().set_aspect(\n aspect='auto', adjustable=adjustable, anchor=anchor, share=share)\n\n if aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):\n if aspect == 'equal':\n axis_indices = [0, 1, 2]\n elif aspect == 'equalxy':\n axis_indices = [0, 1]\n elif aspect == 'equalxz':\n axis_indices = [0, 2]\n elif aspect == 'equalyz':\n axis_indices = [1, 2]\n\n view_intervals = np.array([self.xaxis.get_view_interval(),\n self.yaxis.get_view_interval(),\n self.zaxis.get_view_interval()])\n mean = np.mean(view_intervals, axis=1)\n delta = np.max(np.ptp(view_intervals, axis=1))\n deltas = delta * self._box_aspect / min(self._box_aspect)\n\n for i, set_lim in enumerate((self.set_xlim3d,\n self.set_ylim3d,\n self.set_zlim3d)):\n if i in axis_indices:\n set_lim(mean[i] - deltas[i]/2., mean[i] + deltas[i]/2.)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 508, "n_words": 94, "vocab_size": 65, "complexity": 8, "nloc": 25, "token_counts": 255, "n_ast_nodes": 399, "n_identifiers": 31, "random_cut": "def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):\n \n _api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'),\n aspect=aspect)\n super().set_aspect(\n aspect='auto', adjustable=adjustable, anchor=anchor, share=share)\n\n if aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):\n if aspect == 'equal':\n axis_indices = [0, 1, 2]\n elif aspect == 'equalxy':\n axis_indices = [0, 1]\n elif aspect == 'equalxz':\n axis_indices = [0, 2]\n elif aspect == 'equalyz':\n axis_indices = [1, 2]\n\n view_intervals = np.array([self.xaxis.get_view_interval(),\n self.yaxis.get_view_interval(),\n self.zaxis.get_view_interval()])\n mean = np.mean(view_intervals, axis=1)\n delta = np.max(np.ptp(view_intervals, axis=1))\n deltas = delta * self._box_aspect / 
min(self._box_aspect)\n\n for i, set_lim in enumerate((self.set_xlim", "d_id": 23404, "documentation": { "docstring": "\n Set the aspect ratios.\n\n Parameters\n ----------\n aspect : {'auto', 'equal', 'equalxy', 'equalxz', 'equalyz'}\n Possible values:\n\n ========= ==================================================\n value description\n ========= ==================================================\n 'auto' automatic; fill the position rectangle with data.\n 'equal' adapt all the axes to have equal aspect ratios.\n 'equalxy' adapt the x and y axes to have equal aspect ratios.\n 'equalxz' adapt the x and z axes to have equal aspect ratios.\n 'equalyz' adapt the y and z axes to have equal aspect ratios.\n ========= ==================================================\n\n adjustable : None\n Currently ignored by Axes3D\n\n If not *None*, this defines which parameter will be adjusted to\n meet the required aspect. See `.set_adjustable` for further\n details.\n\n anchor : None or str or 2-tuple of float, optional\n If not *None*, this defines where the Axes will be drawn if there\n is extra space due to aspect constraints. The most common way to\n to specify the anchor are abbreviations of cardinal directions:\n\n ===== =====================\n value description\n ===== =====================\n 'C' centered\n 'SW' lower left corner\n 'S' middle of bottom edge\n 'SE' lower right corner\n etc.\n ===== =====================\n\n See `~.Axes.set_anchor` for further details.\n\n share : bool, default: False\n If ``True``, apply the settings to all shared Axes.\n\n See Also\n --------\n mpl_toolkits.mplot3d.axes3d.Axes3D.set_box_aspect\n ", "n_words": 195, "vocab_size": 117, "n_whitespaces": 630, "language": "en" } }, { "id": 217315, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/enum.py", "file_name": "enum.py", "fun_name": "__getattr__", "commit_message": "add python 3.10.4 for windows", "code": "def __getattr__(cls, name):\n \n if _is_dunder(name):\n raise AttributeError(name)\n try:\n return cls._member_map_[name]\n except KeyError:\n raise AttributeError(name) from None\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 77, "n_words": 16, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 38, "n_ast_nodes": 62, "n_identifiers": 7, "random_cut": "def __getattr__(cls, name):\n \n if _is_dunder(name):\n raise AttributeError(name)\n try:\n return cl", "d_id": 54713, "documentation": { "docstring": "\n Return the enum member matching `name`\n\n We use __getattr__ instead of descriptors or inserting into the enum\n class' __dict__ in order to support `name` and `value` being both\n properties for enum members (which live in the class' __dict__) and\n enum members themselves.\n ", "n_words": 42, "vocab_size": 32, "n_whitespaces": 85, "language": "en" } }, { "id": 259759, "commit_id": "0822851f5cb17827939a7d7b4f8c84f43184ae89", "repo": "scikit-learn", "path": "sklearn/cluster/tests/test_bisect_k_means.py", "file_name": "test_bisect_k_means.py", "fun_name": "test_n_clusters", "commit_message": "FEA Bisecting K-Means (#20031)\n\nCo-authored-by: Gael Varoquaux \r\nCo-authored-by: Tom Dupré la Tour \r\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def test_n_clusters(n_clusters):\n \n\n rng = np.random.RandomState(0)\n X = rng.rand(10, 2)\n\n bisect_means = 
BisectingKMeans(n_clusters=n_clusters, random_state=0)\n bisect_means.fit(X)\n\n assert_array_equal(np.unique(bisect_means.labels_), np.arange(n_clusters))\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 62, "n_ast_nodes": 100, "n_identifiers": 16, "random_cut": "def test_n_clusters(n_clusters):\n \n\n rng = np.random.RandomState(0)\n X", "d_id": 75904, "documentation": { "docstring": "Test if resulting labels are in range [0, n_clusters - 1].", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 224175, "commit_id": "372384d8102ddb4be6360f44d1bfddb8b45435a4", "repo": "mkdocs", "path": "mkdocs/tests/structure/nav_tests.py", "file_name": "nav_tests.py", "fun_name": "test_nested_ungrouped_nav", "commit_message": "Some manual changes ahead of formatting code with Black", "code": "def test_nested_ungrouped_nav(self):\n nav_cfg = [\n {'Home': 'index.md'},\n {'Contact': 'about/contact.md'},\n {'License Title': 'about/sub/license.md'},\n ]\n expected = dedent(\n \n )\n cfg = load_config(nav=nav_cfg, site_url='http://example.com/')\n fs = [\n File(list(item.values())[0], cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])\n for item in nav_cfg\n ]\n files = Files(fs)\n site_navigation = get_navigation(files, cfg)\n self.assertEqual(str(site_navigation).strip(), expected)\n self.assertEqual(len(site_navigation.items), 3)\n self.assertEqual(len(site_navigation.pages), 3)\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 196, "n_words": 46, "vocab_size": 37, "complexity": 2, "nloc": 23, "token_counts": 137, "n_ast_nodes": 228, "n_identifiers": 24, "random_cut": "def test_nested_ungrouped_nav(self):\n nav_cfg = [\n {'Home': 'index.md'},\n {'Contact': 'about/contact.md'},\n {'License Title': 'about/sub/license.md'},\n ]\n expected = dedent(\n \n )\n cfg = load_config(nav=nav_cfg, site_url='http://example.com/')\n fs = [\n File(list(item.values())[0], cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls'])\n for item in nav_cfg\n ]", "d_id": 57228, "documentation": { "docstring": "\n Page(title='Home', url='/')\n Page(title='Contact', url='/about/contact/')\n Page(title='License Title', url='/about/sub/license/')\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 52, "language": "en" } }, { "id": 130806, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/node.py", "file_name": "node.py", "fun_name": "_get_log_file_names", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _get_log_file_names(self, name, unique=False):\n \n\n if unique:\n log_stdout = self._make_inc_temp(\n suffix=\".out\", prefix=name, directory_name=self._logs_dir\n )\n log_stderr = self._make_inc_temp(\n suffix=\".err\", prefix=name, directory_name=self._logs_dir\n )\n else:\n log_stdout = os.path.join(self._logs_dir, f\"{name}.out\")\n log_stderr = os.path.join(self._logs_dir, f\"{name}.err\")\n return log_stdout, log_stderr\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 156, "n_words": 32, "vocab_size": 21, "complexity": 2, "nloc": 12, "token_counts": 91, "n_ast_nodes": 151, 
"n_identifiers": 14, "random_cut": "def _get_log_file_names(self, name, unique=False):\n \n\n if unique:\n log_stdout = self._make_inc_temp(\n suffix=\".out\", prefix=name, directory_name=self._logs_dir\n )\n log_stderr = self._make_inc_temp(\n suffix=\".err\", prefix=name, directory_name=self._logs_dir\n )\n else:\n log_stdout = os.path.join(self._logs_dir, f\"{name}.out\")\n log_stderr = os.path.join(self._logs_dir, f\"{name}.err\")\n return log_stdout, log_stderr\n", "d_id": 29378, "documentation": { "docstring": "Generate partially randomized filenames for log files.\n\n Args:\n name (str): descriptive string for this log file.\n unique (bool): if true, a counter will be attached to `name` to\n ensure the returned filename is not already used.\n\n Returns:\n A tuple of two file names for redirecting (stdout, stderr).\n ", "n_words": 47, "vocab_size": 43, "n_whitespaces": 116, "language": "en" } }, { "id": 221263, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/calendar.py", "file_name": "calendar.py", "fun_name": "yeardayscalendar", "commit_message": "add python 3.10.4 for windows", "code": "def yeardayscalendar(self, year, width=3):\n \n months = [\n self.monthdayscalendar(year, i)\n for i in range(January, January+12)\n ]\n return [months[i:i+width] for i in range(0, len(months), width) ]\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 24, "vocab_size": 20, "complexity": 3, "nloc": 6, "token_counts": 60, "n_ast_nodes": 88, "n_identifiers": 10, "random_cut": "def yeardayscalendar(self, year, width=3):\n \n months = [\n self.monthdayscalendar(year, i)\n for i in range(January, January+12)\n ]\n return [months[i:i+width] for i in range(0, len(months), width) ]\n\n", "d_id": 56302, "documentation": { "docstring": "\n Return the data for the specified year ready for formatting (similar to\n yeardatescalendar()). 
Entries in the week lists are day numbers.\n Day numbers outside this month are zero.\n ", "n_words": 28, "vocab_size": 24, "n_whitespaces": 57, "language": "en" } }, { "id": 207141, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_filters/tests.py", "file_name": "tests.py", "fun_name": "test_simplelistfilter_without_parameter", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_simplelistfilter_without_parameter(self):\n \n modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n msg = \"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.\"\n with self.assertRaisesMessage(ImproperlyConfigured, msg):\n modeladmin.get_changelist_instance(request)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 81, "n_words": 28, "vocab_size": 25, "complexity": 1, "nloc": 7, "token_counts": 53, "n_ast_nodes": 92, "n_identifiers": 15, "random_cut": "def test_simplelistfilter_without_parameter(self):\n \n modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n msg = \"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.\"\n with self.assertRaisesMessage(ImproperlyConfigured, msg):\n modeladmin.get_changelist_instance(request)\n", "d_id": 51877, "documentation": { "docstring": "\n Any SimpleListFilter must define a parameter_name.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 88327, "commit_id": "565f971da955d57c754a47f5802fe9f9f7c66b39", "repo": "sentry", "path": "src/sentry/api/invite_helper.py", "file_name": "invite_helper.py", "fun_name": "from_session_or_email", "commit_message": "Move invite code functionality from cookie to session (#40905)\n\nMoves the invite functionality from cookies to the session. This is to\r\nharden the security of the platform.\r\n\r\nWith the cookie approach, a client can manipulate the cookie value for\r\n`pending-invite` resulting in situations where an invite code can be\r\nreused.", "code": "def from_session_or_email(cls, request, organization, email, instance=None, logger=None):\n \n invite_token, invite_member_id = get_invite_details(request)\n\n try:\n if invite_token and invite_member_id:\n om = OrganizationMember.objects.get(token=invite_token, id=invite_member_id)\n else:\n om = OrganizationMember.objects.get(\n email=email, organization=organization, user=None\n )\n except OrganizationMember.DoesNotExist:\n # Unable to locate the pending organization member. 
Cannot setup\n # the invite helper.\n return None\n\n return cls(\n request=request, member_id=om.id, token=om.token, instance=instance, logger=logger\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 226, "n_words": 54, "vocab_size": 47, "complexity": 4, "nloc": 14, "token_counts": 107, "n_ast_nodes": 161, "n_identifiers": 19, "random_cut": "def from_session_or_email(cls, request, organization, email, instance=None, logger=None):\n \n invite_token, invite_member_id = get_invite_details(request)\n\n try:\n if invite_token and invite_member_id:\n om = OrganizationMember.objects.get(token=invite_token, id=invite_member_id)\n else:\n om = OrganizationMember.objects.get(\n email=email, organization=organization, user=None\n )\n except OrganizationMember.DoesNotExist:\n # Unable to locate the pending organization member. Cannot setup\n # the invite helper.\n return None\n\n re", "d_id": 18375, "documentation": { "docstring": "\n Initializes the ApiInviteHelper by locating the pending organization\n member via the currently set pending invite details in the session, or\n via the passed email if no cookie is currently set.\n ", "n_words": 30, "vocab_size": 23, "n_whitespaces": 59, "language": "en" } }, { "id": 290159, "commit_id": "ab14e55c052433e42224199798b026637614685f", "repo": "core", "path": "tests/components/bluetooth/test_usage.py", "file_name": "test_usage.py", "fun_name": "test_multiple_bleak_scanner_instances", "commit_message": "Ensure we do not actually create a BleakScanner in the usage test (#81362)\n\nAvoids a failure when bluetooth is turned off when\r\ntesting on macos:\r\n\r\nbleak.exc.BleakError: Bluetooth device is turned off", "code": "async def test_multiple_bleak_scanner_instances(hass):\n \n install_multiple_bleak_catcher()\n\n instance = bleak.BleakScanner()\n\n assert isinstance(instance, HaBleakScannerWrapper)\n\n uninstall_multiple_bleak_catcher()\n\n with patch(\"bleak.get_platform_scanner_backend_type\"):\n instance = bleak.BleakScanner()\n\n assert not isinstance(instance, HaBleakScannerWrapper)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 20, "vocab_size": 14, "complexity": 1, "nloc": 8, "token_counts": 47, "n_ast_nodes": 86, "n_identifiers": 10, "random_cut": "async def test_multiple_bleak_scanner_instances(hass):\n \n install_multiple_bleak_catcher()\n\n instance = bleak.BleakScanner()\n\n assert isinstance(instance, HaBleakScannerWrapper)\n\n uninstall_multiple_bleak_catcher()\n\n with patch(\"bleak.get_platform_scanner_backend_type\"):\n instance = bleak.BleakScanner()\n\n assert not isinstance(instance, HaBleakScannerWrapper)\n\n", "d_id": 89278, "documentation": { "docstring": "Test creating multiple BleakScanners without an integration.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 265912, "commit_id": "9628dead07ccef9608b32906aa8194bc948e5a09", "repo": "netbox", "path": "netbox/utilities/utils.py", "file_name": "utils.py", "fun_name": "highlight_string", "commit_message": "Closes #10560: New global search (#10676)\n\n* Initial work on new search backend\r\n\r\n* Clean up search backends\r\n\r\n* Return only the most relevant result per object\r\n\r\n* Clear any pre-existing cached entries on cache()\r\n\r\n* #6003: Implement global search functionality for custom field values\r\n\r\n* Tweak field 
weights & document guidance\r\n\r\n* Extend search() to accept a lookup type\r\n\r\n* Move get_registry() out of SearchBackend\r\n\r\n* Enforce object permissions when returning search results\r\n\r\n* Add indexers for remaining models\r\n\r\n* Avoid calling remove() on non-cacheable objects\r\n\r\n* Use new search backend by default\r\n\r\n* Extend search backend to filter by object type\r\n\r\n* Clean up search view form\r\n\r\n* Enable specifying lookup logic\r\n\r\n* Add indexes for value field\r\n\r\n* Remove object type selector from search bar\r\n\r\n* Introduce SearchTable and enable HTMX for results\r\n\r\n* Enable pagination\r\n\r\n* Remove legacy search backend\r\n\r\n* Cleanup\r\n\r\n* Use a UUID for CachedValue primary key\r\n\r\n* Refactoring search methods\r\n\r\n* Define max search results limit\r\n\r\n* Extend reindex command to support specifying particular models\r\n\r\n* Add clear() and size to SearchBackend\r\n\r\n* Optimize bulk caching performance\r\n\r\n* Highlight matched portion of field value\r\n\r\n* Performance improvements for reindexing\r\n\r\n* Started on search tests\r\n\r\n* Cleanup & docs\r\n\r\n* Documentation updates\r\n\r\n* Clean up SearchIndex\r\n\r\n* Flatten search registry to register by app_label.model_name\r\n\r\n* Clean up search backend classes\r\n\r\n* Clean up RestrictedGenericForeignKey and RestrictedPrefetch\r\n\r\n* Resolve migrations conflict", "code": "def highlight_string(value, highlight, trim_pre=None, trim_post=None, trim_placeholder='...'):\n \n # Split value on highlight string\n try:\n pre, match, post = re.split(fr'({highlight})', value, maxsplit=1, flags=re.IGNORECASE)\n except ValueError:\n # Match not found\n return escape(value)\n\n # Trim pre/post sections to length\n if trim_pre and len(pre) > trim_pre:\n pre = trim_placeholder + pre[-trim_pre:]\n if trim_post and len(post) > trim_post:\n post = post[:trim_post] + trim_placeholder\n\n return f'{escape(pre)}{escape(match)}{escape(post)}'\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 118, "n_words": 59, "vocab_size": 48, "complexity": 6, "nloc": 10, "token_counts": 97, "n_ast_nodes": 185, "n_identifiers": 17, "random_cut": "def highlight_string(value, highlight, trim_pre=None, trim_post=None, trim_placeholder='...'):\n \n # Split value on highlight string\n try:\n pre, match, post = re.split(fr'({highlight})', value, maxsplit=1, flags=re.IGNORECASE)\n except ValueError:\n # Match not found\n return escape(value)\n\n # Trim pre/post sections to length\n if trim_pre and len(pre) > trim_pre:\n pre = trim_placeholder + pre[-trim_pre:]\n if trim_post and len(post) > trim_post:\n post = post[:trim_post] + trim_placeholder\n\n return f'{escape(pre)}{e", "d_id": 78244, "documentation": { "docstring": "\n Highlight a string within a string and optionally trim the pre/post portions of the original string.\n ", "n_words": 16, "vocab_size": 13, "n_whitespaces": 23, "language": "en" } }, { "id": 125350, "commit_id": "62288724b2b4add7ad9b12ff5299559caaa5fb55", "repo": "ray", "path": "python/ray/_private/state.py", "file_name": "state.py", "fun_name": "node_table", "commit_message": "[Python]More efficient node_table() in state.py (#26760)\n\nThis picks up https://github.com/ray-project/ray/pull/24088\r\nThe `get_node_table` already has resources of nodes, so we don't need to invoke `get_node_resource_info` for every node again. 
This change will reduce lots of rpc calls and make the api more efficient.", "code": "def node_table(self):\n \n self._check_connected()\n\n node_table = self.global_state_accessor.get_node_table()\n\n results = []\n for node_info_item in node_table:\n item = gcs_utils.GcsNodeInfo.FromString(node_info_item)\n node_info = {\n \"NodeID\": ray._private.utils.binary_to_hex(item.node_id),\n \"Alive\": item.state\n == gcs_utils.GcsNodeInfo.GcsNodeState.Value(\"ALIVE\"),\n \"NodeManagerAddress\": item.node_manager_address,\n \"NodeManagerHostname\": item.node_manager_hostname,\n \"NodeManagerPort\": item.node_manager_port,\n \"ObjectManagerPort\": item.object_manager_port,\n \"ObjectStoreSocketName\": item.object_store_socket_name,\n \"RayletSocketName\": item.raylet_socket_name,\n \"MetricsExportPort\": item.metrics_export_port,\n \"NodeName\": item.node_name,\n }\n node_info[\"alive\"] = node_info[\"Alive\"]\n node_info[\"Resources\"] = (\n {key: value for key, value in item.resources_total.items()}\n if node_info[\"Alive\"]\n else {}\n )\n results.append(node_info)\n return results\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 392, "n_words": 63, "vocab_size": 53, "complexity": 4, "nloc": 27, "token_counts": 172, "n_ast_nodes": 288, "n_identifiers": 33, "random_cut": "def node_table(self):\n \n self._check_connected()\n\n node_table = self.global_state_accessor.get_node_table()\n\n results = []\n for node_info_item in node_table:\n item = gcs_utils.GcsNodeInfo.FromString(node_info_item)\n node_info = {\n \"NodeID\": ray._private.utils.binary_to_hex(item.node_id),\n \"Alive\": item.state\n == gcs_utils.GcsNodeInfo.GcsNodeState.Value(\"ALIVE\"),\n \"NodeManagerAddress\": item.node_manager_address,\n \"NodeManagerHostname\": item.node_manager_hostname,\n \"NodeManagerPort\": item.node_manager_port,\n \"ObjectManagerPort\": item.object_manager_port,\n \"ObjectStoreSocketName\": item.object_store_socket_name,\n \"RayletSocketName\": item.raylet_socket_name,\n \"MetricsExportPort\": item.metrics_export_port,\n \"NodeName\": item.node_name,\n }\n node_info[\"alive\"] = node_info[\"Alive\"]\n node_info[\"Resources\"] = (\n {key: value for key, value in item.resources_total.items()}\n if node_info[\"Alive\"]\n else {}\n )\n results.append(node_info)\n return results\n", "d_id": 27841, "documentation": { "docstring": "Fetch and parse the Gcs node info table.\n\n Returns:\n Information about the node in the cluster.\n ", "n_words": 16, "vocab_size": 13, "n_whitespaces": 41, "language": "en" } }, { "id": 273174, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/preprocessing/index_lookup.py", "file_name": "index_lookup.py", "fun_name": "vocabulary_size", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def vocabulary_size(self):\n \n if tf.executing_eagerly():\n return (\n int(self.lookup_table.size().numpy())\n + self._token_start_index()\n )\n else:\n return self.lookup_table.size() + self._token_start_index()\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 99, "n_words": 15, "vocab_size": 12, "complexity": 2, "nloc": 8, "token_counts": 52, "n_ast_nodes": 90, "n_identifiers": 9, "random_cut": "def vocabulary_size(self):\n \n if tf.executing_eagerly():\n return (\n int(self.lookup_table.size().numpy())\n + 
self._token_start_index()\n )\n else:\n return self.looku", "d_id": 81097, "documentation": { "docstring": "Gets the current size of the layer's vocabulary.\n\n Returns:\n The integer size of the vocabulary, including optional mask and oov indices.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 44, "language": "en" } }, { "id": 215095, "commit_id": "f1c37893caf90738288e789c3233ab934630254f", "repo": "salt", "path": "tests/pytests/unit/modules/test_aixpkg.py", "file_name": "test_aixpkg.py", "fun_name": "test_version_with_invalid_names", "commit_message": "Working tests for install", "code": "def test_version_with_invalid_names():\n \n\n lslpp_mydog_out = \n\n ver_chk = MagicMock(return_value={\"retcode\": 1, \"stdout\": lslpp_mydog_out})\n with patch.dict(aixpkg.__grains__, {\"osarch\": \"PowerPC_POWER8\"}), patch.dict(\n aixpkg.__salt__,\n {\"cmd.run_all\": ver_chk},\n ):\n versions_checked = aixpkg.version(\n \"mydog\", versions_as_list=True, use_context=False\n )\n assert ver_chk.call_count == 1\n ver_chk.assert_called_with(\"lslpp -Lq mydog\", python_shell=False)\n assert versions_checked == \"\"\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 114, "n_words": 38, "vocab_size": 33, "complexity": 1, "nloc": 31, "token_counts": 92, "n_ast_nodes": 161, "n_identifiers": 17, "random_cut": "def test_version_with_invalid_names():\n \n\n lslpp_mydog_out = \n\n ver_chk = MagicMock(return_value={\"retcode\": 1, \"stdout\": lslpp_mydog_out})\n with patch.dict(aixpkg.__grains", "d_id": 53812, "documentation": { "docstring": "\n test version of packages\n lslpp: Fileset mydog not installed.\n\n\nState codes: \n A -- Applied. \n B -- Broken. \n C -- Committed. \n E -- EFIX Locked. \n O -- Obsolete. (partially migrated to newer version) \n ? -- Inconsistent State...Run lppchk -v. 
\n\nType codes: \n F -- Installp Fileset \n P -- Product \n C -- Component \n T -- Feature \n R -- RPM Package \n E -- Interim Fix \n", "n_words": 61, "vocab_size": 46, "n_whitespaces": 80, "language": "en" } }, { "id": 43222, "commit_id": "b692517ce3aafb276e9d23570e9734c30a5f3d1f", "repo": "airflow", "path": "tests/models/test_dagrun.py", "file_name": "test_dagrun.py", "fun_name": "test_mapped_literal_length_increase_adds_additional_ti", "commit_message": "Fix mapped task immutability after clear (#23667)\n\nWe should be able to detect if the structure of mapped task has changed\r\nand verify the integrity.\r\n\r\nThis PR ensures this\r\nCo-authored-by: Tzu-ping Chung ", "code": "def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session):\n \n\n with dag_maker(session=session) as dag:\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 3, "nloc": 29, "token_counts": 233, "n_ast_nodes": 34, "n_identifiers": 4, "random_cut": "def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session):\n \n\n with dag_make", "d_id": 7878, "documentation": { "docstring": "Test that when the length of mapped literal increases, additional ti is added", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 96012, "commit_id": "2790a30b7f6a6cffa2cd1aa69c678327a41a0664", "repo": "sentry", "path": "tests/sentry/integrations/bitbucket/test_installed.py", "file_name": "test_installed.py", "fun_name": "test_installed_without_username", "commit_message": "fix(bitbucket): Fix domain name (#31536)\n\n* fix(bitbucket): Fix domain name", "code": "def test_installed_without_username(self):\n \n\n # Remove username to simulate privacy mode\n del self.user_data_from_bitbucket[\"principal\"][\"username\"]\n\n response = self.client.post(self.path, data=self.user_data_from_bitbucket)\n assert response.status_code == 200\n integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)\n assert integration.name == self.user_display_name\n assert integration.metadata == self.user_metadata\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 31, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 76, "n_ast_nodes": 122, "n_identifiers": 20, "random_cut": "def test_installed_without_username(self):\n \n\n # Remove username to simulate privacy mode\n del self.user_data_from_bitbucket[\"principal\"][\"username\"]\n\n response = self.client.post(self.path, data=self.user_data_from_bitbucket)\n assert response.status_code == 200\n integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)\n assert integration.name == self.user_display_name\n assert integration.metadata == self", "d_id": 19263, "documentation": { "docstring": "Test a user (not team) installation where the user has hidden their username from public view", "n_words": 16, "vocab_size": 15, "n_whitespaces": 15, "language": "en" } }, { "id": 101891, "commit_id": "dab823a3eb7a5257cb1e0818ee10ed234d3de97f", "repo": "faceswap", "path": "lib/gui/display.py", "file_name": "display.py", "fun_name": "_command_display", "commit_message": "Typing - lib.gui.display_command", "code": "def _command_display(self, command):\n \n build_tabs = getattr(self, f\"_{command}_tabs\")\n build_tabs()\n", "url": 
"https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 39, "n_identifiers": 5, "random_cut": "def _command_display(self, command):\n \n ", "d_id": 21273, "documentation": { "docstring": " Build the relevant command specific tabs based on the incoming Faceswap command.\n\n Parameters\n ----------\n command: str\n The Faceswap command that is being executed\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 63, "language": "en" } }, { "id": 63924, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/urllib3/_collections.py", "file_name": "_collections.py", "fun_name": "itermerged", "commit_message": "upd; format", "code": "def itermerged(self):\n \n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 49, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 39, "n_ast_nodes": 66, "n_identifiers": 7, "random_cut": "def itermerged(self):\n \n for key in s", "d_id": 13532, "documentation": { "docstring": "Iterate over all headers, merging duplicate ones together.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 224020, "commit_id": "e7f07cc82ab2be920ab426ba07456d8b2592714d", "repo": "mkdocs", "path": "mkdocs/structure/files.py", "file_name": "files.py", "fun_name": "get_file_from_path", "commit_message": "Remove spaces at the ends of docstrings, normalize quotes", "code": "def get_file_from_path(self, path):\n \n return self.src_paths.get(os.path.normpath(path))\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def get_file_from_path(self, path):\n \n return self.src_paths.get(os.path.normpath(path))\n", "d_id": 57167, "documentation": { "docstring": "Return a File instance with File.src_path equal to path.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 208104, "commit_id": "1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc", "repo": "celery", "path": "t/unit/conftest.py", "file_name": "conftest.py", "fun_name": "sleepdeprived", "commit_message": "Canvas Header Stamping (#7384)\n\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed 
_IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Redo header stamping (#7341)\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Omer Katz \r\n\r\n* Added stamping mechanism\r\n\r\n* Manual stamping improved\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Add comma.\r\n\r\n* Moved groups to stamps\r\n\r\n* Fixed chord and added test for that\r\n\r\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* Fixed lint and elements\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* type -> isinstance\r\n\r\n* Added stamping mechanism\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Manual stamping improved\r\n\r\n* fail_ci_if_error uncommented\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Changes\r\n\r\n* Add comma.\r\n\r\n* Fixed chord and added test for that\r\n\r\n* canvas.py fixed\r\n\r\n* Test chord.py fixed\r\n\r\n* Fixed stamped_headers\r\n\r\n* collections import fixed\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* collections import fixed\r\n\r\n* Update celery/backends/base.py\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* ampq.py fixed\r\n\r\n* Refrain from using deprecated import path.\r\n\r\n* Fix test_complex_chain regression.\r\n\r\nWhenever we stamp a group we need to freeze it first if it wasn't already frozen.\r\nSomewhere along the line, the group id changed because we were freezing twice.\r\nThis commit places the stamping operation after preparing the chain's steps which fixes the problem somehow.\r\n\r\nWe don't know why yet.\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed issues with maybe_list. 
Add documentation\r\n\r\n* Fixed potential issue with integration tests\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed test_generator issues\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Update docs/userguide/canvas.rst\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* Fixed Couchbase\r\n\r\n* Better stamping intro\r\n\r\n* New GroupVisitor example\r\n\r\n* Adjust documentation.\r\n\r\nCo-authored-by: Naomi Elstein \r\nCo-authored-by: Omer Katz \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Asif Saif Uddin \r\nCo-authored-by: Omer Katz ", "code": "def sleepdeprived(request):\n \n module = request.node.get_closest_marker(\n \"sleepdeprived_patched_module\").args[0]\n old_sleep, module.sleep = module.sleep, noop\n try:\n yield\n finally:\n module.sleep = old_sleep\n\n\n# Taken from\n# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py\n@pytest.fixture", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 56, "n_words": 23, "vocab_size": 19, "complexity": 2, "nloc": 8, "token_counts": 42, "n_ast_nodes": 83, "n_identifiers": 11, "random_cut": "def sleepdeprived(request):\n \n module = request.node.get_closest_marker(\n \"sleepdeprived_patched_module\").args[0]\n old_sleep, module.sleep = module.sleep, noop\n try:\n yield\n finally:\n module.sleep = old_sleep\n\n\n", "d_id": 52203, "documentation": { "docstring": "Mock sleep method in patched module to do nothing.\n\n Example:\n >>> import time\n >>> @pytest.mark.sleepdeprived_patched_module(time)\n >>> def test_foo(self, sleepdeprived):\n >>> pass\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 59, "language": "en" } }, { "id": 101575, "commit_id": "7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5", "repo": "faceswap", "path": "lib/training/preview_tk.py", "file_name": "preview_tk.py", "fun_name": "_set_mouse_bindings", "commit_message": "Training - Use custom preview pop-out", "code": "def _set_mouse_bindings(self) -> None:\n \n logger.debug(\"Binding mouse events\")\n if system() == \"Linux\":\n self._canvas.tag_bind(self._canvas.image_id, \"\", self._on_bound_zoom)\n self._canvas.tag_bind(self._canvas.image_id, \"\", self._on_bound_zoom)\n else:\n self._canvas.tag_bind(self._canvas.image_id, \"\", self._on_bound_zoom)\n\n self._canvas.tag_bind(self._canvas.image_id, \"\", self._on_mouse_click)\n self._canvas.tag_bind(self._canvas.image_id, \"\", self._on_mouse_drag)\n logger.debug(\"Bound mouse events\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 112, "n_words": 30, "vocab_size": 22, "complexity": 2, "nloc": 15, "token_counts": 119, "n_ast_nodes": 198, "n_identifiers": 11, "random_cut": "def _set_mouse_bindings(self) -> None:\n \n logger.debug(\"Binding mouse events\")\n if system() == \"Linux\":\n self._canvas.tag_bind(self._canvas.image_id, \"\", self._on_bound_zoom)\n self._canvas.tag_bind", "d_id": 20985, "documentation": { "docstring": " Set the mouse bindings for interacting with the preview image\n\n Mousewheel: Zoom in and out\n Mouse click: Move image\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 41, "language": "en" } }, { "id": 297739, "commit_id": "1a42bd5c4cb51ffbfcaf8d5389b80a228712ac81", "repo": 
"core", "path": "tests/helpers/test_area_registry.py", "file_name": "test_area_registry.py", "fun_name": "test_create_area_with_id_already_in_use", "commit_message": "Add aliases to area registry items (#84294)\n\n* Add aliases to area registry items\r\n\r\n* Update test\r\n\r\n* Fix WS API", "code": "async def test_create_area_with_id_already_in_use(registry):\n \n area1 = registry.async_create(\"mock\")\n\n updated_area1 = registry.async_update(area1.id, name=\"New Name\")\n assert updated_area1.id == area1.id\n\n area2 = registry.async_create(\"mock\")\n assert area2.id == \"mock_2\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 40, "n_words": 22, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 50, "n_ast_nodes": 90, "n_identifiers": 9, "random_cut": "async def test_create_area_with_id_already_in_use(registry):\n ", "d_id": 96701, "documentation": { "docstring": "Make sure that we can't create an area with a name already in use.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 50265, "commit_id": "ffcde21305c61d950a9f93e57e6180c9a9665b87", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/ernie_modeling.py", "file_name": "ernie_modeling.py", "fun_name": "forward", "commit_message": "add disco_diffusion_ernievil_base", "code": "def forward(self, *args, **kwargs):\n \n labels = kwargs.pop('labels', None)\n pooled, encoded = super(ErnieModelForSequenceClassification, self).forward(*args, **kwargs)\n hidden = self.dropout(pooled)\n logits = self.classifier(hidden)\n\n if labels is not None:\n if len(labels.shape) != 1:\n labels = labels.squeeze()\n loss = F.cross_entropy(logits, labels)\n else:\n loss = None\n return loss, logits\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 147, "n_words": 43, "vocab_size": 32, "complexity": 3, "nloc": 12, "token_counts": 99, "n_ast_nodes": 160, "n_identifiers": 20, "random_cut": "def forward(self, *args, **kwargs):\n \n labels = kwargs.pop('label", "d_id": 10074, "documentation": { "docstring": "\n Args:\n labels (optional, `Variable` of shape [batch_size]):\n ground truth label id for each sentence\n Returns:\n loss (`Variable` of shape []):\n Cross entropy loss mean over batch\n if labels not set, returns None\n logits (`Variable` of shape [batch_size, hidden_size]):\n output logits of classifier\n ", "n_words": 42, "vocab_size": 33, "n_whitespaces": 157, "language": "en" } }, { "id": 62023, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py", "file_name": "locators.py", "fun_name": "_get_project", "commit_message": "upd; format", "code": "def _get_project(self, name):\n \n raise NotImplementedError('Please implement in the subclass')\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 4, "random_cut": "def _get_project(self, name):\n \n raise NotImplemen", "d_id": 12832, "documentation": { "docstring": "\n For a given project, get a dictionary mapping available versions to Distribution\n 
instances.\n\n This should be implemented in subclasses.\n\n If called from a locate() request, self.matcher will be set to a\n matcher for the requirement to satisfy, otherwise it will be None.\n ", "n_words": 42, "vocab_size": 34, "n_whitespaces": 85, "language": "en" } }, { "id": 160031, "commit_id": "935fe83ddaa3250d176bc848579ffdc4e1017090", "repo": "numpy", "path": "numpy/core/tests/test_multiarray.py", "file_name": "test_multiarray.py", "fun_name": "test_pickle_empty", "commit_message": "BUG: Fix unpickling an empty ndarray with a none-zero dimension (#21067)\n\nChanging num to the number of bytes in the input array, PyArray_NBYTES(self). Solves #21009.\r\n\r\n* Fixing nbyte size in methods.c:memcpy\r\n\r\n* Adding a test\r\n\r\n* Re-adding removed newline\r\n\r\n* Shrinking the test array to save memory", "code": "def test_pickle_empty(self):\n \n arr = np.array([]).reshape(999999, 0)\n pk_dmp = pickle.dumps(arr)\n pk_load = pickle.loads(pk_dmp)\n\n assert pk_load.size == 0\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 51, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 44, "n_ast_nodes": 73, "n_identifiers": 12, "random_cut": "def test_pickle_empty(self):\n \n arr = np.array([]).reshape(999999, 0)\n pk_dmp = pickle.dumps(arr)\n pk_load = pickle.loads(pk_dmp)\n\n assert pk_load.size == 0\n", "d_id": 38470, "documentation": { "docstring": "Checking if an empty array pickled and un-pickled will not cause a\n segmentation fault", "n_words": 14, "vocab_size": 14, "n_whitespaces": 20, "language": "en" } }, { "id": 42022, "commit_id": "5910d6ef50196c8bd1f4ed40a5da202a39d7f62c", "repo": "seaborn", "path": "seaborn/_oldcore.py", "file_name": "_oldcore.py", "fun_name": "get_semantics", "commit_message": "docs: fix typos (#2899)\n\n* Small typo fixes\r\n\r\n* Catch an additional typo\r\n\r\nCo-authored-by: Michael Waskom ", "code": "def get_semantics(cls, kwargs, semantics=None):\n \n # TODO this should be get_variables since we have included x and y\n if semantics is None:\n semantics = cls.semantics\n variables = {}\n for key, val in kwargs.items():\n if key in semantics and val is not None:\n variables[key] = val\n return variables\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 125, "n_words": 46, "vocab_size": 34, "complexity": 5, "nloc": 8, "token_counts": 55, "n_ast_nodes": 88, "n_identifiers": 8, "random_cut": "def get_semantics(cls, kwargs, semantics=None):\n \n # TODO this should be get_variables since we have included x and y\n if semantics is None:\n semantics = ", "d_id": 7461, "documentation": { "docstring": "Subset a dictionary arguments with known semantic variables.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 19485, "commit_id": "3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8", "repo": "pipenv", "path": "pipenv/utils/dependencies.py", "file_name": "dependencies.py", "fun_name": "convert_deps_to_pip", "commit_message": "Code reorg utils into utils module reduces complexity (#4990)\n\n* Split apart the massive utils.py into a utils module", "code": "def convert_deps_to_pip(deps, project=None, r=True, include_index=True):\n \n from pipenv.vendor.requirementslib.models.requirements import Requirement\n\n dependencies = []\n for dep_name, dep in deps.items():\n if project:\n project.clear_pipfile_cache()\n indexes = 
getattr(project, \"pipfile_sources\", []) if project is not None else []\n new_dep = Requirement.from_pipfile(dep_name, dep)\n if new_dep.index:\n include_index = True\n req = new_dep.as_line(sources=indexes if include_index else None).strip()\n dependencies.append(req)\n if not r:\n return dependencies\n\n # Write requirements.txt to tmp directory.\n from pipenv.vendor.vistir.path import create_tracked_tempfile\n f = create_tracked_tempfile(suffix=\"-requirements.txt\", delete=False)\n f.write(\"\\n\".join(dependencies).encode(\"utf-8\"))\n f.close()\n return f.name\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 176, "n_words": 72, "vocab_size": 55, "complexity": 7, "nloc": 19, "token_counts": 167, "n_ast_nodes": 266, "n_identifiers": 37, "random_cut": "def convert_deps_to_pip(deps, project=None, r=True, include_index=True):\n \n from pipenv.vendor.requirementslib.models.requirements import Requirement\n\n dependencies = []\n for dep_name, dep in deps.items():\n if project:\n project.clear_pipfile_cache()\n indexes = getattr(project, \"pipfile_sources\", []) if project is not None else []\n new_dep = Requirement.from_pipfile(dep_name, dep)\n if new_dep.index:\n include_index = True\n req = new_dep.as_line(sources=indexes if include_index else None).strip()\n dependencies.append(req)\n if not r:\n return dependencies\n\n # Write requirements.txt to tmp directory.\n from pipenv.vendor.vistir.path import create_tracked_tempfile\n f = create_tracked_tempfile(suffix=\"-requirements.txt\", delete=Fa", "d_id": 2995, "documentation": { "docstring": "\"Converts a Pipfile-formatted dependency to a pip-formatted one.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 113475, "commit_id": "bcc640c4e5e687a03fe21503692dad96e0b97fa7", "repo": "nni", "path": "nni/algorithms/hpo/hyperband_advisor.py", "file_name": "hyperband_advisor.py", "fun_name": "handle_trial_end", "commit_message": "[nas] fix issue introduced by the trial recovery feature (#5109)", "code": "def handle_trial_end(self, data):\n \n hyper_params = nni.load(data['hyper_params'])\n if self.is_created_in_previous_exp(hyper_params['parameter_id']):\n # The end of the recovered trial is ignored\n return\n self._handle_trial_end(hyper_params['parameter_id'])\n if data['trial_job_id'] in self.job_id_para_id_map:\n del self.job_id_para_id_map[data['trial_job_id']]\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 93, "n_words": 25, "vocab_size": 24, "complexity": 3, "nloc": 7, "token_counts": 60, "n_ast_nodes": 105, "n_identifiers": 9, "random_cut": "def handle_trial_end(self, data):\n \n hyper_params = nni.load(data['hyper_params'])\n if self.is_created_in_previous_exp(hyper_params['parameter_id']):\n # The end of the recovered trial is ignored\n return\n self._handle_trial_end(hyper_params['parameter_id'])\n if data['trial_job_id']", "d_id": 24919, "documentation": { "docstring": "\n Parameters\n ----------\n data: dict()\n it has three keys: trial_job_id, event, hyper_params\n trial_job_id: the id generated by training service\n event: the job's state\n hyper_params: the hyperparameters (a string) generated and returned by tuner\n ", "n_words": 32, "vocab_size": 28, "n_whitespaces": 105, "language": "en" } }, { "id": 220508, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": 
"python3.10.4/Lib/asyncio/futures.py", "file_name": "futures.py", "fun_name": "_copy_future_state", "commit_message": "add python 3.10.4 for windows", "code": "def _copy_future_state(source, dest):\n \n assert source.done()\n if dest.cancelled():\n return\n assert not dest.done()\n if source.cancelled():\n dest.cancel()\n else:\n exception = source.exception()\n if exception is not None:\n dest.set_exception(_convert_future_exc(exception))\n else:\n result = source.result()\n dest.set_result(result)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 115, "n_words": 29, "vocab_size": 22, "complexity": 4, "nloc": 14, "token_counts": 80, "n_ast_nodes": 138, "n_identifiers": 11, "random_cut": "def _copy_future_state(source, dest):\n \n assert source.done()\n if dest.cancelled():\n return\n assert not dest.done()\n if source.cancelled():\n dest.cancel()\n else:\n exception = source.exception()\n if exception is not None:\n dest.set_exception(_convert_future_exc(exception))\n else:\n result = source.result()\n dest.set_resul", "d_id": 56021, "documentation": { "docstring": "Internal helper to copy state from another Future.\n\n The other Future may be a concurrent.futures.Future.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 21, "language": "en" } }, { "id": 261047, "commit_id": "9f9f1684e91fbfffbc446f786a8c64628b752efb", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_validation.py", "file_name": "test_validation.py", "fun_name": "test_get_feature_names_invalid_dtypes", "commit_message": "MAINT Clean deprecation for 1.2: validation (#24493)\n\n* cln deprecations\r\n\r\n* cln\r\n\r\n* fix tst switch to pytest.raises", "code": "def test_get_feature_names_invalid_dtypes(names, dtypes):\n \n pd = pytest.importorskip(\"pandas\")\n X = pd.DataFrame([[1, 2], [4, 5], [5, 6]], columns=names)\n\n msg = re.escape(\n \"Feature names only support names that are all strings. \"\n f\"Got feature names with dtypes: {dtypes}.\"\n )\n with pytest.raises(TypeError, match=msg):\n names = _get_feature_names(X)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 80, "n_words": 41, "vocab_size": 34, "complexity": 1, "nloc": 9, "token_counts": 74, "n_ast_nodes": 123, "n_identifiers": 16, "random_cut": "def test_get_feature_names_invalid_dtypes(names, dtypes):\n \n pd =", "d_id": 76648, "documentation": { "docstring": "Get feature names errors when the feature names have mixed dtypes", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 251435, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/platform/pf.py", "file_name": "pf.py", "fun_name": "lookup", "commit_message": "make it black!", "code": "def lookup(address, port, s):\n \n # We may get an ipv4-mapped ipv6 address here, e.g. 
::ffff:127.0.0.1.\n # Those still appear as \"127.0.0.1\" in the table, so we need to strip the prefix.\n address = re.sub(r\"^::ffff:(?=\\d+.\\d+.\\d+.\\d+$)\", \"\", address)\n s = s.decode()\n\n # ALL tcp 192.168.1.13:57474 -> 23.205.82.58:443 ESTABLISHED:ESTABLISHED\n specv4 = f\"{address}:{port}\"\n\n # ALL tcp 2a01:e35:8bae:50f0:9d9b:ef0d:2de3:b733[58505] -> 2606:4700:30::681f:4ad0[443] ESTABLISHED:ESTABLISHED\n specv6 = f\"{address}[{port}]\"\n\n for i in s.split(\"\\n\"):\n if \"ESTABLISHED:ESTABLISHED\" in i and specv4 in i:\n s = i.split()\n if len(s) > 4:\n if sys.platform.startswith(\"freebsd\"):\n # strip parentheses for FreeBSD pfctl\n s = s[3][1:-1].split(\":\")\n else:\n s = s[4].split(\":\")\n\n if len(s) == 2:\n return s[0], int(s[1])\n elif \"ESTABLISHED:ESTABLISHED\" in i and specv6 in i:\n s = i.split()\n if len(s) > 4:\n s = s[4].split(\"[\")\n port = s[1].split(\"]\")\n port = port[0]\n return s[0], int(port)\n raise RuntimeError(\"Could not resolve original destination.\")\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 417, "n_words": 133, "vocab_size": 82, "complexity": 10, "nloc": 23, "token_counts": 200, "n_ast_nodes": 358, "n_identifiers": 17, "random_cut": "def lookup(address, port, s):\n \n # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.\n # Those still appear as \"127.0.0.1\" in the table, so we need to strip the prefix.\n address = re.sub(r\"^::ffff:(?=\\d+.\\d+.\\d+.\\d+$)\", \"\", address)\n s = s.decode()\n\n # ALL tcp 192.168.1.13:57474 -> 23.205.82.58:443 ESTABLISHED:ESTABLISHED\n specv4 = f\"{address}:{port}\"\n\n # ALL tcp 2a01:e35:8bae:50f0:9d9b:ef0d:2de3:b733[58505] -> 2606:4700:30::681f:4ad0[443] ESTABLISHED:ESTABLISHED\n specv6 = f\"{address}[{port}]\"\n\n for i in s.split(\"\\n\"):\n if \"ESTABLISHED:ESTABLISHED\" in i and specv4 in i:\n s = i.split()\n if len(s) > 4:\n if sys.platform.startswith(\"freebsd\"):\n # strip parentheses for FreeBSD pfctl\n s = s[3][1:-1].split(\":\")\n else:\n s = s[4].split(\":\")\n\n if len(s) == 2:\n return s[0], int(s[1])\n elif \"ESTABLISHED:ESTABLISHED\" in i and specv6 in i:\n s = i.split()\n if len(s) > 4:\n s = s[4].split(\"[\")\n port = s[1].split(\"]\")\n port = port[0]\n return s[0], int(port)\n raise RuntimeError(\"Could not resolve original ", "d_id": 73739, "documentation": { "docstring": "\n Parse the pfctl state output s, to look up the destination host\n matching the client (address, port).\n\n Returns an (address, port) tuple, or None.\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 37, "language": "en" } }, { "id": 86903, "commit_id": "712ba34a4d51be636396e70557aa3f99471814be", "repo": "sentry", "path": "src/sentry/models/projectownership.py", "file_name": "projectownership.py", "fun_name": "_hydrate_rules", "commit_message": "feat(commit-context): Refactor Issue Owner auto-assignment (#40048)\n\n## Objective:\r\nThis PR refactors how we calculate the Issue Owners from Code\r\nOwners/Ownership Rules and who should get the auto-assignment. Auto\r\nAssignment will first go to the Suspect Committer (if it exists and the\r\nsetting is on) then to Issue Owners (if it exists and the setting is on)\r\nthen nothing. 
We will also store the rule that triggered the Issue Owner\r\nmatch in GroupOwner.", "code": "def _hydrate_rules(cls, project_id, rules, type=OwnerRuleType.OWNERSHIP_RULE.value):\n \n owners = [owner for rule in rules for owner in rule.owners]\n actors = {\n key: val\n for key, val in resolve_actors({owner for owner in owners}, project_id).items()\n if val\n }\n result = [\n (rule, ActorTuple.resolve_many([actors[owner] for owner in rule.owners]), type)\n for rule in rules\n ]\n return result\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 155, "n_words": 51, "vocab_size": 32, "complexity": 8, "nloc": 12, "token_counts": 96, "n_ast_nodes": 139, "n_identifiers": 19, "random_cut": "def _hydrate_rules(cls, project_id, rules, type=OwnerRuleType.OWNERSHIP_RULE.value):\n \n owners = [owner for rule in r", "d_id": 18185, "documentation": { "docstring": "\n Get the last matching rule to take the most precedence.\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 56999, "commit_id": "8f3ffd09dc47bfd2af6a635cc04c640febffd519", "repo": "prefect", "path": "src/prefect/blocks/kubernetes.py", "file_name": "kubernetes.py", "fun_name": "activate", "commit_message": "add test coerage for get_api_client and activate", "code": "def activate(self) -> str:\n \n load_kube_config_from_dict(\n config_dict=self.config,\n context=self.context,\n )\n\n return self.current_context()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 60, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 11, "token_counts": 29, "n_ast_nodes": 48, "n_identifiers": 8, "random_cut": "def activate(self) -> str:\n \n load_kube_config_from_dict(\n config_dict=s", "d_id": 11603, "documentation": { "docstring": "\n Convenience method for activating the k8s config stored in an instance of this block\n\n Returns current_context for sanity check\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 197875, "commit_id": "675e6d6ca7aa63ce26f8aa0ca2467976b6570113", "repo": "sympy", "path": "sympy/core/expr.py", "file_name": "expr.py", "fun_name": "as_coeff_add", "commit_message": "add some type hints to expr.py", "code": "def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]:\n \n if deps:\n if not self.has_free(*deps):\n return self, tuple()\n return S.Zero, (self,)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 65, "n_words": 18, "vocab_size": 15, "complexity": 3, "nloc": 35, "token_counts": 49, "n_ast_nodes": 77, "n_identifiers": 8, "random_cut": "def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]:\n \n ", "d_id": 48731, "documentation": { "docstring": "Return the tuple (c, args) where self is written as an Add, ``a``.\n\n c should be a Rational added to any terms of the Add that are\n independent of deps.\n\n args should be a tuple of all other terms of ``a``; args is empty\n if self is a Number or if self is independent of deps (when given).\n\n This should be used when you do not know if self is an Add or not but\n you want to treat self as an Add or if you want to process the\n individual arguments of the tail of self as an Add.\n\n - if you know self is an Add and want only the head, use self.args[0];\n - if you do not want to process the arguments of the 
tail but need the\n tail then use self.as_two_terms() which gives the head and tail.\n - if you want to split self into an independent and dependent parts\n use ``self.as_independent(*deps)``\n\n >>> from sympy import S\n >>> from sympy.abc import x, y\n >>> (S(3)).as_coeff_add()\n (3, ())\n >>> (3 + x).as_coeff_add()\n (3, (x,))\n >>> (3 + x + y).as_coeff_add(x)\n (y + 3, (x,))\n >>> (3 + y).as_coeff_add(x)\n (y + 3, ())\n\n ", "n_words": 195, "vocab_size": 91, "n_whitespaces": 360, "language": "en" } }, { "id": 30229, "commit_id": "bbb7a02ef889134af71593102bc6f65035ab14cb", "repo": "spotify-downloader", "path": "spotdl/console/web.py", "file_name": "web.py", "fun_name": "create_github_url", "commit_message": "update web code\n\nCo-Authored-By: Peyton Creery <44987569+phcreery@users.noreply.github.com>", "code": "def create_github_url(url):\n \n repo_only_url = re.compile(\n r\"https:\\/\\/github\\.com\\/[a-z\\d](?:[a-z\\d]|-(?=[a-z\\d])){0,38}\\/[a-zA-Z0-9]+$\"\n )\n re_branch = re.compile(\"/(tree|blob)/(.+?)/\")\n\n # Check if the given url is a url to a GitHub repo. If it is, tell the\n # user to use 'git clone' to download it\n if re.match(repo_only_url, url):\n print(\n \"✘ The given url is a complete repository. Use 'git clone' to download the repository\",\n \"red\",\n )\n sys.exit()\n\n # extract the branch name from the given url (e.g master)\n branch = re_branch.search(url)\n if branch:\n download_dirs = url[branch.end() :]\n api_url = (\n url[: branch.start()].replace(\"github.com\", \"api.github.com/repos\", 1)\n + \"/contents/\"\n + download_dirs\n + \"?ref=\"\n + branch.group(2)\n )\n return api_url, download_dirs\n\n raise ValueError(\"The given url is not a valid GitHub url\")\n\n\n# Modification of https://github.com/sdushantha/gitdir/blob/master/gitdir/gitdir.py", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 277, "n_words": 112, "vocab_size": 71, "complexity": 3, "nloc": 23, "token_counts": 111, "n_ast_nodes": 198, "n_identifiers": 19, "random_cut": "def create_github_url(url):\n \n repo_only_url = re.compile(\n r\"https:\\/\\/github\\.com\\/[a-z\\d](?:[a-z\\d]|-(?=[a-z\\d])){0,38}\\/[a-zA-Z0-9]+$\"\n )\n re_branch = re.compile(\"/(tree|blob)/(.+?)/\")\n\n # Check if the given url is a url to a GitHub repo. If it is, tell the\n # user", "d_id": 5414, "documentation": { "docstring": "\n From the given url, produce a URL that is compatible with Github's REST API. 
Can handle blob or tree paths.\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 27, "language": "en" } }, { "id": 259555, "commit_id": "4253eace9893eb6aef36ca631e7978b6a8808fbc", "repo": "scikit-learn", "path": "sklearn/metrics/cluster/_supervised.py", "file_name": "_supervised.py", "fun_name": "homogeneity_score", "commit_message": "DOC Ensures that homogeneity_score passes numpydoc validation (#23006)", "code": "def homogeneity_score(labels_true, labels_pred):\n \n return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 29, "n_identifiers": 4, "random_cut": "def homogeneity_score(labels_true, labels_pred):\n \n return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]\n\n", "d_id": 75819, "documentation": { "docstring": "Homogeneity metric of a cluster labeling given a ground truth.\n\n A clustering result satisfies homogeneity if all of its clusters\n contain only data points which are members of a single class.\n\n This metric is independent of the absolute values of the labels:\n a permutation of the class or cluster label values won't change the\n score value in any way.\n\n This metric is not symmetric: switching ``label_true`` with ``label_pred``\n will return the :func:`completeness_score` which will be different in\n general.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n labels_true : int array, shape = [n_samples]\n Ground truth class labels to be used as a reference.\n\n labels_pred : array-like of shape (n_samples,)\n Cluster labels to evaluate.\n\n Returns\n -------\n homogeneity : float\n Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.\n\n See Also\n --------\n completeness_score : Completeness metric of cluster labeling.\n v_measure_score : V-Measure (NMI with arithmetic mean option).\n\n References\n ----------\n\n .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. 
V-Measure: A\n conditional entropy-based external cluster evaluation measure\n `_\n\n Examples\n --------\n\n Perfect labelings are homogeneous::\n\n >>> from sklearn.metrics.cluster import homogeneity_score\n >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])\n 1.0\n\n Non-perfect labelings that further split classes into more clusters can be\n perfectly homogeneous::\n\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))\n 1.000000\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))\n 1.000000\n\n Clusters that include samples from different classes do not make for an\n homogeneous labeling::\n\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))\n 0.0...\n >>> print(\"%.6f\" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))\n 0.0...\n ", "n_words": 263, "vocab_size": 162, "n_whitespaces": 443, "language": "en" } }, { "id": 199720, "commit_id": "3d30d00c37371f142e6a0e9dc5058752d8c9d401", "repo": "sympy", "path": "sympy/polys/orthopolys.py", "file_name": "orthopolys.py", "fun_name": "dup_chebyshevt", "commit_message": "Restore domain elements in dup_* functions", "code": "def dup_chebyshevt(n, K):\n \n if n < 1:\n return [K.one]\n m2, m1 = [K.one], [K.one, K.zero]\n for i in range(2, n+1):\n m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(2), K), m2, K)\n return m1\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 62, "n_words": 33, "vocab_size": 26, "complexity": 3, "nloc": 7, "token_counts": 83, "n_ast_nodes": 123, "n_identifiers": 12, "random_cut": "def dup_chebyshevt(n, K):\n \n if n", "d_id": 49376, "documentation": { "docstring": "Low-level implementation of Chebyshev polynomials of the first kind.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 224043, "commit_id": "e7f07cc82ab2be920ab426ba07456d8b2592714d", "repo": "mkdocs", "path": "mkdocs/tests/base.py", "file_name": "base.py", "fun_name": "get_markdown_toc", "commit_message": "Remove spaces at the ends of docstrings, normalize quotes", "code": "def get_markdown_toc(markdown_source):\n \n md = markdown.Markdown(extensions=['toc'])\n md.convert(markdown_source)\n return md.toc_tokens\n\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 20, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 8, "random_cut": "def get_markdown_toc(markdown_source):\n \n md = markdown.Markdown(extensions=['toc", "d_id": 57190, "documentation": { "docstring": "Return TOC generated by Markdown parser from Markdown source text.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 77547, "commit_id": "39f7886a6f8ee98db7e73ce33d94c06139f35bd8", "repo": "wagtail", "path": "wagtail/admin/widgets/chooser.py", "file_name": "chooser.py", "fun_name": "get_value_data_from_instance", "commit_message": "Split out common logic from get_value_data", "code": "def get_value_data_from_instance(self, instance):\n \n return {\n \"id\": instance.pk,\n \"edit_url\": AdminURLFinder().get_edit_url(instance),\n }\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 28, "n_ast_nodes": 49, "n_identifiers": 6, "random_cut": "def 
get_value_data_from_instance(self, instance):\n ", "d_id": 16673, "documentation": { "docstring": "\n Given a model instance, return a value that we can pass to both the server-side template\n and the client-side rendering code (via telepath) that contains all the information needed\n for display. Typically this is a dict of id, title etc; it must be JSON-serialisable.\n ", "n_words": 44, "vocab_size": 39, "n_whitespaces": 73, "language": "en" } }, { "id": 212865, "commit_id": "07bb93d47f01468660a01f42150e87e5cb08d546", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "set_options", "commit_message": "Addition of tooltip_offset parm to set_options call (major hack to get around 8.6.12 problem). Backed out the experiments to try and fix new problem with Ubuntu", "code": "def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None),\n margins=(None, None),\n element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None,\n slider_border_width=None, slider_relief=None, slider_orientation=None,\n autoclose_time=None, message_box_line_width=None,\n progress_meter_border_depth=None, progress_meter_style=None,\n progress_meter_relief=None, progress_meter_color=None, progress_meter_size=None,\n text_justification=None, background_color=None, element_background_color=None,\n text_element_background_color=None, input_elements_background_color=None, input_text_color=None,\n scrollbar_color=None, text_color=None, element_text_color=None, debug_win_size=(None, None),\n window_location=(None, None), error_button_color=(None, None), tooltip_time=None, tooltip_font=None, use_ttk_buttons=None, ttk_theme=None,\n suppress_error_popups=None, suppress_raise_key_errors=None, suppress_key_guessing=None,warn_button_key_duplicates=False, enable_treeview_869_patch=None,\n enable_mac_notitlebar_patch=None, use_custom_titlebar=None, titlebar_background_color=None, titlebar_text_color=None, titlebar_font=None,\n titlebar_icon=None, user_settings_path=None, pysimplegui_settings_path=None, pysimplegui_settings_filename=None, keep_on_top=None, dpi_awareness=None, scaling=None, disable_modal_windows=None, tooltip_offset=(None, None)):\n \n\n global DEFAULT_ELEMENT_SIZE\n global DEFAULT_BUTTON_ELEMENT_SIZE\n global DEFAULT_MARGINS # Margins for each LEFT/RIGHT margin is first term\n global DEFAULT_ELEMENT_PADDING # Padding between elements (row, col) in pixels\n global DEFAULT_AUTOSIZE_TEXT\n global DEFAULT_AUTOSIZE_BUTTONS\n global DEFAULT_FONT\n global DEFAULT_BORDER_WIDTH\n global DEFAULT_AUTOCLOSE_TIME\n global DEFAULT_BUTTON_COLOR\n global MESSAGE_BOX_LINE_WIDTH\n global DEFAULT_PROGRESS_BAR_BORDER_WIDTH\n global DEFAULT_PROGRESS_BAR_STYLE\n global DEFAULT_PROGRESS_BAR_RELIEF\n global DEFAULT_PROGRESS_BAR_COLOR\n global DEFAULT_PROGRESS_BAR_SIZE\n global DEFAULT_TEXT_JUSTIFICATION\n global DEFAULT_DEBUG_WINDOW_SIZE\n global DEFAULT_SLIDER_BORDER_WIDTH\n global DEFAULT_SLIDER_RELIEF\n global DEFAULT_SLIDER_ORIENTATION\n global DEFAULT_BACKGROUND_COLOR\n global DEFAULT_INPUT_ELEMENTS_COLOR\n global DEFAULT_ELEMENT_BACKGROUND_COLOR\n global DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR\n global DEFAULT_SCROLLBAR_COLOR\n global DEFAULT_TEXT_COLOR\n global DEFAULT_WINDOW_LOCATION\n global DEFAULT_ELEMENT_TEXT_COLOR\n global DEFAULT_INPUT_TEXT_COLOR\n global DEFAULT_TOOLTIP_TIME\n global DEFAULT_ERROR_BUTTON_COLOR\n global DEFAULT_TTK_THEME\n global USE_TTK_BUTTONS\n global TOOLTIP_FONT\n global 
SUPPRESS_ERROR_POPUPS\n global SUPPRESS_RAISE_KEY_ERRORS\n global SUPPRESS_KEY_GUESSING\n global WARN_DUPLICATE_BUTTON_KEY_ERRORS\n global ENABLE_TREEVIEW_869_PATCH\n global ENABLE_MAC_NOTITLEBAR_PATCH\n global USE_CUSTOM_TITLEBAR\n global CUSTOM_TITLEBAR_BACKGROUND_COLOR\n global CUSTOM_TITLEBAR_TEXT_COLOR\n global CUSTOM_TITLEBAR_ICON\n global CUSTOM_TITLEBAR_FONT\n global DEFAULT_USER_SETTINGS_PATH\n global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH\n global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME\n global DEFAULT_KEEP_ON_TOP\n global DEFAULT_SCALING\n global DEFAULT_MODAL_WINDOWS_ENABLED\n global DEFAULT_TOOLTIP_OFFSET\n global _pysimplegui_user_settings\n # global _my_windows\n\n if icon:\n Window._user_defined_icon = icon\n # _my_windows._user_defined_icon = icon\n\n if button_color != None:\n if button_color == COLOR_SYSTEM_DEFAULT:\n DEFAULT_BUTTON_COLOR = (COLOR_SYSTEM_DEFAULT, COLOR_SYSTEM_DEFAULT)\n else:\n DEFAULT_BUTTON_COLOR = button_color\n\n if element_size != (None, None):\n DEFAULT_ELEMENT_SIZE = element_size\n\n if button_element_size != (None, None):\n DEFAULT_BUTTON_ELEMENT_SIZE = button_element_size\n\n if margins != (None, None):\n DEFAULT_MARGINS = margins\n\n if element_padding != (None, None):\n DEFAULT_ELEMENT_PADDING = element_padding\n\n if auto_size_text != None:\n DEFAULT_AUTOSIZE_TEXT = auto_size_text\n\n if auto_size_buttons != None:\n DEFAULT_AUTOSIZE_BUTTONS = auto_size_buttons\n\n if font != None:\n DEFAULT_FONT = font\n\n if border_width != None:\n DEFAULT_BORDER_WIDTH = border_width\n\n if autoclose_time != None:\n DEFAULT_AUTOCLOSE_TIME = autoclose_time\n\n if message_box_line_width != None:\n MESSAGE_BOX_LINE_WIDTH = message_box_line_width\n\n if progress_meter_border_depth != None:\n DEFAULT_PROGRESS_BAR_BORDER_WIDTH = progress_meter_border_depth\n\n if progress_meter_style != None:\n warnings.warn('You can no longer set a progress bar style. 
All ttk styles must be the same for the window', UserWarning)\n # DEFAULT_PROGRESS_BAR_STYLE = progress_meter_style\n\n if progress_meter_relief != None:\n DEFAULT_PROGRESS_BAR_RELIEF = progress_meter_relief\n\n if progress_meter_color != None:\n DEFAULT_PROGRESS_BAR_COLOR = progress_meter_color\n\n if progress_meter_size != None:\n DEFAULT_PROGRESS_BAR_SIZE = progress_meter_size\n\n if slider_border_width != None:\n DEFAULT_SLIDER_BORDER_WIDTH = slider_border_width\n\n if slider_orientation != None:\n DEFAULT_SLIDER_ORIENTATION = slider_orientation\n\n if slider_relief != None:\n DEFAULT_SLIDER_RELIEF = slider_relief\n\n if text_justification != None:\n DEFAULT_TEXT_JUSTIFICATION = text_justification\n\n if background_color != None:\n DEFAULT_BACKGROUND_COLOR = background_color\n\n if text_element_background_color != None:\n DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR = text_element_background_color\n\n if input_elements_background_color != None:\n DEFAULT_INPUT_ELEMENTS_COLOR = input_elements_background_color\n\n if element_background_color != None:\n DEFAULT_ELEMENT_BACKGROUND_COLOR = element_background_color\n\n if window_location != (None, None):\n DEFAULT_WINDOW_LOCATION = window_location\n\n if debug_win_size != (None, None):\n DEFAULT_DEBUG_WINDOW_SIZE = debug_win_size\n\n if text_color != None:\n DEFAULT_TEXT_COLOR = text_color\n\n if scrollbar_color != None:\n DEFAULT_SCROLLBAR_COLOR = scrollbar_color\n\n if element_text_color != None:\n DEFAULT_ELEMENT_TEXT_COLOR = element_text_color\n\n if input_text_color is not None:\n DEFAULT_INPUT_TEXT_COLOR = input_text_color\n\n if tooltip_time is not None:\n DEFAULT_TOOLTIP_TIME = tooltip_time\n\n if error_button_color != (None, None):\n DEFAULT_ERROR_BUTTON_COLOR = error_button_color\n\n if ttk_theme is not None:\n DEFAULT_TTK_THEME = ttk_theme\n\n if use_ttk_buttons is not None:\n USE_TTK_BUTTONS = use_ttk_buttons\n\n if tooltip_font is not None:\n TOOLTIP_FONT = tooltip_font\n\n if suppress_error_popups is not None:\n SUPPRESS_ERROR_POPUPS = suppress_error_popups\n\n if suppress_raise_key_errors is not None:\n SUPPRESS_RAISE_KEY_ERRORS = suppress_raise_key_errors\n\n if suppress_key_guessing is not None:\n SUPPRESS_KEY_GUESSING = suppress_key_guessing\n\n if warn_button_key_duplicates is not None:\n WARN_DUPLICATE_BUTTON_KEY_ERRORS = warn_button_key_duplicates\n\n if enable_treeview_869_patch is not None:\n ENABLE_TREEVIEW_869_PATCH = enable_treeview_869_patch\n\n if enable_mac_notitlebar_patch is not None:\n ENABLE_MAC_NOTITLEBAR_PATCH = enable_mac_notitlebar_patch\n\n if use_custom_titlebar is not None:\n USE_CUSTOM_TITLEBAR = use_custom_titlebar\n\n if titlebar_background_color is not None:\n CUSTOM_TITLEBAR_BACKGROUND_COLOR = titlebar_background_color\n\n if titlebar_text_color is not None:\n CUSTOM_TITLEBAR_TEXT_COLOR = titlebar_text_color\n\n if titlebar_font is not None:\n CUSTOM_TITLEBAR_FONT = titlebar_font\n\n if titlebar_icon is not None:\n CUSTOM_TITLEBAR_ICON = titlebar_icon\n\n if user_settings_path is not None:\n DEFAULT_USER_SETTINGS_PATH = user_settings_path\n\n if pysimplegui_settings_path is not None:\n DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH = pysimplegui_settings_path\n\n if pysimplegui_settings_filename is not None:\n DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME = pysimplegui_settings_filename\n\n if pysimplegui_settings_filename is not None or pysimplegui_settings_filename is not None:\n _pysimplegui_user_settings = UserSettings(filename=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME,\n 
path=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH)\n\n if keep_on_top is not None:\n DEFAULT_KEEP_ON_TOP = keep_on_top\n\n if dpi_awareness is True:\n if running_windows():\n if platform.release() == \"7\":\n ctypes.windll.user32.SetProcessDPIAware()\n elif platform.release() == \"8\" or platform.release() == \"10\":\n ctypes.windll.shcore.SetProcessDpiAwareness(1)\n\n if scaling is not None:\n DEFAULT_SCALING = scaling\n\n if disable_modal_windows is not None:\n DEFAULT_MODAL_WINDOWS_ENABLED = not disable_modal_windows\n\n if tooltip_offset != (None, None):\n DEFAULT_TOOLTIP_OFFSET = tooltip_offset\n\n return True\n\n\n# ----------------------------------------------------------------- #\n\n# .########.##.....##.########.##.....##.########..######.\n# ....##....##.....##.##.......###...###.##.......##....##\n# ....##....##.....##.##.......####.####.##.......##......\n# ....##....#########.######...##.###.##.######....######.\n# ....##....##.....##.##.......##.....##.##.............##\n# ....##....##.....##.##.......##.....##.##.......##....##\n# ....##....##.....##.########.##.....##.########..######.\n\n# ----------------------------------------------------------------- #\n\n# The official Theme code\n\n#################### ChangeLookAndFeel #######################\n# Predefined settings that will change the colors and styles #\n# of the elements. #\n##############################################################\nLOOK_AND_FEEL_TABLE = {\n \"SystemDefault\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1,\n \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"SystemDefaultForReal\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT,\n \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT, \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT,\n \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"SystemDefault1\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1,\n \"PROGRESS_DEPTH\": 0, },\n \"Material1\": {\"BACKGROUND\": \"#E3F2FD\", \"TEXT\": \"#000000\", \"INPUT\": \"#86A8FF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#86A8FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#5079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"Material2\": {\"BACKGROUND\": \"#FAFAFA\", \"TEXT\": \"#000000\", \"INPUT\": \"#004EA1\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#5EA7FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#0079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"Reddit\": {\"BACKGROUND\": \"#ffffff\", \"TEXT\": \"#1a1a1b\", \"INPUT\": \"#dae0e6\", \"TEXT_INPUT\": \"#222222\", \"SCROLL\": \"#a5a4a4\", \"BUTTON\": (\"#FFFFFF\", \"#0079d3\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, \"ACCENT1\": \"#ff5414\", 
\"ACCENT2\": \"#33a8ff\",\n \"ACCENT3\": \"#dbf0ff\", },\n \"Topanga\": {\"BACKGROUND\": \"#282923\", \"TEXT\": \"#E7DB74\", \"INPUT\": \"#393a32\", \"TEXT_INPUT\": \"#E7C855\", \"SCROLL\": \"#E7C855\", \"BUTTON\": (\"#E7C855\", \"#284B5A\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, \"ACCENT1\": \"#c15226\", \"ACCENT2\": \"#7a4d5f\",\n \"ACCENT3\": \"#889743\", },\n \"GreenTan\": {\"BACKGROUND\": \"#9FB8AD\", \"TEXT\": '#000000', \"INPUT\": \"#F7F3EC\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#F7F3EC\", \"BUTTON\": (\"#FFFFFF\", \"#475841\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Dark\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#FFFFFF\", \"#004F00\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen\": {\"BACKGROUND\": \"#B7CECE\", \"TEXT\": \"#000000\", \"INPUT\": \"#FDFFF7\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#FDFFF7\",\n \"BUTTON\": (\"#FFFFFF\", \"#658268\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"ACCENT1\": \"#76506d\",\n \"ACCENT2\": \"#5148f1\", \"ACCENT3\": \"#0a1c84\", \"PROGRESS_DEPTH\": 0, },\n \"Dark2\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#FFFFFF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#FFFFFF\", \"#004F00\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Black\": {\"BACKGROUND\": \"#000000\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#000000\", \"#FFFFFF\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Tan\": {\"BACKGROUND\": \"#fdf6e3\", \"TEXT\": \"#268bd1\", \"INPUT\": \"#eee8d5\", \"TEXT_INPUT\": \"#6c71c3\", \"SCROLL\": \"#eee8d5\", \"BUTTON\": (\"#FFFFFF\", \"#063542\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"TanBlue\": {\"BACKGROUND\": \"#e5dece\", \"TEXT\": \"#063289\", \"INPUT\": \"#f9f8f4\", \"TEXT_INPUT\": \"#242834\", \"SCROLL\": \"#eee8d5\", \"BUTTON\": (\"#FFFFFF\", \"#063289\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkTanBlue\": {\"BACKGROUND\": \"#242834\", \"TEXT\": \"#dfe6f8\", \"INPUT\": \"#97755c\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#a9afbb\",\n \"BUTTON\": (\"#FFFFFF\", \"#063289\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkAmber\": {\"BACKGROUND\": \"#2c2825\", \"TEXT\": \"#fdcb52\", \"INPUT\": \"#705e52\", \"TEXT_INPUT\": \"#fdcb52\", \"SCROLL\": \"#705e52\",\n \"BUTTON\": (\"#000000\", \"#fdcb52\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue\": {\"BACKGROUND\": \"#1a2835\", \"TEXT\": \"#d1ecff\", \"INPUT\": \"#335267\", \"TEXT_INPUT\": \"#acc2d0\", \"SCROLL\": \"#1b6497\", \"BUTTON\": (\"#000000\", \"#fafaf8\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Reds\": {\"BACKGROUND\": \"#280001\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#d8d584\", \"TEXT_INPUT\": 
\"#000000\", \"SCROLL\": \"#763e00\", \"BUTTON\": (\"#000000\", \"#daad28\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Green\": {\"BACKGROUND\": \"#82a459\", \"TEXT\": \"#000000\", \"INPUT\": \"#d8d584\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e3ecf3\", \"BUTTON\": (\"#FFFFFF\", \"#517239\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"BluePurple\": {\"BACKGROUND\": \"#A5CADD\", \"TEXT\": \"#6E266E\", \"INPUT\": \"#E0F5FF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#E0F5FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#303952\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Purple\": {\"BACKGROUND\": \"#B0AAC2\", \"TEXT\": \"#000000\", \"INPUT\": \"#F2EFE8\", \"SCROLL\": \"#F2EFE8\", \"TEXT_INPUT\": \"#000000\", \"BUTTON\": (\"#000000\", \"#C2D4D8\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"BlueMono\": {\"BACKGROUND\": \"#AAB6D3\", \"TEXT\": \"#000000\", \"INPUT\": \"#F1F4FC\", \"SCROLL\": \"#F1F4FC\", \"TEXT_INPUT\": \"#000000\", \"BUTTON\": (\"#FFFFFF\", \"#7186C7\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"GreenMono\": {\"BACKGROUND\": \"#A8C1B4\", \"TEXT\": \"#000000\", \"INPUT\": \"#DDE0DE\", \"SCROLL\": \"#E3E3E3\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#6D9F85\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"BrownBlue\": {\"BACKGROUND\": \"#64778d\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#f0f3f7\", \"SCROLL\": \"#A6B2BE\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#283b5b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"BrightColors\": {\"BACKGROUND\": \"#b4ffb4\", \"TEXT\": \"#000000\", \"INPUT\": \"#ffff64\", \"SCROLL\": \"#ffb482\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#ffa0dc\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"NeutralBlue\": {\"BACKGROUND\": \"#92aa9d\", \"TEXT\": \"#000000\", \"INPUT\": \"#fcfff6\", \"SCROLL\": \"#fcfff6\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#d0dbbd\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Kayak\": {\"BACKGROUND\": \"#a7ad7f\", \"TEXT\": \"#000000\", \"INPUT\": \"#e6d3a8\", \"SCROLL\": \"#e6d3a8\", \"TEXT_INPUT\": \"#000000\", \"BUTTON\": (\"#FFFFFF\", \"#5d907d\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"SandyBeach\": {\"BACKGROUND\": \"#efeccb\", \"TEXT\": \"#012f2f\", \"INPUT\": \"#e6d3a8\", \"SCROLL\": \"#e6d3a8\", \"TEXT_INPUT\": \"#012f2f\",\n \"BUTTON\": (\"#FFFFFF\", \"#046380\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"TealMono\": {\"BACKGROUND\": \"#a8cfdd\", \"TEXT\": \"#000000\", \"INPUT\": \"#dfedf2\", \"SCROLL\": \"#dfedf2\", \"TEXT_INPUT\": \"#000000\", \"BUTTON\": (\"#FFFFFF\", \"#183440\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Default\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, 
\"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1,\n \"PROGRESS_DEPTH\": 0, },\n \"Default1\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1,\n \"PROGRESS_DEPTH\": 0, },\n \"DefaultNoMoreNagging\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT,\n \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT, \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR,\n \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"GrayGrayGray\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1,\n \"PROGRESS_DEPTH\": 0, },\n \"LightBlue\": {\"BACKGROUND\": \"#E3F2FD\", \"TEXT\": \"#000000\", \"INPUT\": \"#86A8FF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#86A8FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#5079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"LightGrey\": {\"BACKGROUND\": \"#FAFAFA\", \"TEXT\": \"#000000\", \"INPUT\": \"#004EA1\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#5EA7FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#0079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"LightGrey1\": {\"BACKGROUND\": \"#ffffff\", \"TEXT\": \"#1a1a1b\", \"INPUT\": \"#dae0e6\", \"TEXT_INPUT\": \"#222222\", \"SCROLL\": \"#a5a4a4\",\n \"BUTTON\": (\"#FFFFFF\", \"#0079d3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#ff5414\", \"ACCENT2\": \"#33a8ff\", \"ACCENT3\": \"#dbf0ff\", },\n \"DarkBrown\": {\"BACKGROUND\": \"#282923\", \"TEXT\": \"#E7DB74\", \"INPUT\": \"#393a32\", \"TEXT_INPUT\": \"#E7C855\", \"SCROLL\": \"#E7C855\",\n \"BUTTON\": (\"#E7C855\", \"#284B5A\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#c15226\", \"ACCENT2\": \"#7a4d5f\", \"ACCENT3\": \"#889743\", },\n \"LightGreen1\": {\"BACKGROUND\": \"#9FB8AD\", \"TEXT\": \"#000000\", \"INPUT\": \"#F7F3EC\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#F7F3EC\",\n \"BUTTON\": (\"#FFFFFF\", \"#475841\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#FFFFFF\", \"#004F00\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen2\": {\"BACKGROUND\": \"#B7CECE\", \"TEXT\": \"#000000\", \"INPUT\": \"#FDFFF7\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#FDFFF7\",\n \"BUTTON\": (\"#FFFFFF\", \"#658268\"), \"PROGRESS\": 
DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"ACCENT1\": \"#76506d\",\n \"ACCENT2\": \"#5148f1\", \"ACCENT3\": \"#0a1c84\", \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey1\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#FFFFFF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#707070\",\n \"BUTTON\": (\"#FFFFFF\", \"#004F00\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlack\": {\"BACKGROUND\": \"#000000\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\",\n \"BUTTON\": (\"#000000\", \"#FFFFFF\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBrown\": {\"BACKGROUND\": \"#fdf6e3\", \"TEXT\": \"#268bd1\", \"INPUT\": \"#eee8d5\", \"TEXT_INPUT\": \"#6c71c3\", \"SCROLL\": \"#eee8d5\",\n \"BUTTON\": (\"#FFFFFF\", \"#063542\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBrown1\": {\"BACKGROUND\": \"#e5dece\", \"TEXT\": \"#063289\", \"INPUT\": \"#f9f8f4\", \"TEXT_INPUT\": \"#242834\", \"SCROLL\": \"#eee8d5\",\n \"BUTTON\": (\"#FFFFFF\", \"#063289\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue1\": {\"BACKGROUND\": \"#242834\", \"TEXT\": \"#dfe6f8\", \"INPUT\": \"#97755c\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#a9afbb\",\n \"BUTTON\": (\"#FFFFFF\", \"#063289\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBrown1\": {\"BACKGROUND\": \"#2c2825\", \"TEXT\": \"#fdcb52\", \"INPUT\": \"#705e52\", \"TEXT_INPUT\": \"#fdcb52\", \"SCROLL\": \"#705e52\",\n \"BUTTON\": (\"#000000\", \"#fdcb52\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue2\": {\"BACKGROUND\": \"#1a2835\", \"TEXT\": \"#d1ecff\", \"INPUT\": \"#335267\", \"TEXT_INPUT\": \"#acc2d0\", \"SCROLL\": \"#1b6497\",\n \"BUTTON\": (\"#000000\", \"#fafaf8\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBrown2\": {\"BACKGROUND\": \"#280001\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#d8d584\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#763e00\",\n \"BUTTON\": (\"#000000\", \"#daad28\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGreen\": {\"BACKGROUND\": \"#82a459\", \"TEXT\": \"#000000\", \"INPUT\": \"#d8d584\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e3ecf3\",\n \"BUTTON\": (\"#FFFFFF\", \"#517239\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBlue1\": {\"BACKGROUND\": \"#A5CADD\", \"TEXT\": \"#6E266E\", \"INPUT\": \"#E0F5FF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#E0F5FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#303952\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightPurple\": {\"BACKGROUND\": \"#B0AAC2\", \"TEXT\": \"#000000\", \"INPUT\": \"#F2EFE8\", \"SCROLL\": \"#F2EFE8\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#C2D4D8\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBlue2\": {\"BACKGROUND\": \"#AAB6D3\", \"TEXT\": \"#000000\", \"INPUT\": \"#F1F4FC\", \"SCROLL\": 
\"#F1F4FC\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#7186C7\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen3\": {\"BACKGROUND\": \"#A8C1B4\", \"TEXT\": \"#000000\", \"INPUT\": \"#DDE0DE\", \"SCROLL\": \"#E3E3E3\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#6D9F85\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue3\": {\"BACKGROUND\": \"#64778d\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#f0f3f7\", \"SCROLL\": \"#A6B2BE\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#283b5b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen4\": {\"BACKGROUND\": \"#b4ffb4\", \"TEXT\": \"#000000\", \"INPUT\": \"#ffff64\", \"SCROLL\": \"#ffb482\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#ffa0dc\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen5\": {\"BACKGROUND\": \"#92aa9d\", \"TEXT\": \"#000000\", \"INPUT\": \"#fcfff6\", \"SCROLL\": \"#fcfff6\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#000000\", \"#d0dbbd\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBrown2\": {\"BACKGROUND\": \"#a7ad7f\", \"TEXT\": \"#000000\", \"INPUT\": \"#e6d3a8\", \"SCROLL\": \"#e6d3a8\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#5d907d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBrown3\": {\"BACKGROUND\": \"#efeccb\", \"TEXT\": \"#012f2f\", \"INPUT\": \"#e6d3a8\", \"SCROLL\": \"#e6d3a8\", \"TEXT_INPUT\": \"#012f2f\",\n \"BUTTON\": (\"#FFFFFF\", \"#046380\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBlue3\": {\"BACKGROUND\": \"#a8cfdd\", \"TEXT\": \"#000000\", \"INPUT\": \"#dfedf2\", \"SCROLL\": \"#dfedf2\", \"TEXT_INPUT\": \"#000000\",\n \"BUTTON\": (\"#FFFFFF\", \"#183440\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightBrown4\": {\"BACKGROUND\": \"#d7c79e\", \"TEXT\": \"#a35638\", \"INPUT\": \"#9dab86\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#a35638\",\n \"BUTTON\": (\"#FFFFFF\", \"#a35638\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#a35638\", \"#9dab86\", \"#e08f62\", \"#d7c79e\"], },\n \"DarkTeal\": {\"BACKGROUND\": \"#003f5c\", \"TEXT\": \"#fb5b5a\", \"INPUT\": \"#bc4873\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#bc4873\", \"BUTTON\": (\"#FFFFFF\", \"#fb5b5a\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#003f5c\", \"#472b62\", \"#bc4873\", \"#fb5b5a\"], },\n \"DarkPurple\": {\"BACKGROUND\": \"#472b62\", \"TEXT\": \"#fb5b5a\", \"INPUT\": \"#bc4873\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#bc4873\",\n \"BUTTON\": (\"#FFFFFF\", \"#472b62\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#003f5c\", \"#472b62\", \"#bc4873\", \"#fb5b5a\"], },\n \"LightGreen6\": {\"BACKGROUND\": \"#eafbea\", \"TEXT\": \"#1f6650\", \"INPUT\": \"#6f9a8d\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#1f6650\",\n \"BUTTON\": 
(\"#FFFFFF\", \"#1f6650\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#1f6650\", \"#6f9a8d\", \"#ea5e5e\", \"#eafbea\"], },\n \"DarkGrey2\": {\"BACKGROUND\": \"#2b2b28\", \"TEXT\": \"#f8f8f8\", \"INPUT\": \"#f1d6ab\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f1d6ab\",\n \"BUTTON\": (\"#2b2b28\", \"#e3b04b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#2b2b28\", \"#e3b04b\", \"#f1d6ab\", \"#f8f8f8\"], },\n \"LightBrown6\": {\"BACKGROUND\": \"#f9b282\", \"TEXT\": \"#8f4426\", \"INPUT\": \"#de6b35\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#8f4426\",\n \"BUTTON\": (\"#FFFFFF\", \"#8f4426\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#8f4426\", \"#de6b35\", \"#64ccda\", \"#f9b282\"], },\n \"DarkTeal1\": {\"BACKGROUND\": \"#396362\", \"TEXT\": \"#ffe7d1\", \"INPUT\": \"#f6c89f\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f6c89f\",\n \"BUTTON\": (\"#ffe7d1\", \"#4b8e8d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#396362\", \"#4b8e8d\", \"#f6c89f\", \"#ffe7d1\"], },\n \"LightBrown7\": {\"BACKGROUND\": \"#f6c89f\", \"TEXT\": \"#396362\", \"INPUT\": \"#4b8e8d\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#396362\",\n \"BUTTON\": (\"#FFFFFF\", \"#396362\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#396362\", \"#4b8e8d\", \"#f6c89f\", \"#ffe7d1\"], },\n \"DarkPurple1\": {\"BACKGROUND\": \"#0c093c\", \"TEXT\": \"#fad6d6\", \"INPUT\": \"#eea5f6\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#eea5f6\",\n \"BUTTON\": (\"#FFFFFF\", \"#df42d1\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#0c093c\", \"#df42d1\", \"#eea5f6\", \"#fad6d6\"], },\n \"DarkGrey3\": {\"BACKGROUND\": \"#211717\", \"TEXT\": \"#dfddc7\", \"INPUT\": \"#f58b54\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f58b54\",\n \"BUTTON\": (\"#dfddc7\", \"#a34a28\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#211717\", \"#a34a28\", \"#f58b54\", \"#dfddc7\"], },\n \"LightBrown8\": {\"BACKGROUND\": \"#dfddc7\", \"TEXT\": \"#211717\", \"INPUT\": \"#a34a28\", \"TEXT_INPUT\": \"#dfddc7\", \"SCROLL\": \"#211717\",\n \"BUTTON\": (\"#dfddc7\", \"#a34a28\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#211717\", \"#a34a28\", \"#f58b54\", \"#dfddc7\"], },\n \"DarkBlue4\": {\"BACKGROUND\": \"#494ca2\", \"TEXT\": \"#e3e7f1\", \"INPUT\": \"#c6cbef\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#c6cbef\",\n \"BUTTON\": (\"#FFFFFF\", \"#8186d5\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#494ca2\", \"#8186d5\", \"#c6cbef\", \"#e3e7f1\"], },\n \"LightBlue4\": {\"BACKGROUND\": \"#5c94bd\", \"TEXT\": \"#470938\", \"INPUT\": \"#1a3e59\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#470938\",\n \"BUTTON\": (\"#FFFFFF\", \"#470938\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#470938\", \"#1a3e59\", \"#5c94bd\", \"#f2d6eb\"], },\n \"DarkTeal2\": 
{\"BACKGROUND\": \"#394a6d\", \"TEXT\": \"#c0ffb3\", \"INPUT\": \"#52de97\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#52de97\",\n \"BUTTON\": (\"#c0ffb3\", \"#394a6d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#394a6d\", \"#3c9d9b\", \"#52de97\", \"#c0ffb3\"], },\n \"DarkTeal3\": {\"BACKGROUND\": \"#3c9d9b\", \"TEXT\": \"#c0ffb3\", \"INPUT\": \"#52de97\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#52de97\",\n \"BUTTON\": (\"#c0ffb3\", \"#394a6d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#394a6d\", \"#3c9d9b\", \"#52de97\", \"#c0ffb3\"], },\n \"DarkPurple5\": {\"BACKGROUND\": \"#730068\", \"TEXT\": \"#f6f078\", \"INPUT\": \"#01d28e\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#01d28e\",\n \"BUTTON\": (\"#f6f078\", \"#730068\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#730068\", \"#434982\", \"#01d28e\", \"#f6f078\"], },\n \"DarkPurple2\": {\"BACKGROUND\": \"#202060\", \"TEXT\": \"#b030b0\", \"INPUT\": \"#602080\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#602080\",\n \"BUTTON\": (\"#FFFFFF\", \"#202040\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#202040\", \"#202060\", \"#602080\", \"#b030b0\"], },\n \"DarkBlue5\": {\"BACKGROUND\": \"#000272\", \"TEXT\": \"#ff6363\", \"INPUT\": \"#a32f80\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#a32f80\",\n \"BUTTON\": (\"#FFFFFF\", \"#341677\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#000272\", \"#341677\", \"#a32f80\", \"#ff6363\"], },\n \"LightGrey2\": {\"BACKGROUND\": \"#f6f6f6\", \"TEXT\": \"#420000\", \"INPUT\": \"#d4d7dd\", \"TEXT_INPUT\": \"#420000\", \"SCROLL\": \"#420000\",\n \"BUTTON\": (\"#420000\", \"#d4d7dd\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#420000\", \"#d4d7dd\", \"#eae9e9\", \"#f6f6f6\"], },\n \"LightGrey3\": {\"BACKGROUND\": \"#eae9e9\", \"TEXT\": \"#420000\", \"INPUT\": \"#d4d7dd\", \"TEXT_INPUT\": \"#420000\", \"SCROLL\": \"#420000\",\n \"BUTTON\": (\"#420000\", \"#d4d7dd\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#420000\", \"#d4d7dd\", \"#eae9e9\", \"#f6f6f6\"], },\n \"DarkBlue6\": {\"BACKGROUND\": \"#01024e\", \"TEXT\": \"#ff6464\", \"INPUT\": \"#8b4367\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#8b4367\",\n \"BUTTON\": (\"#FFFFFF\", \"#543864\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#01024e\", \"#543864\", \"#8b4367\", \"#ff6464\"], },\n \"DarkBlue7\": {\"BACKGROUND\": \"#241663\", \"TEXT\": \"#eae7af\", \"INPUT\": \"#a72693\", \"TEXT_INPUT\": \"#eae7af\", \"SCROLL\": \"#a72693\",\n \"BUTTON\": (\"#eae7af\", \"#160f30\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#160f30\", \"#241663\", \"#a72693\", \"#eae7af\"], },\n \"LightBrown9\": {\"BACKGROUND\": \"#f6d365\", \"TEXT\": \"#3a1f5d\", \"INPUT\": \"#c83660\", \"TEXT_INPUT\": \"#f6d365\", \"SCROLL\": \"#3a1f5d\",\n \"BUTTON\": (\"#f6d365\", \"#c83660\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, 
\"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3a1f5d\", \"#c83660\", \"#e15249\", \"#f6d365\"], },\n \"DarkPurple3\": {\"BACKGROUND\": \"#6e2142\", \"TEXT\": \"#ffd692\", \"INPUT\": \"#e16363\", \"TEXT_INPUT\": \"#ffd692\", \"SCROLL\": \"#e16363\",\n \"BUTTON\": (\"#ffd692\", \"#943855\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#6e2142\", \"#943855\", \"#e16363\", \"#ffd692\"], },\n \"LightBrown10\": {\"BACKGROUND\": \"#ffd692\", \"TEXT\": \"#6e2142\", \"INPUT\": \"#943855\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#6e2142\",\n \"BUTTON\": (\"#FFFFFF\", \"#6e2142\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#6e2142\", \"#943855\", \"#e16363\", \"#ffd692\"], },\n \"DarkPurple4\": {\"BACKGROUND\": \"#200f21\", \"TEXT\": \"#f638dc\", \"INPUT\": \"#5a3d5c\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#5a3d5c\",\n \"BUTTON\": (\"#FFFFFF\", \"#382039\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#200f21\", \"#382039\", \"#5a3d5c\", \"#f638dc\"], },\n \"LightBlue5\": {\"BACKGROUND\": \"#b2fcff\", \"TEXT\": \"#3e64ff\", \"INPUT\": \"#5edfff\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#3e64ff\",\n \"BUTTON\": (\"#FFFFFF\", \"#3e64ff\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3e64ff\", \"#5edfff\", \"#b2fcff\", \"#ecfcff\"], },\n \"DarkTeal4\": {\"BACKGROUND\": \"#464159\", \"TEXT\": \"#c7f0db\", \"INPUT\": \"#8bbabb\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#8bbabb\",\n \"BUTTON\": (\"#FFFFFF\", \"#6c7b95\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#464159\", \"#6c7b95\", \"#8bbabb\", \"#c7f0db\"], },\n \"LightTeal\": {\"BACKGROUND\": \"#c7f0db\", \"TEXT\": \"#464159\", \"INPUT\": \"#6c7b95\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#464159\",\n \"BUTTON\": (\"#FFFFFF\", \"#464159\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#464159\", \"#6c7b95\", \"#8bbabb\", \"#c7f0db\"], },\n \"DarkTeal5\": {\"BACKGROUND\": \"#8bbabb\", \"TEXT\": \"#464159\", \"INPUT\": \"#6c7b95\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#464159\",\n \"BUTTON\": (\"#c7f0db\", \"#6c7b95\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#464159\", \"#6c7b95\", \"#8bbabb\", \"#c7f0db\"], },\n \"LightGrey4\": {\"BACKGROUND\": \"#faf5ef\", \"TEXT\": \"#672f2f\", \"INPUT\": \"#99b19c\", \"TEXT_INPUT\": \"#672f2f\", \"SCROLL\": \"#672f2f\",\n \"BUTTON\": (\"#672f2f\", \"#99b19c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#672f2f\", \"#99b19c\", \"#d7d1c9\", \"#faf5ef\"], },\n \"LightGreen7\": {\"BACKGROUND\": \"#99b19c\", \"TEXT\": \"#faf5ef\", \"INPUT\": \"#d7d1c9\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#d7d1c9\",\n \"BUTTON\": (\"#FFFFFF\", \"#99b19c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#672f2f\", \"#99b19c\", \"#d7d1c9\", \"#faf5ef\"], },\n \"LightGrey5\": {\"BACKGROUND\": \"#d7d1c9\", \"TEXT\": \"#672f2f\", \"INPUT\": \"#99b19c\", \"TEXT_INPUT\": 
\"#672f2f\", \"SCROLL\": \"#672f2f\",\n \"BUTTON\": (\"#FFFFFF\", \"#672f2f\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#672f2f\", \"#99b19c\", \"#d7d1c9\", \"#faf5ef\"], },\n \"DarkBrown3\": {\"BACKGROUND\": \"#a0855b\", \"TEXT\": \"#f9f6f2\", \"INPUT\": \"#f1d6ab\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f1d6ab\",\n \"BUTTON\": (\"#FFFFFF\", \"#38470b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#38470b\", \"#a0855b\", \"#f1d6ab\", \"#f9f6f2\"], },\n \"LightBrown11\": {\"BACKGROUND\": \"#f1d6ab\", \"TEXT\": \"#38470b\", \"INPUT\": \"#a0855b\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#38470b\",\n \"BUTTON\": (\"#f9f6f2\", \"#a0855b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#38470b\", \"#a0855b\", \"#f1d6ab\", \"#f9f6f2\"], },\n \"DarkRed\": {\"BACKGROUND\": \"#83142c\", \"TEXT\": \"#f9d276\", \"INPUT\": \"#ad1d45\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#ad1d45\", \"BUTTON\": (\"#f9d276\", \"#ad1d45\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#44000d\", \"#83142c\", \"#ad1d45\", \"#f9d276\"], },\n \"DarkTeal6\": {\"BACKGROUND\": \"#204969\", \"TEXT\": \"#fff7f7\", \"INPUT\": \"#dadada\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#dadada\",\n \"BUTTON\": (\"#000000\", \"#fff7f7\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#204969\", \"#08ffc8\", \"#dadada\", \"#fff7f7\"], },\n \"DarkBrown4\": {\"BACKGROUND\": \"#252525\", \"TEXT\": \"#ff0000\", \"INPUT\": \"#af0404\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#af0404\",\n \"BUTTON\": (\"#FFFFFF\", \"#252525\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#252525\", \"#414141\", \"#af0404\", \"#ff0000\"], },\n \"LightYellow\": {\"BACKGROUND\": \"#f4ff61\", \"TEXT\": \"#27aa80\", \"INPUT\": \"#32ff6a\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#27aa80\",\n \"BUTTON\": (\"#f4ff61\", \"#27aa80\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#27aa80\", \"#32ff6a\", \"#a8ff3e\", \"#f4ff61\"], },\n \"DarkGreen1\": {\"BACKGROUND\": \"#2b580c\", \"TEXT\": \"#fdef96\", \"INPUT\": \"#f7b71d\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#f7b71d\",\n \"BUTTON\": (\"#fdef96\", \"#2b580c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#2b580c\", \"#afa939\", \"#f7b71d\", \"#fdef96\"], },\n \"LightGreen8\": {\"BACKGROUND\": \"#c8dad3\", \"TEXT\": \"#63707e\", \"INPUT\": \"#93b5b3\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#63707e\",\n \"BUTTON\": (\"#FFFFFF\", \"#63707e\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#63707e\", \"#93b5b3\", \"#c8dad3\", \"#f2f6f5\"], },\n \"DarkTeal7\": {\"BACKGROUND\": \"#248ea9\", \"TEXT\": \"#fafdcb\", \"INPUT\": \"#aee7e8\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#aee7e8\",\n \"BUTTON\": (\"#000000\", \"#fafdcb\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#248ea9\", \"#28c3d4\", 
\"#aee7e8\", \"#fafdcb\"], },\n \"DarkBlue8\": {\"BACKGROUND\": \"#454d66\", \"TEXT\": \"#d9d872\", \"INPUT\": \"#58b368\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#58b368\",\n \"BUTTON\": (\"#000000\", \"#009975\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#009975\", \"#454d66\", \"#58b368\", \"#d9d872\"], },\n \"DarkBlue9\": {\"BACKGROUND\": \"#263859\", \"TEXT\": \"#ff6768\", \"INPUT\": \"#6b778d\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#6b778d\",\n \"BUTTON\": (\"#ff6768\", \"#263859\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#17223b\", \"#263859\", \"#6b778d\", \"#ff6768\"], },\n \"DarkBlue10\": {\"BACKGROUND\": \"#0028ff\", \"TEXT\": \"#f1f4df\", \"INPUT\": \"#10eaf0\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#10eaf0\",\n \"BUTTON\": (\"#f1f4df\", \"#24009c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#24009c\", \"#0028ff\", \"#10eaf0\", \"#f1f4df\"], },\n \"DarkBlue11\": {\"BACKGROUND\": \"#6384b3\", \"TEXT\": \"#e6f0b6\", \"INPUT\": \"#b8e9c0\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#b8e9c0\",\n \"BUTTON\": (\"#e6f0b6\", \"#684949\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#684949\", \"#6384b3\", \"#b8e9c0\", \"#e6f0b6\"], },\n \"DarkTeal8\": {\"BACKGROUND\": \"#71a0a5\", \"TEXT\": \"#212121\", \"INPUT\": \"#665c84\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#212121\",\n \"BUTTON\": (\"#fab95b\", \"#665c84\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#212121\", \"#665c84\", \"#71a0a5\", \"#fab95b\"], },\n \"DarkRed1\": {\"BACKGROUND\": \"#c10000\", \"TEXT\": \"#eeeeee\", \"INPUT\": \"#dedede\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#dedede\", \"BUTTON\": (\"#c10000\", \"#eeeeee\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#c10000\", \"#ff4949\", \"#dedede\", \"#eeeeee\"], },\n \"LightBrown5\": {\"BACKGROUND\": \"#fff591\", \"TEXT\": \"#e41749\", \"INPUT\": \"#f5587b\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e41749\",\n \"BUTTON\": (\"#fff591\", \"#e41749\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#e41749\", \"#f5587b\", \"#ff8a5c\", \"#fff591\"], },\n \"LightGreen9\": {\"BACKGROUND\": \"#f1edb3\", \"TEXT\": \"#3b503d\", \"INPUT\": \"#4a746e\", \"TEXT_INPUT\": \"#f1edb3\", \"SCROLL\": \"#3b503d\",\n \"BUTTON\": (\"#f1edb3\", \"#3b503d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3b503d\", \"#4a746e\", \"#c8cf94\", \"#f1edb3\"], \"DESCRIPTION\": [\"Green\", \"Turquoise\", \"Yellow\"], },\n \"DarkGreen2\": {\"BACKGROUND\": \"#3b503d\", \"TEXT\": \"#f1edb3\", \"INPUT\": \"#c8cf94\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#c8cf94\",\n \"BUTTON\": (\"#f1edb3\", \"#3b503d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3b503d\", \"#4a746e\", \"#c8cf94\", \"#f1edb3\"], \"DESCRIPTION\": [\"Green\", \"Turquoise\", \"Yellow\"], },\n \"LightGray1\": {\"BACKGROUND\": \"#f2f2f2\", \"TEXT\": \"#222831\", \"INPUT\": 
\"#393e46\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#222831\",\n \"BUTTON\": (\"#f2f2f2\", \"#222831\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#222831\", \"#393e46\", \"#f96d00\", \"#f2f2f2\"], \"DESCRIPTION\": [\"#000000\", \"Grey\", \"Orange\", \"Grey\", \"Autumn\"], },\n \"DarkGrey4\": {\"BACKGROUND\": \"#52524e\", \"TEXT\": \"#e9e9e5\", \"INPUT\": \"#d4d6c8\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#d4d6c8\",\n \"BUTTON\": (\"#FFFFFF\", \"#9a9b94\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#52524e\", \"#9a9b94\", \"#d4d6c8\", \"#e9e9e5\"], \"DESCRIPTION\": [\"Grey\", \"Pastel\", \"Winter\"], },\n \"DarkBlue12\": {\"BACKGROUND\": \"#324e7b\", \"TEXT\": \"#f8f8f8\", \"INPUT\": \"#86a6df\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#86a6df\",\n \"BUTTON\": (\"#FFFFFF\", \"#5068a9\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#324e7b\", \"#5068a9\", \"#86a6df\", \"#f8f8f8\"], \"DESCRIPTION\": [\"Blue\", \"Grey\", \"Cold\", \"Winter\"], },\n \"DarkPurple6\": {\"BACKGROUND\": \"#070739\", \"TEXT\": \"#e1e099\", \"INPUT\": \"#c327ab\", \"TEXT_INPUT\": \"#e1e099\", \"SCROLL\": \"#c327ab\",\n \"BUTTON\": (\"#e1e099\", \"#521477\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#070739\", \"#521477\", \"#c327ab\", \"#e1e099\"], \"DESCRIPTION\": [\"#000000\", \"Purple\", \"Yellow\", \"Dark\"], },\n \"DarkPurple7\": {\"BACKGROUND\": \"#191930\", \"TEXT\": \"#B1B7C5\", \"INPUT\": \"#232B5C\", \"TEXT_INPUT\": \"#D0E3E7\", \"SCROLL\": \"#B1B7C5\",\n \"BUTTON\": (\"#272D38\", \"#B1B7C5\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue13\": {\"BACKGROUND\": \"#203562\", \"TEXT\": \"#e3e8f8\", \"INPUT\": \"#c0c5cd\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#c0c5cd\",\n \"BUTTON\": (\"#FFFFFF\", \"#3e588f\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#203562\", \"#3e588f\", \"#c0c5cd\", \"#e3e8f8\"], \"DESCRIPTION\": [\"Blue\", \"Grey\", \"Wedding\", \"Cold\"], },\n \"DarkBrown5\": {\"BACKGROUND\": \"#3c1b1f\", \"TEXT\": \"#f6e1b5\", \"INPUT\": \"#e2bf81\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e2bf81\",\n \"BUTTON\": (\"#3c1b1f\", \"#f6e1b5\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3c1b1f\", \"#b21e4b\", \"#e2bf81\", \"#f6e1b5\"], \"DESCRIPTION\": [\"Brown\", \"Red\", \"Yellow\", \"Warm\"], },\n \"DarkGreen3\": {\"BACKGROUND\": \"#062121\", \"TEXT\": \"#eeeeee\", \"INPUT\": \"#e4dcad\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e4dcad\",\n \"BUTTON\": (\"#eeeeee\", \"#181810\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#062121\", \"#181810\", \"#e4dcad\", \"#eeeeee\"], \"DESCRIPTION\": [\"#000000\", \"#000000\", \"Brown\", \"Grey\"], },\n \"DarkBlack1\": {\"BACKGROUND\": \"#181810\", \"TEXT\": \"#eeeeee\", \"INPUT\": \"#e4dcad\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e4dcad\",\n \"BUTTON\": (\"#FFFFFF\", \"#062121\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n 
\"COLOR_LIST\": [\"#062121\", \"#181810\", \"#e4dcad\", \"#eeeeee\"], \"DESCRIPTION\": [\"#000000\", \"#000000\", \"Brown\", \"Grey\"], },\n \"DarkGrey5\": {\"BACKGROUND\": \"#343434\", \"TEXT\": \"#f3f3f3\", \"INPUT\": \"#e9dcbe\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e9dcbe\",\n \"BUTTON\": (\"#FFFFFF\", \"#8e8b82\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#343434\", \"#8e8b82\", \"#e9dcbe\", \"#f3f3f3\"], \"DESCRIPTION\": [\"Grey\", \"Brown\"], },\n \"LightBrown12\": {\"BACKGROUND\": \"#8e8b82\", \"TEXT\": \"#f3f3f3\", \"INPUT\": \"#e9dcbe\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e9dcbe\",\n \"BUTTON\": (\"#f3f3f3\", \"#8e8b82\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#343434\", \"#8e8b82\", \"#e9dcbe\", \"#f3f3f3\"], \"DESCRIPTION\": [\"Grey\", \"Brown\"], },\n \"DarkTeal9\": {\"BACKGROUND\": \"#13445a\", \"TEXT\": \"#fef4e8\", \"INPUT\": \"#446878\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#446878\",\n \"BUTTON\": (\"#fef4e8\", \"#446878\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#13445a\", \"#970747\", \"#446878\", \"#fef4e8\"], \"DESCRIPTION\": [\"Red\", \"Grey\", \"Blue\", \"Wedding\", \"Retro\"], },\n \"DarkBlue14\": {\"BACKGROUND\": \"#21273d\", \"TEXT\": \"#f1f6f8\", \"INPUT\": \"#b9d4f1\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#b9d4f1\",\n \"BUTTON\": (\"#FFFFFF\", \"#6a759b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#21273d\", \"#6a759b\", \"#b9d4f1\", \"#f1f6f8\"], \"DESCRIPTION\": [\"Blue\", \"#000000\", \"Grey\", \"Cold\", \"Winter\"], },\n \"LightBlue6\": {\"BACKGROUND\": \"#f1f6f8\", \"TEXT\": \"#21273d\", \"INPUT\": \"#6a759b\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#21273d\",\n \"BUTTON\": (\"#f1f6f8\", \"#6a759b\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#21273d\", \"#6a759b\", \"#b9d4f1\", \"#f1f6f8\"], \"DESCRIPTION\": [\"Blue\", \"#000000\", \"Grey\", \"Cold\", \"Winter\"], },\n \"DarkGreen4\": {\"BACKGROUND\": \"#044343\", \"TEXT\": \"#e4e4e4\", \"INPUT\": \"#045757\", \"TEXT_INPUT\": \"#e4e4e4\", \"SCROLL\": \"#045757\",\n \"BUTTON\": (\"#e4e4e4\", \"#045757\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#222222\", \"#044343\", \"#045757\", \"#e4e4e4\"], \"DESCRIPTION\": [\"#000000\", \"Turquoise\", \"Grey\", \"Dark\"], },\n \"DarkGreen5\": {\"BACKGROUND\": \"#1b4b36\", \"TEXT\": \"#e0e7f1\", \"INPUT\": \"#aebd77\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#aebd77\",\n \"BUTTON\": (\"#FFFFFF\", \"#538f6a\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#1b4b36\", \"#538f6a\", \"#aebd77\", \"#e0e7f1\"], \"DESCRIPTION\": [\"Green\", \"Grey\"], },\n \"DarkTeal10\": {\"BACKGROUND\": \"#0d3446\", \"TEXT\": \"#d8dfe2\", \"INPUT\": \"#71adb5\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#71adb5\",\n \"BUTTON\": (\"#FFFFFF\", \"#176d81\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#0d3446\", \"#176d81\", \"#71adb5\", \"#d8dfe2\"], \"DESCRIPTION\": [\"Grey\", \"Turquoise\", \"Winter\", 
\"Cold\"], },\n \"DarkGrey6\": {\"BACKGROUND\": \"#3e3e3e\", \"TEXT\": \"#ededed\", \"INPUT\": \"#68868c\", \"TEXT_INPUT\": \"#ededed\", \"SCROLL\": \"#68868c\",\n \"BUTTON\": (\"#FFFFFF\", \"#405559\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3e3e3e\", \"#405559\", \"#68868c\", \"#ededed\"], \"DESCRIPTION\": [\"Grey\", \"Turquoise\", \"Winter\"], },\n \"DarkTeal11\": {\"BACKGROUND\": \"#405559\", \"TEXT\": \"#ededed\", \"INPUT\": \"#68868c\", \"TEXT_INPUT\": \"#ededed\", \"SCROLL\": \"#68868c\",\n \"BUTTON\": (\"#ededed\", \"#68868c\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#3e3e3e\", \"#405559\", \"#68868c\", \"#ededed\"], \"DESCRIPTION\": [\"Grey\", \"Turquoise\", \"Winter\"], },\n \"LightBlue7\": {\"BACKGROUND\": \"#9ed0e0\", \"TEXT\": \"#19483f\", \"INPUT\": \"#5c868e\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#19483f\",\n \"BUTTON\": (\"#FFFFFF\", \"#19483f\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#19483f\", \"#5c868e\", \"#ff6a38\", \"#9ed0e0\"], \"DESCRIPTION\": [\"Orange\", \"Blue\", \"Turquoise\"], },\n \"LightGreen10\": {\"BACKGROUND\": \"#d8ebb5\", \"TEXT\": \"#205d67\", \"INPUT\": \"#639a67\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#205d67\",\n \"BUTTON\": (\"#d8ebb5\", \"#205d67\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#205d67\", \"#639a67\", \"#d9bf77\", \"#d8ebb5\"], \"DESCRIPTION\": [\"Blue\", \"Green\", \"Brown\", \"Vintage\"], },\n \"DarkBlue15\": {\"BACKGROUND\": \"#151680\", \"TEXT\": \"#f1fea4\", \"INPUT\": \"#375fc0\", \"TEXT_INPUT\": \"#f1fea4\", \"SCROLL\": \"#375fc0\",\n \"BUTTON\": (\"#f1fea4\", \"#1c44ac\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#151680\", \"#1c44ac\", \"#375fc0\", \"#f1fea4\"], \"DESCRIPTION\": [\"Blue\", \"Yellow\", \"Cold\"], },\n \"DarkBlue16\": {\"BACKGROUND\": \"#1c44ac\", \"TEXT\": \"#f1fea4\", \"INPUT\": \"#375fc0\", \"TEXT_INPUT\": \"#f1fea4\", \"SCROLL\": \"#375fc0\",\n \"BUTTON\": (\"#f1fea4\", \"#151680\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#151680\", \"#1c44ac\", \"#375fc0\", \"#f1fea4\"], \"DESCRIPTION\": [\"Blue\", \"Yellow\", \"Cold\"], },\n \"DarkTeal12\": {\"BACKGROUND\": \"#004a7c\", \"TEXT\": \"#fafafa\", \"INPUT\": \"#e8f1f5\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#e8f1f5\",\n \"BUTTON\": (\"#fafafa\", \"#005691\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#004a7c\", \"#005691\", \"#e8f1f5\", \"#fafafa\"], \"DESCRIPTION\": [\"Grey\", \"Blue\", \"Cold\", \"Winter\"], },\n \"LightBrown13\": {\"BACKGROUND\": \"#ebf5ee\", \"TEXT\": \"#921224\", \"INPUT\": \"#bdc6b8\", \"TEXT_INPUT\": \"#921224\", \"SCROLL\": \"#921224\",\n \"BUTTON\": (\"#FFFFFF\", \"#921224\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#921224\", \"#bdc6b8\", \"#bce0da\", \"#ebf5ee\"], \"DESCRIPTION\": [\"Red\", \"Blue\", \"Grey\", \"Vintage\", \"Wedding\"], },\n \"DarkBlue17\": {\"BACKGROUND\": \"#21294c\", \"TEXT\": \"#f9f2d7\", \"INPUT\": \"#f2dea8\", \"TEXT_INPUT\": 
\"#000000\", \"SCROLL\": \"#f2dea8\",\n \"BUTTON\": (\"#f9f2d7\", \"#141829\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#141829\", \"#21294c\", \"#f2dea8\", \"#f9f2d7\"], \"DESCRIPTION\": [\"#000000\", \"Blue\", \"Yellow\"], },\n \"DarkBrown6\": {\"BACKGROUND\": \"#785e4d\", \"TEXT\": \"#f2eee3\", \"INPUT\": \"#baaf92\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#baaf92\",\n \"BUTTON\": (\"#FFFFFF\", \"#785e4d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#785e4d\", \"#ff8426\", \"#baaf92\", \"#f2eee3\"], \"DESCRIPTION\": [\"Grey\", \"Brown\", \"Orange\", \"Autumn\"], },\n \"DarkGreen6\": {\"BACKGROUND\": \"#5c715e\", \"TEXT\": \"#f2f9f1\", \"INPUT\": \"#ddeedf\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#ddeedf\",\n \"BUTTON\": (\"#f2f9f1\", \"#5c715e\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#5c715e\", \"#b6cdbd\", \"#ddeedf\", \"#f2f9f1\"], \"DESCRIPTION\": [\"Grey\", \"Green\", \"Vintage\"], },\n \"DarkGreen7\": {\"BACKGROUND\": \"#0C231E\", \"TEXT\": \"#efbe1c\", \"INPUT\": \"#153C33\", \"TEXT_INPUT\": \"#efbe1c\", \"SCROLL\": \"#153C33\",\n \"BUTTON\": (\"#efbe1c\", \"#153C33\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey7\": {\"BACKGROUND\": \"#4b586e\", \"TEXT\": \"#dddddd\", \"INPUT\": \"#574e6d\", \"TEXT_INPUT\": \"#dddddd\", \"SCROLL\": \"#574e6d\",\n \"BUTTON\": (\"#dddddd\", \"#43405d\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#43405d\", \"#4b586e\", \"#574e6d\", \"#dddddd\"], \"DESCRIPTION\": [\"Grey\", \"Winter\", \"Cold\"], },\n \"DarkRed2\": {\"BACKGROUND\": \"#ab1212\", \"TEXT\": \"#f6e4b5\", \"INPUT\": \"#cd3131\", \"TEXT_INPUT\": \"#f6e4b5\", \"SCROLL\": \"#cd3131\", \"BUTTON\": (\"#f6e4b5\", \"#ab1212\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#ab1212\", \"#1fad9f\", \"#cd3131\", \"#f6e4b5\"], \"DESCRIPTION\": [\"Turquoise\", \"Red\", \"Yellow\"], },\n \"LightGrey6\": {\"BACKGROUND\": \"#e3e3e3\", \"TEXT\": \"#233142\", \"INPUT\": \"#455d7a\", \"TEXT_INPUT\": \"#e3e3e3\", \"SCROLL\": \"#233142\",\n \"BUTTON\": (\"#e3e3e3\", \"#455d7a\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"COLOR_LIST\": [\"#233142\", \"#455d7a\", \"#f95959\", \"#e3e3e3\"], \"DESCRIPTION\": [\"#000000\", \"Blue\", \"Red\", \"Grey\"], },\n \"HotDogStand\": {\"BACKGROUND\": \"red\", \"TEXT\": \"yellow\", \"INPUT\": \"yellow\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"yellow\", \"BUTTON\": (\"red\", \"yellow\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey8\": {\"BACKGROUND\": \"#19232D\", \"TEXT\": \"#ffffff\", \"INPUT\": \"#32414B\", \"TEXT_INPUT\": \"#ffffff\", \"SCROLL\": \"#505F69\",\n \"BUTTON\": (\"#ffffff\", \"#32414B\"), \"PROGRESS\": (\"#505F69\", \"#32414B\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey9\": {\"BACKGROUND\": \"#36393F\", \"TEXT\": \"#DCDDDE\", \"INPUT\": \"#40444B\", \"TEXT_INPUT\": \"#ffffff\", \"SCROLL\": \"#202225\",\n \"BUTTON\": (\"#202225\", \"#B9BBBE\"), \"PROGRESS\": (\"#202225\", \"#40444B\"), 
\"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey10\": {\"BACKGROUND\": \"#1c1e23\", \"TEXT\": \"#cccdcf\", \"INPUT\": \"#272a31\", \"TEXT_INPUT\": \"#8b9fde\", \"SCROLL\": \"#313641\",\n \"BUTTON\": (\"#f5f5f6\", \"#2e3d5a\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey11\": {\"BACKGROUND\": \"#1c1e23\", \"TEXT\": \"#cccdcf\", \"INPUT\": \"#313641\", \"TEXT_INPUT\": \"#cccdcf\", \"SCROLL\": \"#313641\",\n \"BUTTON\": (\"#f5f5f6\", \"#313641\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey12\": {\"BACKGROUND\": \"#1c1e23\", \"TEXT\": \"#8b9fde\", \"INPUT\": \"#313641\", \"TEXT_INPUT\": \"#8b9fde\", \"SCROLL\": \"#313641\",\n \"BUTTON\": (\"#cccdcf\", \"#2e3d5a\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey13\": {\"BACKGROUND\": \"#1c1e23\", \"TEXT\": \"#cccdcf\", \"INPUT\": \"#272a31\", \"TEXT_INPUT\": \"#cccdcf\", \"SCROLL\": \"#313641\",\n \"BUTTON\": (\"#8b9fde\", \"#313641\"), \"PROGRESS\": (\"#cccdcf\", \"#272a31\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkGrey14\": {\"BACKGROUND\": \"#24292e\", \"TEXT\": \"#fafbfc\", \"INPUT\": \"#1d2125\", \"TEXT_INPUT\": \"#fafbfc\", \"SCROLL\": \"#1d2125\",\n \"BUTTON\": (\"#fafbfc\", \"#155398\"), \"PROGRESS\": (\"#155398\", \"#1d2125\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBrown7\": {\"BACKGROUND\": \"#2c2417\", \"TEXT\": \"#baa379\", \"INPUT\": \"#baa379\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#392e1c\",\n \"BUTTON\": (\"#000000\", \"#baa379\"), \"PROGRESS\": (\"#baa379\", \"#453923\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"Python\": {\"BACKGROUND\": \"#3d7aab\", \"TEXT\": \"#ffde56\", \"INPUT\": \"#295273\", \"TEXT_INPUT\": \"#ffde56\", \"SCROLL\": \"#295273\", \"BUTTON\": (\"#ffde56\", \"#295273\"),\n \"PROGRESS\": (\"#ffde56\", \"#295273\"), \"BORDER\": 1, \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n}\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 10839, "n_words": 4824, "vocab_size": 1112, "complexity": 1, "nloc": 14, "token_counts": 255, "n_ast_nodes": 19192, "n_identifiers": 131, "random_cut": "def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None),\n margins=(None, None),\n element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None,\n slider_border_width=None, slider_relief=None, slider_orientation=None,\n autoclose_time=None, message_box_line_width=None,\n progress_meter_border_depth=None, progress_meter_style=None,\n progress_meter_relief=None, progress_meter_color=None, progress_meter_size=None,\n text_justification=None, background_color=None, element_background_color=None,\n text_element_background_color=None, input_elements_background_color=None, input_text_color=None,\n scrollbar_color=None, text_color=None, element_text_color=None, debug_win_size=(None, None),\n window_location=(None, None), error_button_color=(None, None), tooltip_time=None, tooltip_font=None, use_ttk_buttons=None, ttk_theme=None,\n suppress_error_popups=None, suppress_raise_key_errors=None, suppress_key_guessing=None,warn_button_key_duplicates=False, enable_treeview_869_patch=None,\n 
enable_mac_notitlebar_patch=None, use_custom_titlebar=None, titlebar_background_color=None, titlebar_text_color=None, titlebar_font=None,\n titlebar_icon=None, user_settings_path=None, pysimplegui_settings_path=None, pysimplegui_settings_filename=None, keep_on_top=None, dpi_awareness=None, scaling=None, disable_modal_windows=None, tooltip_offset=(None, None)):\n \n\n global DEFAULT_ELEMENT_SIZE\n global DEFAULT_BUTTON_ELEMENT_SIZE\n global DEFAULT_MARGINS # Margins for each LEFT/RIGHT margin is first term\n global DEFAULT_ELEMENT_PADDING # Padding between elements (row, col) in pixels\n global DEFAULT_AUTOSIZE_TEXT\n global DEFAULT_AUTOSIZE_BUTTONS\n global DEFAULT_FONT\n global DEFAULT_BORDER_WIDTH\n global DEFAULT_AUTOCLOSE_TIME\n global DEFAULT_BUTTON_COLOR\n global MESSAGE_BOX_LINE_WIDTH\n global DEFAULT_PROGRESS_BAR_BORDER_WIDTH\n global DEFAULT_PROGRESS_BAR_STYLE\n global DEFAULT_PROGRESS_BAR_RELIEF\n global DEFAULT_PROGRESS_BAR_COLOR\n global DEFAULT_PROGRESS_BAR_SIZE\n global DEFAULT_TEXT_JUSTIFICATION\n global DEFAULT_DEBUG_WINDOW_SIZE\n global DEFAULT_SLIDER_BORDER_WIDTH\n global DEFAULT_SLIDER_RELIEF\n global DEFAULT_SLIDER_ORIENTATION\n global DEFAULT_BACKGROUND_COLOR\n global DEFAULT_INPUT_ELEMENTS_COLOR\n global DEFAULT_ELEMENT_BACKGROUND_COLOR\n global DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR\n global DEFAULT_SCROLLBAR_COLOR\n global DEFAULT_TEXT_COLOR\n global DEFAULT_WINDOW_LOCATION\n global DEFAULT_ELEMENT_TEXT_COLOR\n global DEFAULT_INPUT_TEXT_COLOR\n global DEFAULT_TOOLTIP_TIME\n global DEFAULT_ERROR_BUTTON_COLOR\n global DEFAULT_TTK_THEME\n global USE_TTK_BUTTONS\n global TOOLTIP_FONT\n global SUPPRESS_ERROR_POPUPS\n global SUPPRESS_RAISE_KEY_ERRORS\n global SUPPRESS_KEY_GUESSING\n global WARN_DUPLICATE_BUTTON_KEY_ERRORS\n global ENABLE_TREEVIEW_869_PATCH\n global ENABLE_MAC_NOTITLEBAR_PATCH\n global USE_CUSTOM_TITLEBAR\n global CUSTOM_TITLEBAR_BACKGROUND_COLOR\n global CUSTOM_TITLEBAR_TEXT_COLOR\n global CUSTOM_TITLEBAR_ICON\n global CUSTOM_TITLEBAR_FONT\n global DEFAULT_USER_SETTINGS_PATH\n global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH\n global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME\n global DEFAULT_KEEP_ON_TOP\n global DEFAULT_SCALING\n global DEFAULT_MODAL_WINDOWS_ENABLED\n global DEFAULT_TOOLTIP_OFFSET\n global _pysimplegui_user_settings\n # global _my_windows\n\n if icon:\n Window._user_defined_icon = icon\n # _my_windows._user_defined_icon = icon\n\n if button_color != None:\n if button_color == COLOR_SYSTEM_DEFAULT:\n DEFAULT_BUTTON_COLOR = (COLOR_SYSTEM_DEFAULT, COLOR_SYSTEM_DEFAULT)\n else:\n DEFAULT_BUTTON_COLOR = button_color\n\n if element_size != (None, None):\n DEFAULT_ELEMENT_SIZE = element_size\n\n if button_element_size != (None, None):\n DEFAULT_BUTTON_ELEMENT_SIZE = button_element_size\n\n if margins != (None, None):\n DEFAULT_MARGINS = margins\n\n if element_padding != (None, None):\n DEFAULT_ELEMENT_PADDING = element_padding\n\n if auto_size_text != None:\n DEFAULT_AUTOSIZE_TEXT = auto_size_text\n\n if auto_size_buttons != None:\n DEFAULT_AUTOSIZE_BUTTONS = auto_size_buttons\n\n if font != None:\n DEFAULT_FONT = font\n\n if border_width != None:\n DEFAULT_BORDER_WIDTH = border_width\n\n if autoclose_time != None:\n DEFAULT_AUTOCLOSE_TIME = autoclose_time\n\n if message_box_line_width != None:\n MESSAGE_BOX_LINE_WIDTH = message_box_line_width\n\n if progress_meter_border_depth != None:\n DEFAULT_PROGRESS_BAR_BORDER_WIDTH = progress_meter_border_depth\n\n if progress_meter_style != None:\n warnings.warn('You can no longer set a progress bar style. 
All ttk styles must be the same for the window', UserWarning)\n # DEFAULT_PROGRESS_BAR_STYLE = progress_meter_style\n\n if progress_meter_relief != None:\n DEFAULT_PROGRESS_BAR_RELIEF = progress_meter_relief\n\n if progress_meter_color != None:\n DEFAULT_PROGRESS_BAR_COLOR = progress_meter_color\n\n if progress_meter_size != None:\n DEFAULT_PROGRESS_BAR_SIZE = progress_meter_size\n\n if slider_border_width != None:\n DEFAULT_SLIDER_BORDER_WIDTH = slider_border_width\n\n if slider_orientation != None:\n DEFAULT_SLIDER_ORIENTATION = slider_orientation\n\n if slider_relief != None:\n DEFAULT_SLIDER_RELIEF = slider_relief\n\n if text_justification != None:\n DEFAULT_TEXT_JUSTIFICATION = text_justification\n\n if background_color != None:\n DEFAULT_BACKGROUND_COLOR = background_color\n\n if text_element_background_color != None:\n DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR = text_element_background_color\n\n if input_elements_background_color != None:\n DEFAULT_INPUT_ELEMENTS_COLOR = input_elements_background_color\n\n if element_background_color != None:\n DEFAULT_ELEMENT_BACKGROUND_COLOR = element_background_color\n\n if window_location != (None, None):\n DEFAULT_WINDOW_LOCATION = window_location\n\n if debug_win_size != (None, None):\n DEFAULT_DEBUG_WINDOW_SIZE = debug_win_size\n\n if text_color != None:\n DEFAULT_TEXT_COLOR = text_color\n\n if scrollbar_color != None:\n DEFAULT_SCROLLBAR_COLOR = scrollbar_color\n\n if element_text_color != None:\n DEFAULT_ELEMENT_TEXT_COLOR = element_text_color\n\n if input_text_color is not None:\n DEFAULT_INPUT_TEXT_COLOR = input_text_color\n\n if tooltip_time is not None:\n DEFAULT_TOOLTIP_TIME = tooltip_time\n\n if error_button_color != (None, None):\n DEFAULT_ERROR_BUTTON_COLOR = error_button_color\n\n if ttk_theme is not None:\n DEFAULT_TTK_THEME = ttk_theme\n\n if use_ttk_buttons is not None:\n USE_TTK_BUTTONS = use_ttk_buttons\n\n if tooltip_font is not None:\n TOOLTIP_FONT = tooltip_font\n\n if suppress_error_popups is not None:\n SUPPRESS_ERROR_POPUPS = suppress_error_popups\n\n if suppress_raise_key_errors is not None:\n SUPPRESS_RAISE_KEY_ERRORS = suppress_raise_key_errors\n\n if suppress_key_guessing is not None:\n SUPPRESS_KEY_GUESSING = suppress_key_guessing\n\n if warn_button_key_duplicates is not None:\n WARN_DUPLICATE_BUTTON_KEY_ERRORS = warn_button_key_duplicates\n\n if enable_treeview_869_patch is not None:\n ENABLE_TREEVIEW_869_PATCH = enable_treeview_869_patch\n\n if enable_mac_notitlebar_patch is not None:\n ENABLE_MAC_NOTITLEBAR_PATCH = enable_mac_notitlebar_patch\n\n if use_custom_titlebar is not None:\n USE_CUSTOM_TITLEBAR = use_custom_titlebar\n\n if titlebar_background_color is not None:\n CUSTOM_TITLEBAR_BACKGROUND_COLOR = titlebar_background_color\n\n if titlebar_text_color is not None:\n CUSTOM_TITLEBAR_TEXT_COLOR = titlebar_text_color\n\n if titlebar_font is not None:\n CUSTOM_TITLEBAR_FONT = titlebar_font\n\n if titlebar_icon is not None:\n CUSTOM_TITLEBAR_ICON = titlebar_icon\n\n if user_settings_path is not None:\n DEFAULT_USER_SETTINGS_PATH = user_settings_path\n\n if pysimplegui_settings_path is not None:\n DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH = pysimplegui_settings_path\n\n if pysimplegui_settings_filename is not None:\n DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME = pysimplegui_settings_filename\n\n if pysimplegui_settings_filename is not None or pysimplegui_settings_filename is not None:\n _pysimplegui_user_settings = UserSettings(filename=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME,\n 
path=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH)\n\n if keep_on_top is not None:\n DEFAULT_KEEP_ON_TOP = keep_on_top\n\n if dpi_awareness is True:\n if running_windows():\n if platform.release() == \"7\":\n ctypes.windll.user32.SetProcessDPIAware()\n elif platform.release() == \"8\" or platform.release() == \"10\":\n ctypes.windll.shcore.SetProcessDpiAwareness(1)\n\n if scaling is not None:\n DEFAULT_SCALING = scaling\n\n if disable_modal_windows is not None:\n DEFAULT_MODAL_WINDOWS_ENABLED = not disable_modal_windows\n\n if tooltip_offset != (None, None):\n DEFAULT_TOOLTIP_OFFSET = tooltip_offset\n\n return True\n\n\n# ----------------------------------------------------------------- #\n\n# .########.##.....##.########.##.....##.########..######.\n# ....##....##.....##.##.......###...###.##.......##....##\n# ....##....##.....##.##.......####.####.##.......##......\n# ....##....#########.######...##.###.##.######....######.\n# ....##....##.....##.##.......##.....##.##.............##\n# ....##....##.....##.##.......##.....##.##.......##....##\n# ....##....##.....##.########.##.....##.########..######.\n\n# ----------------------------------------------------------------- #\n\n# The official Theme code\n\n#################### ChangeLookAndFeel #######################\n# Predefined settings that will change the colors and styles #\n# of the elements. #\n##############################################################\nLOOK_AND_FEEL_TABLE = {\n \"SystemDefault\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1,\n \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"SystemDefaultForReal\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT,\n \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT, \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT,\n \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1, \"PROGRESS_DEPTH\": 0, },\n \"SystemDefault1\": {\"BACKGROUND\": COLOR_SYSTEM_DEFAULT, \"TEXT\": COLOR_SYSTEM_DEFAULT, \"INPUT\": COLOR_SYSTEM_DEFAULT, \"TEXT_INPUT\": COLOR_SYSTEM_DEFAULT,\n \"SCROLL\": COLOR_SYSTEM_DEFAULT, \"BUTTON\": COLOR_SYSTEM_DEFAULT, \"PROGRESS\": COLOR_SYSTEM_DEFAULT, \"BORDER\": 1, \"SLIDER_DEPTH\": 1,\n \"PROGRESS_DEPTH\": 0, },\n \"Material1\": {\"BACKGROUND\": \"#E3F2FD\", \"TEXT\": \"#000000\", \"INPUT\": \"#86A8FF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#86A8FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#5079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"Material2\": {\"BACKGROUND\": \"#FAFAFA\", \"TEXT\": \"#000000\", \"INPUT\": \"#004EA1\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#5EA7FF\",\n \"BUTTON\": (\"#FFFFFF\", \"#0079D3\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 0, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0,\n \"ACCENT1\": \"#FF0266\", \"ACCENT2\": \"#FF5C93\", \"ACCENT3\": \"#C5003C\", },\n \"Reddit\": {\"BACKGROUND\": \"#ffffff\", \"TEXT\": \"#1a1a1b\", \"INPUT\": \"#dae0e6\", \"TEXT_INPUT\": \"#222222\", \"SCROLL\": \"#a5a4a4\", \"BUTTON\": (\"#FFFFFF\", \"#0079d3\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, \"ACCENT1\": \"#ff5414\", 
\"ACCENT2\": \"#33a8ff\",\n \"ACCENT3\": \"#dbf0ff\", },\n \"Topanga\": {\"BACKGROUND\": \"#282923\", \"TEXT\": \"#E7DB74\", \"INPUT\": \"#393a32\", \"TEXT_INPUT\": \"#E7C855\", \"SCROLL\": \"#E7C855\", \"BUTTON\": (\"#E7C855\", \"#284B5A\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, \"ACCENT1\": \"#c15226\", \"ACCENT2\": \"#7a4d5f\",\n \"ACCENT3\": \"#889743\", },\n \"GreenTan\": {\"BACKGROUND\": \"#9FB8AD\", \"TEXT\": '#000000', \"INPUT\": \"#F7F3EC\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#F7F3EC\", \"BUTTON\": (\"#FFFFFF\", \"#475841\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Dark\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#FFFFFF\", \"#004F00\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"LightGreen\": {\"BACKGROUND\": \"#B7CECE\", \"TEXT\": \"#000000\", \"INPUT\": \"#FDFFF7\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#FDFFF7\",\n \"BUTTON\": (\"#FFFFFF\", \"#658268\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"ACCENT1\": \"#76506d\",\n \"ACCENT2\": \"#5148f1\", \"ACCENT3\": \"#0a1c84\", \"PROGRESS_DEPTH\": 0, },\n \"Dark2\": {\"BACKGROUND\": \"#404040\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#FFFFFF\", \"TEXT_INPUT\": \"#000000\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#FFFFFF\", \"#004F00\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Black\": {\"BACKGROUND\": \"#000000\", \"TEXT\": \"#FFFFFF\", \"INPUT\": \"#4D4D4D\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#707070\", \"BUTTON\": (\"#000000\", \"#FFFFFF\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"Tan\": {\"BACKGROUND\": \"#fdf6e3\", \"TEXT\": \"#268bd1\", \"INPUT\": \"#eee8d5\", \"TEXT_INPUT\": \"#6c71c3\", \"SCROLL\": \"#eee8d5\", \"BUTTON\": (\"#FFFFFF\", \"#063542\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"TanBlue\": {\"BACKGROUND\": \"#e5dece\", \"TEXT\": \"#063289\", \"INPUT\": \"#f9f8f4\", \"TEXT_INPUT\": \"#242834\", \"SCROLL\": \"#eee8d5\", \"BUTTON\": (\"#FFFFFF\", \"#063289\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkTanBlue\": {\"BACKGROUND\": \"#242834\", \"TEXT\": \"#dfe6f8\", \"INPUT\": \"#97755c\", \"TEXT_INPUT\": \"#FFFFFF\", \"SCROLL\": \"#a9afbb\",\n \"BUTTON\": (\"#FFFFFF\", \"#063289\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkAmber\": {\"BACKGROUND\": \"#2c2825\", \"TEXT\": \"#fdcb52\", \"INPUT\": \"#705e52\", \"TEXT_INPUT\": \"#fdcb52\", \"SCROLL\": \"#705e52\",\n \"BUTTON\": (\"#000000\", \"#fdcb52\"), \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1, \"SLIDER_DEPTH\": 0, \"PROGRESS_DEPTH\": 0, },\n \"DarkBlue\": {\"BACKGROUND\": \"#1a2835\", \"TEXT\": \"#d1ecff\", \"INPUT\": \"#335267\", \"TEXT_INPUT\": \"#acc2d0\", \"SCROLL\": \"#1b6497\", \"BUTTON\": (\"#000000\", \"#fafaf8\"),\n \"PROGRESS\": DEFAULT_PROGRESS_BAR_COMPUTE, \"BORDER\": 1", "d_id": 53473, "documentation": { "docstring": "\n :param icon: Can be either a filename or Base64 value. 
For Windows if filename, it MUST be ICO format. For Linux, must NOT be ICO. Most portable is to use a Base64 of a PNG file. This works universally across all OS's\n :type icon: bytes | str\n :param button_color: Color of the button (text, background)\n :type button_color: (str, str) or str\n :param element_size: element size (width, height) in characters\n :type element_size: (int, int)\n :param button_element_size: Size of button\n :type button_element_size: (int, int)\n :param margins: (left/right, top/bottom) tkinter margins around outsize. Amount of pixels to leave inside the window's frame around the edges before your elements are shown.\n :type margins: (int, int)\n :param element_padding: Default amount of padding to put around elements in window (left/right, top/bottom) or ((left, right), (top, bottom))\n :type element_padding: (int, int) or ((int, int),(int,int))\n :param auto_size_text: True if the Widget should be shrunk to exactly fit the number of chars to show\n :type auto_size_text: bool\n :param auto_size_buttons: True if Buttons in this Window should be sized to exactly fit the text on this.\n :type auto_size_buttons: (bool)\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike\n :type font: (str or (str, int[, str]) or None)\n :param border_width: width of border around element\n :type border_width: (int)\n :param slider_border_width: Width of the border around sliders\n :type slider_border_width: (int)\n :param slider_relief: Type of relief to use for sliders\n :type slider_relief: (str)\n :param slider_orientation: ???\n :type slider_orientation: ???\n :param autoclose_time: ???\n :type autoclose_time: ???\n :param message_box_line_width: ???\n :type message_box_line_width: ???\n :param progress_meter_border_depth: ???\n :type progress_meter_border_depth: ???\n :param progress_meter_style: You can no longer set a progress bar style. All ttk styles must be the same for the window\n :type progress_meter_style: ???\n :param progress_meter_relief:\n :type progress_meter_relief: ???\n :param progress_meter_color: ???\n :type progress_meter_color: ???\n :param progress_meter_size: ???\n :type progress_meter_size: ???\n :param text_justification: Default text justification for all Text Elements in window\n :type text_justification: 'left' | 'right' | 'center'\n :param background_color: color of background\n :type background_color: (str)\n :param element_background_color: element background color\n :type element_background_color: (str)\n :param text_element_background_color: text element background color\n :type text_element_background_color: (str)\n :param input_elements_background_color: Default color to use for the background of input elements\n :type input_elements_background_color: (str)\n :param input_text_color: Default color to use for the text for Input elements\n :type input_text_color: (str)\n :param scrollbar_color: Default color to use for the slider trough\n :type scrollbar_color: (str)\n :param text_color: color of the text\n :type text_color: (str)\n :param element_text_color: Default color to use for Text elements\n :type element_text_color: (str)\n :param debug_win_size: window size\n :type debug_win_size: (int, int)\n :param window_location: Default location to place windows. 
Not setting will center windows on the display\n :type window_location: (int, int) | None\n :param error_button_color: (Default = (None))\n :type error_button_color: ???\n :param tooltip_time: time in milliseconds to wait before showing a tooltip. Default is 400ms\n :type tooltip_time: (int)\n :param tooltip_font: font to use for all tooltips\n :type tooltip_font: str or Tuple[str, int] or Tuple[str, int, str]\n :param use_ttk_buttons: if True will cause all buttons to be ttk buttons\n :type use_ttk_buttons: (bool)\n :param ttk_theme: Theme to use with ttk widgets. Choices (on Windows) include - 'default', 'winnative', 'clam', 'alt', 'classic', 'vista', 'xpnative'\n :type ttk_theme: (str)\n :param suppress_error_popups: If True then error popups will not be shown if generated internally to PySimpleGUI\n :type suppress_error_popups: (bool)\n :param suppress_raise_key_errors: If True then key errors won't be raised (you'll still get popup error)\n :type suppress_raise_key_errors: (bool)\n :param suppress_key_guessing: If True then key errors won't try and find closest matches for you\n :type suppress_key_guessing: (bool)\n :param warn_button_key_duplicates: If True then duplicate Button Keys generate warnings (not recommended as they're expected)\n :type warn_button_key_duplicates: (bool) \n :param enable_treeview_869_patch: If True, then will use the treeview color patch for tk 8.6.9\n :type enable_treeview_869_patch: (bool)\n :param enable_mac_notitlebar_patch: If True then Windows with no titlebar use an alternative technique when tkinter version < 8.6.10\n :type enable_mac_notitlebar_patch: (bool)\n :param use_custom_titlebar: If True then a custom titlebar is used instead of the normal system titlebar\n :type use_custom_titlebar: (bool)\n :param titlebar_background_color: If custom titlebar indicated by use_custom_titlebar, then use this as background color\n :type titlebar_background_color: str | None\n :param titlebar_text_color: If custom titlebar indicated by use_custom_titlebar, then use this as text color\n :type titlebar_text_color: str | None\n :param titlebar_font: If custom titlebar indicated by use_custom_titlebar, then use this as title font\n :type titlebar_font: (str or (str, int[, str]) or None) | None\n :param titlebar_icon: If custom titlebar indicated by use_custom_titlebar, then use this as the icon (file or base64 bytes)\n :type titlebar_icon: bytes | str\n :param user_settings_path: default path for user_settings API calls. Expanded with os.path.expanduser so can contain ~ to represent user\n :type user_settings_path: (str)\n :param pysimplegui_settings_path: default path for the global PySimpleGUI user_settings\n :type pysimplegui_settings_path: (str)\n :param pysimplegui_settings_filename: default filename for the global PySimpleGUI user_settings\n :type pysimplegui_settings_filename: (str)\n :param keep_on_top: If True then all windows will automatically be set to keep_on_top=True\n :type keep_on_top: (bool)\n :param dpi_awareness: If True then will turn on DPI awareness (Windows only at the moment)\n :type dpi_awareness: (bool)\n :param scaling: Sets the default scaling for all windows including popups, etc.\n :type scaling: (float)\n :param disable_modal_windows: If True then all windows, including popups, will not be modal windows\n :type disable_modal_windows: (bool)\n :param tooltip_offset: Offset to use for tooltips as a tuple. 
These values will be added to the mouse location when the widget was entered.\n :type tooltip_offset: ((None, None) | (int, int))\n :return: None\n :rtype: None\n ", "n_words": 889, "vocab_size": 356, "n_whitespaces": 2847, "language": "en" } }, { "id": 208027, "commit_id": "59263b0409e3f02dc16ca8a3bd1e42b5a3eba36d", "repo": "celery", "path": "celery/utils/imports.py", "file_name": "imports.py", "fun_name": "find_module", "commit_message": "Minor refactors, found by static analysis (#7587)\n\n* Remove deprecated methods in `celery.local.Proxy`\r\n\r\n* Collapse conditionals for readability\r\n\r\n* Remove unused parameter `uuid`\r\n\r\n* Remove unused import `ClusterOptions`\r\n\r\n* Remove dangerous mutable default argument\r\n\r\nContinues work from #5478\r\n\r\n* Remove always `None` and unused global variable\r\n\r\n* Remove unreachable `elif` block\r\n\r\n* Consolidate import statements\r\n\r\n* Add missing parameter to `os._exit()`\r\n\r\n* Add missing assert statement\r\n\r\n* Remove unused global `WindowsError`\r\n\r\n* Use `mkstemp` instead of deprecated `mktemp`\r\n\r\n* No need for `for..else` constructs in loops that don't break\r\n\r\nIn these cases where the loop returns or raises instead of breaking, it\r\nis simpler to just put the code that runs after the loop completes right\r\nafter the loop instead.\r\n\r\n* Use the previously unused parameter `compat_modules`\r\n\r\nPreviously this parameter was always overwritten by the value of\r\n`COMPAT_MODULES.get(name, ())`, which was very likely unintentional.\r\n\r\n* Remove unused local variable `tz`\r\n\r\n* Make `assert_received` actually check for `is_received`\r\n\r\nPreviously, it called `is_accepted`, which was likely a copy-paste\r\nmistake from the `assert_accepted` method.\r\n\r\n* Use previously unused `args` and `kwargs` params\r\n\r\nUnlike other backends' `__reduce__` methods, the one from `RedisBackend`\r\nsimply overwrites `args` and `kwargs` instead of adding to them. This\r\nchange makes it more in line with other backends.\r\n\r\n* Update celery/backends/filesystem.py\r\n\r\nCo-authored-by: Gabriel Soldani <1268700+gabrielsoldani@users.noreply.github.com>\r\n\r\nCo-authored-by: Asif Saif Uddin ", "code": "def find_module(module, path=None, imp=None):\n \n if imp is None:\n imp = import_module\n with cwd_in_path():\n try:\n return imp(module)\n except ImportError:\n # Raise a more specific error if the problem is that one of the\n # dot-separated segments of the module name is not a package.\n if '.' 
in module:\n parts = module.split('.')\n for i, part in enumerate(parts[:-1]):\n package = '.'.join(parts[:i + 1])\n try:\n mpart = imp(package)\n except ImportError:\n # Break out and re-raise the original ImportError\n # instead.\n break\n try:\n mpart.__path__\n except AttributeError:\n raise NotAPackage(package)\n raise\n\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 432, "n_words": 84, "vocab_size": 61, "complexity": 7, "nloc": 20, "token_counts": 105, "n_ast_nodes": 185, "n_identifiers": 18, "random_cut": "def find_module(module, path=None, imp=None):\n \n if imp is None:\n imp = import_module\n with cwd_in_path():\n try:\n return imp(module)\n except I", "d_id": 52179, "documentation": { "docstring": "Version of :func:`imp.find_module` supporting dots.", "n_words": 5, "vocab_size": 5, "n_whitespaces": 4, "language": "en" } }, { "id": 30117, "commit_id": "fa2ad657482aca9dc628e6d7062b8badf2706bb6", "repo": "spotify-downloader", "path": "spotdl/utils/ffmpeg.py", "file_name": "ffmpeg.py", "fun_name": "get_ffmpeg_path", "commit_message": "v4 init", "code": "def get_ffmpeg_path() -> Optional[Path]:\n \n\n # Check if ffmpeg is installed\n global_ffmpeg = shutil.which(\"ffmpeg\")\n if global_ffmpeg:\n return Path(global_ffmpeg)\n\n # Get local ffmpeg path\n return get_local_ffmpeg()\n\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 49, "n_words": 24, "vocab_size": 20, "complexity": 2, "nloc": 9, "token_counts": 30, "n_ast_nodes": 56, "n_identifiers": 7, "random_cut": "def get_ffmpeg_path() -> Optional[Path]:\n \n\n # Check if ffmpeg is installed\n global_ffmpeg = shutil.which(\"ffmpeg\")\n if global_ffmpeg:\n return Path(global_ffmpeg)\n\n ", "d_id": 5326, "documentation": { "docstring": "\n Get path to global ffmpeg binary or a local ffmpeg binary.\n Or None if not found.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 26, "language": "en" } }, { "id": 273959, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/rnn/legacy_cell_wrappers.py", "file_name": "legacy_cell_wrappers.py", "fun_name": "__call__", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def __call__(self, inputs, state, scope=None):\n \n return self._call_wrapped_cell(\n inputs, state, cell_call_fn=self.cell.__call__, scope=scope\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "def __call__(self, inputs, state, scope=None):\n \n return self._call_wrapped_cell(\n inputs, state, cell_call_fn=self.cell.__call__, scope=scope\n )\n", "d_id": 81158, "documentation": { "docstring": "Runs the RNN cell step computation.\n\n We assume that the wrapped RNNCell is being built within its `__call__`\n method. 
We directly use the wrapped cell's `__call__` in the overridden\n wrapper `__call__` method.\n\n This allows to use the wrapped cell and the non-wrapped cell equivalently\n when using `__call__`.\n\n Args:\n inputs: A tensor with wrapped cell's input.\n state: A tensor or tuple of tensors with wrapped cell's state.\n scope: VariableScope for the subgraph created in the wrapped cells'\n `__call__`.\n\n Returns:\n A pair containing:\n\n - Output: A tensor with cell's output.\n - New state: A tensor or tuple of tensors with new wrapped cell's state.\n ", "n_words": 102, "vocab_size": 59, "n_whitespaces": 223, "language": "en" } }, { "id": 284531, "commit_id": "0e3b62e143c981d81fb46a7e7bb75f93d9159198", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/portfolio_model.py", "file_name": "portfolio_model.py", "fun_name": "get_kurtosis", "commit_message": "Portfolio improvements (#1818)\n\n* improve portfolio controller\r\n\r\n* improve menu ux with disabling command when port or bench are not loaded\r\n\r\n* allow custom reset with benchmark and portfolio loaded\r\n\r\n* bench needs portfolio loaded to use start date, reflect that\r\n\r\n* fix tests\r\n\r\n* allow to see sum of a portfolio holdings\r\n\r\n* add r-square to portfolio\r\n\r\n* add skewness of data\r\n\r\n* add kurtosis\r\n\r\n* add stats\r\n\r\n* allow perf command to select a period\r\n\r\n* add yearly returns to cumulative return plot\r\n\r\n* add individual rolling volatility\r\n\r\n* add individual rolling sharpe\r\n\r\n* add individual rolling sortino\r\n\r\n* add individual rolling beta\r\n\r\n* add period to cumulative returns\r\n\r\n* clean up on aisle 5\r\n\r\n* minor fix\r\n\r\n* add volatility, sharpe ratio, sortino ratio and maximum drawdown ratio\r\n\r\n* remove duplicated metrics\r\n\r\n* check for portfolio and benchmark more modular\r\n\r\n* fix tests\r\n\r\n* remove sqrt(N) and N from sharpe and sortino calculations\r\n\r\n* allow hold to export raw data from tail\r\n\r\n* automatically add space before and after table\r\n\r\n* add portfolio holdings in percentage\r\n\r\n* fix relative dates to be more accurate\r\n\r\n* refactor metric command to allow to select a metric of interest and check different periods\r\n\r\n* fix cumulative return and implement new yearly return command\r\n\r\n* add daily returns graph\r\n\r\n* add distribution of daily returns command\r\n\r\n* add monthly returns command\r\n\r\n* add summary command with multiple metrics for a specific period\r\n\r\n* calculate yearly (out)performance\r\n\r\n* fix show\r\n\r\n* rbeta with benchmark of 1\r\n\r\n* improve mret style\r\n\r\n* improve title of distribution\r\n\r\n* improve volatility\r\n\r\n* minor improvement in doc\r\n\r\n* improve mret and yret\r\n\r\n* tests\r\n\r\n* update portfolio content on hugo docs\r\n\r\n* fix ycrv hugo docs\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* fix issue\r\n\r\nCo-authored-by: Jeroen Bouma ", "code": "def get_kurtosis(self) -> pd.DataFrame:\n \n vals = list()\n for period in portfolio_helper.PERIODS:\n vals.append(\n [\n round(\n scipy.stats.kurtosis(\n portfolio_helper.filter_df_by_period(self.returns, period)\n ),\n 3,\n ),\n round(\n scipy.stats.skew(\n 
portfolio_helper.filter_df_by_period(\n self.benchmark_returns, period\n )\n ),\n 3,\n ),\n ]\n )\n return pd.DataFrame(\n vals, index=portfolio_helper.PERIODS, columns=[\"Portfolio\", \"Benchmark\"]\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 462, "n_words": 38, "vocab_size": 30, "complexity": 2, "nloc": 31, "token_counts": 98, "n_ast_nodes": 151, "n_identifiers": 20, "random_cut": "def get_kurtosis(self) -> pd.DataFrame:\n \n vals = list()\n for period in portfolio_helper.PERIODS:\n vals.append(\n [\n round(\n scipy.stats.kurtosis(\n portfolio_helper.filter_df_by_period(self.returns, period)\n ),\n 3,\n ),\n round(\n scipy.stats.skew(\n portfolio_helper.filter_df_by_period(\n self.benchmark_returns, period\n )\n ),\n 3,\n ", "d_id": 84781, "documentation": { "docstring": "Class method that retrieves kurtosis for portfolio and benchmark selected\n\n Returns\n -------\n pd.DataFrame\n DataFrame with kurtosis for portfolio and benchmark for different periods\n ", "n_words": 23, "vocab_size": 17, "n_whitespaces": 62, "language": "en" } }, { "id": 269933, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks.py", "file_name": "callbacks.py", "fun_name": "_save_model", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _save_model(self, epoch, batch, logs):\n \n logs = logs or {}\n\n if (\n isinstance(self.save_freq, int)\n or self.epochs_since_last_save >= self.period\n ):\n # Block only when saving interval is reached.\n logs = tf_utils.sync_to_numpy_or_python_type(logs)\n self.epochs_since_last_save = 0\n filepath = self._get_file_path(epoch, batch, logs)\n\n try:\n if self.save_best_only:\n current = logs.get(self.monitor)\n if current is None:\n logging.warning(\n \"Can save best model only with %s available, \"\n \"skipping.\",\n self.monitor,\n )\n else:\n if self.monitor_op(current, self.best):\n if self.verbose > 0:\n io_utils.print_msg(\n f\"\\nEpoch {epoch + 1}: {self.monitor} improved \"\n f\"from {self.best:.5f} to {current:.5f}, \"\n f\"saving model to {filepath}\"\n )\n self.best = current\n if self.save_weights_only:\n self.model.save_weights(\n filepath,\n overwrite=True,\n options=self._options,\n )\n else:\n self.model.save(\n filepath,\n overwrite=True,\n options=self._options,\n )\n else:\n if self.verbose > 0:\n io_utils.print_msg(\n f\"\\nEpoch {epoch + 1}: \"\n f\"{self.monitor} did not improve from {self.best:.5f}\"\n )\n else:\n if self.verbose > 0:\n io_utils.print_msg(\n f\"\\nEpoch {epoch + 1}: saving model to {filepath}\"\n )\n if self.save_weights_only:\n self.model.save_weights(\n filepath, overwrite=True, options=self._options\n )\n else:\n self.model.save(\n filepath, overwrite=True, options=self._options\n )\n\n self._maybe_remove_file()\n except IsADirectoryError as e: # h5py 3.x\n raise IOError(\n \"Please specify a non-directory filepath for \"\n \"ModelCheckpoint. Filepath used is an existing \"\n f\"directory: {filepath}\"\n )\n except IOError as e: # h5py 2.x\n # `e.errno` appears to be `None` so checking the content of `e.args[0]`.\n if \"is a directory\" in str(e.args[0]).lower():\n raise IOError(\n \"Please specify a non-directory filepath for \"\n \"ModelCheckpoint. 
Filepath used is an existing \"\n f\"directory: f{filepath}\"\n )\n # Re-throw the error for any other causes.\n raise e\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 25, "n_whitespaces": 1932, "n_words": 230, "vocab_size": 123, "complexity": 15, "nloc": 73, "token_counts": 306, "n_ast_nodes": 579, "n_identifiers": 39, "random_cut": "def _save_model(self, epoch, batch, logs):\n \n logs = logs or {}\n\n if (\n isinstance(self.save_freq, int)\n or self.epochs_since_last_save >= self.period\n ):\n # Block only when saving interval is reached.\n logs = tf_utils.sync_to_numpy_or_python_type(logs)\n self.epochs_since_last_save = 0\n filepath = self._get_file_path(epoch, batch, logs)\n\n try:\n if self.save_best_only:\n current = logs.get(self.monitor)\n if current is None:\n logging.warning(\n \"Can save best model only with %s available, \"\n \"skipping.\",\n self.monitor,\n )\n else:\n if self.monitor_op(current, self.best):\n if self.verbose > 0:\n io_utils.print_msg(\n f\"\\nEpoch {epoch + 1}: {self.monitor} improved \"\n f\"from {self.best:.5f} to {current:.5f}, \"\n f\"saving model to {filepath}\"\n )\n self.best = current\n if self.save_weights_only:\n self.model.save_weights(\n filepath,\n overwrite=True,\n options=self._options,\n )\n else:\n self.model.save(\n filepath,\n overwrite=True,\n options=self._options,\n )\n else:\n if self.verbose > 0:\n io_utils.print_msg(\n f\"\\nEpoch {epoch + 1}: \"\n f\"{self.monitor} did not improve from {self.best:.5f}\"\n )\n else:\n if self.verbose > 0:\n io_utils.print_msg(\n f\"\\nEpoch {epoch + 1}: saving model to {filepath}\"\n )\n if self.save_weights_only:\n self.model.save_weights(\n filepath, overwrite=True, options=self._options\n )\n else:\n self.model.save(\n filepath, overwrite=True, options=self._options\n )\n\n self._maybe_remove_file()\n except IsADirectoryError as e: # h5py 3.x\n raise IOError(\n \"Please specify a non-directory filepath for \"\n \"ModelCheckpoint. Filepath used is an existing \"\n f\"directory: {filepath}\"\n )\n except IOError as e: # h5py 2.x\n # `e.errno` appears to be `None` so checking t", "d_id": 80341, "documentation": { "docstring": "Saves the model.\n\n Args:\n epoch: the epoch this iteration is in.\n batch: the batch this iteration is in. `None` if the `save_freq`\n is set to `epoch`.\n logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.\n ", "n_words": 36, "vocab_size": 26, "n_whitespaces": 96, "language": "en" } }, { "id": 177172, "commit_id": "56032abfdff74aebe7e6adbaa711bf4fd6bd7826", "repo": "networkx", "path": "networkx/algorithms/approximation/steinertree.py", "file_name": "steinertree.py", "fun_name": "steiner_tree", "commit_message": "Add Mehlhorn Steiner approximations (#5629)\n\n* Add Wu et al. 
and Mehlhorn Steiner approximations\r\n\r\n* Change default steiner tree approximation method\r\n\r\n* Add missing space in error message\r\n\r\n* Changes as suggested\r\n\r\n* Fix Kou implementation\r\n\r\n* Bugfix and variable name change for Mehlhorn\r\n\r\n* Add failing test case for Wu Steiner tree\r\n\r\n* Add additional valid Steiner tree for test\r\n\r\n* Remove Wu et al implementation\r\n\r\n* Style change + remove unused code", "code": "def steiner_tree(G, terminal_nodes, weight=\"weight\", method=None):\n r\n if method is None:\n import warnings\n\n msg = (\n \"steiner_tree will change default method from 'kou' to 'mehlhorn'\"\n \"in version 3.2.\\nSet the `method` kwarg to remove this warning.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=4)\n method = \"kou\"\n\n try:\n algo = ALGORITHMS[method]\n except KeyError as e:\n msg = f\"{method} is not a valid choice for an algorithm.\"\n raise ValueError(msg) from e\n\n edges = algo(G, terminal_nodes, weight)\n # For multigraph we should add the minimal weight edge keys\n if G.is_multigraph():\n edges = (\n (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges\n )\n T = G.edge_subgraph(edges)\n return T\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 231, "n_words": 102, "vocab_size": 81, "complexity": 5, "nloc": 86, "token_counts": 141, "n_ast_nodes": 226, "n_identifiers": 24, "random_cut": "def steiner_tree(G, terminal_nodes, weight=\"weight\", method=None):\n r\n if method is None:\n import warnings\n\n msg = (\n \"steiner_tree will change default method from 'kou' to 'mehlhorn'\"\n ", "d_id": 42302, "documentation": { "docstring": "Return an approximation to the minimum Steiner tree of a graph.\n\n The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes` (also *S*)\n is a tree within `G` that spans those nodes and has minimum size (sum of\n edge weights) among all such trees.\n\n The approximation algorithm is specified with the `method` keyword\n argument. All three available algorithms produce a tree whose weight is\n within a (2 - (2 / l)) factor of the weight of the optimal Steiner tree,\n where *l* is the minimum number of leaf nodes across all possible Steiner\n trees.\n\n * `kou` [2]_ (runtime $O(|S| |V|^2)$) computes the minimum spanning tree of\n the subgraph of the metric closure of *G* induced by the terminal nodes,\n where the metric closure of *G* is the complete graph in which each edge is\n weighted by the shortest path distance between the nodes in *G*.\n * `mehlhorn` [3]_ (runtime $O(|E|+|V|\\log|V|)$) modifies Kou et al.'s\n algorithm, beginning by finding the closest terminal node for each\n non-terminal. This data is used to create a complete graph containing only\n the terminal nodes, in which edge is weighted with the shortest path\n distance between them. 
The algorithm then proceeds in the same way as Kou\n et al..\n\n Parameters\n ----------\n G : NetworkX graph\n\n terminal_nodes : list\n A list of terminal nodes for which minimum steiner tree is\n to be found.\n\n weight : string (default = 'weight')\n Use the edge attribute specified by this string as the edge weight.\n Any edge attribute not present defaults to 1.\n\n method : string, optional (default = 'kou')\n The algorithm to use to approximate the Steiner tree.\n Supported options: 'kou', 'mehlhorn'.\n Other inputs produce a ValueError.\n\n Returns\n -------\n NetworkX graph\n Approximation to the minimum steiner tree of `G` induced by\n `terminal_nodes` .\n\n Notes\n -----\n For multigraphs, the edge between two nodes with minimum weight is the\n edge put into the Steiner tree.\n\n\n References\n ----------\n .. [1] Steiner_tree_problem on Wikipedia.\n https://en.wikipedia.org/wiki/Steiner_tree_problem\n .. [2] Kou, L., G. Markowsky, and L. Berman. 1981.\n ‘A Fast Algorithm for Steiner Trees’.\n Acta Informatica 15 (2): 141–45.\n https://doi.org/10.1007/BF00288961.\n .. [3] Mehlhorn, Kurt. 1988.\n ‘A Faster Approximation Algorithm for the Steiner Problem in Graphs’.\n Information Processing Letters 27 (3): 125–28.\n https://doi.org/10.1016/0020-0190(88)90066-X.\n ", "n_words": 366, "vocab_size": 202, "n_whitespaces": 612, "language": "en" } }, { "id": 72955, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/api/v2/views.py", "file_name": "views.py", "fun_name": "find_object", "commit_message": "Reformat with black", "code": "def find_object(self, queryset, request):\n \n if \"id\" in request.GET:\n return queryset.get(id=request.GET[\"id\"])\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 35, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 3, "token_counts": 31, "n_ast_nodes": 53, "n_identifiers": 7, "random_cut": "def find_object(self, queryset, request):\n \n if \"id\" in request.GET:\n ", "d_id": 15916, "documentation": { "docstring": "\n Override this to implement more find methods.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 163372, "commit_id": "5ba7d714014ae8feaccc0dd4a98890828cf2832d", "repo": "pandas", "path": "pandas/core/dtypes/cast.py", "file_name": "cast.py", "fun_name": "maybe_infer_dtype_type", "commit_message": "CLN: assorted, privatize, easy issues (#45305)", "code": "def _maybe_infer_dtype_type(element):\n \n tipo = None\n if hasattr(element, \"dtype\"):\n tipo = element.dtype\n elif is_list_like(element):\n element = np.asarray(element)\n tipo = element.dtype\n return tipo\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 57, "n_words": 21, "vocab_size": 14, "complexity": 3, "nloc": 8, "token_counts": 43, "n_ast_nodes": 74, "n_identifiers": 8, "random_cut": "def _maybe_infer_dtype_type(element):\n \n tipo = None\n if hasattr(element, \"dtype\"):\n tipo = element.dtype\n elif is_list_like(element):\n element = np.asarray(element)\n tipo = ", "d_id": 39421, "documentation": { "docstring": "\n Try to infer an object's dtype, for use in arithmetic ops.\n\n Uses `element.dtype` if that's available.\n Objects implementing the iterator protocol are cast to a NumPy array,\n and from there the array's type is used.\n\n Parameters\n ----------\n element : object\n Possibly has a 
`.dtype` attribute, and possibly the iterator\n protocol.\n\n Returns\n -------\n tipo : type\n\n Examples\n --------\n >>> from collections import namedtuple\n >>> Foo = namedtuple(\"Foo\", \"dtype\")\n >>> _maybe_infer_dtype_type(Foo(np.dtype(\"i8\")))\n dtype('int64')\n ", "n_words": 70, "vocab_size": 59, "n_whitespaces": 136, "language": "en" } }, { "id": 309776, "commit_id": "c109d59862d1e2e28e54160ee75f9465771e99eb", "repo": "core", "path": "tests/components/alexa/test_smart_home.py", "file_name": "test_smart_home.py", "fun_name": "test_create_api_message_special", "commit_message": "Fix comments in Alexa (#64289)", "code": "def test_create_api_message_special():\n \n request = get_new_request(\"Alexa.PowerController\", \"TurnOn\")\n directive_header = request[\"directive\"][\"header\"]\n directive_header.pop(\"correlationToken\")\n directive = messages.AlexaDirective(request)\n\n msg = directive.response(\"testName\", \"testNameSpace\")._response\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n\n assert msg[\"header\"][\"messageId\"] is not None\n assert msg[\"header\"][\"messageId\"] != directive_header[\"messageId\"]\n assert \"correlationToken\" not in msg[\"header\"]\n assert msg[\"header\"][\"name\"] == \"testName\"\n assert msg[\"header\"][\"namespace\"] == \"testNameSpace\"\n assert msg[\"header\"][\"payloadVersion\"] == \"3\"\n\n assert msg[\"payload\"] == {}\n assert \"endpoint\" not in msg\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 107, "n_words": 59, "vocab_size": 36, "complexity": 1, "nloc": 16, "token_counts": 133, "n_ast_nodes": 252, "n_identifiers": 11, "random_cut": "def test_create_api_message_special():\n \n request = get_new_request(\"Alexa.PowerController\", \"TurnOn\")\n directive_header = request[\"directive\"][\"header\"]\n directive_header.pop(\"correlationToken\")\n directive = messages.AlexaDirective(request)\n\n msg = directive.response(\"testName\", \"testNameSpace\")._response\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n\n assert msg[\"header\"][\"messageId\"] is not None\n assert msg[\"header\"][\"messageId\"] != directive_header[\"messageId\"]\n assert \"correlationToken\" not in msg[\"header\"]\n assert msg[\"header\"][\"name\"] == \"testName\"\n assert msg[\"header\"][\"namespace\"] == \"testNameSpace\"\n assert msg[\"header\"][\"payloadVersion\"] == \"3\"\n\n assert msg[\"payload\"] == {}\n assert \"endpoint\" not in msg\n\n", "d_id": 108472, "documentation": { "docstring": "Create an API message response of a request with non defaults.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 32880, "commit_id": "4a51075a96d2049f368b5f3dd6c0e9f08f599b62", "repo": "transformers", "path": "tests/mixed_int8/test_mixed_int8.py", "file_name": "test_mixed_int8.py", "fun_name": "tearDown", "commit_message": "`bitsandbytes` - `Linear8bitLt` integration into `transformers` models (#17901)\n\n* first commit\r\n\r\n* correct replace function\r\n\r\n* add final changes\r\n\r\n- works like charm!\r\n- cannot implement tests yet\r\n- tested\r\n\r\n* clean up a bit\r\n\r\n* add bitsandbytes dependencies\r\n\r\n* working version\r\n\r\n- added import function\r\n- added bitsandbytes utils file\r\n\r\n* small fix\r\n\r\n* small fix\r\n\r\n- fix import issue\r\n\r\n* fix import issues\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* 
refactor a bit\r\n\r\n- move bitsandbytes utils to utils\r\n- change comments on functions\r\n\r\n* reformat docstring\r\n\r\n- reformat docstring on init_empty_weights_8bit\r\n\r\n* Update src/transformers/__init__.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* revert bad formatting\r\n\r\n* change to bitsandbytes\r\n\r\n* refactor a bit\r\n\r\n- remove init8bit since it is useless\r\n\r\n* more refactoring\r\n\r\n- fixed init empty weights issue\r\n- added threshold param\r\n\r\n* small hack to make it work\r\n\r\n* Update src/transformers/modeling_utils.py\r\n\r\n* Update src/transformers/modeling_utils.py\r\n\r\n* revmoe the small hack\r\n\r\n* modify utils file\r\n\r\n* make style + refactor a bit\r\n\r\n* create correctly device map\r\n\r\n* add correct dtype for device map creation\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* apply suggestions\r\n\r\n- remove with torch.grad\r\n- do not rely on Python bool magic!\r\n\r\n* add docstring\r\n\r\n - add docstring for new kwargs\r\n\r\n* add docstring\r\n\r\n- comment `replace_8bit_linear` function\r\n- fix weird formatting\r\n\r\n* - added more documentation\r\n- added new utility function for memory footprint tracking\r\n- colab demo to add\r\n\r\n* few modifs\r\n\r\n- typo doc\r\n- force cast into float16 when load_in_8bit is enabled\r\n\r\n* added colab link\r\n\r\n* add test architecture + docstring a bit\r\n\r\n* refactor a bit testing class\r\n\r\n* make style + refactor a bit\r\n\r\n* enhance checks\r\n\r\n- add more checks\r\n- start writing saving test\r\n\r\n* clean up a bit\r\n\r\n* male style\r\n\r\n* add more details on doc\r\n\r\n* add more tests\r\n\r\n- still needs to fix 2 tests\r\n\r\n* replace by \"or\"\r\n\r\n- could not fix it from GitHub GUI\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* refactor a bit testing code + add readme\r\n\r\n* make style\r\n\r\n* fix import issue\r\n\r\n* Update src/transformers/modeling_utils.py\r\n\r\nCo-authored-by: Michael Benayoun \r\n\r\n* add few comments\r\n\r\n* add more doctring + make style\r\n\r\n* more docstring\r\n\r\n* raise error when loaded in 8bit\r\n\r\n* make style\r\n\r\n* add warning if loaded on CPU\r\n\r\n* add small sanity check\r\n\r\n* fix small comment\r\n\r\n* add bitsandbytes on dockerfile\r\n\r\n* Improve documentation\r\n\r\n- improve documentation from comments\r\n\r\n* add few comments\r\n\r\n* slow tests pass on the VM but not on the CI VM\r\n\r\n* Fix merge conflict\r\n\r\n* make style\r\n\r\n* another test should pass on a multi gpu setup\r\n\r\n* fix bad import in testing file\r\n\r\n* Fix slow tests\r\n\r\n- remove dummy batches\r\n- no more CUDA illegal memory errors\r\n\r\n* odify dockerfile\r\n\r\n* Update docs/source/en/main_classes/model.mdx\r\n\r\n* Update Dockerfile\r\n\r\n* Update model.mdx\r\n\r\n* Update Dockerfile\r\n\r\n* Apply suggestions from code review\r\n\r\n* few modifications\r\n\r\n- lm head can stay on disk/cpu\r\n- change model name so that test pass\r\n\r\n* change test value\r\n\r\n- change test value to the correct output\r\n- torch bmm changed to baddmm in bloom modeling when merging\r\n\r\n* modify installation guidelines\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger 
<35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* replace `n`by `name`\r\n\r\n* merge `load_in_8bit` and `low_cpu_mem_usage`\r\n\r\n* first try - keep the lm head in full precision\r\n\r\n* better check\r\n\r\n- check the attribute `base_model_prefix` instead of computing the number of parameters\r\n\r\n* added more tests\r\n\r\n* Update src/transformers/utils/bitsandbytes.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Merge branch 'integration-8bit' of https://github.com/younesbelkada/transformers into integration-8bit\r\n\r\n* improve documentation\r\n\r\n- fix typos for installation\r\n- change title in the documentation\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Michael Benayoun ", "code": "def tearDown(self):\n r\n del self.model_fp16\n del self.model_8bit\n\n gc.collect()\n torch.cuda.empty_cache()\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 43, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 9, "token_counts": 27, "n_ast_nodes": 46, "n_identifiers": 9, "random_cut": "def tearDown(self):\n r\n del self.model_fp16\n del self.model_8bit\n\n gc.collect()\n torch.c", "d_id": 6012, "documentation": { "docstring": "\n TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to\n avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 49, "language": "en" } }, { "id": 246307, "commit_id": "df36945ff0e4a293a9dac0da07e2c94256835b32", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "test_pagination_from_sync_and_messages", "commit_message": "Support pagination tokens from /sync and /messages in the relations API. 
(#11952)", "code": "def test_pagination_from_sync_and_messages(self):\n \n channel = self._send_relation(RelationTypes.ANNOTATION, \"m.reaction\", \"A\")\n self.assertEquals(200, channel.code, channel.json_body)\n annotation_id = channel.json_body[\"event_id\"]\n # Send an event after the relation events.\n self.helper.send(self.room, body=\"Latest event\", tok=self.user_token)\n\n # Request /sync, limiting it such that only the latest event is returned\n # (and not the relation).\n filter = urllib.parse.quote_plus(\n '{\"room\": {\"timeline\": {\"limit\": 1}}}'.encode()\n )\n channel = self.make_request(\n \"GET\", f\"/sync?filter={filter}\", access_token=self.user_token\n )\n self.assertEquals(200, channel.code, channel.json_body)\n room_timeline = channel.json_body[\"rooms\"][\"join\"][self.room][\"timeline\"]\n sync_prev_batch = room_timeline[\"prev_batch\"]\n self.assertIsNotNone(sync_prev_batch)\n # Ensure the relation event is not in the batch returned from /sync.\n self.assertNotIn(\n annotation_id, [ev[\"event_id\"] for ev in room_timeline[\"events\"]]\n )\n\n # Request /messages, limiting it such that only the latest event is\n # returned (and not the relation).\n channel = self.make_request(\n \"GET\",\n f\"/rooms/{self.room}/messages?dir=b&limit=1\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n messages_end = channel.json_body[\"end\"]\n self.assertIsNotNone(messages_end)\n # Ensure the relation event is not in the chunk returned from /messages.\n self.assertNotIn(\n annotation_id, [ev[\"event_id\"] for ev in channel.json_body[\"chunk\"]]\n )\n\n # Request /relations with the pagination tokens received from both the\n # /sync and /messages responses above, in turn.\n #\n # This is a tiny bit silly since the client wouldn't know the parent ID\n # from the requests above; consider the parent ID to be known from a\n # previous /sync.\n for from_token in (sync_prev_batch, messages_end):\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n\n # The relation should be in the returned chunk.\n self.assertIn(\n annotation_id, [ev[\"event_id\"] for ev in channel.json_body[\"chunk\"]]\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 681, "n_words": 226, "vocab_size": 111, "complexity": 5, "nloc": 39, "token_counts": 289, "n_ast_nodes": 505, "n_identifiers": 32, "random_cut": "def test_pagination_from_sync_and_messages(self):\n \n channel = self._send_relation(RelationTypes.ANNOTATION, \"m.reaction\", \"A\")\n self.assertEquals(200, channel.code, channel.json_body)\n annotation_id = channel.json_body[\"event_id\"]\n # Send an event after the relation events.\n self.helper.send(self.room, body=\"Latest event\", tok=self.user_token)\n\n # Request /sync, limiting it such that only the latest event is returned\n # (and not the relation).\n filter = urllib.parse.quote_plus(\n '{\"room\": {\"timeline\": {\"limit\": 1}}}'.encode()\n )\n channel = self.make_request(\n \"GET\", f\"/sync?filter={filter}\", access_token=self.user_token\n )\n self.assertEquals(200, channel.code, channel.json_body)\n room_timeline = channel.json_body[\"rooms\"][\"join\"][self.room][\"timeline\"]\n sync_prev_batch = room_timeline[\"prev_batch\"]\n self.assertIsNotNone(sync_prev_batch)\n # 
Ensure the relation event is not in the batch returned from /sync.\n self.assertNotIn(\n annotation_id, [ev[\"event_id\"] for ev in room_timeline[\"events\"]]\n )\n\n # Request /messages, limiting it such that only the latest event is\n # returned (and not the relation).\n channel = self.make_request(\n \"GET\",\n f\"/rooms/{self.room}/messages?dir=b&limit=1\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n messages_end = channel.json_body[\"end\"]\n self.assertIsNotNone(messages_end)\n # Ensure the relation event is not in the chunk returned from /messages.\n self.assertNotIn(\n annotation_id, [ev[\"event_id\"] for ev in channel.json_body[\"chunk\"]]\n )\n\n # Request /relations with the pagination tokens received from both the\n # /sync and /messages responses above, in turn.\n #\n # This is a tiny bit silly ", "d_id": 71142, "documentation": { "docstring": "Pagination tokens from /sync and /messages can be used to paginate /relations.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 104910, "commit_id": "d1d4f1065fd4ab91b2c8682643dbd12f86d66fcd", "repo": "datasets", "path": "src/datasets/utils/streaming_download_manager.py", "file_name": "streaming_download_manager.py", "fun_name": "download", "commit_message": "Add API code examples for Builder classes (#4313)\n\n* 📝 add examples for builder classes\r\n\r\n* 📝 apply quentin review", "code": "def download(self, url_or_urls):\n \n url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)\n return url_or_urls\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 38, "n_identifiers": 6, "random_cut": "def download(self, url_or_urls):\n \n url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)\n ", "d_id": 22025, "documentation": { "docstring": "Download given url(s).\n\n Args:\n url_or_urls: url or `list`/`dict` of urls to download and extract. 
Each\n url is a `str`.\n\n Returns:\n downloaded_path(s): `str`, The downloaded paths matching the given input\n url_or_urls.\n\n Example:\n\n ```py\n >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')\n ```\n ", "n_words": 37, "vocab_size": 35, "n_whitespaces": 138, "language": "en" } }, { "id": 9673, "commit_id": "2a8b181d4ddfc542d0784b8ea7341f09500ff299", "repo": "insightface", "path": "reconstruction/ostec/utils/generate_heatmap.py", "file_name": "generate_heatmap.py", "fun_name": "draw_gaussian", "commit_message": "Improved landmark differentiability by heatmaps.", "code": "def draw_gaussian(image, point, sigma):\n \n # Check if the gaussian is inside\n point[0] = round(point[0], 2)\n point[1] = round(point[1], 2)\n\n ul = [math.floor(point[0] - 7.5 * sigma), math.floor(point[1] - 7.5 * sigma)]\n br = [math.floor(point[0] + 7.5 * sigma), math.floor(point[1] + 7.5 * sigma)]\n if (ul[0] > image.shape[1] or ul[1] >\n image.shape[0] or br[0] < 1 or br[1] < 1):\n return image\n size = 15 * sigma + 1\n g = _gaussian(size, sigma=0.1)\n g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) -\n int(max(1, ul[0])) + int(max(1, -ul[0]))]\n g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) -\n int(max(1, ul[1])) + int(max(1, -ul[1]))]\n img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))]\n img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))]\n assert (g_x[0] > 0 and g_y[1] > 0)\n image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] = \\\n image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] + g[g_y[0] - 1:g_y[1], g_x[0] - 1:g_x[1]]\n image[image > 1] = 1\n\n return image\n\n\n# Adapted from: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/api.py", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 243, "n_words": 148, "vocab_size": 86, "complexity": 6, "nloc": 21, "token_counts": 469, "n_ast_nodes": 667, "n_identifiers": 20, "random_cut": "def draw_gaussian(image, point, sigma):\n \n # Check if the gaussian is inside\n point[0] = round(point[0], 2)\n point[1] = round(point[1], 2)\n\n ul = [math.floor(point[0] - 7.5 * sigma), math.floor(point[1] - 7.5 * sigma)]\n br = [math.floor(point[0] + 7.5 * sigma), math.floor(point[1] + 7.5 * sigma)]\n if (ul[0] > image.shape[1] or ul[1] >\n image.shape[0] or br[0] < 1 or br[1] < 1):\n return image\n size = 15 * sigma + 1\n g = _ga", "d_id": 1652, "documentation": { "docstring": " Draw gaussian circle at a point in an image.\n\n Args:\n image (np.array): An image of shape (H, W)\n point (np.array): The center point of the guassian circle\n sigma (float): Standard deviation of the gaussian kernel\n\n Returns:\n np.array: The image with the drawn gaussian.\n ", "n_words": 43, "vocab_size": 31, "n_whitespaces": 81, "language": "en" } }, { "id": 221530, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/collections/__init__.py", "file_name": "__init__.py", "fun_name": "setdefault", "commit_message": "add python 3.10.4 for windows", "code": "def setdefault(self, key, default=None):\n \n if key in self:\n return self[key]\n self[key] = default\n return default\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 54, "n_words": 15, "vocab_size": 12, "complexity": 2, "nloc": 5, 
"token_counts": 30, "n_ast_nodes": 47, "n_identifiers": 4, "random_cut": "def setdefault(self, key, default=None):\n \n if key in self:\n return self[key]\n self[key] = default\n return default\n", "d_id": 56422, "documentation": { "docstring": "Insert key with a value of default if key is not in the dictionary.\n\n Return the value for key if key is in the dictionary, else default.\n ", "n_words": 27, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 169203, "commit_id": "bbf17ea692e437cec908eae6759ffff8092fb42e", "repo": "pandas", "path": "web/pandas_web.py", "file_name": "pandas_web.py", "fun_name": "current_year", "commit_message": "WEB: Add new footer to web (#48557)", "code": "def current_year(context):\n \n context[\"current_year\"] = datetime.datetime.now().year\n return context\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 40, "n_identifiers": 5, "random_cut": "def current_year(context):\n \n context[\"current_year\"] = datetime.", "d_id": 40398, "documentation": { "docstring": "\n Add the current year to the context, so it can be used for the copyright\n note, or other places where it is needed.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 45, "language": "en" } }, { "id": 3627, "commit_id": "91eff1dffdb04be968b6ee4ef8d8bbfeb2e882d0", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-s3/source_s3/source_files_abstract/stream.py", "file_name": "stream.py", "fun_name": "fileformatparser_map", "commit_message": "🐛 Source S3: Loading of files' metadata (#8252)", "code": "def fileformatparser_map(self) -> Mapping[str, type]:\n \n return {\n \"csv\": CsvParser,\n \"parquet\": ParquetParser,\n }\n\n # TODO: make these user configurable in spec.json\n ab_additional_col = \"_ab_additional_properties\"\n ab_last_mod_col = \"_ab_source_file_last_modified\"\n ab_file_name_col = \"_ab_source_file_url\"\n airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col]\n datetime_format_string = \"%Y-%m-%dT%H:%M:%S%z\"\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 98, "n_words": 37, "vocab_size": 33, "complexity": 1, "nloc": 6, "token_counts": 24, "n_ast_nodes": 82, "n_identifiers": 12, "random_cut": "def fileformatparser_map(self) -> Mapping[str, type]:\n \n return {\n \"csv\": CsvParser,\n \"parquet\": ParquetParser,\n }\n\n # TODO: make these user configurable in spec.json\n ab_additional_col = \"_ab_additional_properties\"\n ab_last_mod_col = \"_ab_source_file_last_modified\"\n ab_file_name_col = \"_ab_source_file_url\"\n airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col]\n ", "d_id": 505, "documentation": { "docstring": "Mapping where every key is equal 'filetype' and values are corresponding parser classes.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 15, "language": "en" } }, { "id": 156094, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/dataframe/core.py", "file_name": "core.py", "fun_name": "pivot_table", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def pivot_table(self, index=None, columns=None, values=None, aggfunc=\"mean\"):\n \n from dask.dataframe.reshape import 
pivot_table\n\n return pivot_table(\n self, index=index, columns=columns, values=values, aggfunc=aggfunc\n )\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 57, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 51, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def pivot_table(self, index=None, columns=None, values=None, aggfunc=\"mean\"):\n \n from dask.dataframe.reshape import pivot_table\n\n return pivot_table(\n self, index=index, columns=colum", "d_id": 36553, "documentation": { "docstring": "\n Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.\n\n Parameters\n ----------\n values : scalar\n column to aggregate\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n aggfunc : {'mean', 'sum', 'count'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n ", "n_words": 61, "vocab_size": 43, "n_whitespaces": 186, "language": "en" } }, { "id": 138092, "commit_id": "1510fb2cd631b2776092fb45ee4082e5e65f16f8", "repo": "ray", "path": "python/ray/tune/tests/test_actor_reuse.py", "file_name": "test_actor_reuse.py", "fun_name": "test_multi_trial_reuse_with_failing", "commit_message": "[air/tune] Internal resource management 2 - Ray Tune to use new Ray AIR resource manager (#30016)\n\nIncludes/depends on #30777\r\n\r\nTLDR: This PR refactors Ray Tune's resource management to use a central AIR resource management package instead of the tightly coupled PlacementGroupManager.\r\n\r\nRay Tune's resource management currently uses a tightly coupled placement group manager. This leads to a number of shortcomings:\r\n- The tight coupling on the manager side (e.g. PG manager keeps track of trials) prevents re-usability\r\n- The tight coupling on the trial executor side prevents using different resource management strategies (e.g. shared or budget-based)\r\n- It's hard to test independently. Tests for the resource management require a simulated tune setup.\r\n\r\nTo improve stability, extensibility, and maintainability, this PR moves the resource management logic into a central `ray.air.execution.resources` subpackage. The resource management has a simple API that works with `ResourceRequest`s and `AllocatedResources` to manage requested and assigned resources, respectively. 
The actual resource management can then be anything - per default it is a placement group based manager, but this PR also introduces a PoC budget-based manager that can be plugged in.\r\n\r\nThe PR does not substantially change existing tests, so we can be certain that the new resource model is a fully compatible replacement for the old placement group manager.\r\n\r\nSigned-off-by: Kai Fricke ", "code": "def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra):\n \n os.environ[\"TUNE_MAX_PENDING_TRIALS_PG\"] = \"2\"\n\n register_trainable(\"foo2\", MyResettableClass)\n\n [trial1, trial2, trial3, trial4] = tune.run(\n \"foo2\",\n config={\n \"fail\": tune.grid_search([False, True, False, False]),\n \"id\": -1,\n \"sleep\": 2,\n },\n reuse_actors=True,\n resources_per_trial={\"cpu\": 2},\n raise_on_failed_trial=False,\n ).trials\n\n assert trial1.last_result[\"num_resets\"] == 0\n assert trial3.last_result[\"num_resets\"] == 0\n assert trial4.last_result[\"num_resets\"] == 1\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 141, "n_words": 42, "vocab_size": 36, "complexity": 1, "nloc": 17, "token_counts": 113, "n_ast_nodes": 183, "n_identifiers": 19, "random_cut": "def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra):\n \n os.environ[\"TUNE_MAX_PENDING_TRIALS_PG\"] = \"2\"\n\n register_trainable(\"foo2\", MyResettableClass)\n\n [trial1, trial2, trial3, trial4] = tune.run(\n \"foo2\",\n config={\n \"fail\": tune.grid_search([False, True, False, False]),\n \"id\": -1,\n \"sleep\": 2,\n },\n reuse_actors=True,\n resources_per_trial={\"cpu\": 2},\n raise_on_failed_trial=False,\n ).trials\n\n assert trial1.last_result[\"num_resets\"] == 0\n assert trial3.last_result[\"num_resets", "d_id": 31314, "documentation": { "docstring": "Test that failing trial's actors are not reused.\n\n - 2 trials can run at the same time\n - Trial 1 succeeds, trial 2 fails\n - Trial 3 will be scheduled after trial 2 failed, so won't reuse actor\n - Trial 4 will be scheduled after trial 1 succeeded, so will reuse actor\n ", "n_words": 52, "vocab_size": 34, "n_whitespaces": 67, "language": "en" } }, { "id": 73067, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/forms/views.py", "file_name": "views.py", "fun_name": "dispatch", "commit_message": "Reformat with black", "code": "def dispatch(self, request, *args, **kwargs):\n \n page_id = kwargs.get(\"page_id\")\n\n if not get_forms_for_user(self.request.user).filter(id=page_id).exists():\n raise PermissionDenied\n\n self.page = get_object_or_404(Page, id=page_id).specific\n\n self.submissions = self.get_queryset()\n\n if self.request.method == \"POST\":\n self.handle_delete(self.submissions)\n return redirect(self.get_success_url(), page_id)\n\n return super().dispatch(request, *args, **kwargs)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 114, "n_words": 32, "vocab_size": 27, "complexity": 3, "nloc": 10, "token_counts": 112, "n_ast_nodes": 182, "n_identifiers": 24, "random_cut": "def dispatch(self, request, *args, **kwargs):\n \n page_id = kwargs.get(\"page_id\")\n\n if not get_forms_for_user(self.request.user).filter(id=page_id).exists():\n raise PermissionDenied\n\n self.page = get_object_or_404(Page, id=page_id).specific\n\n self.submissions = self.get_queryset()\n\n if self.request.method == 
\"POST\":\n self.handle_delete(self.submissions)\n return redirect(self.get_success_url(), page_id)\n\n return super().dispatch(request, *args, *", "d_id": 15938, "documentation": { "docstring": "Check permissions, set the page and submissions, handle delete", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 157358, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/models/diffusion/ddpm.py", "file_name": "ddpm.py", "fun_name": "_prior_bpd", "commit_message": "release more models", "code": "def _prior_bpd(self, x_start):\n \n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 72, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 6, "token_counts": 90, "n_ast_nodes": 127, "n_identifiers": 23, "random_cut": "def _prior_bpd(self, x_start):\n \n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_f", "d_id": 36901, "documentation": { "docstring": "\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n ", "n_words": 48, "vocab_size": 40, "n_whitespaces": 91, "language": "en" } }, { "id": 165326, "commit_id": "6caefb19f4d7c05451fafca182c6eb39fe9901ed", "repo": "pandas", "path": "pandas/tests/window/test_rolling.py", "file_name": "test_rolling.py", "fun_name": "test_rolling_non_monotonic", "commit_message": "ENH: Rolling window with step size (GH-15354) (#45765)", "code": "def test_rolling_non_monotonic(method, expected):\n \n # Based on an example found in computation.rst\n use_expanding = [True, False, True, False, True, True, True, True]\n df = DataFrame({\"values\": np.arange(len(use_expanding)) ** 2})\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 39, "n_words": 27, "vocab_size": 22, "complexity": 1, "nloc": 9, "token_counts": 100, "n_ast_nodes": 72, "n_identifiers": 9, "random_cut": "def test_rolling_non_monotonic(method, expected):\n \n # Based on an example found in computation.rst\n use_expanding = [True, False, True, False, True, True, True, True]\n df = DataFrame({\"values\": np.arange(len(use_expanding)) ** 2})\n", "d_id": 39660, "documentation": { "docstring": "\n Make sure the (rare) branch of non-monotonic indices is covered by a test.\n\n output from 1.1.3 is assumed to be the expected output. 
Output of sum/mean has\n manually been verified.\n\n GH 36933.\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 48, "language": "en" } }, { "id": 156746, "commit_id": "2820bae493a49cb1d0a6e376985c5473b8f04fa8", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "clip", "commit_message": "Don't include docs in ``Array`` methods, just refer to module docs (#9244)\n\nCo-authored-by: James Bourbeau ", "code": "def clip(self, min=None, max=None):\n \n from dask.array.ufunc import clip\n\n return clip(self, min, max)\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 33, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 46, "n_identifiers": 7, "random_cut": "def clip(self, min=None, max=None):\n \n from dask.array.ufunc import clip\n\n return cl", "d_id": 36756, "documentation": { "docstring": "Return an array whose values are limited to ``[min, max]``.\n One of max or min must be given.\n\n Refer to :func:`dask.array.clip` for full documentation.\n\n See Also\n --------\n dask.array.clip : equivalent function\n ", "n_words": 31, "vocab_size": 30, "n_whitespaces": 73, "language": "en" } }, { "id": 262675, "commit_id": "3b8b105b0d6539ac12972de94e0b2a5077fa1ce2", "repo": "TTS", "path": "TTS/tts/layers/overflow/common_layers.py", "file_name": "common_layers.py", "fun_name": "_floor_std", "commit_message": "Adding OverFlow (#2183)\n\n* Adding encoder\r\n\r\n* currently modifying hmm\r\n\r\n* Adding hmm\r\n\r\n* Adding overflow\r\n\r\n* Adding overflow setting up flat start\r\n\r\n* Removing runs\r\n\r\n* adding normalization parameters\r\n\r\n* Fixing models on same device\r\n\r\n* Training overflow and plotting evaluations\r\n\r\n* Adding inference\r\n\r\n* At the end of epoch the test sentences are coming on cpu instead of gpu\r\n\r\n* Adding figures from model during training to monitor\r\n\r\n* reverting tacotron2 training recipe\r\n\r\n* fixing inference on gpu for test sentences on config\r\n\r\n* moving helpers and texts within overflows source code\r\n\r\n* renaming to overflow\r\n\r\n* moving loss to the model file\r\n\r\n* Fixing the rename\r\n\r\n* Model training but not plotting the test config sentences's audios\r\n\r\n* Formatting logs\r\n\r\n* Changing model name to camelcase\r\n\r\n* Fixing test log\r\n\r\n* Fixing plotting bug\r\n\r\n* Adding some tests\r\n\r\n* Adding more tests to overflow\r\n\r\n* Adding all tests for overflow\r\n\r\n* making changes to camel case in config\r\n\r\n* Adding information about parameters and docstring\r\n\r\n* removing compute_mel_statistics moved statistic computation to the model instead\r\n\r\n* Added overflow in readme\r\n\r\n* Adding more test cases, now it doesn't saves transition_p like tensor and can be dumped as json", "code": "def _floor_std(self, std):\n r\n original_tensor = std.clone().detach()\n std = torch.clamp(std, min=self.std_floor)\n if torch.any(original_tensor != std):\n print(\n \"[*] Standard deviation was floored! 
The model is preventing overfitting, nothing serious to worry about\"\n )\n return std\n\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 105, "n_words": 34, "vocab_size": 31, "complexity": 2, "nloc": 16, "token_counts": 50, "n_ast_nodes": 83, "n_identifiers": 12, "random_cut": "def _floor_std(self, std):\n r\n origi", "d_id": 77311, "documentation": { "docstring": "\n It clamps the standard deviation to not to go below some level\n This removes the problem when the model tries to cheat for higher likelihoods by converting\n one of the gaussians to a point mass.\n\n Args:\n std (float Tensor): tensor containing the standard deviation to be\n ", "n_words": 46, "vocab_size": 36, "n_whitespaces": 93, "language": "en" } }, { "id": 63314, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "asXML", "commit_message": "upd; format", "code": "def asXML(self, doctag=None, namedItemsOnly=False, indent=\"\", formatted=True):\n \n nl = \"\\n\"\n out = []\n namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()\n for v in vlist)\n nextLevelIndent = indent + \" \"\n\n # collapse out indents if formatting is not desired\n if not formatted:\n indent = \"\"\n nextLevelIndent = \"\"\n nl = \"\"\n\n selfTag = None\n if doctag is not None:\n selfTag = doctag\n else:\n if self.__name:\n selfTag = self.__name\n\n if not selfTag:\n if namedItemsOnly:\n return \"\"\n else:\n selfTag = \"ITEM\"\n\n out += [nl, indent, \"<\", selfTag, \">\"]\n\n for i, res in enumerate(self.__toklist):\n if isinstance(res, ParseResults):\n if i in namedItems:\n out += [res.asXML(namedItems[i],\n namedItemsOnly and doctag is None,\n nextLevelIndent,\n formatted)]\n else:\n out += [res.asXML(None,\n namedItemsOnly and doctag is None,\n nextLevelIndent,\n formatted)]\n else:\n # individual token, see if there is a name for it\n resTag = None\n if i in namedItems:\n resTag = namedItems[i]\n if not resTag:\n if namedItemsOnly:\n continue\n else:\n resTag = \"ITEM\"\n xmlBodyText = _xml_escape(_ustr(res))\n out += [nl, nextLevelIndent, \"<\", resTag, \">\",\n xmlBodyText,\n \"\"]\n\n out += [nl, indent, \"\"]\n return \"\".join(out)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1003, "n_words": 175, "vocab_size": 83, "complexity": 16, "nloc": 49, "token_counts": 278, "n_ast_nodes": 454, "n_identifiers": 29, "random_cut": "def asXML(self, doctag=None, namedItemsOnly=False, indent=\"\", formatted=True):\n \n nl = \"\\n\"\n out = []\n namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()\n for v in vlist)\n nextLevelIndent = indent + \" \"\n\n # collapse out indents if formatting is not desired\n if not formatted:\n indent = \"\"\n nextLevelIndent = \"\"\n nl = \"\"\n\n selfTag = None\n if doctag is not None:\n selfTag = doctag\n else:\n if self.__name:\n selfTag = self.__name\n\n if not selfTag:\n if namedItemsOnly:\n return \"\"\n else:\n selfTag = \"ITEM\"\n\n out += [nl, indent, \"<\", selfTag, \">\"]\n\n for i, res in enumerate(self.__toklist):\n if isinstance(res, ParseResults):\n if i in namedItems:\n out += [res.asXML(namedItems[i],\n namedItemsOnly and doctag is None,\n nextLevelIndent,\n formatted)]\n else:\n out += [res.asXML(None,\n namedItemsOnly and 
doctag is None,\n nextLevelIndent,\n formatted)]\n else:\n # individual token, see if there is a name for it\n resTag = None\n if i in namedIt", "d_id": 13247, "documentation": { "docstring": "\n (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 34, "language": "en" } }, { "id": 96149, "commit_id": "09726d7fc95e53bb516e328fc1811fc9a0704cac", "repo": "sentry", "path": "src/sentry/models/group.py", "file_name": "group.py", "fun_name": "times_seen_with_pending", "commit_message": "fix(post_process): Fetch buffered `times_seen` values and add them to `Group.times_seen` (#31624)\n\nIn `post_process_group` we process issue alert rules and also ignored groups. Both of these can have\r\nconditions that read from the `times_seen` value on the `Group`.\r\n\r\nThe problem here is that updates to `times_seen` are buffered and only written every 45s or so. This\r\nmeans that most of the time when a `Group` goes through `post_process_group` it has an out of date\r\n`times_seen` value. For infrequently updated groups, this can just mean that the count is -1. But\r\nfor high volume groups this could mean that we're considerably below the count.\r\n\r\nTo improve this, we read the current value from buffers and store it as pending updates on the group.\r\nWe then use this pending value when checking rules and snoozes in post process. There's a potential \r\nrace condition here where we fetch the `Group`, and before we fetch the value from buffers it is \r\ncleared, and so we miss out on the update. This should be infrequent enough that it's not a problem, \r\nand either way we will be considerably more accurate most of the time.", "code": "def times_seen_with_pending(self) -> int:\n \n return self.times_seen + self.times_seen_pending\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 6, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 5, "random_cut": "def times_seen_with_pending(self) -> int:", "d_id": 19283, "documentation": { "docstring": "\n Returns `times_seen` with any additional pending updates from `buffers` added on. 
This value\n must be set first.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 319841, "commit_id": "d7f7d839f8a6b7d0378dda1e0744739748d71b9c", "repo": "paperless-ngx", "path": "src/documents/tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_api_create_storage_path", "commit_message": "Adds invalid storage path format test", "code": "def test_api_create_storage_path(self):\n \n response = self.client.post(\n self.ENDPOINT,\n json.dumps(\n {\n \"name\": \"A storage path\",\n \"path\": \"Somewhere/{asn}\",\n },\n ),\n content_type=\"application/json\",\n )\n self.assertEqual(response.status_code, 201)\n self.assertEqual(StoragePath.objects.count(), 2)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 169, "n_words": 22, "vocab_size": 22, "complexity": 1, "nloc": 13, "token_counts": 64, "n_ast_nodes": 108, "n_identifiers": 14, "random_cut": "def test_api_create_storage_path(self):\n \n response = self.client.post(\n self.ENDPOINT,\n json.dumps(\n {\n \"name\": \"A storage path\",\n \"path\": \"Somewhere/{asn}\",\n },\n ),\n content_", "d_id": 117005, "documentation": { "docstring": "\n GIVEN:\n - API request to create a storage paths\n WHEN:\n - API is called\n THEN:\n - Correct HTTP response\n - New storage path is created\n ", "n_words": 25, "vocab_size": 19, "n_whitespaces": 98, "language": "en" } }, { "id": 81780, "commit_id": "663ef2cc6413c0cdb26392bb046b37fe564fb546", "repo": "awx", "path": "awx/main/tests/functional/models/test_workflow.py", "file_name": "test_workflow.py", "fun_name": "test_set_all_ask_for_prompts_true_from_post", "commit_message": "adding prompt-to-launch field on Labels field in Workflow Templates; with necessary UI and testing changes\n\nCo-authored-by: Keith Grant ", "code": "def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin):\n \n r = post(\n url=reverse('api:workflow_job_template_list'),\n data=dict(\n name='workflow that tests ask_for prompts',\n organization=organization.id,\n inventory=inventory.id,\n job_tags='',\n skip_tags='',\n ask_inventory_on_launch=True,\n ask_labels_on_launch=True,\n ask_limit_on_launch=True,\n ask_scm_branch_on_launch=True,\n ask_skip_tags_on_launch=True,\n ask_tags_on_launch=True,\n ask_variables_on_launch=True,\n ),\n user=org_admin,\n expect=201,\n )\n wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])\n\n assert wfjt.ask_inventory_on_launch is True\n assert wfjt.ask_labels_on_launch is True\n assert wfjt.ask_limit_on_launch is True\n assert wfjt.ask_scm_branch_on_launch is True\n assert wfjt.ask_skip_tags_on_launch is True\n assert wfjt.ask_tags_on_launch is True\n assert wfjt.ask_variables_on_launch is True\n\n\n@pytest.mark.django_db", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "@pytest.mark.django_db", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 374, "n_words": 63, "vocab_size": 44, "complexity": 1, "nloc": 28, "token_counts": 151, "n_ast_nodes": 234, "n_identifiers": 31, "random_cut": "def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin):\n \n r = post(\n url=reverse('api:workflow_job_template_list'),\n data=dict(\n name='workflow that tests ask_for prompts',\n organization=organization.id,\n inventory=inventory.id,\n job_tags='',\n skip_tags='',\n ask_inventory_on_launch=True,\n ask_labels_on_launch=True,\n 
ask_limit_on_launch=True,\n ask_scm_branch_on_launch=True,\n ", "d_id": 17258, "documentation": { "docstring": "\n Tests behaviour and values of ask_for_* fields on WFJT via POST\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 128697, "commit_id": "036225dec2d1f0d895043ca5f0aeeff377aa7fc7", "repo": "ray", "path": "python/ray/_private/utils.py", "file_name": "utils.py", "fun_name": "get_used_memory", "commit_message": "[core] update cgroup v1 memory usage calculation to ignore inactive (cache) files (#29103)\n\nSigned-off-by: Clarence Ng clarence.wyng@gmail.com\r\n\r\nAdjust used memory calculation for cgroup v1, to make it inline with how working set memory is calculated, which is what the cgroup oom killer uses. Before this change we include the rss and cache, and not discount the inactive / purgeable cache content. When we write to disk or object store it generates a large amount of page cache. If we don't discount this cache content it will result in over-counting, and hence trigger the ray oom killer earlier than what it should be.", "code": "def get_used_memory():\n \n # Try to accurately figure out the memory usage if we are in a docker\n # container.\n docker_usage = None\n # For cgroups v1:\n memory_usage_filename = \"/sys/fs/cgroup/memory/memory.stat\"\n # For cgroups v2:\n memory_usage_filename_v2 = \"/sys/fs/cgroup/memory.current\"\n if os.path.exists(memory_usage_filename):\n docker_usage = get_cgroupv1_used_memory(memory_usage_filename)\n elif os.path.exists(memory_usage_filename_v2):\n with open(memory_usage_filename_v2, \"r\") as f:\n docker_usage = int(f.read())\n\n if docker_usage is not None:\n return docker_usage\n return psutil.virtual_memory().used\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 128, "n_words": 60, "vocab_size": 44, "complexity": 4, "nloc": 12, "token_counts": 76, "n_ast_nodes": 139, "n_identifiers": 15, "random_cut": "def get_used_memory():\n \n # Try to accurately figure out the memory usage if we are in a docker\n # container.\n docker_usage = None\n # For cgroups v1:\n memory_usage_filename = \"/sys/fs/cgroup/memory/memory.stat\"\n # For cgroups v2:\n memory_usage_filename", "d_id": 28779, "documentation": { "docstring": "Return the currently used system memory in bytes\n\n Returns:\n The total amount of used memory\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 271128, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/data_adapter.py", "file_name": "data_adapter.py", "fun_name": "pack_x_y_sample_weight", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def pack_x_y_sample_weight(x, y=None, sample_weight=None):\n \n if y is None:\n # For single x-input, we do no tuple wrapping since in this case\n # there is no ambiguity. 
This also makes NumPy and Dataset\n # consistent in that the user does not have to wrap their Dataset\n # data in an unnecessary tuple\n if not tf.nest.is_nested(x):\n return x\n else:\n return (x,)\n elif sample_weight is None:\n return (x, y)\n else:\n return (x, y, sample_weight)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 161, "n_words": 71, "vocab_size": 53, "complexity": 4, "nloc": 10, "token_counts": 60, "n_ast_nodes": 97, "n_identifiers": 7, "random_cut": "def pack_x_y_sample_weight(x, y=None, sample_weight=None):\n \n if y is None:\n # For single x-input, we do no tuple wrapping since in this case\n # there is no ambiguity. This also makes NumPy and Dataset\n # consistent in that the user does not have to wrap their Dataset\n # data in an unnecessary tuple\n if not tf.nest.is_nested(x):\n return x\n else:\n return (x,)\n elif sample_weight is None:\n return (x, y)\n ", "d_id": 80703, "documentation": { "docstring": "Packs user-provided data into a tuple.\n\n This is a convenience utility for packing data into the tuple formats\n that `Model.fit` uses.\n\n Standalone usage:\n\n >>> x = tf.ones((10, 1))\n >>> data = tf.keras.utils.pack_x_y_sample_weight(x)\n >>> isinstance(data, tf.Tensor)\n True\n >>> y = tf.ones((10, 1))\n >>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)\n >>> isinstance(data, tuple)\n True\n >>> x, y = data\n\n Args:\n x: Features to pass to `Model`.\n y: Ground-truth targets to pass to `Model`.\n sample_weight: Sample weight for each element.\n\n Returns:\n Tuple in the format used in `Model.fit`.\n ", "n_words": 83, "vocab_size": 54, "n_whitespaces": 148, "language": "en" } }, { "id": 260408, "commit_id": "9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/tests/test_glm.py", "file_name": "test_glm.py", "fun_name": "test_glm_regression_vstacked_X", "commit_message": "TST tight tests for GLMs (#23619)\n\nCo-authored-by: Olivier Grisel ", "code": "def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):\n \n model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset\n n_samples, n_features = X.shape\n params = dict(\n alpha=alpha,\n fit_intercept=fit_intercept,\n # solver=solver, # only lbfgs available\n tol=1e-12,\n max_iter=1000,\n )\n\n model = clone(model).set_params(**params)\n X = X[:, :-1] # remove intercept\n X = np.concatenate((X, X), axis=0)\n assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)\n y = np.r_[y, y]\n if fit_intercept:\n coef = coef_with_intercept\n intercept = coef[-1]\n coef = coef[:-1]\n else:\n coef = coef_without_intercept\n intercept = 0\n model.fit(X, y)\n\n rtol = 3e-5\n assert model.intercept_ == pytest.approx(intercept, rel=rtol)\n assert_allclose(model.coef_, coef, rtol=rtol)\n\n\n@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [True, False])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [True, False])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 209, "n_words": 91, "vocab_size": 71, "complexity": 2, "nloc": 25, "token_counts": 188, "n_ast_nodes": 320, "n_identifiers": 40, "random_cut": "def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):\n \n model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = 
glm_dataset\n n_samples, n_features = X.shape\n params = dict(\n alpha=alpha,\n fit", "d_id": 76232, "documentation": { "docstring": "Test that GLM converges for all solvers to correct solution on vstacked data.\n\n We work with a simple constructed data set with known solution.\n Fit on [X] with alpha is the same as fit on [X], [y]\n [X], [y] with 1 * alpha.\n It is the same alpha as the average loss stays the same.\n For wide X, [X', X'] is a singular matrix.\n ", "n_words": 64, "vocab_size": 48, "n_whitespaces": 126, "language": "en" } }, { "id": 197115, "commit_id": "cba899d4137b0b65f6850120ee42cd4fcd4f9dbf", "repo": "sympy", "path": "sympy/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "deprecate_data", "commit_message": "Update the various tensor deprecations", "code": "def deprecate_data():\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.4\",\n active_deprecations_target=\"deprecated-tensorindextype-attrs\",\n stacklevel=4,\n )\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 10, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def deprecate_data():\n sympy_deprecation_warning(\n ,\n ", "d_id": 48348, "documentation": { "docstring": "\n The data attribute of TensorIndexType is deprecated. Use The\n replace_with_arrays() method instead.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 34, "language": "en" } }, { "id": 260743, "commit_id": "b85f799d0a7242aace8bffd5c8fd7cf3585340af", "repo": "scikit-learn", "path": "sklearn/preprocessing/_function_transformer.py", "file_name": "_function_transformer.py", "fun_name": "fit", "commit_message": "MAINT Add parameter validation for `FunctionTransformer`. 
(#24180)\n\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def fit(self, X, y=None):\n \n self._validate_params()\n X = self._check_input(X, reset=True)\n if self.check_inverse and not (self.func is None or self.inverse_func is None):\n self._check_inverse_transform(X)\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 23, "vocab_size": 22, "complexity": 4, "nloc": 6, "token_counts": 57, "n_ast_nodes": 91, "n_identifiers": 11, "random_cut": "def fit(self, X, y=None):\n \n sel", "d_id": 76453, "documentation": { "docstring": "Fit transformer by checking X.\n\n If ``validate`` is ``True``, ``X`` will be checked.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input array.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n FunctionTransformer class instance.\n ", "n_words": 43, "vocab_size": 40, "n_whitespaces": 139, "language": "en" } }, { "id": 198505, "commit_id": "9d58006fc0a23afcba38f641c9472917c436428a", "repo": "sympy", "path": "sympy/printing/dot.py", "file_name": "dot.py", "fun_name": "styleof", "commit_message": "Code cleanup", "code": "def styleof(expr, styles=default_styles):\n \n style = {}\n for typ, sty in styles:\n if isinstance(expr, typ):\n style.update(sty)\n return style\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 47, "n_words": 17, "vocab_size": 16, "complexity": 3, "nloc": 6, "token_counts": 37, "n_ast_nodes": 60, "n_identifiers": 9, "random_cut": "def styleof(expr, styles=default_styles):\n \n style = {}\n for typ, sty in styles:\n if isinstance(expr, typ):\n style", "d_id": 48967, "documentation": { "docstring": " Merge style dictionaries in order\n\n Examples\n ========\n\n >>> from sympy import Symbol, Basic, Expr, S\n >>> from sympy.printing.dot import styleof\n >>> styles = [(Basic, {'color': 'blue', 'shape': 'ellipse'}),\n ... 
(Expr, {'color': 'black'})]\n\n >>> styleof(Basic(S(1)), styles)\n {'color': 'blue', 'shape': 'ellipse'}\n\n >>> x = Symbol('x')\n >>> styleof(x + 1, styles) # this is an Expr\n {'color': 'black', 'shape': 'ellipse'}\n ", "n_words": 57, "vocab_size": 41, "n_whitespaces": 106, "language": "en" } }, { "id": 68324, "commit_id": "a896895a9e76a68ab055ce7871bb9d181d3fac15", "repo": "erpnext", "path": "erpnext/support/report/first_response_time_for_issues/first_response_time_for_issues.py", "file_name": "first_response_time_for_issues.py", "fun_name": "execute", "commit_message": "fix: bulk fix (~330) missing translations", "code": "def execute(filters=None):\n\tcolumns = [\n\t\t{\"fieldname\": \"creation_date\", \"label\": _(\"Date\"), \"fieldtype\": \"Date\", \"width\": 300},\n\t\t{\n\t\t\t\"fieldname\": \"first_response_time\",\n\t\t\t\"fieldtype\": \"Duration\",\n\t\t\t\"label\": _(\"First Response Time\"),\n\t\t\t\"width\": 300,\n\t\t},\n\t]\n\n\tdata = frappe.db.sql(\n\t\t,\n\t\t(filters.from_date, filters.to_date),\n\t)\n\n\treturn columns, data\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 21, "n_words": 36, "vocab_size": 31, "complexity": 1, "nloc": 25, "token_counts": 79, "n_ast_nodes": 142, "n_identifiers": 10, "random_cut": "def execute(filters=None):\n\tcolumns = [\n\t\t{\"fieldname\": \"creation_date", "d_id": 14760, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\tdate(creation) as creation_date,\n\t\t\tavg(first_response_time) as avg_response_time\n\t\tFROM tabIssue\n\t\tWHERE\n\t\t\tdate(creation) between %s and %s\n\t\t\tand first_response_time > 0\n\t\tGROUP BY creation_date\n\t\tORDER BY creation_date desc\n\t", "n_words": 26, "vocab_size": 20, "n_whitespaces": 17, "language": "en" } }, { "id": 19295, "commit_id": "b53fdf75f66ccb63b5cfaadaa81253d43f01805a", "repo": "PythonRobotics", "path": "PathPlanning/RRTStar/rrt_star.py", "file_name": "rrt_star.py", "fun_name": "choose_parent", "commit_message": "Add optional robot radius to RRT/RRTStar path planners (#655)\n\n* Add optional robot radius to RRT/RRTStar path planners.\r\n* update __init__ and check_collision to include radius\r\n* during animation, if a robot radius is given then it is drawn\r\n\r\n* Add test for robot radius\r\n\r\n* Correct import error\r\n\r\n* Correct missing robot_radius errors\r\n\r\n* Address \"expected 2 blank lines, found 1\" error\r\n\r\n* Address \"line too long\" errors\r\n\r\n* Add missing argument description.\r\n\r\n* Remove collision_check_with_xy and replace with check_collision\r\n\r\n* Fix \"missing whitespace after ','\" error\r\n\r\n* Update PathPlanning/ClosedLoopRRTStar/closed_loop_rrt_star_car.py\r\n\r\nCo-authored-by: Atsushi Sakai \r\n\r\nCo-authored-by: Atsushi Sakai ", "code": "def choose_parent(self, new_node, near_inds):\n \n if not near_inds:\n return None\n\n # search nearest cost in near_inds\n costs = []\n for i in near_inds:\n near_node = self.node_list[i]\n t_node = self.steer(near_node, new_node)\n if t_node and self.check_collision(\n t_node, self.obstacle_list, self.robot_radius):\n costs.append(self.calc_new_cost(near_node, new_node))\n else:\n costs.append(float(\"inf\")) # the cost of collision node\n min_cost = min(costs)\n\n if min_cost == float(\"inf\"):\n print(\"There is no good path.(min_cost is inf)\")\n return None\n\n min_ind = near_inds[costs.index(min_cost)]\n new_node = self.steer(self.node_list[min_ind], new_node)\n new_node.cost = min_cost\n\n return 
new_node\n", "url": "https://github.com/AtsushiSakai/PythonRobotics.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 278, "n_words": 74, "vocab_size": 53, "complexity": 6, "nloc": 20, "token_counts": 138, "n_ast_nodes": 224, "n_identifiers": 22, "random_cut": "def choose_parent(self, new_node, near_inds):\n \n if not near_inds:\n return None\n\n # search nearest cost in near_inds\n costs = []\n for i in near_inds:\n near_node = self.node_list[i]\n t_node = self.steer(near_node, new_node)\n if t_node and self.check_collision(\n t_node, self.obstacle_list, self.robot_radius):\n ", "d_id": 2931, "documentation": { "docstring": "\n Computes the cheapest point to new_node contained in the list\n near_inds and set such a node as the parent of new_node.\n Arguments:\n --------\n new_node, Node\n randomly generated node with a path from its neared point\n There are not coalitions between this node and th tree.\n near_inds: list\n Indices of indices of the nodes what are near to new_node\n\n Returns.\n ------\n Node, a copy of new_node\n ", "n_words": 65, "vocab_size": 48, "n_whitespaces": 233, "language": "en" } }, { "id": 133631, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/a3c/tests/test_a3c.py", "file_name": "test_a3c.py", "fun_name": "test_a3c_compilation", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_a3c_compilation(self):\n \n config = a3c.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 2\n config[\"num_envs_per_worker\"] = 2\n\n num_iterations = 1\n\n # Test against all frameworks.\n for _ in framework_iterator(config, with_eager_tracing=True):\n for env in [\"CartPole-v1\", \"Pendulum-v1\", \"PongDeterministic-v0\"]:\n print(\"env={}\".format(env))\n config[\"model\"][\"use_lstm\"] = env == \"CartPole-v1\"\n trainer = a3c.A3CTrainer(config=config, env=env)\n for i in range(num_iterations):\n results = trainer.train()\n check_train_results(results)\n print(results)\n check_compute_single_action(\n trainer, include_state=config[\"model\"][\"use_lstm\"]\n )\n trainer.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 295, "n_words": 54, "vocab_size": 42, "complexity": 4, "nloc": 18, "token_counts": 129, "n_ast_nodes": 224, "n_identifiers": 23, "random_cut": "def test_a3c_compilation(self):\n \n config = a3c.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 2\n config[\"num_envs_per_worker\"] = 2\n\n num_iterations = 1\n\n # Test against all frameworks.\n for _ in framework_iterator(config, with_eager_tracing=True):\n for env in [\"CartPole-v1\", \"Pendulum-v1\", \"PongDeterministic-v0\"]:\n print(\"env={}\".format(env))\n config[\"model\"][\"use_lstm\"] = env == \"CartPole-v1\"\n trainer = a3c.A3CTrainer(config=config, env=env)\n for i in range(num_iterations):\n ", "d_id": 30064, "documentation": { "docstring": "Test whether an A3CTrainer can be built with both frameworks.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 21475, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", "file_name": "tarfile.py", "fun_name": "extract", "commit_message": "Vendor in pip 22.1.2", "code": "def extract(self, member, path=\"\", set_attrs=True):\n \n self._check(\"r\")\n\n if 
isinstance(member, str):\n tarinfo = self.getmember(member)\n else:\n tarinfo = member\n\n # Prepare the link target for makelink().\n if tarinfo.islnk():\n tarinfo._link_target = os.path.join(path, tarinfo.linkname)\n\n try:\n self._extract_member(tarinfo, os.path.join(path, tarinfo.name),\n set_attrs=set_attrs)\n except EnvironmentError as e:\n if self.errorlevel > 0:\n raise\n else:\n if e.filename is None:\n self._dbg(1, \"tarfile: %s\" % e.strerror)\n else:\n self._dbg(1, \"tarfile: %s %r\" % (e.strerror, e.filename))\n except ExtractError as e:\n if self.errorlevel > 1:\n raise\n else:\n self._dbg(1, \"tarfile: %s\" % e)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 372, "n_words": 76, "vocab_size": 52, "complexity": 8, "nloc": 24, "token_counts": 170, "n_ast_nodes": 279, "n_identifiers": 24, "random_cut": "def extract(self, member, path=\"\", set_attrs=True):\n \n self._check(\"r\")\n\n if isinstance(member, str):\n tarinfo = self.getmember(member)\n else:\n tarinfo = member\n\n # Prepare the link target for makelink().\n if tarinfo.islnk():\n tarinfo._link_target = os.path.join(path, tar", "d_id": 3863, "documentation": { "docstring": "Extract a member from the archive to the current working directory,\n using its full name. Its file information is extracted as accurately\n as possible. `member' may be a filename or a TarInfo object. You can\n specify a different directory using `path'. File attributes (owner,\n mtime, mode) are set unless `set_attrs' is False.\n ", "n_words": 52, "vocab_size": 45, "n_whitespaces": 99, "language": "en" } }, { "id": 30465, "commit_id": "57ce5c09ee1ac101f79962e59bd44a0396dfb76c", "repo": "spotify-downloader", "path": "tests/types/test_artist.py", "file_name": "test_artist.py", "fun_name": "test_artist_from_string", "commit_message": "Search album by string enhancement (#1663)", "code": "def test_artist_from_string():\n \n\n artist = Artist.from_search_term(\"artist:gorillaz\")\n\n assert artist.name == \"Gorillaz\"\n assert artist.url == \"http://open.spotify.com/artist/3AA28KZvwAUcZuOKwyblJQ\"\n assert len(artist.urls) > 1\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 34, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "def test_artist_from_string():\n \n\n artist = Artist.from_search_term(\"artist:gorillaz\")\n\n assert artist.name == \"Gorillaz\"\n assert artist.url == \"http://open.spotify.com/artist/3AA28KZvwAUcZuOKwyblJQ\"\n", "d_id": 5604, "documentation": { "docstring": "\n Test if Artist class can be initialized from string.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 151776, "commit_id": "24766928baddfed919be1138a64d51cdbb0d3764", "repo": "freqtrade", "path": "freqtrade/freqai/RL/BaseEnvironment.py", "file_name": "BaseEnvironment.py", "fun_name": "reset", "commit_message": "reorganize/generalize tensorboard callback", "code": "def reset(self):\n \n # custom_info is used for episodic reports and tensorboard logging\n self.custom_info[\"Invalid\"] = 0\n self.custom_info[\"Hold\"] = 0\n self.custom_info[\"Unknown\"] = 0\n self.custom_info[\"pnl_factor\"] = 0\n self.custom_info[\"duration_factor\"] = 0\n self.custom_info[\"reward_exit\"] = 0\n self.custom_info[\"reward_hold\"] = 0\n for action in self.actions:\n 
self.custom_info[f\"{action.name}\"] = 0\n\n self._done = False\n\n if self.starting_point is True:\n if self.rl_config.get('randomize_starting_position', False):\n length_of_data = int(self._end_tick / 4)\n start_tick = random.randint(self.window_size + 1, length_of_data)\n self._start_tick = start_tick\n self._position_history = (self._start_tick * [None]) + [self._position]\n else:\n self._position_history = (self.window_size * [None]) + [self._position]\n\n self._current_tick = self._start_tick\n self._last_trade_tick = None\n self._position = Positions.Neutral\n\n self.total_reward = 0.\n self._total_profit = 1. # unit\n self.history = {}\n self.trade_history = []\n self.portfolio_log_returns = np.zeros(len(self.prices))\n\n self._profits = [(self._start_tick, 1)]\n self.close_trade_profit = []\n self._total_unrealized_profit = 1\n\n return self._get_observation()\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 382, "n_words": 117, "vocab_size": 73, "complexity": 4, "nloc": 31, "token_counts": 259, "n_ast_nodes": 427, "n_identifiers": 37, "random_cut": "def reset(self):\n \n # custom_info is used for episodic reports and tensorboard logging\n self.custom_info[\"Invalid\"] = 0\n self.custom_info[\"Hold\"] = 0\n self.custom_info[\"Unknown\"] = 0\n self.custom_info[\"pnl_factor\"] = 0\n self.custom_info[\"duration_factor\"] = 0\n self.custom_info[\"reward_exit\"] = 0\n self.custom_info[\"reward_hold\"] = 0\n for action in self.actions:\n self.custom_info[f\"{action.name}\"] = 0\n\n self._done = False\n\n if self.starting_point is True:\n if self.rl_config.get('randomize_starting_position', False):\n length_of_data = int(self", "d_id": 35134, "documentation": { "docstring": "\n Reset is called at the beginning of every episode\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 65288, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/non_billed_report.py", "file_name": "non_billed_report.py", "fun_name": "get_ordered_to_be_billed_data", "commit_message": "style: format code with black", "code": "def get_ordered_to_be_billed_data(args):\n\tdoctype, party = args.get(\"doctype\"), args.get(\"party\")\n\tchild_tab = doctype + \" Item\"\n\tprecision = (\n\t\tget_field_precision(\n\t\t\tfrappe.get_meta(child_tab).get_field(\"billed_amt\"), currency=get_default_currency()\n\t\t)\n\t\tor 2\n\t)\n\n\tproject_field = get_project_field(doctype, party)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tparent_tab=\"tab\" + doctype,\n\t\t\tchild_tab=\"tab\" + child_tab,\n\t\t\tprecision=precision,\n\t\t\tparty=party,\n\t\t\tdate_field=args.get(\"date\"),\n\t\t\tproject_field=project_field,\n\t\t\torder=args.get(\"order\"),\n\t\t\torder_by=args.get(\"order_by\"),\n\t\t)\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 22, "n_words": 44, "vocab_size": 35, "complexity": 2, "nloc": 46, "token_counts": 125, "n_ast_nodes": 208, "n_identifiers": 22, "random_cut": "def get_ordered_to_be_billed_data(args):\n\tdoctype, party = args.get(\"doctype\"), args.get(\"party\")\n\tchild_tab = doctype + \" Item\"\n\tprecision = (\n\t\tget_field_precision(\n\t\t\tfrappe.get_meta(child_tab).get_field(\"billed_amt\"), currency=get_default_currency()\n\t\t)\n\t\tor 2\n\t)\n\n\tproject_field = get_project_field(doctype, 
party)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tparent_tab=\"tab\" + doctype,\n\t\t\tchild_tab=\"tab\" + child_tab,\n\t\t\tprecision=precision,\n\t\t\tparty=party,\n\t\t\tdate_field=args.get(\"date\"),\n\t\t\tproject_field=project_field,\n\t\t\torder=args.get(\"order\"),\n\t\t\torder_by=args.get(\"order_by\"),\n\t\t)\n\t)\n\n", "d_id": 13841, "documentation": { "docstring": "\n\t\tSelect\n\t\t\t`{parent_tab}`.name, `{parent_tab}`.{date_field},\n\t\t\t`{parent_tab}`.{party}, `{parent_tab}`.{party}_name,\n\t\t\t`{child_tab}`.item_code,\n\t\t\t`{child_tab}`.base_amount,\n\t\t\t(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)),\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0)),\n\t\t\t(`{child_tab}`.base_amount -\n\t\t\t(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)) -\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))),\n\t\t\t`{child_tab}`.item_name, `{child_tab}`.description,\n\t\t\t{project_field}, `{parent_tab}`.company\n\t\tfrom\n\t\t\t`{parent_tab}`, `{child_tab}`\n\t\twhere\n\t\t\t`{parent_tab}`.name = `{child_tab}`.parent and `{parent_tab}`.docstatus = 1\n\t\t\tand `{parent_tab}`.status not in ('Closed', 'Completed')\n\t\t\tand `{child_tab}`.amount > 0\n\t\t\tand (`{child_tab}`.base_amount -\n\t\t\tround(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1), {precision}) -\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))) > 0\n\t\torder by\n\t\t\t`{parent_tab}`.{order} {order_by}\n\t\t", "n_words": 70, "vocab_size": 48, "n_whitespaces": 47, "language": "en" } }, { "id": 142825, "commit_id": "0959f44b6fc217a4f2766ed46a721eb79b067b2c", "repo": "ray", "path": "python/ray/tune/execution/ray_trial_executor.py", "file_name": "ray_trial_executor.py", "fun_name": "get_staged_trial", "commit_message": "[tune/structure] Introduce execution package (#26015)\n\nExecution-specific packages are moved to tune.execution.\r\n\r\nCo-authored-by: Xiaowei Jiang ", "code": "def get_staged_trial(self):\n \n # TODO(xwjiang): This method should consider `self._cached_actor_pg`.\n for trial in self._staged_trials:\n if self._pg_manager.has_ready(trial):\n return trial\n\n return None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 19, "vocab_size": 17, "complexity": 3, "nloc": 5, "token_counts": 27, "n_ast_nodes": 46, "n_identifiers": 6, "random_cut": "def get_staged_trial(self):\n \n # TODO(xwjiang): This method should consider `self._cached_actor_pg`.\n for trial in self._staged_trials:\n if self._pg_m", "d_id": 32811, "documentation": { "docstring": "Get a trial whose placement group was successfully staged.\n\n Can also return None if no trial is available.\n\n Returns:\n Trial object or None.\n\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 55, "language": "en" } }, { "id": 154453, "commit_id": "a6f47c8e1c27d85fc09926bb35c2f1a65a6d3e79", "repo": "modin", "path": "modin/core/dataframe/algebra/default2pandas/resample.py", "file_name": "resample.py", "fun_name": "register", "commit_message": "REFACTOR-#4942: remove call method in favor of register due to duplication (#4943)\n\nSigned-off-by: Myachev ", "code": "def register(cls, func, squeeze_self=False, **kwargs):\n \n return super().register(\n Resampler.build_resample(func, squeeze_self),\n fn_name=func.__name__,\n **kwargs\n )\n", "url": "https://github.com/modin-project/modin.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 66, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 40, "n_ast_nodes": 61, "n_identifiers": 10, "random_cut": "def register(cls, func, squeeze_self=False, **kwargs):\n \n return super().regi", "d_id": 35986, "documentation": { "docstring": "\n Build function that do fallback to pandas and aggregate resampled data.\n\n Parameters\n ----------\n func : callable\n Aggregation function to execute under resampled frame.\n squeeze_self : bool, default: False\n Whether or not to squeeze frame before resampling.\n **kwargs : kwargs\n Additional arguments that will be passed to function builder.\n\n Returns\n -------\n callable\n Function that takes query compiler and does fallback to pandas to resample\n time-series data and apply aggregation on it.\n ", "n_words": 70, "vocab_size": 53, "n_whitespaces": 196, "language": "en" } }, { "id": 205775, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/query.py", "file_name": "query.py", "fun_name": "aggregate", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def aggregate(self, *args, **kwargs):\n \n if self.query.distinct_fields:\n raise NotImplementedError(\"aggregate() + distinct(fields) not implemented.\")\n self._validate_values_are_expressions(\n (*args, *kwargs.values()), method_name=\"aggregate\"\n )\n for arg in args:\n # The default_alias property raises TypeError if default_alias\n # can't be set automatically or AttributeError if it isn't an\n # attribute.\n try:\n arg.default_alias\n except (AttributeError, TypeError):\n raise TypeError(\"Complex aggregates require an alias\")\n kwargs[arg.default_alias] = arg\n\n query = self.query.chain()\n for (alias, aggregate_expr) in kwargs.items():\n query.add_annotation(aggregate_expr, alias, is_summary=True)\n annotation = query.annotations[alias]\n if not annotation.contains_aggregate:\n raise TypeError(\"%s is not an aggregate expression\" % alias)\n for expr in annotation.get_source_expressions():\n if (\n expr.contains_aggregate\n and isinstance(expr, Ref)\n and expr.refs in kwargs\n ):\n name = expr.refs\n raise exceptions.FieldError(\n \"Cannot compute %s('%s'): '%s' is an aggregate\"\n % (annotation.name, name, name)\n )\n return query.get_aggregation(self.db, kwargs)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 540, "n_words": 117, "vocab_size": 88, "complexity": 10, "nloc": 30, "token_counts": 191, "n_ast_nodes": 306, "n_identifiers": 33, "random_cut": "def aggregate(self, *args, **kwargs):\n \n if self.query.distinct_fields:\n raise NotImplementedError(\"aggregate() + distinct(fields) not implemented.\")\n self._validate_values_are_expressions(\n (*args, *kwar", "d_id": 51208, "documentation": { "docstring": "\n Return a dictionary containing the calculations (aggregation)\n over the current queryset.\n\n If args is present the expression is passed as a kwarg using\n the Aggregate object's default alias.\n ", "n_words": 28, "vocab_size": 23, "n_whitespaces": 64, "language": "en" } }, { "id": 209525, "commit_id": "08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf", "repo": "scapy", "path": "scapy/contrib/http2.py", "file_name": "http2.py", "fun_name": "_detect_bytelen_from_str", "commit_message": "E275 - Missing whitespace after keyword (#3711)\n\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma 
\r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky <3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke \r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: plorinquer \r\nCo-authored-by: pvinci \r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez \r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: speakinghedge \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>\r\n\r\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma \r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky <3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke \r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: 
Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: pvinci \r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez \r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>", "code": "def _detect_bytelen_from_str(s):\n # type: (str) -> int\n \n assert len(s) >= 2\n tmp_len = len(s)\n\n i = 1\n while orb(s[i]) & 0x80 > 0:\n i += 1\n assert i < tmp_len, 'EINVAL: s: out-of-bound read: unfinished AbstractUVarIntField detected' # noqa: E501\n ret = i + 1\n\n assert ret >= 0\n return ret\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 137, "n_words": 51, "vocab_size": 37, "complexity": 2, "nloc": 10, "token_counts": 55, "n_ast_nodes": 93, "n_identifiers": 7, "random_cut": "def _detect_bytelen_from_str(s):\n # type: (str) -> int\n \n assert len(s) >= 2\n tmp_len = len(s)\n\n i = 1\n while orb(s[i]) & 0x80 > 0:\n i += 1\n assert i < tmp_len, 'EINVAL: s: out-of-bound read: unfinished A", "d_id": 52717, "documentation": { "docstring": " _detect_bytelen_from_str returns the length of the machine\n representation of an AbstractUVarIntField starting at the beginning\n of s and which is assumed to expand over multiple bytes\n (value > _max_prefix_value).\n\n :param str s: the string to parse. It is assumed that it is a multibyte int. 
# noqa: E501\n :return: The bytelength of the AbstractUVarIntField.\n :raises: AssertionError\n ", "n_words": 56, "vocab_size": 45, "n_whitespaces": 113, "language": "en" } }, { "id": 67696, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/purchase_receipt/test_purchase_receipt.py", "file_name": "test_purchase_receipt.py", "fun_name": "get_gl_entries", "commit_message": "style: format code with black", "code": "def get_gl_entries(voucher_type, voucher_no):\n\treturn frappe.db.sql(\n\t\t,\n\t\t(voucher_type, voucher_no),\n\t\tas_dict=1,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 4, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 8, "token_counts": 27, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def get_gl_entries(voucher_type, voucher_no):\n\treturn frappe.db.sql(\n\t\t,\n\t\t(voucher_type, voucher_no)", "d_id": 14597, "documentation": { "docstring": "select account, debit, credit, cost_center, is_cancelled\n\t\tfrom `tabGL Entry` where voucher_type=%s and voucher_no=%s\n\t\torder by account desc", "n_words": 17, "vocab_size": 17, "n_whitespaces": 14, "language": "en" } }, { "id": 213618, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy/core/random.py", "file_name": "random.py", "fun_name": "random_normal", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def random_normal(mean=0.0, std=1.0, shape=None, dev=None, f=None):\n \n return _cur_framework(f=f).random_normal(mean, std, shape, dev)\n\n", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 17, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 46, "n_ast_nodes": 61, "n_identifiers": 7, "random_cut": "def random_normal(mean=0.0, std=1.0, shape=None, dev=None, f=None):\n \n return _cur_framework(f=f).random_normal(mean,", "d_id": 53681, "documentation": { "docstring": "\n Draws samples from a normal distribution.\n\n :param mean: The mean of the normal distribution to sample from. Default is 0.\n :type mean: float\n :param std: The standard deviation of the normal distribution to sample from. Default is 1.\n :type std: float\n :param shape: Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn.\n If size is None (default), a single value is returned.\n :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.\n :type dev: str\n :param f: Machine learning framework. Inferred from inputs if None.\n :type f: ml_framework, optional\n :return: Drawn samples from the parameterized uniform distribution.\n ", "n_words": 111, "vocab_size": 74, "n_whitespaces": 167, "language": "en" } }, { "id": 108020, "commit_id": "13147992b317c29c6e832ca7f6d05bf48aeb0718", "repo": "matplotlib", "path": "lib/matplotlib/texmanager.py", "file_name": "texmanager.py", "fun_name": "get_font_preamble", "commit_message": "Move towards making texmanager stateless.\n\nPreviously, TexManager needed to call get_font_config at a specific\nplace in the middle of processing to update some internal attributes\nbefore proceeding with TeX source generation. Instead, move towards\nmaking TexManager stateless (except for caching), i.e. the user facing\nAPI should be thought of as a bunch of independently callable functions\n`make_tex()`, `make_dvi()`, etc. 
(they will probably stay as methods on\na \"empty\" TexManager object for a long time for backcompat, in fact).", "code": "def get_font_preamble(cls):\n \n font_preamble, command = cls._get_font_preamble_and_command()\n return font_preamble\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def get_font_preamble(cls):\n \n font_preamble, command = cls.", "d_id": 23019, "documentation": { "docstring": "\n Return a string containing font configuration for the tex preamble.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 136154, "commit_id": "e715a8b7616f9f24839531fcefc1420f79ab13ec", "repo": "ray", "path": "rllib/utils/exploration/tests/test_explorations.py", "file_name": "test_explorations.py", "fun_name": "do_test_explorations", "commit_message": "[RLlib] AlgorithmConfig: Replace more occurrences of old config dicts; Make all Algorithms use the non-dict lookup for config properties. (#30096)", "code": "def do_test_explorations(config, dummy_obs, prev_a=None, expected_mean_action=None):\n \n\n # Test all frameworks.\n for _ in framework_iterator(config):\n print(f\"Algorithm={config.algo_class}\")\n\n # Test for both the default Agent's exploration AND the `Random`\n # exploration class.\n for exploration in [None, \"Random\"]:\n local_config = config.copy()\n if exploration == \"Random\":\n local_config.exploration(exploration_config={\"type\": \"Random\"})\n print(\"exploration={}\".format(exploration or \"default\"))\n\n algo = local_config.build()\n\n # Make sure all actions drawn are the same, given same\n # observations.\n actions = []\n for _ in range(25):\n actions.append(\n algo.compute_single_action(\n observation=dummy_obs,\n explore=False,\n prev_action=prev_a,\n prev_reward=1.0 if prev_a is not None else None,\n )\n )\n check(actions[-1], actions[0])\n\n # Make sure actions drawn are different\n # (around some mean value), given constant observations.\n actions = []\n for _ in range(500):\n actions.append(\n algo.compute_single_action(\n observation=dummy_obs,\n explore=True,\n prev_action=prev_a,\n prev_reward=1.0 if prev_a is not None else None,\n )\n )\n check(\n np.mean(actions),\n expected_mean_action if expected_mean_action is not None else 0.5,\n atol=0.4,\n )\n # Check that the stddev is not 0.0 (values differ).\n check(np.std(actions), 0.0, false=True)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 755, "n_words": 147, "vocab_size": 85, "complexity": 10, "nloc": 36, "token_counts": 231, "n_ast_nodes": 357, "n_identifiers": 30, "random_cut": "def do_test_explorations(config, dummy_obs, prev_a=None, expected_mean_action=None):\n \n\n # Test all frameworks.\n for _ in framework_iterator(config):\n print(f\"Algorithm={config.algo_class}\")\n\n # Test for both the default Agent's exploration AND the `Random`\n # exploration class.\n for exploration in [None, \"Random\"]:\n local_config = config.copy()\n if exploration == \"Random\":\n local_config.exploration(exploration_config={\"type\": \"Random\"})\n print(\"exploration={}\".format(exploration or \"default\"))\n\n algo = local_config.build()\n\n # Make sure all actions drawn are the same, given same\n # observations.\n actions = []\n for _ in 
range(25):\n actions.append(\n algo.compute_single_action(\n observation=dummy_obs,\n explore=Fal", "d_id": 30836, "documentation": { "docstring": "Calls an Agent's `compute_actions` with different `explore` options.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 55138, "commit_id": "c0cb1fee460c1bded9e3eb741ad7979402844bf8", "repo": "prefect", "path": "src/prefect/cli/base.py", "file_name": "base.py", "fun_name": "exit_with_success", "commit_message": "Update `set` command; allow CLI `console` object to be patched", "code": "def exit_with_success(message, **kwargs):\n \n kwargs.setdefault(\"style\", \"green\")\n app.console.print(message, **kwargs)\n raise typer.Exit(0)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 61, "n_identifiers": 9, "random_cut": "def exit_with_success(message, **kwargs):\n \n kwargs.setdefault(\"style\", \"green\")\n app.console.prin", "d_id": 11227, "documentation": { "docstring": "\n Utility to print a stylized success message and exit with a zero code\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 20, "language": "en" } }, { "id": 154845, "commit_id": "446148dbf9b66debd0a0dbf9ce778253380d5921", "repo": "modin", "path": "modin/_version.py", "file_name": "_version.py", "fun_name": "get_keywords", "commit_message": "REFACTOR-#5012: Add mypy checks for singleton files in base modin directory (#5013)\n\nSigned-off-by: Jonathan Shi ", "code": "def get_keywords() -> Dict[str, str]:\n \n # these strings will be replaced by git during git-archive.\n # setup.py/versioneer.py will grep for the variable names, so they must\n # each be defined on a line of their own. _version.py will just call\n # get_keywords().\n git_refnames = \"$Format:%d$\"\n git_full = \"$Format:%H$\"\n git_date = \"$Format:%ci$\"\n keywords = {\"refnames\": git_refnames, \"full\": git_full, \"date\": git_date}\n return keywords\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 91, "n_words": 61, "vocab_size": 51, "complexity": 1, "nloc": 7, "token_counts": 38, "n_ast_nodes": 76, "n_identifiers": 7, "random_cut": "def get_keywords() -> Dict[str, str]:\n \n # these strings will be replaced by git during git-archive.\n # setup.py/versioneer.py will grep for the variable names, so they must\n # each be defined on a line of their own. 
_version.py will just call\n # get_keywords().\n git_refnames = \"$Format:%d$\"\n git_full = \"", "d_id": 36153, "documentation": { "docstring": "Get the keywords needed to look up the version information.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 155520, "commit_id": "fa8dfede71677a2301d4cd602cf4b27af41cbc4f", "repo": "dask", "path": "dask/array/slicing.py", "file_name": "slicing.py", "fun_name": "take", "commit_message": "DOC: normalize whitespace in doctests in slicing.py (#8512)", "code": "def take(outname, inname, chunks, index, itemsize, axis=0):\n \n from .core import PerformanceWarning\n\n plan = slicing_plan(chunks[axis], index)\n if len(plan) >= len(chunks[axis]) * 10:\n factor = math.ceil(len(plan) / len(chunks[axis]))\n\n warnings.warn(\n \"Slicing with an out-of-order index is generating %d \"\n \"times more chunks\" % factor,\n PerformanceWarning,\n stacklevel=6,\n )\n if not is_arraylike(index):\n index = np.asarray(index)\n\n # Check for chunks from the plan that would violate the user's\n # configured chunk size.\n nbytes = utils.parse_bytes(config.get(\"array.chunk-size\"))\n other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]\n other_numel = np.prod([sum(x) for x in other_chunks])\n\n if math.isnan(other_numel):\n warnsize = maxsize = math.inf\n else:\n maxsize = math.ceil(nbytes / (other_numel * itemsize))\n warnsize = maxsize * 5\n\n split = config.get(\"array.slicing.split-large-chunks\", None)\n\n # Warn only when the default is not specified.\n warned = split is not None\n\n for _, index_list in plan:\n if not warned and len(index_list) > warnsize:\n msg = (\n \"Slicing is producing a large chunk. To accept the large\\n\"\n \"chunk and silence this warning, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\\n\"\n \" ... array[indexer]\\n\\n\"\n \"To avoid creating the large chunks, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\\n\"\n \" ... 
array[indexer]\"\n )\n warnings.warn(msg, PerformanceWarning, stacklevel=6)\n warned = True\n\n where_index = []\n index_lists = []\n for where_idx, index_list in plan:\n index_length = len(index_list)\n if split and index_length > maxsize:\n index_sublist = np.array_split(\n index_list, math.ceil(index_length / maxsize)\n )\n index_lists.extend(index_sublist)\n where_index.extend([where_idx] * len(index_sublist))\n else:\n if not is_arraylike(index_list):\n index_list = np.array(index_list)\n index_lists.append(index_list)\n where_index.append(where_idx)\n\n dims = [range(len(bd)) for bd in chunks]\n\n indims = list(dims)\n indims[axis] = list(range(len(where_index)))\n keys = list(product([outname], *indims))\n\n outdims = list(dims)\n outdims[axis] = where_index\n slices = [[colon] * len(bd) for bd in chunks]\n slices[axis] = index_lists\n slices = list(product(*slices))\n inkeys = list(product([inname], *outdims))\n values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]\n\n chunks2 = list(chunks)\n chunks2[axis] = tuple(map(len, index_lists))\n dsk = dict(zip(keys, values))\n\n return tuple(chunks2), dsk\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 798, "n_words": 299, "vocab_size": 181, "complexity": 17, "nloc": 66, "token_counts": 509, "n_ast_nodes": 824, "n_identifiers": 71, "random_cut": "def take(outname, inname, chunks, index, itemsize, axis=0):\n \n from .core import PerformanceWarning\n\n plan = slicing_plan(chunks[axis], index)\n if len(plan) >= len(chunks[axis]) * 10:\n factor = math.ceil(len(plan) / len(chunks[axis]))\n\n warnings.warn(\n \"Slicing with an out-of-order index is generating %d \"\n \"times more chunks\" % factor,\n PerformanceWarning,\n stacklevel=6,\n )\n if not is_arraylike(index):\n index = np.asarray(index)\n\n # Check for chunks from the plan that would violate the user's\n # configured chunk size.\n nbytes = utils.parse_bytes(config.get(\"array.chunk-size\"))\n other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]\n other_numel = np.prod([sum(x) for x in other_chunks])\n\n if math.isnan(other_numel):\n warnsize = maxsize = math.inf\n else:\n maxsize = math.ceil(nbytes / (other_numel * itemsize))\n warnsize = maxsize * 5\n\n split = config.get(\"array.slicing.split-large-chunks\", None)\n\n # Warn only when the default is not specified.\n warned = split is not None\n\n for _, index_list in plan:\n if not warned and len(index_list) > warnsize:\n msg = (\n \"Slicing is producing a large chunk. To accept the large\\n\"\n \"chunk and silence this warning, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\\n\"\n \" ... array[indexer]\\n\\n\"\n \"To avoid creating the large chunks, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\\n\"\n \" ... 
array[indexer]\"\n )\n warnings.warn(msg, PerformanceWarning, stacklevel=6)\n warned = True\n\n where_index = []\n index_lists = []\n for where_idx, index_list in plan:\n index_length = len(index_list)\n if split and index_length > maxsize:\n index_sublist = np.array_split(\n index_list, math.ceil(index_length / maxsize)\n )\n index_lists.extend(index_sublist)\n where_index.extend([where_idx] * len(index_sublist))\n else:\n if not is_arraylike(index_list):\n index_list = np.array(index_list)\n index_lists.append(index_list)\n where_index.append(where_idx)\n\n dims = [range(len(bd)) for bd in chunks]\n\n indims = list(dims)\n indims[axis] = list(range(len(where_index)))\n keys = list(product([outname], *indims))\n\n outdims = list(dims)\n outdims[axis] = where_index\n slices = [[colon] * len(bd) for bd in chunks]\n slices[axis] = index_lists\n slices = list(product(*slices))\n inkeys = list(product([inname], *outdims))\n values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]\n\n chunks2 = list(chunks)\n chunks2[axis] = tuple(map(len, index_lists))\n dsk = dict(zip(keys, values))\n\n return tuple(chunks2), dsk\n\n", "d_id": 36416, "documentation": { "docstring": "Index array with an iterable of index\n\n Handles a single index by a single list\n\n Mimics ``np.take``\n\n >>> from pprint import pprint\n >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0)\n >>> chunks\n ((2, 1, 1),)\n >>> pprint(dsk) # doctest: +ELLIPSIS\n {('y', 0): (, ('x', 0), (array([5, 1]),)),\n ('y', 1): (, ('x', 2), (array([7]),)),\n ('y', 2): (, ('x', 0), (array([3]),))}\n\n When list is sorted we retain original block structure\n\n >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], 8, axis=0)\n >>> chunks\n ((3, 1),)\n >>> pprint(dsk) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n {('y', 0): (,\n ('x', 0),\n (array([1, 3, 5]),)),\n ('y', 1): (, ('x', 2), (array([7]),))}\n\n When any indexed blocks would otherwise grow larger than\n dask.config.array.chunk-size, we might split them,\n depending on the value of ``dask.config.slicing.split-large-chunks``.\n\n >>> import dask\n >>> with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n ... chunks, dsk = take('y', 'x', [(1, 1, 1), (1000, 1000), (1000, 1000)],\n ... 
[0] + [1] * 6 + [2], axis=0, itemsize=8)\n >>> chunks\n ((1, 3, 3, 1), (1000, 1000), (1000, 1000))\n ", "n_words": 191, "vocab_size": 108, "n_whitespaces": 339, "language": "en" } }, { "id": 202325, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/contenttypes_tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_multidb", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_multidb(self):\n \n ContentType.objects.clear_cache()\n with self.assertNumQueries(0, using=\"default\"), self.assertNumQueries(\n 1, using=\"other\"\n ):\n ContentType.objects.get_for_model(Author)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 61, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 6, "token_counts": 44, "n_ast_nodes": 78, "n_identifiers": 9, "random_cut": "def test_multidb(self):\n \n ContentType.objects.clear_cache()\n with self.assertNumQueries(0, using=\"default\"), self.assertNumQueries(\n 1, using=\"other\"\n ):\n ContentType.objects.get_for_model(Author)\n", "d_id": 50070, "documentation": { "docstring": "\n When using multiple databases, ContentType.objects.get_for_model() uses\n db_for_read().\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 29, "language": "en" } }, { "id": 282822, "commit_id": "1b914d45e8575827c05a432d56846f5c5f2559c4", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/econometrics/econometrics_model.py", "file_name": "econometrics_model.py", "fun_name": "get_engle_granger_two_step_cointegration_test", "commit_message": "Econometrics notebooks API (#1462)\n\n* Add initial implementations of the API wrappers\r\n\r\n* Fix typos in docstrings\r\n\r\n* Fix typos an markdown linting errors in docs\r\n\r\n* Ditch using insecure eval in favor of secure getattr\r\n\r\n* Add GST notebooks API documentation\r\n\r\n* Add notebook screenshot to the GST API docs", "code": "def get_engle_granger_two_step_cointegration_test(y, x):\n \n warnings.simplefilter(action=\"ignore\", category=FutureWarning)\n long_run_ols = sm.OLS(y, sm.add_constant(x))\n warnings.simplefilter(action=\"default\", category=FutureWarning)\n\n long_run_ols_fit = long_run_ols.fit()\n\n c, gamma = long_run_ols_fit.params\n z = long_run_ols_fit.resid\n\n short_run_ols = sm.OLS(y.diff().iloc[1:], (z.shift().iloc[1:]))\n short_run_ols_fit = short_run_ols.fit()\n\n alpha = short_run_ols_fit.params[0]\n\n # NOTE: The p-value returned by the adfuller function assumes we do not estimate z\n # first, but test stationarity of an unestimated series directly. This assumption\n # should have limited effect for high N, however. Critical values taking this into\n # account more accurately are provided in e.g. 
McKinnon (1990) and Engle & Yoo (1987).\n\n adfstat, pvalue, _, _, _ = adfuller(z, maxlag=1, autolag=None)\n\n return c, gamma, alpha, z, adfstat, pvalue\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 151, "n_words": 103, "vocab_size": 88, "complexity": 1, "nloc": 12, "token_counts": 147, "n_ast_nodes": 230, "n_identifiers": 31, "random_cut": "def get_engle_granger_two_step_cointegration_test(y, x):\n \n warnings.simplefilter(action=\"ignore\", category=FutureWarning)\n long_run_ols = sm.OLS(y, sm.add_constant(x))\n warnings.simplefilter(action=\"default\", category=FutureWarning)\n\n long_run_ols_fit = long_run_ols.fit()\n\n c, gamma = long_run_ols_fit.params\n z = long_run_ols_fit.resid\n\n short_run_ols = sm.OLS(y.diff().iloc[1:], (z.shift().iloc[1:]))\n short_run_ols_fit = short_run_ol", "d_id": 84312, "documentation": { "docstring": "Estimates long-run and short-run cointegration relationship for series y and x and apply\n the two-step Engle & Granger test for cointegration.\n\n Uses a 2-step process to first estimate coefficients for the long-run relationship\n y_t = c + gamma * x_t + z_t\n\n and then the short-term relationship,\n y_t - y_(t-1) = alpha * z_(t-1) + epsilon_t,\n\n with z the found residuals of the first equation.\n\n Then tests cointegration by Dickey-Fuller phi=1 vs phi < 1 in\n z_t = phi * z_(t-1) + eta_t\n\n If this implies phi < 1, the z series is stationary is concluded to be\n stationary, and thus the series y and x are concluded to be cointegrated.\n\n Parameters\n ----------\n y : pd.Series\n The first time series of the pair to analyse.\n\n x : pd.Series\n The second time series of the pair to analyse.\n\n Returns\n -------\n c : float\n The constant term in the long-run relationship y_t = c + gamma * x_t + z_t. This\n describes the static shift of y with respect to gamma * x.\n\n gamma : float\n The gamma term in the long-run relationship y_t = c + gamma * x_t + z_t. This\n describes the ratio between the const-shifted y and x.\n\n alpha : float\n The alpha term in the short-run relationship y_t - y_(t-1) = alpha * z_(t-1) + epsilon. This\n gives an indication of the strength of the error correction toward the long-run mean.\n\n z : pd.Series\n Series of residuals z_t from the long-run relationship y_t = c + gamma * x_t + z_t, representing\n the value of the error correction term.\n\n dfstat : float\n The Dickey Fuller test-statistic for phi = 1 vs phi < 1 in the second equation. A more\n negative value implies the existence of stronger cointegration.\n\n pvalue : float\n The p-value corresponding to the Dickey Fuller test-statistic. 
A lower value implies\n stronger rejection of no-cointegration, thus stronger evidence of cointegration.\n\n ", "n_words": 315, "vocab_size": 127, "n_whitespaces": 494, "language": "en" } }, { "id": 206982, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_changelist/tests.py", "file_name": "tests.py", "fun_name": "test_deterministic_order_for_unordered_model", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_deterministic_order_for_unordered_model(self):\n \n superuser = self._create_superuser(\"superuser\")\n\n for counter in range(1, 51):\n UnorderedObject.objects.create(id=counter, bool=True)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 18, "token_counts": 118, "n_ast_nodes": 63, "n_identifiers": 11, "random_cut": "def test_deterministic_order_for_unordered_model(self):\n \n superuser = self._create_superuser(\"superuser\")\n\n for counter in range(1, 51):\n ", "d_id": 51823, "documentation": { "docstring": "\n The primary key is used in the ordering of the changelist's results to\n guarantee a deterministic order, even when the model doesn't have any\n default ordering defined (#17198).\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 57, "language": "en" } }, { "id": 59709, "commit_id": "902dfa4bd3b6e330e4374eb1e04de064148a2f32", "repo": "prefect", "path": "src/prefect/orion/api/admin.py", "file_name": "admin.py", "fun_name": "read_settings", "commit_message": "Add secret flag to settings and obfuscate by default when displayed (#7465)", "code": "async def read_settings() -> prefect.settings.Settings:\n \n return prefect.settings.get_current_settings().with_obfuscated_secrets()\n\n\n@router.get(\"/version\")", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@router.get(\"/version\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 13, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 7, "token_counts": 23, "n_ast_nodes": 56, "n_identifiers": 8, "random_cut": "async def read_settings() -> prefect.settings.Settings:\n \n r", "d_id": 11931, "documentation": { "docstring": "\n Get the current Orion settings.\n\n Secret setting values will be obfuscated.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 21, "language": "en" } }, { "id": 156012, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "map_overlap", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):\n \n from dask.array.overlap import map_overlap\n\n return map_overlap(\n func, self, depth=depth, boundary=boundary, trim=trim, **kwargs\n )\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 59, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 5, "token_counts": 51, "n_ast_nodes": 71, "n_identifiers": 10, "random_cut": "def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):\n \n from dask.array.overlap import map_overlap\n\n return map_overlap(\n ", "d_id": 36505, "documentation": { "docstring": "Map a function over blocks of the array with some overlap\n\n We share 
neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Note that this function will attempt to automatically determine the output\n array type before computing it, please refer to the ``meta`` keyword argument\n in :func:`map_blocks ` if you expect that the function will not succeed when\n operating on 0-d arrays.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block\n depth: int, tuple, or dict\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis\n boundary: str, tuple, dict\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n **kwargs:\n Other keyword arguments valid in :func:`map_blocks `.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = da.from_array(x, chunks=5)\n >>> def derivative(x):\n ... return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> import dask.array as da\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x.size, depth=1, boundary='reflect')\n >>> y.compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=np.array(()))\n >>> y\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>\n >>> y.compute()\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n\n >>> import cupy # doctest: +SKIP\n >>> x = cupy.arange(16).reshape((4, 4)) # doctest: +SKIP\n >>> d = da.from_array(x, chunks=(2, 2)) # doctest: +SKIP\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=cupy.array(())) # doctest: +SKIP\n >>> y # doctest: +SKIP\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray>\n >>> y.compute() # doctest: +SKIP\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n ", "n_words": 435, "vocab_size": 223, "n_whitespaces": 1096, "language": "en" } }, { "id": 204011, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/gdal/raster/source.py", "file_name": "source.py", "fun_name": "_flush", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _flush(self):\n \n # Raise an Exception if the value is being changed in read mode.\n if not self._write:\n raise GDALException(\n \"Raster needs to be opened in write mode to change values.\"\n )\n capi.flush_ds(self._ptr)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 98, "n_words": 33, 
"vocab_size": 30, "complexity": 2, "nloc": 6, "token_counts": 25, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def _flush(self):\n \n # Raise an Exception if the value is being changed in read mode.\n if not self._write:\n raise GDALException(\n \"Raster needs to be opened in write mode to change values.\"\n )\n capi", "d_id": 50614, "documentation": { "docstring": "\n Flush all data from memory into the source file if it exists.\n The data that needs flushing are geotransforms, coordinate systems,\n nodata_values and pixel values. This function will be called\n automatically wherever it is needed.\n ", "n_words": 35, "vocab_size": 33, "n_whitespaces": 71, "language": "en" } }, { "id": 219783, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "_round", "commit_message": "add python 3.10.4 for windows", "code": "def _round(self, places, rounding):\n \n if places <= 0:\n raise ValueError(\"argument should be at least 1 in _round\")\n if self._is_special or not self:\n return Decimal(self)\n ans = self._rescale(self.adjusted()+1-places, rounding)\n # it can happen that the rescale alters the adjusted exponent;\n # for example when rounding 99.97 to 3 significant figures.\n # When this happens we end up with an extra 0 at the end of\n # the number; a second rescale fixes this.\n if ans.adjusted() != self.adjusted():\n ans = ans._rescale(ans.adjusted()+1-places, rounding)\n return ans\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 185, "n_words": 82, "vocab_size": 66, "complexity": 5, "nloc": 9, "token_counts": 84, "n_ast_nodes": 141, "n_identifiers": 10, "random_cut": "def _round(self, places, rounding):\n \n if places <= 0:\n raise ValueError(\"argument should be at least 1 in _round\")\n if self._is_special or not self:\n return Decimal(self)\n ans = self._rescale(self.adjusted()+1-places, rounding)\n # it ca", "d_id": 55798, "documentation": { "docstring": "Round a nonzero, nonspecial Decimal to a fixed number of\n significant figures, using the given rounding mode.\n\n Infinities, NaNs and zeros are returned unaltered.\n\n This operation is quiet: it raises no flags, and uses no\n information from the context.\n\n ", "n_words": 39, "vocab_size": 35, "n_whitespaces": 74, "language": "en" } }, { "id": 22104, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/models.py", "file_name": "models.py", "fun_name": "is_redirect", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def is_redirect(self):\n \n return \"location\" in self.headers and self.status_code in REDIRECT_STATI\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 2, "token_counts": 18, "n_ast_nodes": 33, "n_identifiers": 5, "random_cut": "def is_redirect(self):\n \n r", "d_id": 4181, "documentation": { "docstring": "True if this Response is a well-formed HTTP redirect that could have\n been processed automatically (by :meth:`Session.resolve_redirects`).\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 31, "language": "en" } }, { "id": 162760, "commit_id": "9120cdffe618c6c2ff16fe6a311b6a1367efdbc8", "repo": "AutoEq", "path": "research/neo_peq/legacy_frequency_response.py", "file_name": "legacy_frequency_response.py", "fun_name": "write_eqapo_graphic_eq", "commit_message": "Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. Bug fixes.", "code": "def write_eqapo_graphic_eq(self, file_path, normalize=True):\n \n file_path = os.path.abspath(file_path)\n s = self.eqapo_graphic_eq(normalize=normalize)\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(s)\n return s\n", "url": "https://github.com/jaakkopasanen/AutoEq.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 65, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 54, "n_ast_nodes": 92, "n_identifiers": 13, "random_cut": "def write_eqapo_graphic_eq(self, file_path, normalize=True):\n \n file_path = os.path.abspath(file_path)\n s = self.eqapo_graphic_eq(normalize=normalize)\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(s)\n return s\n", "d_id": 39293, "documentation": { "docstring": "Writes equalization graph to a file as Equalizer APO config.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 181605, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/export_tests.py", "file_name": "export_tests.py", "fun_name": "test_export_pipeline_6", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_export_pipeline_6():\n \n\n pipeline_string = (\n 'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),'\n 'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,'\n 'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)'\n )\n pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)\n expected_code = \n exported_code = export_pipeline(pipeline, tpot_obj.operators,\n tpot_obj._pset, random_state=42,\n data_file_path='test_path')\n\n assert expected_code == exported_code\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 142, "n_words": 29, "vocab_size": 24, "complexity": 1, "nloc": 35, "token_counts": 55, "n_ast_nodes": 96, "n_identifiers": 14, "random_cut": "def test_export_pipeline_6():\n \n\n pipeline_string = 
(\n 'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),'\n 'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,'\n 'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)'\n )\n pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)\n expected_code = \n exported_code = export_pipeline(pipeline, tpot_obj.operators,\n tpot_obj._pset, random_state=42,\n data_file_path='test_path')\n\n assert expected_code ", "d_id": 43393, "documentation": { "docstring": "Assert that exported_pipeline() generated a compile source file with random_state and data_file_path.import numpy as np\nimport pandas as pd\nfrom sklearn.feature_selection import SelectPercentile, f_classif\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.tree import DecisionTreeClassifier\nfrom tpot.export_utils import set_param_recursive\n\n# NOTE: Make sure that the outcome column is labeled 'target' in the data file\ntpot_data = pd.read_csv('test_path', sep='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = tpot_data.drop('target', axis=1)\ntraining_features, testing_features, training_target, testing_target = \\\\\n train_test_split(features, tpot_data['target'], random_state=42)\n\nexported_pipeline = make_pipeline(\n SelectPercentile(score_func=f_classif, percentile=20),\n DecisionTreeClassifier(criterion=\"gini\", max_depth=8, min_samples_leaf=5, min_samples_split=5)\n)\n# Fix random state for all the steps in exported pipeline\nset_param_recursive(exported_pipeline.steps, 'random_state', 42)\n\nexported_pipeline.fit(training_features, training_target)\nresults = exported_pipeline.predict(testing_features)\n", "n_words": 102, "vocab_size": 82, "n_whitespaces": 102, "language": "en" } }, { "id": 176659, "commit_id": "26b7de005ac562786f72b24a73af5a59bbab6953", "repo": "networkx", "path": "networkx/algorithms/connectivity/tests/test_edge_kcomponents.py", "file_name": "test_edge_kcomponents.py", "fun_name": "_check_edge_connectivity", "commit_message": "doc: fix typos in docstring and comment (#5647)", "code": "def _check_edge_connectivity(G):\n \n # Construct the auxiliary graph that can be used to make each k-cc or k-sub\n aux_graph = EdgeComponentAuxGraph.construct(G)\n\n # memoize the local connectivity in this graph\n memo = {}\n\n for k in it.count(1):\n # Test \"local\" k-edge-components and k-edge-subgraphs\n ccs_local = fset(aux_graph.k_edge_components(k))\n ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k))\n\n # Check connectivity properties that should be guaranteed by the\n # algorithms.\n _assert_local_cc_edge_connectivity(G, ccs_local, k, memo)\n _assert_subgraph_edge_connectivity(G, ccs_subgraph, k)\n\n if k == 1 or k == 2 and not G.is_directed():\n assert (\n ccs_local == ccs_subgraph\n ), \"Subgraphs and components should be the same when k == 1 or (k == 2 and not G.directed())\"\n\n if G.is_directed():\n # Test special case methods are the same as the aux graph\n if k == 1:\n alt_sccs = fset(nx.strongly_connected_components(G))\n assert alt_sccs == ccs_local, \"k=1 failed alt\"\n assert alt_sccs == ccs_subgraph, \"k=1 failed alt\"\n else:\n # Test special case methods are the same as the aux graph\n if k == 1:\n alt_ccs = fset(nx.connected_components(G))\n assert alt_ccs == ccs_local, \"k=1 failed alt\"\n assert alt_ccs == ccs_subgraph, \"k=1 failed alt\"\n elif k == 2:\n alt_bridge_ccs = fset(bridge_components(G))\n assert alt_bridge_ccs == 
ccs_local, \"k=2 failed alt\"\n assert alt_bridge_ccs == ccs_subgraph, \"k=2 failed alt\"\n # if new methods for k == 3 or k == 4 are implemented add them here\n\n # Check the general subgraph method works by itself\n alt_subgraph_ccs = fset(\n [set(C.nodes()) for C in general_k_edge_subgraphs(G, k=k)]\n )\n assert alt_subgraph_ccs == ccs_subgraph, \"alt subgraph method failed\"\n\n # Stop once k is larger than all special case methods\n # and we cannot break down ccs any further.\n if k > 2 and all(len(cc) == 1 for cc in ccs_local):\n break\n\n\n# ----------------\n# Misc tests\n# ----------------\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 669, "n_words": 275, "vocab_size": 128, "complexity": 13, "nloc": 32, "token_counts": 235, "n_ast_nodes": 393, "n_identifiers": 32, "random_cut": "def _check_edge_connectivity(G):\n \n # Construct the auxiliary graph that can be used to make each k-cc or k-sub\n aux_graph = EdgeComponentAuxGraph.construct(G)\n\n # memoize the local connectivity in this graph\n memo = {}\n\n for k in it.count(1):\n # Test \"local\" k-edge-components and k-edge-subgraphs\n ccs_local = fset(aux_graph.k_edge_components(k))\n ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k))\n\n # Check connectivity properties that should be guaranteed by the\n # algorithms.\n _assert_local_cc_edge_connectivity(G, ccs_local, k, memo)\n _assert_subgraph_edge_connectivity(G, ccs_subgraph, k)\n\n if k == 1 or k == 2 and not G.is_directed():\n assert (\n ccs_local == ccs_subgraph\n ), \"Subgraphs and components should be the same when k == 1 or (k == 2 and not G.directed())\"\n\n if G.is_directed():\n # Test special case methods are the same as the aux graph\n if k == 1:\n alt_sccs = fset(nx.strongly_connected_components(G))\n assert alt_sccs == ccs_local, \"k=1 failed alt\"\n assert alt_sccs == ccs_subgraph, \"k=1 failed alt\"\n else:\n # Test special case methods are the same as the aux gra", "d_id": 42027, "documentation": { "docstring": "\n Helper - generates all k-edge-components using the aux graph. Checks the\n both local and subgraph edge connectivity of each cc. 
Also checks that\n alternate methods of computing the k-edge-ccs generate the same result.\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 47, "language": "en" } }, { "id": 201816, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/tests.py", "file_name": "tests.py", "fun_name": "test_sequence_name_length_limits_flush", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_sequence_name_length_limits_flush(self):\n \n # A full flush is expensive to the full test, so we dig into the\n # internals to generate the likely offending SQL and run it manually\n\n # Some convenience aliases\n VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\n VLM_m2m = (\n VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through\n )\n tables = [\n VLM._meta.db_table,\n VLM_m2m._meta.db_table,\n ]\n sql_list = connection.ops.sql_flush(no_style(), tables, reset_sequences=True)\n connection.ops.execute_sql_flush(sql_list)\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 163, "n_words": 53, "vocab_size": 44, "complexity": 1, "nloc": 11, "token_counts": 60, "n_ast_nodes": 98, "n_identifiers": 17, "random_cut": "def test_sequence_name_length_limits_flush(self):\n \n # A full flush is expensive to the full test, so we dig into the\n # internals to generate the likely offending SQL and run it manually\n\n # Some convenience aliases\n VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\n VLM_m2m = (\n VLM.m2m_", "d_id": 50000, "documentation": { "docstring": "\n Sequence resetting as part of a flush with model with long name and\n long pk name doesn't error (#8901).\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 41, "language": "en" } }, { "id": 249109, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_media.py", "file_name": "test_media.py", "fun_name": "test_delete_media", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_delete_media(self) -> None:\n \n\n download_resource = self.media_repo.children[b\"download\"]\n upload_resource = self.media_repo.children[b\"upload\"]\n\n # Upload some media into the room\n response = self.helper.upload_media(\n upload_resource,\n SMALL_PNG,\n tok=self.admin_user_tok,\n expect_code=200,\n )\n # Extract media ID from the response\n server_and_media_id = response[\"content_uri\"][6:] # Cut off 'mxc://'\n server_name, media_id = server_and_media_id.split(\"/\")\n\n self.assertEqual(server_name, self.server_name)\n\n # Attempt to access media\n channel = make_request(\n self.reactor,\n FakeSite(download_resource, self.reactor),\n \"GET\",\n server_and_media_id,\n shorthand=False,\n access_token=self.admin_user_tok,\n )\n\n # Should be successful\n self.assertEqual(\n 200,\n channel.code,\n msg=(\n \"Expected to receive a 200 on accessing media: %s\" % server_and_media_id\n ),\n )\n\n # Test if the file exists\n local_path = self.filepaths.local_media_filepath(media_id)\n self.assertTrue(os.path.exists(local_path))\n\n url = \"/_synapse/admin/v1/media/%s/%s\" % (self.server_name, media_id)\n\n # Delete media\n channel = self.make_request(\n \"DELETE\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(1, channel.json_body[\"total\"])\n self.assertEqual(\n media_id,\n 
channel.json_body[\"deleted_media\"][0],\n )\n\n # Attempt to access media\n channel = make_request(\n self.reactor,\n FakeSite(download_resource, self.reactor),\n \"GET\",\n server_and_media_id,\n shorthand=False,\n access_token=self.admin_user_tok,\n )\n self.assertEqual(\n HTTPStatus.NOT_FOUND,\n channel.code,\n msg=(\n \"Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s\"\n % server_and_media_id\n ),\n )\n\n # Test if the file is deleted\n self.assertFalse(os.path.exists(local_path))\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 762, "n_words": 159, "vocab_size": 91, "complexity": 1, "nloc": 61, "token_counts": 297, "n_ast_nodes": 464, "n_identifiers": 38, "random_cut": "def test_delete_media(self) -> None:\n \n\n download_resource = self.media_repo.children[b\"download\"]\n upload_resource = self.media_repo.children[b\"upload\"]\n\n # Upload some media into the room\n response = self.helper.upload_media(\n upload_resource,\n SMALL_PNG,\n tok=self.admin_user_tok,\n expect_code=200,\n )\n # Extract media ID from the response\n server_and_media_id = r", "d_id": 72616, "documentation": { "docstring": "\n Tests that delete a media is successfully\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 7623, "commit_id": "03b4ab273abd7e22a56bb550b56f3d667200abf9", "repo": "ludwig", "path": "ludwig/visualize.py", "file_name": "visualize.py", "fun_name": "load_data_for_viz", "commit_message": "Encoder refactor V2 (#2370)\n\n* Added base files and some initial code\r\n\r\n* More files created, fleshing out binary feature and corresponding encoders\r\n\r\n* Added more schema infra\r\n\r\n* Registered all feature encoders\r\n\r\n* Separated feature utils infra\r\n\r\n* Added all preprocessing classes\r\n\r\n* Filled out rest of schema configs\r\n\r\n* Fixed preproc dataclass\r\n\r\n* Fixed small errors blocking import\r\n\r\n* Tests should be passing\r\n\r\n* Deleted unnecesssary files and removed commented out code\r\n\r\n* fixed flake8\r\n\r\n* Fixed most tests\r\n\r\n* fixed pattern validation\r\n\r\n* Fixed missing val strategies and solved custom encoder update issue\r\n\r\n* Removed preprocessing from features due to schema SSOT\r\n\r\n* fix flake 8\r\n\r\n* Started encoder schema work\r\n\r\n* Parallel CNN Encoder\r\n\r\n* StackedCNN Encoder\r\n\r\n* Added image encoders\r\n\r\n* Finished sequence encoders\r\n\r\n* Partway through text encoders\r\n\r\n* Added text encoders\r\n\r\n* Bag Encoders\r\n\r\n* Binary and Date Encoders\r\n\r\n* category, date, h3, and set encoders\r\n\r\n* Wired up encoder schemas\r\n\r\n* Switched input feature encoder schema definitions\r\n\r\n* Fixed handful of issues\r\n\r\n* Fix schema issues\r\n\r\n* Refactored a bunch of test configs\r\n\r\n* Small changes\r\n\r\n* Removed default param from register_encoder\r\n\r\n* Schema working now, working on refactoring\r\n\r\n* Finished decoder schemas\r\n\r\n* Removed default param from register_decoder\r\n\r\n* Added some default params to output features and more decoder work\r\n\r\n* Refactored all input feature encoder/decoder referencing\r\n\r\n* Refactored pretty much all the tests\r\n\r\n* Added back constants\r\n\r\n* Solved gbm issue\r\n\r\n* Fixed save_load test\r\n\r\n* various fixes\r\n\r\n* Fixed import issue\r\n\r\n* Flake 8 and various fixes\r\n\r\n* Solved more failed tests\r\n\r\n* Refactored missed tests\r\n\r\n* Removed commented 
lines\r\n\r\n* Added init file for decoders schema\r\n\r\n* Fixed failing tests\r\n\r\n* Fixed hyperopt shared params test\r\n\r\n* Added backwards compatability logic and test\r\n\r\n* Flake 8\r\n\r\n* removed comment\r\n\r\n* Added base files and some initial code\r\n\r\n* More files created, fleshing out binary feature and corresponding encoders\r\n\r\n* Added more schema infra\r\n\r\n* Registered all feature encoders\r\n\r\n* Separated feature utils infra\r\n\r\n* Added all preprocessing classes\r\n\r\n* Filled out rest of schema configs\r\n\r\n* Fixed preproc dataclass\r\n\r\n* Fixed small errors blocking import\r\n\r\n* Tests should be passing\r\n\r\n* Deleted unnecesssary files and removed commented out code\r\n\r\n* fixed flake8\r\n\r\n* Fixed most tests\r\n\r\n* fixed pattern validation\r\n\r\n* Fixed missing val strategies and solved custom encoder update issue\r\n\r\n* Removed preprocessing from features due to schema SSOT\r\n\r\n* fix flake 8\r\n\r\n* Started encoder schema work\r\n\r\n* Parallel CNN Encoder\r\n\r\n* StackedCNN Encoder\r\n\r\n* Added image encoders\r\n\r\n* Finished sequence encoders\r\n\r\n* Partway through text encoders\r\n\r\n* Added text encoders\r\n\r\n* Bag Encoders\r\n\r\n* Binary and Date Encoders\r\n\r\n* category, date, h3, and set encoders\r\n\r\n* Wired up encoder schemas\r\n\r\n* Switched input feature encoder schema definitions\r\n\r\n* Fixed handful of issues\r\n\r\n* Fix schema issues\r\n\r\n* Refactored a bunch of test configs\r\n\r\n* Small changes\r\n\r\n* Removed default param from register_encoder\r\n\r\n* Schema working now, working on refactoring\r\n\r\n* Finished decoder schemas\r\n\r\n* Removed default param from register_decoder\r\n\r\n* Added some default params to output features and more decoder work\r\n\r\n* Refactored all input feature encoder/decoder referencing\r\n\r\n* Refactored pretty much all the tests\r\n\r\n* Added back constants\r\n\r\n* Solved gbm issue\r\n\r\n* Fixed save_load test\r\n\r\n* various fixes\r\n\r\n* Fixed import issue\r\n\r\n* Flake 8 and various fixes\r\n\r\n* Solved more failed tests\r\n\r\n* Refactored missed tests\r\n\r\n* Removed commented lines\r\n\r\n* Added init file for decoders schema\r\n\r\n* Fixed failing tests\r\n\r\n* Fixed hyperopt shared params test\r\n\r\n* Added backwards compatability logic and test\r\n\r\n* Flake 8\r\n\r\n* removed comment\r\n\r\n* Skipping CTRL Encoder test since it's blasting memory\r\n\r\n* Fixed audio_feature test\r\n\r\n* Addressed failing tests\r\n\r\n* Fixed backwards compatability\r\n\r\n* Fixed more failing tests\r\n\r\n* Flake 8\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Refactored default logic for all features\r\n\r\n* Fixed H3 weighted_sum encoder wrong type\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix import issue\r\n\r\n* Mark slow HF tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed defaults tests\r\n\r\n* Pin Ray nightly version\r\n\r\n* fix link\r\n\r\n* pin torch to 07/26\r\n\r\n* cleanup\r\n\r\n* upgrade ray pinned version to enable parquet partition filtering\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* downgrade Ray to ensure TensorDtypes are not inferred during Ray Dataset <=> Dask 
conversions\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Removed custom encoder decoder helper method\r\n\r\n* unpin torch\r\n\r\n* Flake 8\r\n\r\n* Daniel feedback\r\n\r\n* Small fixes\r\n\r\n* Fixed default weights init\r\n\r\n* Added test with encoder dependencies for global defaults\r\n\r\n* Fixed Arnav's test\r\n\r\n* Addressed Arnav's feedback\r\n\r\n* Address nit\r\n\r\n* Addressed feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address nit\r\n\r\n* Fix test\r\n\r\n* Initial feedback refactor\r\n\r\n* More refactoring\r\n\r\n* Added vocab field to all text_encoder configs\r\n\r\n* More refactoring\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix audio feature test, also s/logging/logger.\r\n\r\n* param names should start with lowercase s/N/n\r\n\r\n* Re-added schema utils used in encoder refactor.\r\n\r\n* Removes unused overwrite_defaults()\r\n\r\n* Oops, name is passed to feature as a kwarg not a member of the feature config. Why? Probably should change that.\r\n\r\n* Change lowercase default back to True. Fixes test_strings_utils\r\n\r\n* Set feature validation error with output size 1.\r\n\r\n* MLP mixer encoder needs num_channels.\r\n\r\n* Use schema.dump instead of .__dict__ to convert marshmallow dataclass to dict\r\n\r\n* (x,) in python is a tuple with a single element x. Watch out for this when defining schemas.\r\n\r\n* Construct features by using build_single_input/output to share code for deserializing feature configs. Also changes ECD to BaseModel, IMO its confusing to import ECD to use a class method from BaseModel.\r\n\r\n* Fix test_trainer_utils, adds convenience method BaseFeature.load_from_dictionary\r\n\r\n* Use feature load_from_dictionary instead of BaseModel in feature tests.\r\n\r\n* Populate encoder and decoder types in shared test fixtures, fixes error expectations in test_validate_config_combiner.py\r\n\r\n* Fixes test_validate_config_misc.py by ensuring only one option of OneOf allows None, because OneOf fails validation if more than one condition match.\r\n\r\n* Updates test_defaults.py\r\n\r\n* Adds type, column, proc_column to feature schemas. Revert feature tests by passing in config dict again.\r\n\r\n* decorate feature base classes with @dataclass, fixes failure building input features in trainer.\r\n\r\n* Implement _serialize for PreprocessingDataclassField.\r\n\r\n* use type(feature) to get schema class.\r\n\r\n* Fix test_trainer_utils.py\r\n\r\n* audio_feature requires embedding_size, but passthrough encoder does not have this property. 
Technically, passthrough encoder is not supported for audio features.\r\n\r\n* Wow, apparently the order of elements in the oneOf affects which error message we get from jsonschema.\r\n\r\n* Get default encoders from feature schema.\r\n\r\n* Get encoder defaults from schema in config_utils.py\r\n\r\n* Make number feature allow decoders without clip property\r\n\r\n* s/list/List\r\n\r\n* Adds reduce_output to h3 encoder.\r\n\r\n* Moves decoder params into nested decoder.\r\n\r\n* Update processing parameters with computed_fill_value.\r\n\r\n* Removes test code.\r\n\r\n* Adds input_size to decoder base because some features assume decoders have an input_size\r\n\r\n* dense encoder not supported for bag features, changed to embed.\r\n\r\n* Adds input_size param to dense encoder schema, since its a required parameter of dense encoder.\r\n\r\n* Fixes vector feature input_size in encoder metadata.\r\n\r\n* Fixes test reducers, set sequence reduce mode in output feature base.\r\n\r\n* Don't nest encoder parameters in decoder\r\n\r\n* Fixes test_torchscript, get num_classes from encoder config.\r\n\r\n* Audio feature padding is float, not int.\r\n\r\n* Adds temp check for threshold to fix GBM tests.\r\n\r\n* Adds missing value strategy drop_row for vector feature in test.\r\n\r\n* Drop row should work even if computed_fill_value is an empty string\r\n\r\n* Removes duplicated TOP_K constant.\r\n\r\n* Consolidated set_default_values\r\n\r\n* Removes commented-out defaults.\r\n\r\n* Remove load_config from OutputFeature, it isn't doing anything here.\r\n\r\n* Removes comment.\r\n\r\n* Fix type annotations for input/output feature constructors.\r\n\r\n* Fixes output feature dependencies being ignored.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Adds test for construction of output features with dependencies.\r\n\r\n* Encoder/Decoder config now lives on encoder/decoder object\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixes decoder params to match their respective classes. Moves fc_stack params and threshold back to output feature.\r\n\r\n* Make clip property of number output feature again.\r\n\r\n* Adds threshold property to set feature schema, use this property instead of storing it in the decoder.\r\n\r\n* input_size in output_feature instead of decoder.\r\n\r\n* Made vector_size property of vector_feature.\r\n\r\n* Fixed gbm tests\r\n\r\n* Fixed flake 8\r\n\r\n* Re-adds num_classes as member of category output feature.\r\n\r\n* Makes vocab_size match vocab used in preprocessing.\r\n\r\n* num_classes in CategoryOutputFeature.\r\n\r\n* Moves num_classes from decoder to category output feature.\r\n\r\n* Fixes test_model_training_options. 
Copies fc_layer keys into decoder if they are present on output features.\r\n\r\n* Adds field descriptors for fc_layers params in BaseOutputFeatureConfig.\r\n\r\nCo-authored-by: connor-mccorm \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: connor-mccorm <97468934+connor-mccorm@users.noreply.github.com>\r\nCo-authored-by: Geoffrey Angus \r\nCo-authored-by: Arnav Garg \r\nCo-authored-by: Daniel Treiman ", "code": "def load_data_for_viz(load_type, model_file_statistics, **kwargs):\n \n supported_load_types = dict(\n load_json=load_json,\n load_from_file=partial(\n load_from_file, dtype=kwargs.get(\"dtype\", int), ground_truth_split=kwargs.get(\"ground_truth_split\", 2)\n ),\n )\n loader = supported_load_types[load_type]\n try:\n stats_per_model = [loader(stats_f) for stats_f in model_file_statistics]\n except (TypeError, AttributeError):\n logger.exception(f\"Unable to open model statistics file {model_file_statistics}!\")\n raise\n return stats_per_model\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 114, "n_words": 40, "vocab_size": 37, "complexity": 3, "nloc": 14, "token_counts": 86, "n_ast_nodes": 139, "n_identifiers": 20, "random_cut": "def load_data_for_viz(load_type, model_file_statistics, **kwargs):\n \n supported_load_types = dict(\n load_json=load_json,\n load_from_file=partial(\n load_from_file, dtype=kwargs.get(\"dtype\", int), ground_truth_split=kwar", "d_id": 1233, "documentation": { "docstring": "Load model file data in to list of .\n\n :param load_type: type of the data loader to be used.\n :param model_file_statistics: JSON file or list of json files containing any\n model experiment stats.\n :return List of training statistics loaded as json objects.\n ", "n_words": 42, "vocab_size": 32, "n_whitespaces": 64, "language": "en" } }, { "id": 9733, "commit_id": "490676cc34d909b8a361fa1ae1e835263a13673b", "repo": "gensim", "path": "gensim/models/doc2vec.py", "file_name": "doc2vec.py", "fun_name": "scan_vocab", "commit_message": "re #2809: update the doc2vec notebook", "code": "def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None):\n \n logger.info(\"collecting all words and their counts\")\n if corpus_file is not None:\n corpus_iterable = TaggedLineDocument(corpus_file)\n\n total_words, corpus_count = self._scan_vocab(corpus_iterable, progress_per, trim_rule)\n\n logger.info(\n \"collected %i word types and %i unique tags from a corpus of %i examples and %i words\",\n len(self.raw_vocab), len(self.dv), corpus_count, total_words,\n )\n\n return total_words, corpus_count\n", "url": "https://github.com/RaRe-Technologies/gensim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 134, "n_words": 52, "vocab_size": 43, "complexity": 2, "nloc": 10, "token_counts": 83, "n_ast_nodes": 127, "n_identifiers": 15, "random_cut": "def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None):\n \n logger.info(\"collecting all words and their counts\")\n if corpus_file ", "d_id": 1663, "documentation": { "docstring": "Create the model's vocabulary: a mapping from unique words in the corpus to their frequency count.\n\n Parameters\n ----------\n documents : iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, optional\n The tagged documents used to create the vocabulary. 
Their tags can be either str tokens or ints (faster).\n corpus_file : str, optional\n Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.\n You may use this argument instead of `documents` to get performance boost. Only one of `documents` or\n `corpus_file` arguments need to be passed (not both of them).\n progress_per : int\n Progress will be logged every `progress_per` documents.\n trim_rule : function, optional\n Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,\n be trimmed away, or handled using the default (discard if word count < min_count).\n Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),\n or a callable that accepts parameters (word, count, min_count) and returns either\n :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.\n The rule, if given, is only used to prune vocabulary during\n :meth:`~gensim.models.doc2vec.Doc2Vec.build_vocab` and is not stored as part of the model.\n\n The input parameters are of the following types:\n * `word` (str) - the word we are examining\n * `count` (int) - the word's frequency count in the corpus\n * `min_count` (int) - the minimum count threshold.\n\n Returns\n -------\n (int, int)\n Tuple of `(total words in the corpus, number of documents)`.\n\n ", "n_words": 218, "vocab_size": 148, "n_whitespaces": 487, "language": "en" } }, { "id": 222893, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/dist.py", "file_name": "dist.py", "fun_name": "_parse_command_opts", "commit_message": "add python 3.10.4 for windows", "code": "def _parse_command_opts(self, parser, args):\n \n # late import because of mutual dependence between these modules\n from distutils.cmd import Command\n\n # Pull the current command from the head of the command line\n command = args[0]\n if not command_re.match(command):\n raise SystemExit(\"invalid command name '%s'\" % command)\n self.commands.append(command)\n\n # Dig up the command class that implements this command, so we\n # 1) know that it's a valid command, and 2) know which options\n # it takes.\n try:\n cmd_class = self.get_command_class(command)\n except DistutilsModuleError as msg:\n raise DistutilsArgError(msg)\n\n # Require that the command class be derived from Command -- want\n # to be sure that the basic \"command\" interface is implemented.\n if not issubclass(cmd_class, Command):\n raise DistutilsClassError(\n \"command class %s must subclass Command\" % cmd_class)\n\n # Also make sure that the command object provides a list of its\n # known options.\n if not (hasattr(cmd_class, 'user_options') and\n isinstance(cmd_class.user_options, list)):\n msg = (\"command class %s must provide \"\n \"'user_options' attribute (a list of tuples)\")\n raise DistutilsClassError(msg % cmd_class)\n\n # If the command class has a list of negative alias options,\n # merge it in with the global negative aliases.\n negative_opt = self.negative_opt\n if hasattr(cmd_class, 'negative_opt'):\n negative_opt = negative_opt.copy()\n negative_opt.update(cmd_class.negative_opt)\n\n # Check for help_options in command class. 
They have a different\n # format (tuple of four) so we need to preprocess them here.\n if (hasattr(cmd_class, 'help_options') and\n isinstance(cmd_class.help_options, list)):\n help_options = fix_help_options(cmd_class.help_options)\n else:\n help_options = []\n\n # All commands support the global options too, just by adding\n # in 'global_options'.\n parser.set_option_table(self.global_options +\n cmd_class.user_options +\n help_options)\n parser.set_negative_aliases(negative_opt)\n (args, opts) = parser.getopt(args[1:])\n if hasattr(opts, 'help') and opts.help:\n self._show_help(parser, display_options=0, commands=[cmd_class])\n return\n\n if (hasattr(cmd_class, 'help_options') and\n isinstance(cmd_class.help_options, list)):\n help_option_found=0\n for (help_option, short, desc, func) in cmd_class.help_options:\n if hasattr(opts, parser.get_attr_name(help_option)):\n help_option_found=1\n if callable(func):\n func()\n else:\n raise DistutilsClassError(\n \"invalid help function %r for help option '%s': \"\n \"must be a callable object (function, etc.)\"\n % (func, help_option))\n\n if help_option_found:\n return\n\n # Put the options from the command-line into their official\n # holding pen, the 'command_options' dictionary.\n opt_dict = self.get_option_dict(command)\n for (name, value) in vars(opts).items():\n opt_dict[name] = (\"command line\", value)\n\n return args\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1131, "n_words": 337, "vocab_size": 203, "complexity": 18, "nloc": 54, "token_counts": 357, "n_ast_nodes": 597, "n_identifiers": 50, "random_cut": "def _parse_command_opts(self, parser, args):\n \n # late import because of mutual dependence between these modules\n from distutils.cmd import Command\n\n # Pull the current command from the head of the command line\n command = args[0]\n if not command_re.match(command):\n raise SystemExit(\"invalid command name '%s'\" % command)\n self.commands.append(command)\n\n # Dig up the command class that implements this command, so we\n # 1) know that it's a valid command, and 2) know which options\n # it takes.\n try:\n cmd_class = self.get_command_class(command)\n except DistutilsModuleError as msg:\n raise DistutilsArgError(msg)\n\n # Require that the command class be derived from Command -- want\n # to be sure that the basic \"command\" interface is implemented.\n if not issubclass(cmd_class, Command):\n raise Distut", "d_id": 56800, "documentation": { "docstring": "Parse the command-line options for a single command.\n 'parser' must be a FancyGetopt instance; 'args' must be the list\n of arguments, starting with the current command (whose options\n we are about to parse). Returns a new version of 'args' with\n the next command at the front of the list; will be the empty\n list if there are no more commands on the command line. 
Returns\n None if the user asked for help on this command.\n ", "n_words": 75, "vocab_size": 48, "n_whitespaces": 126, "language": "en" } }, { "id": 242085, "commit_id": "547d1bb522562a1ba38961d13932fffc2bb92edf", "repo": "scipy", "path": "scipy/stats/_distn_infrastructure.py", "file_name": "_distn_infrastructure.py", "fun_name": "interval", "commit_message": "MAINT: stats: update deprecation warning version information", "code": "def interval(self, confidence=None, *args, **kwds):\n \n # This function was originally written with parameter `alpha`, but\n # `alpha` is also the name of a shape parameter of two distributions.\n # This block allows the function to accept both `alpha` and its\n # replacement `confidence` during a deprecation period; it can be\n # removed in the second release after 1.9.0.\n # See description of logic in `moment` method.\n has_shape_alpha = (self.shapes is not None\n and \"alpha\" in (self.shapes.split(\", \")))\n got_confidence = confidence is not None\n got_keyword_alpha = kwds.get(\"alpha\", None) is not None\n\n if not got_confidence and ((not got_keyword_alpha)\n or (got_keyword_alpha and has_shape_alpha)):\n message = (\"interval() missing 1 required positional argument: \"\n \"`confidence`\")\n raise TypeError(message)\n\n if got_keyword_alpha and not has_shape_alpha:\n if got_confidence:\n # this will change to \"interval got unexpected argument alpha\"\n message = \"interval() got multiple values for first argument\"\n raise TypeError(message)\n else:\n message = (\"Use of keyword argument `alpha` for method \"\n \"`interval` is deprecated. Use first positional \"\n \"argument or keyword argument `confidence` \"\n \"instead.\")\n confidence = kwds.pop(\"alpha\")\n warnings.warn(message, DeprecationWarning, stacklevel=2)\n alpha = confidence\n\n alpha = asarray(alpha)\n if np.any((alpha > 1) | (alpha < 0)):\n raise ValueError(\"alpha must be between 0 and 1 inclusive\")\n q1 = (1.0-alpha)/2\n q2 = (1.0+alpha)/2\n a = self.ppf(q1, *args, **kwds)\n b = self.ppf(q2, *args, **kwds)\n return a, b\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 658, "n_words": 213, "vocab_size": 134, "complexity": 10, "nloc": 30, "token_counts": 219, "n_ast_nodes": 366, "n_identifiers": 28, "random_cut": "def interval(self, confidence=None, *args, **kwds):\n \n # This function was originally written with parameter `alpha`, but\n # `alpha` is also the name of a shape parameter of two distributions.\n # This block allows the functi", "d_id": 69784, "documentation": { "docstring": "Confidence interval with equal areas around the median.\n\n .. deprecated:: 1.9.0\n Parameter `alpha` is replaced by parameter `confidence` to avoid\n name collisions with the shape parameter `alpha` of some\n distributions. Parameter `alpha` will be removed in the second\n release after 1.9.0.\n\n Parameters\n ----------\n confidence : array_like of float\n Probability that an rv will be drawn from the returned range.\n Each value should be in the range [0, 1].\n arg1, arg2, ... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter, Default is 0.\n scale : array_like, optional\n scale parameter, Default is 1.\n\n Returns\n -------\n a, b : ndarray of float\n end-points of range that contain ``100 * alpha %`` of the rv's\n possible values.\n\n ", "n_words": 128, "vocab_size": 90, "n_whitespaces": 333, "language": "en" } }, { "id": 101754, "commit_id": "c79175cbde5600bebd65785f3821fc74b3a80cbe", "repo": "faceswap", "path": "tools/alignments/jobs_faces.py", "file_name": "jobs_faces.py", "fun_name": "__call__", "commit_message": "Alignments Tool updates\n - Copy info back to alignments file from faces", "code": "def __call__(self) -> bool:\n \n for meta in tqdm(self._face_alignments,\n desc=\"Updating Alignments File from PNG Header\",\n leave=False):\n src = meta[\"source\"]\n alignment = meta[\"alignments\"]\n if not any(alignment.get(key, {}) for key in self._updatable_keys):\n continue\n\n faces = self._alignments.get_faces_in_frame(src[\"source_filename\"])\n if len(faces) < src[\"face_index\"] + 1: # list index out of range\n logger.debug(\"Skipped face '%s'. Index does not exist in alignments file\",\n src[\"original_filename\"])\n continue\n\n face = faces[src[\"face_index\"]]\n self._check_and_update(alignment, face)\n\n retval = False\n if self._counts:\n retval = True\n logger.info(\"Updated alignments file from PNG Data: %s\", self._counts)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 336, "n_words": 80, "vocab_size": 62, "complexity": 6, "nloc": 27, "token_counts": 138, "n_ast_nodes": 231, "n_identifiers": 25, "random_cut": "def __call__(self) -> bool:\n \n for meta in tqdm(self._face_alignments,\n desc=\"Updating Alignments File from PNG Header\",\n leave=False):\n src = meta[\"source\"]\n alignment = meta[\"alignments\"]\n if not any(alignment.get(key, {}) for key in self._updatable_keys):\n continue\n\n faces = self._alignments.get_faces_in_frame(src[\"source_filename\"])\n if len(faces) < src[\"face_index\"] + 1: # list index out of range\n logger.debug(\"Skipped face '%s'. 
Index does not exist in alignments file\",\n src[\"original_filename\"])\n continue\n\n face = faces[src[\"face_index\"]]\n self._check_and_update(alignment, face)\n\n retval = False\n if self._counts:\n retval = True\n logger.info(\"Updated alignments file from PNG Data: %s\", self._counts)\n return retval\n", "d_id": 21158, "documentation": { "docstring": " Parse through the face data updating any entries in the alignments file.\n\n Returns\n -------\n bool\n ``True`` if any alignment information was updated otherwise ``False``\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 64, "language": "en" } }, { "id": 319622, "commit_id": "69ef26dab04d51e7e102dcb33cd98ddc6ad975fd", "repo": "paperless-ngx", "path": "src/documents/tests/test_file_handling.py", "file_name": "test_file_handling.py", "fun_name": "test_dynamic_path", "commit_message": "Feature: Dynamic document storage pathes (#916)\n\n* Added devcontainer\r\n\r\n* Add feature storage pathes\r\n\r\n* Exclude tests and add versioning\r\n\r\n* Check escaping\r\n\r\n* Check escaping\r\n\r\n* Check quoting\r\n\r\n* Echo\r\n\r\n* Escape\r\n\r\n* Escape :\r\n\r\n* Double escape \\\r\n\r\n* Escaping\r\n\r\n* Remove if\r\n\r\n* Escape colon\r\n\r\n* Missing \\\r\n\r\n* Esacpe :\r\n\r\n* Escape all\r\n\r\n* test\r\n\r\n* Remove sed\r\n\r\n* Fix exclude\r\n\r\n* Remove SED command\r\n\r\n* Add LD_LIBRARY_PATH\r\n\r\n* Adjusted to v1.7\r\n\r\n* Updated test-cases\r\n\r\n* Remove devcontainer\r\n\r\n* Removed internal build-file\r\n\r\n* Run pre-commit\r\n\r\n* Corrected flak8 error\r\n\r\n* Adjusted to v1.7\r\n\r\n* Updated test-cases\r\n\r\n* Corrected flak8 error\r\n\r\n* Adjusted to new plural translations\r\n\r\n* Small adjustments due to code-review backend\r\n\r\n* Adjusted line-break\r\n\r\n* Removed PAPERLESS prefix from settings variables\r\n\r\n* Corrected style change due to search+replace\r\n\r\n* First documentation draft\r\n\r\n* Revert changes to Pipfile\r\n\r\n* Add sphinx-autobuild with keep-outdated\r\n\r\n* Revert merge error that results in wrong storage path is evaluated\r\n\r\n* Adjust styles of generated files ...\r\n\r\n* Adds additional testing to cover dynamic storage path functionality\r\n\r\n* Remove unnecessary condition\r\n\r\n* Add hint to edit storage path dialog\r\n\r\n* Correct spelling of pathes to paths\r\n\r\n* Minor documentation tweaks\r\n\r\n* Minor typo\r\n\r\n* improving wrapping of filter editor buttons with new storage path button\r\n\r\n* Update .gitignore\r\n\r\n* Fix select border radius in non input-groups\r\n\r\n* Better storage path edit hint\r\n\r\n* Add note to edit storage path dialog re document_renamer\r\n\r\n* Add note to bulk edit storage path re document_renamer\r\n\r\n* Rename FILTER_STORAGE_DIRECTORY to PATH\r\n\r\n* Fix broken filter rule parsing\r\n\r\n* Show default storage if unspecified\r\n\r\n* Remove note re storage path on bulk edit\r\n\r\n* Add basic validation of filename variables\r\n\r\nCo-authored-by: Markus Kling \r\nCo-authored-by: Trenton Holmes \r\nCo-authored-by: Michael Shamoon <4887959+shamoon@users.noreply.github.com>\r\nCo-authored-by: Quinn Casey ", "code": "def test_dynamic_path(self):\n \n doc = Document.objects.create(\n title=\"does not matter\",\n created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),\n mime_type=\"application/pdf\",\n pk=2,\n checksum=\"2\",\n storage_path=StoragePath.objects.create(path=\"TestFolder/{created}\"),\n )\n self.assertEqual(generate_filename(doc), \"TestFolder/2020-06-25.pdf\")\n", "url": 
"https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 116, "n_words": 22, "vocab_size": 22, "complexity": 1, "nloc": 10, "token_counts": 81, "n_ast_nodes": 127, "n_identifiers": 19, "random_cut": "def test_dynamic_path(self):\n \n doc = Document.objects.create(\n title=\"does not matter\",\n created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),\n ", "d_id": 116979, "documentation": { "docstring": "\n GIVEN:\n - A document with a defined storage path\n WHEN:\n - the filename is generated for the document\n THEN:\n - the generated filename uses the defined storage path for the document\n ", "n_words": 31, "vocab_size": 17, "n_whitespaces": 93, "language": "en" } }, { "id": 150412, "commit_id": "9f6bba40af1a407f190a89f5c0c8b4e3f528ba46", "repo": "freqtrade", "path": "freqtrade/rpc/replicate/__init__.py", "file_name": "__init__.py", "fun_name": "follower_loop", "commit_message": "initial concept for replicate, basic leader and follower logic", "code": "async def follower_loop(self):\n \n try:\n await self._connect_to_leaders()\n except Exception as e:\n logger.error(\"Exception occurred in follower loop: \")\n logger.exception(e)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 71, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 60, "n_identifiers": 8, "random_cut": "async def follower_loop(self):\n \n try:\n await self._connect_to_leaders()\n except Exception as e:\n logger.error(\"Exception occurred in follower loop: \")\n logger.exception(e)\n", "d_id": 34736, "documentation": { "docstring": "\n Main follower coroutine\n\n This starts all of the leader connection coros\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 103727, "commit_id": "6604e0d015fbd7a3e5602a6f3831d786b4ed659d", "repo": "kitty", "path": "kitty_tests/check_build.py", "file_name": "check_build.py", "fun_name": "test_launcher_ensures_stdio", "commit_message": "Fix regression in 0.26.0 that caused launching kitty without working STDIO handles to result in high CPU usage and prewarming failing\n\nFixes #5444", "code": "def test_launcher_ensures_stdio(self):\n from kitty.constants import kitty_exe\n import subprocess\n exe = kitty_exe()\n cp = subprocess.run([exe, '+runpy', ])\n self.assertEqual(cp.returncode, 0)\n\n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 52, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 15, "token_counts": 42, "n_ast_nodes": 71, "n_identifiers": 11, "random_cut": "def test_launcher_ensures_stdio(self):\n from kitty.constants import kitty_exe\n import subprocess\n exe = kitty_exe()\n cp = subprocess.run([exe, '+runpy', ])\n self.assertEqual(cp.returncode, 0)\n\n", "d_id": 21711, "documentation": { "docstring": "\\\nimport os, sys\nif sys.stdin:\n os.close(sys.stdin.fileno())\nif sys.stdout:\n os.close(sys.stdout.fileno())\nif sys.stderr:\n os.close(sys.stderr.fileno())\nos.execlp('kitty', 'kitty', '+runpy', 'import sys; raise SystemExit(1 if sys.stdout is None or sys.stdin is None or sys.stderr is None else 0)')\n", "n_words": 34, "vocab_size": 26, "n_whitespaces": 37, "language": "en" } }, { "id": 265647, "commit_id": "c4b7ab067a914349abd88398dd9bfef9f6c2f806", "repo": "netbox", "path": 
"netbox/dcim/tests/test_forms.py", "file_name": "test_forms.py", "fun_name": "test_interface_label_count_mismatch", "commit_message": "Fixes #10247: Allow changing selected device/VM when creating a new component (#10312)\n\n* Initial work on #10247\r\n\r\n* Continued work on #10247\r\n\r\n* Clean up component creation tests\r\n\r\n* Move valdiation of replicated field to form\r\n\r\n* Clean up ordering of fields in component creation forms\r\n\r\n* Omit fieldset header if none\r\n\r\n* Clean up ordering of fields in component template creation forms\r\n\r\n* View tests should not move component templates to new device type\r\n\r\n* Define replication_fields on VMInterfaceCreateForm\r\n\r\n* Clean up expandable field help texts\r\n\r\n* Update comments\r\n\r\n* Update component bulk update forms & views to support new replication fields\r\n\r\n* Fix ModularDeviceComponentForm parent class\r\n\r\n* Fix bulk creation of VM interfaces (thanks @kkthxbye-code!)", "code": "def test_interface_label_count_mismatch(self):\n \n bad_interface_data = {\n 'device': self.device.pk,\n 'name': 'eth[0-9]',\n 'label': 'Interface[0-1]',\n 'type': InterfaceTypeChoices.TYPE_1GE_GBIC,\n }\n form = InterfaceCreateForm(bad_interface_data)\n\n self.assertFalse(form.is_valid())\n self.assertIn('label', form.errors)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 106, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 10, "token_counts": 58, "n_ast_nodes": 105, "n_identifiers": 13, "random_cut": "def test_interface_label_count_mismatch(self):\n \n ", "d_id": 78160, "documentation": { "docstring": "\n Check that attempting to generate a differing number of names and labels results in a validation error.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 32, "language": "en" } }, { "id": 259334, "commit_id": "5cccdef4378fcdb863467414ee638c6f5e51a19a", "repo": "scikit-learn", "path": "sklearn/preprocessing/_data.py", "file_name": "_data.py", "fun_name": "power_transform", "commit_message": "DOC Ensures that preprocessing._data.power_transform passes numpydoc validation (#22802)\n\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def power_transform(X, method=\"yeo-johnson\", *, standardize=True, copy=True):\n \n pt = PowerTransformer(method=method, standardize=standardize, copy=copy)\n return pt.fit_transform(X)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 43, "n_ast_nodes": 67, "n_identifiers": 8, "random_cut": "def power_transform(X, method=\"yeo-johnson\", *, standardize=True, copy=True):\n \n pt = PowerTransformer(method=method, stand", "d_id": 75716, "documentation": { "docstring": "Parametric, monotonic transformation to make data more Gaussian-like.\n\n Power transforms are a family of parametric, monotonic transformations\n that are applied to make data more Gaussian-like. This is useful for\n modeling issues related to heteroscedasticity (non-constant variance),\n or other situations where normality is desired.\n\n Currently, power_transform supports the Box-Cox transform and the\n Yeo-Johnson transform. 
The optimal parameter for stabilizing variance and\n minimizing skewness is estimated through maximum likelihood.\n\n Box-Cox requires input data to be strictly positive, while Yeo-Johnson\n supports both positive or negative data.\n\n By default, zero-mean, unit-variance normalization is applied to the\n transformed data.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data to be transformed using a power transformation.\n\n method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'\n The power transform method. Available methods are:\n\n - 'yeo-johnson' [1]_, works with positive and negative values\n - 'box-cox' [2]_, only works with strictly positive values\n\n .. versionchanged:: 0.23\n The default value of the `method` parameter changed from\n 'box-cox' to 'yeo-johnson' in 0.23.\n\n standardize : bool, default=True\n Set to True to apply zero-mean, unit-variance normalization to the\n transformed output.\n\n copy : bool, default=True\n Set to False to perform inplace computation during transformation.\n\n Returns\n -------\n X_trans : ndarray of shape (n_samples, n_features)\n The transformed data.\n\n See Also\n --------\n PowerTransformer : Equivalent transformation with the\n Transformer API (e.g. as part of a preprocessing\n :class:`~sklearn.pipeline.Pipeline`).\n\n quantile_transform : Maps data to a standard normal distribution with\n the parameter `output_distribution='normal'`.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in ``fit``, and maintained\n in ``transform``.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n `.\n\n References\n ----------\n\n .. [1] I.K. Yeo and R.A. Johnson, \"A new family of power transformations to\n improve normality or symmetry.\" Biometrika, 87(4), pp.954-959,\n (2000).\n\n .. [2] G.E.P. Box and D.R. Cox, \"An Analysis of Transformations\", Journal\n of the Royal Statistical Society B, 26, 211-252 (1964).\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import power_transform\n >>> data = [[1, 2], [3, 2], [4, 5]]\n >>> print(power_transform(data, method='box-cox'))\n [[-1.332... -0.707...]\n [ 0.256... -0.707...]\n [ 1.076... 1.414...]]\n\n .. warning:: Risk of data leak.\n Do not use :func:`~sklearn.preprocessing.power_transform` unless you\n know what you are doing. A common mistake is to apply it to the entire\n data *before* splitting into training and test sets. 
This will bias the\n model evaluation because information would have leaked from the test\n set to the training set.\n In general, we recommend using\n :class:`~sklearn.preprocessing.PowerTransformer` within a\n :ref:`Pipeline ` in order to prevent most risks of data\n leaking, e.g.: `pipe = make_pipeline(PowerTransformer(),\n LogisticRegression())`.\n ", "n_words": 421, "vocab_size": 267, "n_whitespaces": 771, "language": "en" } }, { "id": 176172, "commit_id": "dec723f072eb997a497a159dbe8674cd39999ee9", "repo": "networkx", "path": "networkx/generators/small.py", "file_name": "small.py", "fun_name": "desargues_graph", "commit_message": "Docstrings for the small.py module (#5240)\n\n* added description for the first 5 small graphs\r\n\r\n* modified descriptions based on comment and added description for two more functions\r\n\r\n* added doctrings to all the functions\r\n\r\n* Minor touchups.\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def desargues_graph(create_using=None):\n \n G = LCF_graph(20, [5, -5, 9, -9], 5, create_using)\n G.name = \"Desargues Graph\"\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 58, "n_identifiers": 5, "random_cut": "def desargues_graph(create_using=None):\n \n G = LCF_graph(20, [5, -5, 9, -9], 5, create_using)\n G.name = \"Desargues Graph\"\n return G\n\n", "d_id": 41742, "documentation": { "docstring": "\n Returns the Desargues Graph\n\n The Desargues Graph is a non-planar, distance-transitive cubic graph\n with 20 nodes and 30 edges [1]_.\n It is a symmetric graph. It can be represented in LCF notation\n as [5,-5,9,-9]^5 [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Desargues Graph with 20 nodes and 30 edges\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Desargues_graph\n .. 
[2] https://mathworld.wolfram.com/DesarguesGraph.html\n ", "n_words": 77, "vocab_size": 56, "n_whitespaces": 139, "language": "en" } }, { "id": 7282, "commit_id": "6909ae16047d422b94ed4cbd1a753e6b34540ff9", "repo": "ludwig", "path": "ludwig/schema/features/utils.py", "file_name": "utils.py", "fun_name": "get_output_feature_jsonschema", "commit_message": "Input/Output Feature Schema Refactor (#2147)\n\n* Added base files and some initial code\r\n\r\n* More files created, fleshing out binary feature and corresponding encoders\r\n\r\n* Added more schema infra\r\n\r\n* Registered all feature encoders\r\n\r\n* Separated feature utils infra\r\n\r\n* Added all preprocessing classes\r\n\r\n* Filled out rest of schema configs\r\n\r\n* Fixed preproc dataclass\r\n\r\n* Fixed small errors blocking import\r\n\r\n* Tests should be passing\r\n\r\n* Deleted unnecesssary files and removed commented out code\r\n\r\n* fixed flake8\r\n\r\n* Fixed most tests\r\n\r\n* fixed pattern validation\r\n\r\n* Fixed missing val strategies and solved custom encoder update issue\r\n\r\n* Removed preprocessing from features due to schema SSOT\r\n\r\n* fix flake 8\r\n\r\n* fix flake 8\r\n\r\n* fix flake 8\r\n\r\n* Using encoder/decoder registries\r\n\r\n* Address NIT\r\n\r\n* Address feedback\r\n\r\n* Adding constants, remove computed_fill_value, swapped in registries\r\n\r\n* Addressed Feedback\r\n\r\n* Flake8\r\n\r\n* Making tied a constant\r\n\r\n* Added base feature classes\r\n\r\n* Added parameter metadata for computed fill value\r\n\r\n* Small fix\r\n\r\n* Add pattern back into string\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def get_output_feature_jsonschema():\n \n output_feature_types = sorted(list(output_type_registry.keys()))\n return {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"type\": {\"type\": \"string\", \"enum\": output_feature_types},\n \"column\": {\"type\": \"string\"},\n },\n \"additionalProperties\": True,\n \"allOf\": get_output_feature_conds(),\n \"required\": [\"name\", \"type\"],\n },\n }\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 180, "n_words": 36, "vocab_size": 28, "complexity": 1, "nloc": 16, "token_counts": 85, "n_ast_nodes": 167, "n_identifiers": 7, "random_cut": "def get_output_feature_jsonschema():\n \n output_feature_types = sorted(list(output_type_registry.keys()))\n return {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"type\": {\"type\": \"string\", \"enum\": output_feature_types},\n \"column\": {\"type\": \"string\"},\n },\n \"additionalProperties\": True,\n \"allOf\": get_outpu", "d_id": 1175, "documentation": { "docstring": "This function returns a JSON schema structured to only requires a `type` key and then conditionally applies\n a corresponding output feature's field constraints.\n\n Returns: JSON Schema\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 35, "language": "en" } }, { "id": 180242, "commit_id": "2de9ee8bfb43dc1f6d71e16ed1fe18ea164edd4c", "repo": "gradio", "path": "demo/blocks_component_shortcut/run.py", "file_name": "run.py", "fun_name": "greet", "commit_message": "update-shortcut-syntax (#1234)\n\n* 
update-shortcut-syntax\r\n\r\n- fix&update gr.component\r\n- create a demo introducing shortcuts within Blocks\r\n\r\n* update-shortcut-syntax\r\n\r\n- tweaks\r\n\r\n* update-shortcut-syntax\r\n\r\n- tweaks\r\n\r\n* update-shortcut-syntax\r\n\r\n- fix formatting\r\n\r\n* update-shortcut-syntax\r\n\r\n- tweaks\r\n- fix tests\r\n\r\n* update-shortcut-syntax\r\n\r\n- tweaks\r\n- fix tests\r\n\r\n* update-shortcut-syntax\r\n\r\n- tweaks\r\n- fix tests", "code": "def greet(str):\n return str\n\n\nwith gr.Blocks() as demo:\n \n with gr.Row():\n text1 = gr.component(\"textarea\")\n text2 = gr.TextArea()\n text3 = gr.templates.TextArea()\n text1.change(greet, text1, text2)\n text2.change(greet, text2, text3)\n text3.change(greet, text3, text1)\n demo.launch()\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 82, "n_words": 29, "vocab_size": 26, "complexity": 1, "nloc": 2, "token_counts": 7, "n_ast_nodes": 141, "n_identifiers": 14, "random_cut": "def greet(str):\n return str\n\n\nwith gr.Blocks() as demo:\n \n with gr.Row():\n text1 = gr.component(\"textarea\")\n text2 = gr.TextArea()\n text3 = gr.templates.TextArea()\n text1.change(greet, text1, text2)\n text2.change(greet, text2, text3)\n text3.change(greet, text3, text1)\n demo", "d_id": 43114, "documentation": { "docstring": "\n You can make use of str shortcuts you use in Interface within Blocks as well.\n \n Interface shortcut example:\n Interface(greet, \"textarea\", \"textarea\")\n \n You can use \n 1. gr.component()\n 2. gr.templates.Template()\n 3. gr.Template()\n All the templates are listed in gradio/templates.py\n ", "n_words": 37, "vocab_size": 31, "n_whitespaces": 74, "language": "en" } }, { "id": 181817, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/base.py", "file_name": "base.py", "fun_name": "_impute_values", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def _impute_values(self, features):\n \n if self.verbosity > 1:\n print(\"Imputing missing values in feature set\")\n\n if self._fitted_imputer is None:\n self._fitted_imputer = SimpleImputer(strategy=\"median\")\n self._fitted_imputer.fit(features)\n\n return self._fitted_imputer.transform(features)\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 84, "n_words": 23, "vocab_size": 21, "complexity": 3, "nloc": 7, "token_counts": 53, "n_ast_nodes": 91, "n_identifiers": 10, "random_cut": "def _impute_values(self, features):\n \n if self.verbosity > 1:\n ", "d_id": 43601, "documentation": { "docstring": "Impute missing values in a feature set.\n\n Parameters\n ----------\n features: array-like {n_samples, n_features}\n A feature matrix\n\n Returns\n -------\n array-like {n_samples, n_features}\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 81, "language": "en" } }, { "id": 204336, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/sites/shortcuts.py", "file_name": "shortcuts.py", "fun_name": "get_current_site", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_current_site(request):\n \n # Import is inside the function because its point is to avoid importing the\n # Site models when django.contrib.sites isn't installed.\n if 
apps.is_installed(\"django.contrib.sites\"):\n from .models import Site\n\n return Site.objects.get_current(request)\n else:\n return RequestSite(request)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 70, "n_words": 34, "vocab_size": 29, "complexity": 2, "nloc": 6, "token_counts": 35, "n_ast_nodes": 66, "n_identifiers": 9, "random_cut": "def get_current_site(request):\n \n # Import is inside the function because its point is to avoid importing the\n # Site models when django.contrib.sites isn't installed.\n if apps.is_installed(\"django.contrib.sites\"):\n from ", "d_id": 50700, "documentation": { "docstring": "\n Check if contrib.sites is installed and return either the current\n ``Site`` object or a ``RequestSite`` object based on the request.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 30, "language": "en" } }, { "id": 264670, "commit_id": "f13a00b2dd33bffc3048c861b494096df457f212", "repo": "netbox", "path": "netbox/extras/scripts.py", "file_name": "scripts.py", "fun_name": "get_scripts", "commit_message": "Save old JobResults", "code": "def get_scripts(use_names=False):\n \n scripts = OrderedDict()\n # Iterate through all modules within the scripts path. These are the user-created files in which reports are\n # defined.\n for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]):\n # Remove cached module to ensure consistency with filesystem\n if module_name in sys.modules:\n del sys.modules[module_name]\n\n module = importer.find_module(module_name).load_module(module_name)\n if use_names and hasattr(module, 'name'):\n module_name = module.name\n module_scripts = OrderedDict()\n script_order = getattr(module, \"script_order\", ())\n ordered_scripts = [cls for cls in script_order if is_script(cls)]\n unordered_scripts = [cls for _, cls in inspect.getmembers(module, is_script) if cls not in script_order]\n for cls in [*ordered_scripts, *unordered_scripts]:\n module_scripts[cls.__name__] = cls\n if module_scripts:\n scripts[module_name] = module_scripts\n\n return scripts\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 235, "n_words": 103, "vocab_size": 66, "complexity": 11, "nloc": 17, "token_counts": 156, "n_ast_nodes": 247, "n_identifiers": 28, "random_cut": "def get_scripts(use_names=False):\n \n scripts = OrderedDict()\n # Iterate through all modules within the scripts path. These are the user-created files in which reports are\n # defined.\n for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]):\n # Remove cached module to ensu", "d_id": 77782, "documentation": { "docstring": "\n Return a dict of dicts mapping all scripts to their modules. 
Set use_names to True to use each module's human-\n defined name in place of the actual module name.\n ", "n_words": 29, "vocab_size": 26, "n_whitespaces": 39, "language": "en" } }, { "id": 266015, "commit_id": "e7f54c5867cf49126bbf95e28633e4283c2bbcb2", "repo": "netbox", "path": "netbox/extras/plugins/templates.py", "file_name": "templates.py", "fun_name": "render", "commit_message": "Reorganize plugin resources", "code": "def render(self, template_name, extra_context=None):\n \n if extra_context is None:\n extra_context = {}\n elif not isinstance(extra_context, dict):\n raise TypeError(\"extra_context must be a dictionary\")\n\n return get_template(template_name).render({**self.context, **extra_context})\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 24, "vocab_size": 23, "complexity": 3, "nloc": 6, "token_counts": 53, "n_ast_nodes": 87, "n_identifiers": 9, "random_cut": "def render(self, template_name, extra_context=None):\n \n if extra_context is None:\n extra_context = {}\n elif not isinstance(extra_context, dict):\n raise TypeError(\"extra_context must be a dictionary\")\n\n return get_template(template_name).render({**self.context, *", "d_id": 78268, "documentation": { "docstring": "\n Convenience method for rendering the specified Django template using the default context data. An additional\n context dictionary may be passed as `extra_context`.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 44, "language": "en" } }, { "id": 278617, "commit_id": "3613c3defc39c236fb1592c4f7ba1a9cc887343a", "repo": "keras", "path": "keras/applications/mobilenet_v3.py", "file_name": "mobilenet_v3.py", "fun_name": "preprocess_input", "commit_message": "Remove pylint comments.\n\nPiperOrigin-RevId: 452353044", "code": "def preprocess_input(x, data_format=None):\n \n return x\n\n\n@keras_export(\"keras.applications.mobilenet_v3.decode_predictions\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.applications.mobilenet_v3.decode_predictions\")", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 32, "n_identifiers": 4, "random_cut": "def preprocess_input(x, data_format=None):\n \n return x\n\n\n@keras_export(\"keras.applications.", "d_id": 82630, "documentation": { "docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the mobilenet_v3 model\n implementation. Users are no longer required to call this method to\n normalize the input data. This method does nothing and only kept as a\n placeholder to align the API surface between old and new version of model.\n\n Args:\n x: A floating point `numpy.array` or a `tf.Tensor`.\n data_format: Optional data format of the image tensor/array. 
Defaults to\n None, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it,\n it defaults to \"channels_last\").{mode}\n\n Returns:\n Unchanged `numpy.array` or `tf.Tensor`.\n ", "n_words": 95, "vocab_size": 76, "n_whitespaces": 152, "language": "en" } }, { "id": 76338, "commit_id": "1838fbfb1a720e0a286c989dbdea03dfde6af4a5", "repo": "wagtail", "path": "wagtail/admin/templatetags/wagtailadmin_tags.py", "file_name": "wagtailadmin_tags.py", "fun_name": "message_level_tag", "commit_message": "Prevent custom MESSAGE_TAGS settings from leaking into admin styles\n\nFixes a test failure against Django main.\n\nIn #2552, a fix was applied to ensure that the project-level MESSAGE_TAGS setting was ignored, allowing end-users to customise that setting for their own projects without it leaking into Wagtail admin styles.\n\nUnfortunately, the test was flawed (or was broken in a Django regression at some point): in Django <=4.0, MESSAGE_TAGS was not affected by override_settings after the first request, which meant that unless the test was run in isolation, the custom classname that was supposed to flag up the problem never got applied, and the test always succeeded.\n\nThe change to SVG icons broke the intent of #2552, since it used message.level_tag for the icon's classname (and this picks up MESSAGE_TAGS customisations), but due to the broken test this went unnoticed.\n\nhttps://github.com/django/django/commit/24b316536a7ee4c54a54f632de1852aecb4038c0 fixed the override_settings behaviour, making the test fail as it should have done long ago.\n\nHere we adjust the test to not rely on override_settings (so that it does what it's supposed to do on all Django versions), fix a test that gets broken as a side effect (because it's unnecessarily checking message.level_tag), and fixes our SVG-icon-powered message include to bypass the MESSAGE_TAGS setting like the old implementation did.\n\nConfusing? 
Yes.", "code": "def message_level_tag(message):\n \n return MESSAGE_TAGS.get(message.level)\n\n\n@register.simple_tag", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "@register.simple_tag", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 10, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 34, "n_identifiers": 7, "random_cut": "def message_level_tag(message):\n \n return MESSAGE_TAGS.get(message.level)\n", "d_id": 16502, "documentation": { "docstring": "\n Return the tag for this message's level as defined in\n django.contrib.messages.constants.DEFAULT_TAGS, ignoring the project-level\n MESSAGE_TAGS setting (which end-users might customise).\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 33, "language": "en" } }, { "id": 259648, "commit_id": "aeeac1c1d634dc80abc93fb30b3fe48e1d709b64", "repo": "scikit-learn", "path": "sklearn/metrics/_regression.py", "file_name": "_regression.py", "fun_name": "_check_reg_targets", "commit_message": "ENH add D2 pinbal score and D2 absolute error score (#22118)", "code": "def _check_reg_targets(y_true, y_pred, multioutput, dtype=\"numeric\"):\n \n check_consistent_length(y_true, y_pred)\n y_true = check_array(y_true, ensure_2d=False, dtype=dtype)\n y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)\n\n if y_true.ndim == 1:\n y_true = y_true.reshape((-1, 1))\n\n if y_pred.ndim == 1:\n y_pred = y_pred.reshape((-1, 1))\n\n if y_true.shape[1] != y_pred.shape[1]:\n raise ValueError(\n \"y_true and y_pred have different number of output ({0}!={1})\".format(\n y_true.shape[1], y_pred.shape[1]\n )\n )\n\n n_outputs = y_true.shape[1]\n allowed_multioutput_str = (\"raw_values\", \"uniform_average\", \"variance_weighted\")\n if isinstance(multioutput, str):\n if multioutput not in allowed_multioutput_str:\n raise ValueError(\n \"Allowed 'multioutput' string values are {}. 
\"\n \"You provided multioutput={!r}\".format(\n allowed_multioutput_str, multioutput\n )\n )\n elif multioutput is not None:\n multioutput = check_array(multioutput, ensure_2d=False)\n if n_outputs == 1:\n raise ValueError(\"Custom weights are useful only in multi-output cases.\")\n elif n_outputs != len(multioutput):\n raise ValueError(\n \"There must be equally many custom weights (%d) as outputs (%d).\"\n % (len(multioutput), n_outputs)\n )\n y_type = \"continuous\" if n_outputs == 1 else \"continuous-multioutput\"\n\n return y_type, y_true, y_pred, multioutput\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 422, "n_words": 141, "vocab_size": 93, "complexity": 10, "nloc": 35, "token_counts": 234, "n_ast_nodes": 371, "n_identifiers": 19, "random_cut": "def _check_reg_targets(y_true, y_pred, multioutput, dtype=\"numeric\"):\n \n check_cons", "d_id": 75848, "documentation": { "docstring": "Check that y_true and y_pred belong to the same regression task.\n\n Parameters\n ----------\n y_true : array-like\n\n y_pred : array-like\n\n multioutput : array-like or string in ['raw_values', uniform_average',\n 'variance_weighted'] or None\n None is accepted due to backward compatibility of r2_score().\n\n dtype : str or list, default=\"numeric\"\n the dtype argument passed to check_array.\n\n Returns\n -------\n type_true : one of {'continuous', continuous-multioutput'}\n The type of the true target data, as output by\n 'utils.multiclass.type_of_target'.\n\n y_true : array-like of shape (n_samples, n_outputs)\n Ground truth (correct) target values.\n\n y_pred : array-like of shape (n_samples, n_outputs)\n Estimated target values.\n\n multioutput : array-like of shape (n_outputs) or string in ['raw_values',\n uniform_average', 'variance_weighted'] or None\n Custom output weights if ``multioutput`` is array-like or\n just the corresponding argument if ``multioutput`` is a\n correct keyword.\n ", "n_words": 124, "vocab_size": 70, "n_whitespaces": 240, "language": "en" } }, { "id": 140391, "commit_id": "820cf4fdcae6b274588e23b312d5255d1b418e10", "repo": "ray", "path": "python/ray/serve/deployment.py", "file_name": "deployment.py", "fun_name": "bind", "commit_message": "[Deployment Graph] Simplify our use of DeploymentSchema (#25202)", "code": "def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]:\n \n\n copied_self = copy(self)\n copied_self._func_or_class = \"dummpy.module\"\n schema_shell = deployment_to_schema(copied_self)\n\n if inspect.isfunction(self._func_or_class):\n return FunctionNode(\n self._func_or_class,\n args, # Used to bind and resolve DAG only, can take user input\n kwargs, # Used to bind and resolve DAG only, can take user input\n self._ray_actor_options or dict(),\n other_args_to_resolve={\n \"deployment_schema\": schema_shell,\n \"is_from_serve_deployment\": True,\n },\n )\n else:\n return ClassNode(\n self._func_or_class,\n args,\n kwargs,\n cls_options=self._ray_actor_options or dict(),\n other_args_to_resolve={\n \"deployment_schema\": schema_shell,\n \"is_from_serve_deployment\": True,\n },\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 417, "n_words": 73, "vocab_size": 46, "complexity": 4, "nloc": 33, "token_counts": 128, "n_ast_nodes": 183, "n_identifiers": 18, "random_cut": "def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]:\n ", 
"d_id": 31945, "documentation": { "docstring": "Bind the provided arguments and return a class or function node.\n\n The returned bound deployment can be deployed or bound to other\n deployments to create a deployment graph.\n ", "n_words": 28, "vocab_size": 23, "n_whitespaces": 49, "language": "en" } }, { "id": 101184, "commit_id": "32950897376b48e0f08b46385602e4df902cf49e", "repo": "faceswap", "path": "tools/manual/faceviewer/viewport.py", "file_name": "viewport.py", "fun_name": "_obtain_mask", "commit_message": "lib.detected_face.Mask\n - Add source + target offset and coverage to set_sub_crop method", "code": "def _obtain_mask(cls, detected_face, mask_type):\n \n mask = detected_face.mask.get(mask_type)\n if not mask:\n return None\n if mask.stored_centering != \"face\":\n face = AlignedFace(detected_face.landmarks_xy)\n mask.set_sub_crop(face.pose.offset[mask.stored_centering],\n face.pose.offset[\"face\"],\n centering=\"face\")\n return mask.mask.squeeze()\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 150, "n_words": 24, "vocab_size": 21, "complexity": 3, "nloc": 10, "token_counts": 77, "n_ast_nodes": 126, "n_identifiers": 15, "random_cut": "def _obtain_mask(cls, detected_face, mask_type):\n \n mas", "d_id": 20605, "documentation": { "docstring": " Obtain the mask for the correct \"face\" centering that is used in the thumbnail display.\n\n Parameters\n -----------\n detected_face: :class:`lib.align.DetectedFace`\n The Detected Face object to obtain the mask for\n mask_type: str\n The type of mask to obtain\n\n Returns\n -------\n :class:`numpy.ndarray` or ``None``\n The single channel mask of requested mask type, if it exists, otherwise ``None``\n ", "n_words": 54, "vocab_size": 40, "n_whitespaces": 144, "language": "en" } }, { "id": 322908, "commit_id": "93cae49c0c572b5c1ac972759140fbe924b0374d", "repo": "PaddleNLP", "path": "examples/model_interpretation/task/senti/rnn/model.py", "file_name": "model.py", "fun_name": "forward", "commit_message": "Add NLP model interpretation (#1752)\n\n* upload NLP interpretation\r\n\r\n* fix problems and relocate project\r\n\r\n* remove abandoned picture\r\n\r\n* remove abandoned picture\r\n\r\n* fix dead link in README\r\n\r\n* fix dead link in README\r\n\r\n* fix code style problems\r\n\r\n* fix CR round 1\r\n\r\n* remove .gitkeep files\r\n\r\n* fix code style\r\n\r\n* fix file encoding problem\r\n\r\n* fix code style\r\n\r\n* delete duplicated files due to directory rebuild\r\n\r\n* fix CR round 2\r\n\r\n* fix code style\r\n\r\n* fix ernie tokenizer\r\n\r\n* fix code style\r\n\r\n* fix problem from CR round 1\r\n\r\n* fix bugs\r\n\r\n* fix README\r\n\r\n* remove duplicated files\r\n\r\n* deal with diff of old and new tokenizer results\r\n\r\n* fix CR round 4\r\n\r\n* fix code style\r\n\r\n* add missing dependence\r\n\r\n* fix broken import path\r\n\r\n* move some data file to cloud\r\n\r\n* MRC upper case to lower case\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: binlinquge \r\nCo-authored-by: Guo Sheng ", "code": "def forward(self, input, mask=None):\n \n forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2)\n # elementwise-sum forward_x and backward_x\n # Shape: (batch_size, max_seq_len, hidden_size)\n h = paddle.add_n([forward_input, backward_input])\n # Shape: (batch_size, hidden_size, 1)\n att_weight = self.att_weight.tile(\n repeat_times=(paddle.shape(h)[0], 1, 1))\n # Shape: (batch_size, max_seq_len, 1)\n 
att_score = paddle.bmm(paddle.tanh(h), att_weight)\n if mask is not None:\n # mask, remove the effect of 'PAD'\n mask = paddle.cast(mask, dtype='float32')\n mask = mask.unsqueeze(axis=-1)\n inf_tensor = paddle.full(\n shape=mask.shape, dtype='float32', fill_value=-INF)\n att_score = paddle.multiply(att_score, mask) + paddle.multiply(\n inf_tensor, (1 - mask))\n # Shape: (batch_size, max_seq_len, 1)\n att_weight = F.softmax(att_score, axis=1)\n # Shape: (batch_size, lstm_hidden_size)\n reps = paddle.bmm(h.transpose(perm=(0, 2, 1)),\n att_weight).squeeze(axis=-1)\n reps = paddle.tanh(reps)\n return reps, att_weight\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 337, "n_words": 104, "vocab_size": 70, "complexity": 2, "nloc": 18, "token_counts": 211, "n_ast_nodes": 329, "n_identifiers": 33, "random_cut": "def forward(self, input, mask=None):\n \n forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2)\n # elementwise-sum forward_x and backward_x\n # Shape: (batch_size, max_seq_len, hidden_size)\n h = paddle.add_n([forward_input, backward_input])\n # Shape: (batch_size, hidden_size, 1)\n att_weight = self.att_weight.tile(\n repeat_times=(paddle.shape(h)[0], 1, 1))\n # Shape: (batch_size, max_seq_len, 1)\n att_score = paddle.bmm(paddle.tanh(h), att_weight)\n if mask is not None:\n # mask, remove the effect of 'PAD'\n mask", "d_id": 118284, "documentation": { "docstring": "\n Args:\n input (paddle.Tensor) of shape (batch, seq_len, input_size): Tensor containing the features of the input sequence.\n mask (paddle.Tensor) of shape (batch, seq_len) :\n Tensor is a bool tensor, whose each element identifies whether the input word id is pad token or not. 
\n Defaults to `None`.\n ", "n_words": 45, "vocab_size": 34, "n_whitespaces": 113, "language": "en" } }, { "id": 244108, "commit_id": "4bb184bae070f37febb10f82bee3a217dc1ad7c5", "repo": "mmdetection", "path": "mmdet/models/dense_heads/maskformer_head.py", "file_name": "maskformer_head.py", "fun_name": "simple_test", "commit_message": "[Enhance] MaskFormer refactor (#7471)\n\n* maskformer refactor\r\n\r\nupdate docstring\r\n\r\nupdate docstring\r\n\r\nupdate unit test\r\n\r\nupdate unit test\r\n\r\nupdate unit test\r\n\r\n* remove redundant code\r\n\r\n* update unit test", "code": "def simple_test(self, feats, img_metas, **kwargs):\n \n all_cls_scores, all_mask_preds = self(feats, img_metas)\n mask_cls_results = all_cls_scores[-1]\n mask_pred_results = all_mask_preds[-1]\n\n # upsample masks\n img_shape = img_metas[0]['batch_input_shape']\n mask_pred_results = F.interpolate(\n mask_pred_results,\n size=(img_shape[0], img_shape[1]),\n mode='bilinear',\n align_corners=False)\n\n return mask_cls_results, mask_pred_results\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 133, "n_words": 33, "vocab_size": 27, "complexity": 1, "nloc": 11, "token_counts": 80, "n_ast_nodes": 125, "n_identifiers": 15, "random_cut": "def simple_test(self, feats, img_metas, **kwargs):\n \n all_cls_scores, all_mask_preds = self(feats, img_metas)\n mask_cls_results = all_cls_scores[-1]\n mask_pred_results = all_mask_preds[-1]\n\n # upsample masks\n img_shape = img_metas[0]['batch_input_shape']\n mask_pred_results = F.interpolate(\n mask_pred_results,\n size=(img_shape[0], img_shape[1]),\n mode='bilinear',\n align_corners=False)\n\n return mask_cls_results, mask_pred_results\n", "d_id": 70242, "documentation": { "docstring": "Test without augmentaton.\n\n Args:\n feats (list[Tensor]): Multi-level features from the\n upstream network, each is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple: A tuple contains two tensors.\n\n - mask_cls_results (Tensor): Mask classification logits,\\\n shape (batch_size, num_queries, cls_out_channels).\n Note `cls_out_channels` should includes background.\n - mask_pred_results (Tensor): Mask logits, shape \\\n (batch_size, num_queries, h, w).\n ", "n_words": 55, "vocab_size": 49, "n_whitespaces": 191, "language": "en" } }, { "id": 208731, "commit_id": "a72418e2dcdfc3c91f70d724d16d2691a41c9c24", "repo": "ipython", "path": "IPython/core/ultratb.py", "file_name": "ultratb.py", "fun_name": "_format_list", "commit_message": "Restore lineno's for Input mapped files (#13560)\n\n* Implement lineno's for Input mapped files\r\n* Adopt In [123], line 123 format\r\n* Revert \"Set co_name for cells run line by line. Fixes https://github.com/ipython/ipykernel/issues/841\"\r\n (This reverts commit d11e987f174a15f1640f8006c86f58d884c3faa4.)\r\n* Omit mention of \", in \" for input tracebacks\r\n* Input cell -> Cell\r\n* Remove from traceback doctests\r\n* Use f-string for `in ...' 
format\r\n* Simplify _format_list logic, converting to f-strings", "code": "def _format_list(self, extracted_list):\n \n\n Colors = self.Colors\n list = []\n for ind, (filename, lineno, name, line) in enumerate(extracted_list):\n normalCol, nameCol, fileCol, lineCol = (\n # Emphasize the last entry\n (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line)\n if ind == len(extracted_list) - 1\n else (Colors.Normal, Colors.name, Colors.filename, \"\")\n )\n\n fns = _format_filename(filename, fileCol, normalCol, lineno=lineno)\n item = f\"{normalCol} {fns}\"\n\n if name != \"\":\n item += f\" in {nameCol}{name}{normalCol}\\n\"\n else:\n item += \"\\n\"\n if line:\n item += f\"{lineCol} {line.strip()}{normalCol}\\n\"\n list.append(item)\n\n return list\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 308, "n_words": 76, "vocab_size": 61, "complexity": 5, "nloc": 19, "token_counts": 134, "n_ast_nodes": 248, "n_identifiers": 25, "random_cut": "def _format_list(self, extracted_list):\n \n\n Colors = self.Colors\n list = []\n for ind, (filename, lineno, name, line) in enumerate(extracted_list):\n normalCol, nameCol, fileCol, lineCol = (\n # Emphasize the last entry\n (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line)\n if ind == len(extracted_list) - 1\n else (Colors.Normal, Colors.name, Colors.filename, \"\")\n )\n\n fns = _format_filename(filename, fileCol, normalCol, lineno=lineno)\n item = f\"{normalCol} {fns}\"\n\n if name != \"\":\n item += f\" in {nameCol}{name}{normalCol}\\n\"\n ", "d_id": 52490, "documentation": { "docstring": "Format a list of traceback entry tuples for printing.\n\n Given a list of tuples as returned by extract_tb() or\n extract_stack(), return a list of strings ready for printing.\n Each string in the resulting list corresponds to the item with the\n same index in the argument list. 
Each string ends in a newline;\n the strings may contain internal newlines as well, for those items\n whose source text line is not None.\n\n Lifted almost verbatim from traceback.py\n ", "n_words": 75, "vocab_size": 53, "n_whitespaces": 132, "language": "en" } }, { "id": 196030, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/calculus/euler.py", "file_name": "euler.py", "fun_name": "euler_equations", "commit_message": "Updated import locations", "code": "def euler_equations(L, funcs=(), vars=()):\n r\n\n funcs = tuple(funcs) if iterable(funcs) else (funcs,)\n\n if not funcs:\n funcs = tuple(L.atoms(Function))\n else:\n for f in funcs:\n if not isinstance(f, Function):\n raise TypeError('Function expected, got: %s' % f)\n\n vars = tuple(vars) if iterable(vars) else (vars,)\n\n if not vars:\n vars = funcs[0].args\n else:\n vars = tuple(sympify(var) for var in vars)\n\n if not all(isinstance(v, Symbol) for v in vars):\n raise TypeError('Variables are not symbols, got %s' % vars)\n\n for f in funcs:\n if not vars == f.args:\n raise ValueError(\"Variables %s do not match args: %s\" % (vars, f))\n\n order = max([len(d.variables) for d in L.atoms(Derivative)\n if d.expr in funcs] + [0])\n\n eqns = []\n for f in funcs:\n eq = diff(L, f)\n for i in range(1, order + 1):\n for p in combinations_with_replacement(vars, i):\n eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p)\n new_eq = Eq(eq, 0)\n if isinstance(new_eq, Eq):\n eqns.append(new_eq)\n\n return eqns\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 351, "n_words": 146, "vocab_size": 84, "complexity": 18, "nloc": 86, "token_counts": 281, "n_ast_nodes": 436, "n_identifiers": 37, "random_cut": "def euler_equations(L, funcs=(), vars=()):\n r\n\n funcs = tuple(funcs) if iterable(funcs) else (funcs,)\n\n if not funcs:\n funcs = tuple(L.atoms(Function))\n else:\n for f in funcs:\n if not isinstance(f, Function):\n raise TypeError('Function expected, got: %s' % f)\n\n vars = tuple(vars) if iterable(vars) else (vars,)\n\n if not vars:\n vars = funcs[0].args\n else:\n vars = tuple(sympify(var) for var in vars)\n\n if not all(isinstance(v, Symbol) for v in vars):\n raise TypeError('Variables are not symbols, got %s' % vars)\n\n for f in funcs:\n ", "d_id": 47530, "documentation": { "docstring": "\n Find the Euler-Lagrange equations [1]_ for a given Lagrangian.\n\n Parameters\n ==========\n\n L : Expr\n The Lagrangian that should be a function of the functions listed\n in the second argument and their derivatives.\n\n For example, in the case of two functions `f(x,y)`, `g(x,y)` and\n two independent variables `x`, `y` the Lagrangian would have the form:\n\n .. math:: L\\left(f(x,y),g(x,y),\\frac{\\partial f(x,y)}{\\partial x},\n \\frac{\\partial f(x,y)}{\\partial y},\n \\frac{\\partial g(x,y)}{\\partial x},\n \\frac{\\partial g(x,y)}{\\partial y},x,y\\right)\n\n In many cases it is not necessary to provide anything, except the\n Lagrangian, it will be auto-detected (and an error raised if this\n couldn't be done).\n\n funcs : Function or an iterable of Functions\n The functions that the Lagrangian depends on. 
The Euler equations\n are differential equations for each of these functions.\n\n vars : Symbol or an iterable of Symbols\n The Symbols that are the independent variables of the functions.\n\n Returns\n =======\n\n eqns : list of Eq\n The list of differential equations, one for each function.\n\n Examples\n ========\n\n >>> from sympy import euler_equations, Symbol, Function\n >>> x = Function('x')\n >>> t = Symbol('t')\n >>> L = (x(t).diff(t))**2/2 - x(t)**2/2\n >>> euler_equations(L, x(t), t)\n [Eq(-x(t) - Derivative(x(t), (t, 2)), 0)]\n >>> u = Function('u')\n >>> x = Symbol('x')\n >>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2\n >>> euler_equations(L, u(t, x), [t, x])\n [Eq(-Derivative(u(t, x), (t, 2)) + Derivative(u(t, x), (x, 2)), 0)]\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation\n\n ", "n_words": 224, "vocab_size": 139, "n_whitespaces": 454, "language": "en" } }, { "id": 81161, "commit_id": "452744b67e02823879e722fe574984a2d760ed60", "repo": "awx", "path": "awx/main/tasks/callback.py", "file_name": "callback.py", "fun_name": "delay_update", "commit_message": "Delay update of artifacts and error fields until final job save (#11832)\n\n* Delay update of artifacts until final job save\r\n\r\nSave tracebacks from receptor module to callback object\r\n\r\nMove receptor traceback check up to be more logical\r\n\r\nUse new mock_me fixture to avoid DB call with me method\r\n\r\nUpdate the special runner message to the delay_update pattern\r\n\r\n* Move special runner message into post-processing of callback fields", "code": "def delay_update(self, skip_if_already_set=False, **kwargs):\n \n for key, value in kwargs.items():\n if key in self.extra_update_fields and skip_if_already_set:\n continue\n elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'):\n if str(value) in self.extra_update_fields.get(key, ''):\n continue # if already set, avoid duplicating messages\n # In the case of these fields, we do not want to lose any prior information, so combine values\n self.extra_update_fields[key] = '\\n'.join([str(self.extra_update_fields[key]), str(value)])\n else:\n self.extra_update_fields[key] = value\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 207, "n_words": 65, "vocab_size": 50, "complexity": 7, "nloc": 10, "token_counts": 105, "n_ast_nodes": 174, "n_identifiers": 11, "random_cut": "def delay_update(self, skip_if_already_set=False, **kwargs):\n \n for key, value in kwargs.items():\n if key in self.extra_update_fields and skip_if_already_set:\n continue\n elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'):\n if str(value) in self.extra_update_fields.get(key, ''):\n continue # if already set, avoid duplicating messages\n # In the case of these fields, we do not want to lose any prior information, so combine valu", "d_id": 17162, "documentation": { "docstring": "Stash fields that should be saved along with the job status change", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 61179, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/filesystem.py", "file_name": "filesystem.py", "fun_name": "adjacent_tmp_file", "commit_message": "upd; format", "code": "def adjacent_tmp_file(path, **kwargs):\n # type: (str, **Any) -> 
Iterator[BinaryIO]\n \n with NamedTemporaryFile(\n delete=False,\n dir=os.path.dirname(path),\n prefix=os.path.basename(path),\n suffix=\".tmp\",\n **kwargs,\n ) as f:\n result = cast(BinaryIO, f)\n try:\n yield result\n finally:\n result.flush()\n os.fsync(result.fileno())\n\n\n# Tenacity raises RetryError by default, explictly raise the original exception\n_replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25))\n\nreplace = _replace_retry(os.replace)\n\n\n# test_writable_dir and _test_writable_dir_win are copied from Flit,\n# with the author's agreement to also place them under pip's license.", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 164, "n_words": 68, "vocab_size": 60, "complexity": 2, "nloc": 14, "token_counts": 78, "n_ast_nodes": 180, "n_identifiers": 26, "random_cut": "def adjacent_tmp_file(path, **kwargs):\n # type: (str, **Any) -> Iterator[BinaryIO]\n \n with NamedTemporaryFile(\n delete=False,\n dir=os.path.dirname(path),\n prefix=os.path.basename(path),\n suffix=\".tmp\",\n **kwargs,\n ) as f:\n result = cast(BinaryIO, f)\n try:\n yield result\n finally:\n result.flush()\n os.fsync(result.fileno())\n\n\n# Tenacity raises RetryError by default, explictly raise the original exception\n_replace_retry = retry(reraise=True,", "d_id": 12428, "documentation": { "docstring": "Return a file-like object pointing to a tmp file next to path.\n\n The file is created securely and is ensured to be written to disk\n after the context reaches its end.\n\n kwargs will be passed to tempfile.NamedTemporaryFile to control\n the way the temporary file will be opened.\n ", "n_words": 47, "vocab_size": 33, "n_whitespaces": 62, "language": "en" } }, { "id": 20046, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distro.py", "file_name": "distro.py", "fun_name": "_lsb_release_info", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _lsb_release_info(self):\n # type: () -> Dict[str, str]\n \n if not self.include_lsb:\n return {}\n with open(os.devnull, \"wb\") as devnull:\n try:\n cmd = (\"lsb_release\", \"-a\")\n stdout = subprocess.check_output(cmd, stderr=devnull)\n # Command not found or lsb_release returned error\n except (OSError, subprocess.CalledProcessError):\n return {}\n content = self._to_str(stdout).splitlines()\n return self._parse_lsb_release_content(content)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 176, "n_words": 45, "vocab_size": 38, "complexity": 3, "nloc": 11, "token_counts": 79, "n_ast_nodes": 138, "n_identifiers": 17, "random_cut": "def _lsb_release_info(self):\n # type: () -> Dict[str, str]\n \n if not self.include_lsb:\n return {}\n with open(os.devnull, \"wb\") as devnull:\n try:\n cmd = (\"lsb_release\", 
\"-a\")\n stdout = subprocess.check_output(cmd, stderr=devnull)\n # Command not found or lsb_release returned error\n except (OSError, subprocess.CalledProcessError):\n return {}\n content = self._to_str(stdout).splitlines()\n return self._parse_lsb_release_content(", "d_id": 3195, "documentation": { "docstring": "\n Get the information items from the lsb_release command output.\n\n Returns:\n A dictionary containing all information items.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 49, "language": "en" } }, { "id": 159507, "commit_id": "9fc462da870f69f9976be3bc081675844b9f64c2", "repo": "rasa", "path": "rasa/engine/graph.py", "file_name": "graph.py", "fun_name": "as_dict", "commit_message": "fix type annotation in rasa.engine", "code": "def as_dict(self) -> Dict[Text, Any]:\n \n serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {\"nodes\": {}}\n for node_name, node in self.nodes.items():\n serializable = dataclasses.asdict(node)\n\n # Classes are not JSON serializable (surprise)\n serializable[\"uses\"] = f\"{node.uses.__module__}.{node.uses.__name__}\"\n\n serializable_graph_schema[\"nodes\"][node_name] = serializable\n\n return serializable_graph_schema\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 35, "vocab_size": 28, "complexity": 2, "nloc": 12, "token_counts": 72, "n_ast_nodes": 137, "n_identifiers": 16, "random_cut": "def as_dict(self) -> Dict[Text, Any]:\n \n serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {\"nodes\": {}}\n for node", "d_id": 38299, "documentation": { "docstring": "Returns graph schema in a serializable format.\n\n Returns:\n The graph schema in a format which can be dumped as JSON or other formats.\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 48, "language": "en" } }, { "id": 205284, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/executor.py", "file_name": "executor.py", "fun_name": "_create_project_state", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _create_project_state(self, with_applied_migrations=False):\n \n state = ProjectState(real_apps=self.loader.unmigrated_apps)\n if with_applied_migrations:\n # Create the forwards plan Django would follow on an empty database\n full_plan = self.migration_plan(\n self.loader.graph.leaf_nodes(), clean_start=True\n )\n applied_migrations = {\n self.loader.graph.nodes[key]\n for key in self.loader.applied_migrations\n if key in self.loader.graph.nodes\n }\n for migration, _ in full_plan:\n if migration in applied_migrations:\n migration.mutate_state(state, preserve=False)\n return state\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 240, "n_words": 52, "vocab_size": 42, "complexity": 6, "nloc": 15, "token_counts": 101, "n_ast_nodes": 157, "n_identifiers": 20, "random_cut": "def _create_project_state(self, with_applied_migrations=False):\n \n state = ProjectState(real_apps=self.loader.unmigrated_apps)\n if with_applied_migrations:\n # Create the forwards plan Django would follow on an empty database\n full_plan = self.migration_plan(\n self.loader.graph.leaf_nodes(), clean_start=True\n", "d_id": 51065, "documentation": { "docstring": "\n Create a project state including all the applications without\n migrations and applied migrations if with_applied_migrations=True.\n ", "n_words": 15, 
"vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 208224, "commit_id": "5092598fb88c1f18e3fe709861cdb31df90a7264", "repo": "celery", "path": "celery/worker/control.py", "file_name": "control.py", "fun_name": "revoke", "commit_message": "New control command `revoke_by_stamped_headers` (#7838)\n\n* Added pytest-order==1.0.1\r\n\r\n* Added a new control command `revoke_by_stamped_headers` to revoke tasks by their\r\nstamped header instead of task id (terminate only works on running tasks in memory)", "code": "def revoke(state, task_id, terminate=False, signal=None, **kwargs):\n \n # pylint: disable=redefined-outer-name\n # XXX Note that this redefines `terminate`:\n # Outside of this scope that is a function.\n # supports list argument since 3.1\n task_ids, task_id = set(maybe_list(task_id) or []), None\n task_ids = _revoke(state, task_ids, terminate, signal, **kwargs)\n return ok(f'tasks {task_ids} flagged as revoked')\n\n\n@control_command(\n variadic='headers',\n signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]',\n)", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "@control_command(\n variadic='headers',\n signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]',\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 90, "n_words": 58, "vocab_size": 51, "complexity": 2, "nloc": 4, "token_counts": 56, "n_ast_nodes": 115, "n_identifiers": 14, "random_cut": "def revoke(state, task_id, terminate=False, signal=None, **kwargs):\n \n # pylint: disable=redefined-outer-name\n # XXX Note that this redefines `terminate`:\n # Outside of this scope that is a function.\n # supports list argument since 3.1\n task_ids, task_", "d_id": 52249, "documentation": { "docstring": "Revoke task by task id (or list of ids).\n\n Keyword Arguments:\n terminate (bool): Also terminate the process if the task is active.\n signal (str): Name of signal to use for terminate (e.g., ``KILL``).\n ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 53, "language": "en" } }, { "id": 261119, "commit_id": "affb0cb49412eb5992d2fad0d765b50a2db1344c", "repo": "scikit-learn", "path": "sklearn/utils/sparsefuncs.py", "file_name": "sparsefuncs.py", "fun_name": "inplace_swap_row_csc", "commit_message": "DOC Ensures that inplace_swap_row_csc passes numpydoc validation (#24513)", "code": "def inplace_swap_row_csc(X, m, n):\n \n for t in [m, n]:\n if isinstance(t, np.ndarray):\n raise TypeError(\"m and n should be valid integers\")\n\n if m < 0:\n m += X.shape[0]\n if n < 0:\n n += X.shape[0]\n\n m_mask = X.indices == m\n X.indices[X.indices == n] = m\n X.indices[m_mask] = n\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 100, "n_words": 47, "vocab_size": 32, "complexity": 5, "nloc": 11, "token_counts": 87, "n_ast_nodes": 135, "n_identifiers": 12, "random_cut": "def inplace_swap_row_csc(X, m, n):\n \n for t in [m, n]:\n if isinstance(t, np.ndarray):\n raise TypeError(\"m and n should be valid integers\")\n\n if m < 0:\n m += X.shape[0]\n if n < 0:\n n += X.shape[0]\n\n m_mask = X.indices == m\n X.indices[X.i", "d_id": 76660, "documentation": { "docstring": "Swap two rows of a CSC matrix in-place.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n Matrix whose two rows are to be swapped. 
It should be of\n CSC format.\n\n m : int\n Index of the row of X to be swapped.\n\n n : int\n Index of the row of X to be swapped.\n ", "n_words": 56, "vocab_size": 31, "n_whitespaces": 102, "language": "en" } }, { "id": 73591, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/table_block/tests.py", "file_name": "tests.py", "fun_name": "test_render_empty_table", "commit_message": "Reformat with black", "code": "def test_render_empty_table(self):\n \n block = TableBlock()\n result = block.render(\n {\n \"first_row_is_table_header\": False,\n \"first_col_is_header\": False,\n \"data\": [[None, None, None], [None, None, None], [None, None, None]],\n }\n )\n expected = \n self.assertHTMLEqual(result, expected)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 139, "n_words": 29, "vocab_size": 22, "complexity": 1, "nloc": 19, "token_counts": 67, "n_ast_nodes": 105, "n_identifiers": 8, "random_cut": "def test_render_empty_table(self):\n \n ", "d_id": 16058, "documentation": { "docstring": "\n An empty table should render okay.\n \n \n \n \n \n \n \n
    \n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 145, "language": "en" } }, { "id": 196376, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/dense.py", "file_name": "dense.py", "fun_name": "rot_axis3", "commit_message": "Moved imports to higher level", "code": "def rot_axis3(theta):\n \n ct = cos(theta)\n st = sin(theta)\n lil = ((ct, st, 0),\n (-st, ct, 0),\n (0, 0, 1))\n return Matrix(lil)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 76, "n_identifiers": 8, "random_cut": "def rot_axis3(theta):\n \n ct = cos(theta)", "d_id": 47876, "documentation": { "docstring": "Returns a rotation matrix for a rotation of theta (in radians) about\n the 3-axis.\n\n Examples\n ========\n\n >>> from sympy import pi, rot_axis3\n\n A rotation of pi/3 (60 degrees):\n\n >>> theta = pi/3\n >>> rot_axis3(theta)\n Matrix([\n [ 1/2, sqrt(3)/2, 0],\n [-sqrt(3)/2, 1/2, 0],\n [ 0, 0, 1]])\n\n If we rotate by pi/2 (90 degrees):\n\n >>> rot_axis3(pi/2)\n Matrix([\n [ 0, 1, 0],\n [-1, 0, 0],\n [ 0, 0, 1]])\n\n See Also\n ========\n\n rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)\n about the 1-axis\n rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)\n about the 2-axis\n ", "n_words": 100, "vocab_size": 49, "n_whitespaces": 208, "language": "en" } }, { "id": 196092, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/graycode.py", "file_name": "graycode.py", "fun_name": "current", "commit_message": "Updated import locations", "code": "def current(self):\n \n rv = self._current or '0'\n if not isinstance(rv, str):\n rv = bin(rv)[2:]\n return rv.rjust(self.n, '0')\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 56, "n_words": 17, "vocab_size": 15, "complexity": 3, "nloc": 5, "token_counts": 43, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def current(self):\n \n rv = self._current or '0'\n if not isinstance(rv, str):\n rv = bin(rv)[2:]\n return rv.rjust(self.n, '0')", "d_id": 47592, "documentation": { "docstring": "\n Returns the currently referenced Gray code as a bit string.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> GrayCode(3, start='100').current\n '100'\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 71, "language": "en" } }, { "id": 335307, "commit_id": "ac796924dff7241d9b516ea27faaa7b2f12434fd", "repo": "diffusers", "path": "src/diffusers/models/unet_sde_score_estimation.py", "file_name": "unet_sde_score_estimation.py", "fun_name": "upsample_conv_2d", "commit_message": "add score estimation model", "code": "def upsample_conv_2d(x, w, k=None, factor=2, gain=1):\n \n\n assert isinstance(factor, int) and factor >= 1\n\n # Check weight shape.\n assert len(w.shape) == 4\n convH = w.shape[2]\n convW = w.shape[3]\n inC = w.shape[1]\n\n assert convW == convH\n\n # Setup filter kernel.\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * (gain * (factor**2))\n p = (k.shape[0] - factor) - (convW - 1)\n\n stride = (factor, factor)\n\n # Determine data dimensions.\n stride = [1, 1, factor, factor]\n output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW)\n output_padding = (\n 
output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH,\n output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW,\n )\n assert output_padding[0] >= 0 and output_padding[1] >= 0\n num_groups = _shape(x, 1) // inC\n\n # Transpose weights.\n w = torch.reshape(w, (num_groups, -1, inC, convH, convW))\n w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4)\n w = torch.reshape(w, (num_groups * inC, -1, convH, convW))\n\n x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0)\n # Original TF code.\n # x = tf.nn.conv2d_transpose(\n # x,\n # w,\n # output_shape=output_shape,\n # strides=stride,\n # padding='VALID',\n # data_format=data_format)\n # JAX equivalent\n\n return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1))\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 360, "n_words": 210, "vocab_size": 114, "complexity": 4, "nloc": 25, "token_counts": 356, "n_ast_nodes": 547, "n_identifiers": 30, "random_cut": "def upsample_conv_2d(x, w, k=None, factor=2, gain=1):\n \n\n assert isinstance(factor, int) and factor >= 1\n\n # Check weight shape.\n assert len(w.shape) == 4\n convH = w.shape[2]\n convW = w.shape[3]\n inC = w.shape[1]\n\n assert convW == convH\n\n # Setup filter kernel.\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * (gain * (factor**2))\n p = (k.shape[0] - factor) - (convW - 1)\n\n stride = (factor, factor)\n\n # Determine data dimensions.\n stride = [1, 1, factor, factor]\n output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW)\n output_padding = (\n output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH,\n output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW,\n )\n assert output_padding[0] >= 0 and output_padding[1] >= 0\n num_groups = _shape(x, 1) // inC\n\n # Transpose weights.\n w = torch.reshape(w, (num_groups, -1, inC, convH, convW))\n w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4)\n w = torch.reshape(w, (num_groups * inC, -1, convH, convW))\n\n x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0)\n # Original TF code.\n # x", "d_id": 120738, "documentation": { "docstring": "Fused `upsample_2d()` followed by `tf.nn.conv2d()`.\n\n Padding is performed only once at the beginning, not between the\n operations.\n The fused op is considerably more efficient than performing the same\n calculation\n using standard TensorFlow ops. It supports gradients of arbitrary order.\n Args:\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,\n C]`.\n w: Weight tensor of the shape `[filterH, filterW, inChannels,\n outChannels]`. Grouped convolution can be performed by `inChannels =\n x.shape[0] // numGroups`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). 
The default is `[1] * factor`, which corresponds to\n nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or\n `[N, H * factor, W * factor, C]`, and same datatype as `x`.\n ", "n_words": 139, "vocab_size": 102, "n_whitespaces": 280, "language": "en" } }, { "id": 21441, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", "file_name": "tarfile.py", "fun_name": "read", "commit_message": "Vendor in pip 22.1.2", "code": "def read(self, size=None):\n \n if size is None:\n t = []\n while True:\n buf = self._read(self.bufsize)\n if not buf:\n break\n t.append(buf)\n buf = \"\".join(t)\n else:\n buf = self._read(size)\n self.pos += len(buf)\n return buf\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 175, "n_words": 32, "vocab_size": 25, "complexity": 4, "nloc": 13, "token_counts": 71, "n_ast_nodes": 121, "n_identifiers": 11, "random_cut": "def read(self, size=None):\n \n if size is None:\n t = []\n while True:\n buf = self._read(self.bufsize", "d_id": 3837, "documentation": { "docstring": "Return the next size number of bytes from the stream.\n If size is not defined, return all bytes of the stream\n up to EOF.\n ", "n_words": 24, "vocab_size": 19, "n_whitespaces": 51, "language": "en" } }, { "id": 3767, "commit_id": "a3aae8017a0a40ff2006e2567f71dccb04c997a5", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/base_streams.py", "file_name": "base_streams.py", "fun_name": "state", "commit_message": "🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)\n\n* Facebook Marketing performance improvement\r\n\r\n* add comments and little refactoring\r\n\r\n* fix integration tests with the new config\r\n\r\n* improve job status handling, limit concurrency to 10\r\n\r\n* fix campaign jobs, refactor manager\r\n\r\n* big refactoring of async jobs, support random order of slices\r\n\r\n* update source _read_incremental to hook new state logic\r\n\r\n* fix issues with timeout\r\n\r\n* remove debugging and clean up, improve retry logic\r\n\r\n* merge changes from #8234\r\n\r\n* fix call super _read_increment\r\n\r\n* generalize batch execution, add use_batch flag\r\n\r\n* improve coverage, do some refactoring of spec\r\n\r\n* update test, remove overrides of source\r\n\r\n* add split by AdSet\r\n\r\n* add smaller insights\r\n\r\n* fix end_date < start_date case\r\n\r\n* add account_id to PK\r\n\r\n* add notes\r\n\r\n* fix new streams\r\n\r\n* fix reversed incremental stream\r\n\r\n* update spec.json for SAT\r\n\r\n* upgrade CDK and bump version\r\n\r\nCo-authored-by: Dmytro Rezchykov \r\nCo-authored-by: Eugene Kulak ", "code": "def state(self) -> Mapping[str, Any]:\n \n if self._cursor_value:\n return {\n self.cursor_field: self._cursor_value,\n \"include_deleted\": self._include_deleted,\n }\n\n return {}\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 89, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 8, "token_counts": 38, "n_ast_nodes": 61, "n_identifiers": 8, "random_cut": "def state(self) -> Mapping[str, Any]:\n \n if self._cursor_value:\n return {\n self.cursor_field: 
self._cursor_value,\n ", "d_id": 553, "documentation": { "docstring": "State getter, get current state and serialize it to emmit Airbyte STATE message", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 220322, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/base_events.py", "file_name": "base_events.py", "fun_name": "set_task_factory", "commit_message": "add python 3.10.4 for windows", "code": "def set_task_factory(self, factory):\n \n if factory is not None and not callable(factory):\n raise TypeError('task factory must be a callable or None')\n self._task_factory = factory\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 55, "n_words": 23, "vocab_size": 20, "complexity": 3, "nloc": 4, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 6, "random_cut": "def set_task_factory(self, factory):\n \n if factory is not None and not callable(factory):\n raise TypeError('task factory must b", "d_id": 55965, "documentation": { "docstring": "Set a task factory that will be used by loop.create_task().\n\n If factory is None the default task factory will be set.\n\n If factory is a callable, it should have a signature matching\n '(loop, coro)', where 'loop' will be a reference to the active\n event loop, 'coro' will be a coroutine object. The callable\n must return a Future.\n ", "n_words": 57, "vocab_size": 39, "n_whitespaces": 100, "language": "en" } }, { "id": 163186, "commit_id": "521259299f7829da667ba39302ec77acedde9e5e", "repo": "pandas", "path": "pandas/core/arrays/datetimes.py", "file_name": "datetimes.py", "fun_name": "date", "commit_message": "DOC: Improve doc summaries in series.rst (#45237)", "code": "def date(self) -> npt.NDArray[np.object_]:\n \n # If the Timestamps have a timezone that is not UTC,\n # convert them into their i8 representation while\n # keeping their timezone and not using UTC\n timestamps = self._local_timestamps()\n\n return ints_to_pydatetime(timestamps, box=\"date\")\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 79, "n_words": 37, "vocab_size": 32, "complexity": 1, "nloc": 9, "token_counts": 31, "n_ast_nodes": 56, "n_identifiers": 10, "random_cut": "def date(self) -> npt.NDArray[np.object_]:\n \n # If the Timestamps have a timezone that is not UTC,\n #", "d_id": 39394, "documentation": { "docstring": "\n Returns numpy array of python :class:`datetime.date` objects.\n\n Namely, the date part of Timestamps without time and\n timezone information.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 47, "language": "en" } }, { "id": 243004, "commit_id": "11be1631433f252b816802aef1a3cd109bd308c7", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "apply_transparency", "commit_message": "Added apply_transparency()", "code": "def apply_transparency(self):\n \n if self.mode != \"P\" or \"transparency\" not in self.info:\n return\n\n from . 
import ImagePalette\n\n palette = self.getpalette(\"RGBA\")\n transparency = self.info[\"transparency\"]\n if isinstance(transparency, bytes):\n for i, alpha in enumerate(transparency):\n palette[i * 4 + 3] = alpha\n else:\n palette[transparency * 4 + 3] = 0\n self.palette = ImagePalette.ImagePalette(\"RGBA\", bytes(palette))\n self.palette.dirty = 1\n\n del self.info[\"transparency\"]\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 172, "n_words": 54, "vocab_size": 41, "complexity": 5, "nloc": 14, "token_counts": 110, "n_ast_nodes": 186, "n_identifiers": 14, "random_cut": "def apply_transparency(self):\n \n if self.mode != \"P\" or \"transparency\" not in self.info:\n return\n\n from . import ImagePalette\n\n palette = self.getpalette(\"RGBA\")\n transparency = self.info[\"transparency\"]\n if isinstance(tra", "d_id": 69952, "documentation": { "docstring": "\n If a P mode image has a \"transparency\" key in the info dictionary,\n remove the key and apply the transparency to the palette instead.\n ", "n_words": 24, "vocab_size": 19, "n_whitespaces": 46, "language": "en" } }, { "id": 93655, "commit_id": "2992f33c2d084f2542af647c6b76b54c351cc5a5", "repo": "sentry", "path": "src/sentry/utils/assets.py", "file_name": "assets.py", "fun_name": "get_frontend_app_asset_url", "commit_message": "ref(js): Remove broken frontend asset cache busting (#36953)", "code": "def get_frontend_app_asset_url(module, key):\n \n args = (settings.STATIC_FRONTEND_APP_URL.rstrip(\"/\"), module, key.lstrip(\"/\"))\n\n return \"{}/{}/{}\".format(*args)\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 19, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 37, "n_ast_nodes": 65, "n_identifiers": 9, "random_cut": "def get_frontend_app_asset_url(module, key):\n \n args = (settings.STATIC_FRONTEND_APP_URL.rstrip(\"/\"), module, key.lstrip(\"/\"))\n\n return \"{}/{}/{}\".format(*", "d_id": 19002, "documentation": { "docstring": "\n Returns an asset URL that is unversioned. 
These assets should have a\n `Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin\n server before using their locally cached asset.\n\n Example:\n {% frontend_app_asset_url 'sentry' 'sentry.css' %}\n => \"/_static/dist/sentry/sentry.css\"\n ", "n_words": 38, "vocab_size": 37, "n_whitespaces": 65, "language": "en" } }, { "id": 119812, "commit_id": "c66f5dda60aa5df7b6aa2f09d3ce88c4249b6f34", "repo": "jax", "path": "jax/_src/lax/linalg.py", "file_name": "linalg.py", "fun_name": "tridiagonal_solve", "commit_message": "DOC: add missing linalg functionality to docs", "code": "def tridiagonal_solve(dl, d, du, b):\n r\n if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1:\n raise ValueError('dl, d and du must be vectors')\n\n if dl.shape != d.shape or d.shape != du.shape:\n raise ValueError(\n f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`')\n\n if b.ndim != 2:\n raise ValueError(f'b={b.shape} must be a matrix')\n\n m, = dl.shape\n if m < 3:\n raise ValueError(f'm ({m}) must be >= 3')\n\n ldb, n = b.shape\n if ldb < max(1, m):\n raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})')\n\n if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype != b.dtype:\n raise ValueError(f'dl={dl.dtype}, d={d.dtype}, du={du.dtype} and '\n f'b={b.dtype} must be the same dtype,')\n\n t = dl.dtype\n if t not in (np.float32, np.float64):\n raise ValueError(f'Only f32/f64 are supported, got {t}')\n\n return tridiagonal_solve_p.bind(dl, d, du, b, m=m, n=n, ldb=ldb, t=t)\n\n\n# Schur Decomposition\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 197, "n_words": 139, "vocab_size": 90, "complexity": 13, "nloc": 39, "token_counts": 200, "n_ast_nodes": 379, "n_identifiers": 19, "random_cut": "def tridiagonal_solve(dl, d, du, b):\n r\n if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1:\n raise ValueError('dl, d and du must be vectors')\n\n if dl.shape != d.shape or d.shape != du.shape:\n raise ValueError(\n f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`')\n\n if b.ndim != 2:\n raise ValueError(f'b={b.shape} must be a matrix')\n\n m, = dl.shape\n if m < 3:\n raise ValueError(f'm ({m}) must be >= 3')\n\n ldb, n = b.shape\n if ldb < max(1, m):\n raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})')\n\n if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype != ", "d_id": 26690, "documentation": { "docstring": "Computes the solution of a tridiagonal linear system.\n\n This function computes the solution of a tridiagonal linear system:\n\n .. math::\n A . 
X = B\n\n Args:\n dl: The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``.\n Note that ``dl[0] = 0``.\n d: The middle diagnoal of A: ``d[i] := A[i, i]`` for i in ``[0,m)``.\n du: The upper diagonal of A: ``du[i] := A[i, i+1]`` for i in ``[0,m)``.\n Note that ``dl[m - 1] = 0``.\n b: Right hand side matrix.\n\n Returns:\n Solution ``X`` of tridiagonal system.\n ", "n_words": 91, "vocab_size": 57, "n_whitespaces": 125, "language": "en" } }, { "id": 166568, "commit_id": "44b660dc4a07f4fb507c31795ae63dca2e6e9440", "repo": "pandas", "path": "pandas/util/_print_versions.py", "file_name": "_print_versions.py", "fun_name": "_get_dependency_info", "commit_message": "fix pandas.show_versions() and remove pin for setuptools (#47096)", "code": "def _get_dependency_info() -> dict[str, JSONSerializable]:\n \n deps = [\n \"pandas\",\n # required\n \"numpy\",\n \"pytz\",\n \"dateutil\",\n # install / build,\n \"setuptools\",\n \"pip\",\n \"Cython\",\n # test\n \"pytest\",\n \"hypothesis\",\n # docs\n \"sphinx\",\n # Other, need a min version\n \"blosc\",\n \"feather\",\n \"xlsxwriter\",\n \"lxml.etree\",\n \"html5lib\",\n \"pymysql\",\n \"psycopg2\",\n \"jinja2\",\n # Other, not imported.\n \"IPython\",\n \"pandas_datareader\",\n ]\n deps.extend(list(VERSIONS))\n\n result: dict[str, JSONSerializable] = {}\n for modname in deps:\n mod = import_optional_dependency(modname, errors=\"ignore\")\n result[modname] = get_version(mod) if mod else None\n return result\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 289, "n_words": 72, "vocab_size": 61, "complexity": 3, "nloc": 32, "token_counts": 106, "n_ast_nodes": 191, "n_identifiers": 14, "random_cut": "def _get_dependency_info() -> dict[str, JSONSerializable]:\n \n deps = [\n \"pandas\",\n # required\n \"numpy\",\n \"pytz\",\n \"dateutil\",\n # install / build,\n \"setuptools\",\n \"pip\",\n \"Cython\",\n # test\n \"pytest\",\n \"hypothesis\",\n # docs\n \"sphinx\",\n # Other, need a min version\n \"blosc\",\n \"feather\",\n \"xlsxwriter\",\n \"lxml.etree\",\n \"html5lib\",\n \"pymysql\",\n \"psycopg2\",\n \"jinja2\",\n ", "d_id": 39828, "documentation": { "docstring": "\n Returns dependency information as a JSON serializable dictionary.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 66758, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/germany_fill_debtor_creditor_number.py", "file_name": "germany_fill_debtor_creditor_number.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\t\n\tcompany_list = frappe.get_all(\"Company\", filters={\"country\": \"Germany\"})\n\n\tfor company in company_list:\n\t\tparty_account_list = frappe.get_all(\n\t\t\t\"Party Account\",\n\t\t\tfilters={\"company\": company.name},\n\t\t\tfields=[\"name\", \"account\", \"debtor_creditor_number\"],\n\t\t)\n\t\tfor party_account in party_account_list:\n\t\t\tif (not party_account.account) or party_account.debtor_creditor_number:\n\t\t\t\t# account empty or debtor_creditor_number already filled\n\t\t\t\tcontinue\n\n\t\t\taccount_number = frappe.db.get_value(\"Account\", party_account.account, \"account_number\")\n\t\t\tif not account_number:\n\t\t\t\tcontinue\n\n\t\t\tfrappe.db.set_value(\n\t\t\t\t\"Party Account\", party_account.name, \"debtor_creditor_number\", account_number\n\t\t\t)\n\t\t\tfrappe.db.set_value(\"Party 
Account\", party_account.name, \"account\", \"\")\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 41, "n_words": 60, "vocab_size": 46, "complexity": 6, "nloc": 18, "token_counts": 126, "n_ast_nodes": 218, "n_identifiers": 16, "random_cut": "def execute():\n\t\n\tcompany_list = frappe.get_all(\"Company\", filters={\"country\": \"Germany\"})\n\n\tfor company in company_list:\n\t\tparty_account_list = frappe.get_all(\n\t\t\t\"Party Account\",\n\t\t\tfilters={\"company\": company.name},\n\t\t\tfields=[\"name\", \"account\", \"debtor_creditor_number\"],\n\t\t)\n\t\tfor party_account in party_account_list:\n\t\t\tif (not party_account.account) or party_account.debtor_creditor_number:\n\t\t\t\t# account empty or debtor_creditor_number already filled\n\t\t\t\tcontinue\n\n\t\t\taccount_number = frappe.db.get_value(\"Account\", party_account.account, \"account_number\")\n\t\t\tif not account_number:\n\t\t\t\tcontinue\n\n\t\t\tfrappe.db.set_value(\n\t\t\t\t\"Party Account\", party_account.name", "d_id": 14318, "documentation": { "docstring": "Move account number into the new custom field debtor_creditor_number.\n\n\tGerman companies used to use a dedicated payable/receivable account for\n\tevery party to mimick party accounts in the external accounting software\n\t\"DATEV\". This is no longer necessary. The reference ID for DATEV will be\n\tstored in a new custom field \"debtor_creditor_number\".\n\t", "n_words": 50, "vocab_size": 40, "n_whitespaces": 45, "language": "en" } }, { "id": 262049, "commit_id": "176b712c1a40cf630da9a77f1826836723c40fde", "repo": "TTS", "path": "TTS/tts/datasets/dataset.py", "file_name": "dataset.py", "fun_name": "compute_or_load", "commit_message": "Refactor TTSDataset ⚡️", "code": "def compute_or_load(self, wav_file):\n \n pitch_file = self.create_pitch_file_path(wav_file, self.cache_path)\n if not os.path.exists(pitch_file):\n pitch = self._compute_and_save_pitch(self.ap, wav_file, pitch_file)\n else:\n pitch = np.load(pitch_file)\n return pitch.astype(np.float32)\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 64, "n_ast_nodes": 102, "n_identifiers": 16, "random_cut": "def compute_or_load(self, wav_file):\n \n pitch_file = self.create_pitch_file_path(wav_file, self.cache_path)\n if not os.path.exists(pitch_file):\n pitch", "d_id": 77108, "documentation": { "docstring": "\n compute pitch and return a numpy array of pitch values\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 200282, "commit_id": "6d2bbf80752549276a968fd4af78231c569d55c5", "repo": "sympy", "path": "sympy/testing/runtests.py", "file_name": "runtests.py", "fun_name": "check_output", "commit_message": "runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy", "code": "def check_output(self, want, got, optionflags):\n \n # Handle the common case first, for efficiency:\n # if they're string-identical, always return true.\n if got == want:\n return True\n\n # TODO parse integers as well ?\n # Parse floats and compare them. 
If some of the parsed floats contain\n # ellipses, skip the comparison.\n matches = self.num_got_rgx.finditer(got)\n numbers_got = [match.group(1) for match in matches] # list of strs\n matches = self.num_want_rgx.finditer(want)\n numbers_want = [match.group(1) for match in matches] # list of strs\n if len(numbers_got) != len(numbers_want):\n return False\n\n if len(numbers_got) > 0:\n nw_ = []\n for ng, nw in zip(numbers_got, numbers_want):\n if '...' in nw:\n nw_.append(ng)\n continue\n else:\n nw_.append(nw)\n\n if abs(float(ng)-float(nw)) > 1e-5:\n return False\n\n got = self.num_got_rgx.sub(r'%s', got)\n got = got % tuple(nw_)\n\n # can be used as a special sequence to signify a\n # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.\n if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):\n # Replace in want with a blank line.\n want = re.sub(r'(?m)^%s\\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),\n '', want)\n # If a line in got contains only spaces, then remove the\n # spaces.\n got = re.sub(r'(?m)^\\s*?$', '', got)\n if got == want:\n return True\n\n # This flag causes doctest to ignore any differences in the\n # contents of whitespace strings. Note that this can be used\n # in conjunction with the ELLIPSIS flag.\n if optionflags & pdoctest.NORMALIZE_WHITESPACE:\n got = ' '.join(got.split())\n want = ' '.join(want.split())\n if got == want:\n return True\n\n # The ELLIPSIS flag says to let the sequence \"...\" in `want`\n # match any substring in `got`.\n if optionflags & pdoctest.ELLIPSIS:\n if pdoctest._ellipsis_match(want, got):\n return True\n\n # We didn't find any match; return false.\n return False\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 816, "n_words": 272, "vocab_size": 149, "complexity": 15, "nloc": 36, "token_counts": 276, "n_ast_nodes": 459, "n_identifiers": 33, "random_cut": "def check_output(self, want, got, optionflags):\n \n # Handle the common case first, for efficiency:\n # if they're string-identical, always return true.\n if got == want:\n return True\n\n # TODO parse integers as well ?\n # Parse floats and compare them. If some of the parsed floats contain\n # ellipses, skip the comparison.\n matches = self.num_got_rgx.finditer(got)\n numbers_got = [match.group(1) for match in matches] # list of strs\n matches = self.num_want_rgx.finditer(want)\n numbers_want = [match.group(1) for match in matches] # list of strs\n if len(numbers_got) != len(numbers_want):\n return False\n\n if len(numbers_got) > 0:\n nw_ = []\n for ng, nw in zip(numbers_got, numbers_want):\n if '...' in nw:\n nw_.append(ng)\n continue\n else:\n nw_.append(nw)\n\n if abs(float(ng)-float(nw)) > 1e-5:\n return False\n\n got = self.num_got_rgx.sub(r'%s', got)\n got = got % tuple(nw_)\n\n # can be used as a special sequence to signify a\n # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.\n if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):\n # Replace in want with a blank line.\n want = re.sub(r'(?m)^%s\\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),\n '', want)\n # If a line in got contains only spaces, then remove the\n # spaces.\n got = re.sub(r'(?m)^\\s*?$', '', got)\n if got == want:\n return True\n\n # This flag causes doctest to ignore any differences in the\n # contents of whitespace strings. 
Note that this can be used\n # in conjunction with the ELLIPSIS flag.\n if optionflags & pdoctest.NORMALIZE_WHITESPACE:\n got = ' '.join(got.split())\n want = ' '.join(want.split())\n if got == want:\n return True\n\n # The ELLIPSIS flag", "d_id": 49581, "documentation": { "docstring": "\n Return True iff the actual output from an example (`got`)\n matches the expected output (`want`). These strings are\n always considered to match if they are identical; but\n depending on what option flags the test runner is using,\n several non-exact match types are also possible. See the\n documentation for `TestRunner` for more information about\n option flags.\n ", "n_words": 55, "vocab_size": 46, "n_whitespaces": 114, "language": "en" } }, { "id": 73775, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/__init__.py", "file_name": "__init__.py", "fun_name": "start", "commit_message": "Reformat with black", "code": "def start(self, workflow_state, user=None):\n \n task_state = self.get_task_state_class()(workflow_state=workflow_state)\n task_state.status = TaskState.STATUS_IN_PROGRESS\n task_state.page_revision = workflow_state.page.get_latest_revision()\n task_state.task = self\n task_state.save()\n task_submitted.send(\n sender=task_state.specific.__class__,\n instance=task_state.specific,\n user=user,\n )\n return task_state\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 120, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 12, "token_counts": 77, "n_ast_nodes": 122, "n_identifiers": 20, "random_cut": "def start(self, workflow_state, user=None):\n \n task_state = self.get_task_state_class()(workflow_state=workflow_state)\n task_state.status = TaskState.STATUS_IN_PROGRESS\n task_state.page_revision = workflow_state.page.get_latest_revision()\n task_state.task = self\n task_state.save()\n task_submitted.send(\n sender=task_state.specific.__class__,\n instance=task_state.specific,\n user=user,\n )\n return task_state\n", "d_id": 16102, "documentation": { "docstring": "Start this task on the provided workflow state by creating an instance of TaskState", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 67084, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/germany/utils/datev/datev_csv.py", "file_name": "datev_csv.py", "fun_name": "get_datev_csv", "commit_message": "style: format code with black", "code": "def get_datev_csv(data, filters, csv_class):\n\t\n\tempty_df = pd.DataFrame(columns=csv_class.COLUMNS)\n\tdata_df = pd.DataFrame.from_records(data)\n\tresult = empty_df.append(data_df, sort=True)\n\n\tif csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS:\n\t\tresult[\"Belegdatum\"] = pd.to_datetime(result[\"Belegdatum\"])\n\n\t\tresult[\"Beleginfo - Inhalt 6\"] = pd.to_datetime(result[\"Beleginfo - Inhalt 6\"])\n\t\tresult[\"Beleginfo - Inhalt 6\"] = result[\"Beleginfo - Inhalt 6\"].dt.strftime(\"%d%m%Y\")\n\n\t\tresult[\"Fälligkeit\"] = pd.to_datetime(result[\"Fälligkeit\"])\n\t\tresult[\"Fälligkeit\"] = result[\"Fälligkeit\"].dt.strftime(\"%d%m%y\")\n\n\t\tresult.sort_values(by=\"Belegdatum\", inplace=True, kind=\"stable\", ignore_index=True)\n\n\tif csv_class.DATA_CATEGORY == DataCategory.ACCOUNT_NAMES:\n\t\tresult[\"Sprach-ID\"] = \"de-DE\"\n\n\tdata = result.to_csv(\n\t\t# Reason for str(';'): 
https://github.com/pandas-dev/pandas/issues/6035\n\t\tsep=\";\",\n\t\t# European decimal seperator\n\t\tdecimal=\",\",\n\t\t# Windows \"ANSI\" encoding\n\t\tencoding=\"latin_1\",\n\t\t# format date as DDMM\n\t\tdate_format=\"%d%m\",\n\t\t# Windows line terminator\n\t\tline_terminator=\"\\r\\n\",\n\t\t# Do not number rows\n\t\tindex=False,\n\t\t# Use all columns defined above\n\t\tcolumns=csv_class.COLUMNS,\n\t\t# Quote most fields, even currency values with \",\" separator\n\t\tquoting=QUOTE_NONNUMERIC,\n\t)\n\n\tdata = data.encode(\"latin_1\", errors=\"replace\")\n\n\theader = get_header(filters, csv_class)\n\theader = \";\".join(header).encode(\"latin_1\", errors=\"replace\")\n\n\t# 1st Row: Header with meta data\n\t# 2nd Row: Data heading (Überschrift der Nutzdaten), included in `data` here.\n\t# 3rd - nth Row: Data (Nutzdaten)\n\treturn header + b\"\\r\\n\" + data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 117, "n_words": 155, "vocab_size": 107, "complexity": 3, "nloc": 27, "token_counts": 247, "n_ast_nodes": 435, "n_identifiers": 40, "random_cut": "def get_datev_csv(data, filters, csv_class):\n\t\n\tempty_df = pd.DataFrame(columns=csv_class.COLUMNS)\n\tdata_df = pd.DataFrame.from_records(data)\n\tresult = empty_df.append(data_df, sort=True)\n\n\tif csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS:\n\t\tresult[\"Belegdatum\"] = pd.to_datetime(result[\"", "d_id": 14424, "documentation": { "docstring": "\n\tFill in missing columns and return a CSV in DATEV Format.\n\n\tFor automatic processing, DATEV requires the first line of the CSV file to\n\thold meta data such as the length of account numbers oder the category of\n\tthe data.\n\n\tArguments:\n\tdata -- array of dictionaries\n\tfilters -- dict\n\tcsv_class -- defines DATA_CATEGORY, FORMAT_NAME and COLUMNS\n\t", "n_words": 56, "vocab_size": 42, "n_whitespaces": 48, "language": "en" } }, { "id": 200453, "commit_id": "24f1e7730119fe958cc8e28411f790c9a5ec04eb", "repo": "sympy", "path": "sympy/stats/random_matrix_models.py", "file_name": "random_matrix_models.py", "fun_name": "CircularSymplecticEnsemble", "commit_message": "Fix various typos\n\nFound via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`", "code": "def CircularSymplecticEnsemble(sym, dim):\n \n sym, dim = _symbol_converter(sym), _sympify(dim)\n model = CircularSymplecticEnsembleModel(sym, dim)\n rmp = RandomMatrixPSpace(sym, model=model)\n return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 36, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 52, "n_ast_nodes": 80, "n_identifiers": 11, "random_cut": "def CircularSymplecticEnsemble(sym, dim):\n \n sym, dim = _symbol_converter(sym), _sympify(dim)\n model = CircularSymplecticEnsembleModel(sym, dim)\n rmp = RandomMatrixPSpace(sym, model=model)\n return RandomMatrixSymbol(sym, dim, di", "d_id": 49659, "documentation": { "docstring": "\n Represents Circular Symplectic Ensembles.\n\n Examples\n ========\n\n >>> from sympy.stats import CircularSymplecticEnsemble as CSE\n >>> from sympy.stats import 
joint_eigen_distribution\n >>> C = CSE('S', 1)\n >>> joint_eigen_distribution(C)\n Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))\n\n Note\n ====\n\n As can be seen above in the example, density of CiruclarSymplecticEnsemble\n is not evaluated because the exact definition is based on haar measure of\n unitary group which is not unique.\n ", "n_words": 69, "vocab_size": 57, "n_whitespaces": 112, "language": "en" } }, { "id": 43154, "commit_id": "0c41f437674f135fe7232a368bf9c198b0ecd2f0", "repo": "airflow", "path": "airflow/models/taskinstance.py", "file_name": "taskinstance.py", "fun_name": "_executor_config_comparator", "commit_message": "Don't crash scheduler if exec config has old k8s objects (#24117)\n\nFrom time to time k8s library objects change their attrs. If executor config is stored with old version, and unpickled with new version, we can get attribute errors that can crash the scheduler (see https://github.com/apache/airflow/issues/23727).\r\n\r\nHere we update handling so that we fail the task but don't crash the scheduler.", "code": "def _executor_config_comparator(x, y):\n \n try:\n return x == y\n except AttributeError:\n return False\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 19, "n_ast_nodes": 33, "n_identifiers": 4, "random_cut": "def _executor_config_comparator(x, y):\n \n try:\n return x == y\n except AttributeError:\n return False\n\n", "d_id": 7845, "documentation": { "docstring": "\n The TaskInstance.executor_config attribute is a pickled object that may contain\n kubernetes objects. If the installed library version has changed since the\n object was originally pickled, due to the underlying ``__eq__`` method on these\n objects (which converts them to JSON), we may encounter attribute errors. 
In this\n case we should replace the stored object.\n ", "n_words": 53, "vocab_size": 45, "n_whitespaces": 73, "language": "en" } }, { "id": 288605, "commit_id": "47d0598e75487f63901931875f69f802a477df13", "repo": "core", "path": "tests/util/test_color.py", "file_name": "test_color.py", "fun_name": "test_color_temperature_to_rgbww", "commit_message": "Use Kelvin as the preferred color temperature unit (#79591)\n\n* Use Kelvin as the preferred white temperature unit\r\n\r\n* Update homekit\r\n\r\n* Adjust tests", "code": "def test_color_temperature_to_rgbww():\n \n # Coldest color temperature -> only cold channel enabled\n assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == (\n 0,\n 0,\n 0,\n 255,\n 0,\n )\n assert color_util.color_temperature_to_rgbww(6535, 128, 2000, 6535) == (\n 0,\n 0,\n 0,\n 128,\n 0,\n )\n # Warmest color temperature -> only cold channel enabled\n assert color_util.color_temperature_to_rgbww(2000, 255, 2000, 6535) == (\n 0,\n 0,\n 0,\n 0,\n 255,\n )\n assert color_util.color_temperature_to_rgbww(2000, 128, 2000, 6535) == (\n 0,\n 0,\n 0,\n 0,\n 128,\n )\n # Warmer than mid point color temperature -> More warm than cold channel enabled\n assert color_util.color_temperature_to_rgbww(2881, 255, 2000, 6535) == (\n 0,\n 0,\n 0,\n 112,\n 143,\n )\n assert color_util.color_temperature_to_rgbww(2881, 128, 2000, 6535) == (\n 0,\n 0,\n 0,\n 56,\n 72,\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 370, "n_words": 112, "vocab_size": 34, "complexity": 1, "nloc": 43, "token_counts": 161, "n_ast_nodes": 207, "n_identifiers": 3, "random_cut": "def test_color_temperature_to_rgbww():\n \n # Coldest color temperature -> only cold channel enabled\n assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == (\n 0,\n 0,\n 0,\n 255,\n 0,\n )\n assert color_util.color_temperatu", "d_id": 87761, "documentation": { "docstring": "Test color temp to warm, cold conversion.\n\n Temperature values must be in mireds\n Home Assistant uses rgbcw for rgbww\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 28, "language": "en" } }, { "id": 118626, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/tests/streamlit/report_context_test.py", "file_name": "report_context_test.py", "fun_name": "test_set_page_config_first", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def test_set_page_config_first(self):\n \n\n fake_enqueue = lambda msg: None\n ctx = ScriptRunContext(\n \"TestSessionID\",\n fake_enqueue,\n \"\",\n SessionState(),\n UploadedFileManager(),\n )\n\n ctx.on_script_start()\n\n markdown_msg = ForwardMsg()\n markdown_msg.delta.new_element.markdown.body = \"foo\"\n\n msg = ForwardMsg()\n msg.page_config_changed.title = \"foo\"\n\n ctx.enqueue(markdown_msg)\n with self.assertRaises(StreamlitAPIException):\n ctx.enqueue(msg)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 176, "n_words": 33, "vocab_size": 26, "complexity": 1, "nloc": 17, "token_counts": 84, "n_ast_nodes": 148, "n_identifiers": 20, "random_cut": "def test_set_page_config_first(self):\n \n\n fake_enqueue = lambda msg: None\n ", 
"d_id": 26330, "documentation": { "docstring": "st.set_page_config must be called before other st commands\n when the script has been marked as started", "n_words": 16, "vocab_size": 16, "n_whitespaces": 22, "language": "en" } }, { "id": 101909, "commit_id": "dab823a3eb7a5257cb1e0818ee10ed234d3de97f", "repo": "faceswap", "path": "lib/gui/display_command.py", "file_name": "display_command.py", "fun_name": "set_vars", "commit_message": "Typing - lib.gui.display_command", "code": "def set_vars(self) -> None:\n \n tk_vars = super().set_vars()\n\n smoothgraph = tk.DoubleVar()\n smoothgraph.set(0.900)\n tk_vars[\"smoothgraph\"] = smoothgraph\n\n raw_var = tk.BooleanVar()\n raw_var.set(True)\n tk_vars[\"raw_data\"] = raw_var\n\n smooth_var = tk.BooleanVar()\n smooth_var.set(True)\n tk_vars[\"smooth_data\"] = smooth_var\n\n iterations_var = tk.IntVar()\n iterations_var.set(10000)\n tk_vars[\"display_iterations\"] = iterations_var\n\n logger.debug(tk_vars)\n return tk_vars\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 150, "n_words": 38, "vocab_size": 24, "complexity": 1, "nloc": 25, "token_counts": 103, "n_ast_nodes": 177, "n_identifiers": 15, "random_cut": "def set_vars(self) -> None:\n \n tk_vars = super().set_vars()\n\n smoothgraph = tk.DoubleVar()\n smoothgraph.set(0.900)\n tk_vars[\"smoothg", "d_id": 21290, "documentation": { "docstring": " Add graphing specific variables to the default variables.\n\n Overrides original method.\n\n Returns\n -------\n dict\n The variable names with their corresponding tkinter variable\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 69, "language": "en" } }, { "id": 60026, "commit_id": "0c9ee0876133bde14ce070a89557fc31cd905bac", "repo": "prefect", "path": "src/prefect/infrastructure/kubernetes.py", "file_name": "kubernetes.py", "fun_name": "_configure_kubernetes_library_client", "commit_message": "Use cluster uid and namespace instead of cluster \"name\" for Kubernetes job identifiers (#7747)\n\nCo-authored-by: peytonrunyan \r\nCo-authored-by: Peyton <44583861+peytonrunyan@users.noreply.github.com>", "code": "def _configure_kubernetes_library_client(self) -> None:\n \n # TODO: Investigate returning a configured client so calls on other threads\n # will not invalidate the config needed here\n\n # if a k8s cluster block is provided to the flow runner, use that\n if self.cluster_config:\n self.cluster_config.configure_client()\n else:\n # If no block specified, try to load Kubernetes configuration within a cluster. 
If that doesn't\n # work, try to load the configuration from the local environment, allowing\n # any further ConfigExceptions to bubble up.\n try:\n kubernetes.config.load_incluster_config()\n except kubernetes.config.ConfigException:\n kubernetes.config.load_kube_config()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 226, "n_words": 82, "vocab_size": 62, "complexity": 3, "nloc": 14, "token_counts": 45, "n_ast_nodes": 85, "n_identifiers": 9, "random_cut": "def _configure_kubernetes_library_client(self) -> None:\n \n # TODO: Investigate returning a configured client so calls on other threads\n # will not invalidate the config needed here\n\n # if a k8s cluster block is provided to the flow runner, use that\n if self.cluster_config:\n self.cluster_config.configure_client()\n else:\n # If no block specified, try to load Kubernetes configuration within a cluster. If that doesn't\n # work, try to load the configuration from the local environment, a", "d_id": 11975, "documentation": { "docstring": "\n Set the correct kubernetes client configuration.\n\n WARNING: This action is not threadsafe and may override the configuration\n specified by another `KubernetesJob` instance.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 61, "language": "en" } }, { "id": 95966, "commit_id": "3c8b4477340a7fe276c57c9b598c161b309c4fbd", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_rules.py", "file_name": "test_project_rules.py", "fun_name": "test_runs_alert_rule_action_creator", "commit_message": "feat(alert-rule-action): New data structure for alert-rule-action settings (#31444)\n\nObjective:\r\nOriginally the issue was with serializing the settings field for alert webhooks and fighting with the serializers. 
Instead we decided to convert the dictionary to an array of dictionaries with keys name and value.", "code": "def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator):\n \n self.login_as(user=self.user)\n\n project = self.create_project()\n\n self.create_sentry_app(\n name=\"Pied Piper\",\n organization=project.organization,\n schema={\"elements\": [self.create_alert_rule_action_schema()]},\n )\n install = self.create_sentry_app_installation(\n slug=\"pied-piper\", organization=project.organization\n )\n\n actions = [\n {\n \"id\": \"sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction\",\n \"settings\": [\n {\"name\": \"title\", \"value\": \"Team Rocket\"},\n {\"name\": \"summary\", \"value\": \"We're blasting off again.\"},\n ],\n \"sentryAppInstallationUuid\": install.uuid,\n \"hasSchemaFormConfig\": True,\n },\n ]\n\n url = reverse(\n \"sentry-api-0-project-rules\",\n kwargs={\"organization_slug\": project.organization.slug, \"project_slug\": project.slug},\n )\n\n response = self.client.post(\n url,\n data={\n \"name\": \"my super cool rule\",\n \"owner\": f\"user:{self.user.id}\",\n \"conditions\": [],\n \"filters\": [],\n \"actions\": actions,\n \"filterMatch\": \"any\",\n \"actionMatch\": \"any\",\n \"frequency\": 30,\n },\n format=\"json\",\n )\n\n assert response.status_code == 200, response.content\n assert response.data[\"id\"]\n\n rule = Rule.objects.get(id=response.data[\"id\"])\n assert rule.data[\"actions\"] == actions\n\n kwargs = {\n \"install\": install,\n \"fields\": actions[0].get(\"settings\"),\n }\n\n call_kwargs = mock_alert_rule_action_creator.call_args[1]\n\n assert call_kwargs[\"install\"].id == kwargs[\"install\"].id\n assert call_kwargs[\"fields\"] == kwargs[\"fields\"]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 657, "n_words": 116, "vocab_size": 91, "complexity": 1, "nloc": 51, "token_counts": 291, "n_ast_nodes": 513, "n_identifiers": 34, "random_cut": "def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator):\n \n self.login_as(user=self.user)\n\n project = self.create_project()\n\n self.create_sentry_app(\n name=\"Pied Piper\",\n organization=project.organization,\n schema={\"elements\": [self.create_alert_rule_action_schema()]},\n )\n install = self.create_sentry_app_installation(\n slug=\"pied-piper\", organization=project.organization\n )\n\n actions = [\n {\n \"id\": \"sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction\",\n \"settings\": [\n {\"name\": \"title\", \"value\": \"Team Rocket\"},\n {\"name\": \"summary\", \"value\": \"We're blasting off again.\"},\n ],\n \"sentryAppInstallationUuid\": install.uuid,\n \"hasSchemaFormConfig\": True,\n },\n ]\n\n url = reverse(\n \"sentry-api-0-proj", "d_id": 19261, "documentation": { "docstring": "\n Ensures that Sentry Apps with schema forms (UI components)\n receive a payload when an alert rule is created with them.\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 42, "language": "en" } }, { "id": 317011, "commit_id": "6ac05784a63f7490f875959139ef903034bc45b0", "repo": "core", "path": "homeassistant/components/icloud/account.py", "file_name": "account.py", "fun_name": "_determine_interval", "commit_message": "Remove icloud from mypy ignore list (#75007)", "code": "def _determine_interval(self) -> int:\n \n intervals = {\"default\": self._max_interval}\n for device in self._devices.values():\n # Max interval if no 
location\n if device.location is None:\n continue\n\n current_zone = run_callback_threadsafe(\n self.hass.loop,\n async_active_zone,\n self.hass,\n device.location[DEVICE_LOCATION_LATITUDE],\n device.location[DEVICE_LOCATION_LONGITUDE],\n device.location[DEVICE_LOCATION_HORIZONTAL_ACCURACY],\n ).result()\n\n # Max interval if in zone\n if current_zone is not None:\n continue\n\n zones = (\n self.hass.states.get(entity_id)\n for entity_id in sorted(self.hass.states.entity_ids(\"zone\"))\n )\n\n distances = []\n for zone_state in zones:\n if zone_state is None:\n continue\n zone_state_lat = zone_state.attributes[DEVICE_LOCATION_LATITUDE]\n zone_state_long = zone_state.attributes[DEVICE_LOCATION_LONGITUDE]\n zone_distance = distance(\n device.location[DEVICE_LOCATION_LATITUDE],\n device.location[DEVICE_LOCATION_LONGITUDE],\n zone_state_lat,\n zone_state_long,\n )\n if zone_distance is not None:\n distances.append(round(zone_distance / 1000, 1))\n\n # Max interval if no zone\n if not distances:\n continue\n mindistance = min(distances)\n\n # Calculate out how long it would take for the device to drive\n # to the nearest zone at 120 km/h:\n interval = round(mindistance / 2)\n\n # Never poll more than once per minute\n interval = max(interval, 1)\n\n if interval > 180:\n # Three hour drive?\n # This is far enough that they might be flying\n interval = self._max_interval\n\n if (\n device.battery_level is not None\n and device.battery_level <= 33\n and mindistance > 3\n ):\n # Low battery - let's check half as often\n interval = interval * 2\n\n intervals[device.name] = interval\n\n return max(\n int(min(intervals.items(), key=operator.itemgetter(1))[1]),\n self._max_interval,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 987, "n_words": 199, "vocab_size": 120, "complexity": 13, "nloc": 52, "token_counts": 290, "n_ast_nodes": 454, "n_identifiers": 43, "random_cut": "def _determine_interval(self) -> int:\n \n intervals = {\"default\": self._max_interval}\n for device in self._devices.values():\n # Max interval if no location\n if device.location is None:\n continue\n\n current_zone = run_callback_threadsafe(\n self.hass.loop,\n async_active_zone,\n self.hass,\n device.location[DEVICE_LOCATION_LATITUDE],\n device.location[DEVICE_LOCATION_LONGITUDE],\n device.location[DEVICE_LOCATION_HORIZONTAL_ACCURACY],\n ).result()\n\n # Max interval if in zone\n if current_zone is not None:\n continue\n\n zones = (\n self.hass.states.get(entity_id)\n for entity_id in sorted(self.hass.states.entity_ids(\"zone\"))\n )\n\n distances = []\n for zone_state in zones:\n if zone_state is None:\n continue\n zone_state_lat = zone_state.attributes[DEVICE_LOCATION_LATITUDE]\n zone_state_long = zone_state.attributes[DEVICE_LOCATION_LONGITUDE]\n zone_distance = distance(\n ", "d_id": 115587, "documentation": { "docstring": "Calculate new interval between two API fetch (in minutes).", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 22084, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/models.py", "file_name": "models.py", "fun_name": "prepare_cookies", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def prepare_cookies(self, cookies):\n \n if isinstance(cookies, cookielib.CookieJar):\n self._cookies = cookies\n else:\n self._cookies = cookiejar_from_dict(cookies)\n\n cookie_header = get_cookie_header(self._cookies, self)\n if cookie_header is not None:\n self.headers[\"Cookie\"] = cookie_header\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 93, "n_words": 25, "vocab_size": 18, "complexity": 3, "nloc": 8, "token_counts": 57, "n_ast_nodes": 93, "n_identifiers": 11, "random_cut": "def prepare_cookies(self, cookies):\n \n if isinstance(cookies, cookielib.CookieJar", "d_id": 4164, "documentation": { "docstring": "Prepares the given HTTP cookie data.\n\n This function eventually generates a ``Cookie`` header from the\n given cookies using cookielib. Due to cookielib's design, the header\n will not be regenerated if it already exists, meaning this function\n can only be called once for the life of the\n :class:`PreparedRequest ` object. Any subsequent calls\n to ``prepare_cookies`` will have no actual effect, unless the \"Cookie\"\n header is removed beforehand.\n ", "n_words": 66, "vocab_size": 54, "n_whitespaces": 122, "language": "en" } }, { "id": 40216, "commit_id": "c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c", "repo": "dash", "path": "dash/testing/browser.py", "file_name": "browser.py", "fun_name": "wait_for_contains_text", "commit_message": "f-strings everywhere! fffff", "code": "def wait_for_contains_text(self, selector, text, timeout=None):\n \n return self._wait_for(\n method=contains_text,\n args=(selector, text),\n timeout=timeout,\n msg=f\"text -> {text} not found inside element within {timeout or self._wait_timeout}s\",\n )\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 88, "n_words": 23, "vocab_size": 23, "complexity": 1, "nloc": 7, "token_counts": 41, "n_ast_nodes": 73, "n_identifiers": 11, "random_cut": "def wait_for_contains_text(self, selector, text, timeout=None):\n \n return self._wait_for(\n method=contains_text,\n args=(selector, text),\n timeout=timeout,\n msg=f\"text -> {text} not found inside element within {timeout or self._wait_timeout}s\",\n )\n", "d_id": 7355, "documentation": { "docstring": "Explicit wait until the element's text contains the expected `text`.\n\n timeout if not set, equals to the fixture's `wait_timeout`\n shortcut to `WebDriverWait` with customized `contains_text`\n condition.\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 54, "language": "en" } }, { "id": 198681, "commit_id": "73b2975a89b45ef437f11b697d39796f755a856b", "repo": "sympy", "path": "sympy/physics/continuum_mechanics/truss.py", "file_name": "truss.py", "fun_name": "remove_member", "commit_message": "default values for supports and loads removed along with other changes", "code": "def remove_member(self, label):\n \n if label not in list(self._members):\n raise ValueError(\"No such member exists in the Truss\")\n\n else:\n self._nodes_occupied.pop(tuple([self._members[label][0], self._members[label][1]]))\n self._nodes_occupied.pop(tuple([self._members[label][1], self._members[label][0]]))\n self._members.pop(label)\n self._internal_forces.pop(label)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 99, "n_words": 23, 
"vocab_size": 22, "complexity": 2, "nloc": 8, "token_counts": 104, "n_ast_nodes": 162, "n_identifiers": 10, "random_cut": "def remove_member(self, label):\n \n if label not in list(self._members):\n raise ValueError(\"No such member exists in the Truss\")\n\n else:\n ", "d_id": 49035, "documentation": { "docstring": "\n This method removes a member from the given truss.\n\n Parameters\n ==========\n label: String or Symbol\n The label for the member to be removed.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> t = Truss()\n >>> t.add_node('A', 0, 0)\n >>> t.add_node('B', 3, 0)\n >>> t.add_node('C', 2, 2)\n >>> t.add_member('AB', 'A', 'B')\n >>> t.add_member('AC', 'A', 'C')\n >>> t.add_member('BC', 'B', 'C')\n >>> t.members\n {'AB': ['A', 'B'], 'AC': ['A', 'C'], 'BC': ['B', 'C']}\n >>> t.remove_member('AC')\n >>> t.members\n {'AB': ['A', 'B'], 'BC': ['B', 'C']}\n ", "n_words": 79, "vocab_size": 55, "n_whitespaces": 231, "language": "en" } }, { "id": 118583, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/tests/server_test_case.py", "file_name": "server_test_case.py", "fun_name": "_create_mock_app_session", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def _create_mock_app_session(*args, **kwargs):\n \n mock_id = mock.PropertyMock(\n return_value=\"mock_id:%s\" % ServerTestCase._next_session_id\n )\n ServerTestCase._next_session_id += 1\n\n mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs)\n type(mock_session).id = mock_id\n return mock_session\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 84, "n_words": 24, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 57, "n_ast_nodes": 93, "n_identifiers": 15, "random_cut": "def _create_mock_app_session(*args, **kwargs):\n \n mock_id = mock.PropertyMock(\n return_value=\"mock_id:%s\" % ServerTestCase._next_session_id\n )\n ServerTestCase._next_session_id += 1\n\n mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs)\n type(mock_sessi", "d_id": 26306, "documentation": { "docstring": "Create a mock AppSession. 
Each mocked instance will have\n its own unique ID.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 42543, "commit_id": "8a4cf5d94eb94b6427c5d1d7907ba07b119932c5", "repo": "nltk", "path": "nltk/parse/util.py", "file_name": "util.py", "fun_name": "taggedsents_to_conll", "commit_message": "Docstring tests (#3050)\n\n* fixed pytests\r\n\r\n* fixed more pytests\r\n\r\n* fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py\r\n\r\n* fixed pytests (mainly multiline or rounding issues)\r\n\r\n* fixed treebank pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed destructive.py pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed pytest (rounding issues)\r\n\r\n* fixed pytest (initialised missing object)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* added pytest +SKIP for deprecated module stanford\r\n\r\n* updated AUTHORS.md\r\n\r\n* changed docstring corrections by usage of ELLIPSIS and different roundings\r\n\r\n* fixed AUTHORS.md to be consistent\r\n\r\n* Fix framenet doctest formatting with pprint\r\n\r\n* Change docstring on MultiListBox.__init__\r\n\r\nI believe the original typo was misinterpreted and changed to something that was not originally intended.\r\n\r\nCo-authored-by: Jan Lennartz \r\nCo-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>\r\nCo-authored-by: Tom Aarsen ", "code": "def taggedsents_to_conll(sentences):\n \n for sentence in sentences:\n yield from taggedsent_to_conll(sentence)\n yield \"\\n\\n\"\n\n\n######################################################################\n# { Test Suites\n######################################################################\n\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 4, "token_counts": 19, "n_ast_nodes": 41, "n_identifiers": 4, "random_cut": "def taggedsents_to_conll(sentences):\n \n for sentence in sentences:\n yield from taggedsent_to_conll(sentence)\n yield \"\\n\\n\"\n\n\n#############################################################", "d_id": 7605, "documentation": { "docstring": "\n A module to convert the a POS tagged document stream\n (i.e. list of list of tuples, a list of sentences) and yield lines\n in CONLL format. This module yields one line per word and two newlines\n for end of sentence.\n\n >>> from nltk import word_tokenize, sent_tokenize, pos_tag\n >>> text = \"This is a foobar sentence. Is that right?\"\n >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)]\n >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE\n ... if line:\n ... 
print(line, end=\"\")\n 1\tThis\t_\tDT\tDT\t_\t0\ta\t_\t_\n 2\tis\t_\tVBZ\tVBZ\t_\t0\ta\t_\t_\n 3\ta\t_\tDT\tDT\t_\t0\ta\t_\t_\n 4\tfoobar\t_\tJJ\tJJ\t_\t0\ta\t_\t_\n 5\tsentence\t_\tNN\tNN\t_\t0\ta\t_\t_\n 6\t.\t\t_\t.\t.\t_\t0\ta\t_\t_\n \n \n 1\tIs\t_\tVBZ\tVBZ\t_\t0\ta\t_\t_\n 2\tthat\t_\tIN\tIN\t_\t0\ta\t_\t_\n 3\tright\t_\tNN\tNN\t_\t0\ta\t_\t_\n 4\t?\t_\t.\t.\t_\t0\ta\t_\t_\n \n \n\n :param sentences: Input sentences to parse\n :type sentence: list(list(tuple(str, str)))\n :rtype: iter(str)\n :return: a generator yielding sentences in CONLL format.\n ", "n_words": 204, "vocab_size": 91, "n_whitespaces": 214, "language": "en" } }, { "id": 100480, "commit_id": "0189029dbaad486e623353ee4a8451af8c85f4e4", "repo": "faceswap", "path": "plugins/train/model/phaze_a.py", "file_name": "phaze_a.py", "fun_name": "_get_input_shape", "commit_message": "Phaze-A: Add MobileNetV3 encoder", "code": "def _get_input_shape(self):\n \n arch = self.config[\"enc_architecture\"]\n enforce_size = _MODEL_MAPPING[arch].get(\"enforce_for_weights\", False)\n default_size = _MODEL_MAPPING[arch][\"default_size\"]\n scaling = self.config[\"enc_scaling\"] / 100\n\n min_size = _MODEL_MAPPING[arch].get(\"min_size\", 32)\n size = int(max(min_size, min(default_size, ((default_size * scaling) // 16) * 16)))\n\n if self.config[\"enc_load_weights\"] and enforce_size and scaling != 1.0:\n logger.warning(\"%s requires input size to be %spx when loading imagenet weights. \"\n \"Adjusting input size from %spx to %spx\",\n arch, default_size, size, default_size)\n retval = (default_size, default_size, 3)\n else:\n retval = (size, size, 3)\n\n logger.debug(\"Encoder input set to: %s\", retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 244, "n_words": 82, "vocab_size": 60, "complexity": 4, "nloc": 16, "token_counts": 139, "n_ast_nodes": 232, "n_identifiers": 18, "random_cut": "def _get_input_shape(self):\n \n arch = self.config[\"enc_architecture\"]\n enforce_size = _MODEL_MAPPING[arch].get(\"enforce_for_weig", "d_id": 19953, "documentation": { "docstring": " Obtain the input shape for the model.\n\n Input shape is calculated from the selected Encoder's input size, scaled to the user\n selected Input Scaling, rounded down to the nearest 16 pixels.\n\n Notes\n -----\n Some models (NasNet) require the input size to be of a certain dimension if loading\n imagenet weights. 
In these instances resize inputs and raise warning message\n\n Returns\n -------\n tuple\n The shape tuple for the input size to the Phaze-A model\n ", "n_words": 73, "vocab_size": 53, "n_whitespaces": 155, "language": "en" } }, { "id": 76578, "commit_id": "ae79eb4cb29b84bb8379fcf0957e6837164c5933", "repo": "wagtail", "path": "wagtail/admin/panels.py", "file_name": "panels.py", "fun_name": "get_form_options", "commit_message": "Introduce a get_form_options method to combine widget_overrides / required_fields / required_formsets / field_permissions", "code": "def get_form_options(self):\n \n options = {}\n\n if not getattr(self.widget_overrides, \"is_original_method\", False):\n warn(\n \"The `widget_overrides` method (on %r) is deprecated; \"\n \"these should be returned from `get_form_options` as a \"\n \"`widgets` item instead.\" % type(self),\n category=RemovedInWagtail219Warning,\n )\n options[\"widgets\"] = self.widget_overrides()\n\n if not getattr(self.required_fields, \"is_original_method\", False):\n warn(\n \"The `required_fields` method (on %r) is deprecated; \"\n \"these should be returned from `get_form_options` as a \"\n \"`fields` item instead.\" % type(self),\n category=RemovedInWagtail219Warning,\n )\n options[\"fields\"] = self.required_fields()\n\n if not getattr(self.required_formsets, \"is_original_method\", False):\n warn(\n \"The `required_formsets` method (on %r) is deprecated; \"\n \"these should be returned from `get_form_options` as a \"\n \"`formsets` item instead.\" % type(self),\n category=RemovedInWagtail219Warning,\n )\n options[\"formsets\"] = self.required_formsets()\n\n if not getattr(self.field_permissions, \"is_original_method\", False):\n warn(\n \"The `field_permissions` method (on %r) is deprecated; \"\n \"these should be returned from `get_form_options` as a \"\n \"`field_permissions` item instead.\" % type(self),\n category=RemovedInWagtail219Warning,\n )\n options[\"field_permissions\"] = self.field_permissions()\n\n return options\n\n # RemovedInWagtail219Warning - edit handlers should override get_form_options instead", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 572, "n_words": 148, "vocab_size": 60, "complexity": 5, "nloc": 35, "token_counts": 168, "n_ast_nodes": 300, "n_identifiers": 12, "random_cut": "def get_form_options(self):\n \n options = {}\n\n if not getattr(self.widget_overrides, \"is_original_method\", False):\n warn(\n \"The `widget_overrides` method (on %r) is deprecated; ", "d_id": 16543, "documentation": { "docstring": "\n Return a dictionary of attributes such as 'fields', 'formsets' and 'widgets'\n which should be incorporated into the form class definition to generate a form\n that this EditHandler can use.\n This will only be called after binding to a model (i.e. 
self.model is available).\n ", "n_words": 43, "vocab_size": 38, "n_whitespaces": 79, "language": "en" } }, { "id": 60730, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py", "file_name": "package_finder.py", "fun_name": "evaluate_links", "commit_message": "upd; format", "code": "def evaluate_links(self, link_evaluator, links):\n # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate]\n \n candidates = []\n for link in self._sort_links(links):\n candidate = self.get_install_candidate(link_evaluator, link)\n if candidate is not None:\n candidates.append(candidate)\n\n return candidates\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 101, "n_words": 29, "vocab_size": 26, "complexity": 3, "nloc": 7, "token_counts": 48, "n_ast_nodes": 77, "n_identifiers": 10, "random_cut": "def evaluate_links(self, link_evaluator, links):\n # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate]\n \n candidates = []\n for link in self._sort_links(links):\n candidate = self.get_install_candidate(link_evaluator,", "d_id": 12267, "documentation": { "docstring": "\n Convert links that are candidates to InstallationCandidate objects.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 154199, "commit_id": "3f985ed6864cc1b5b587094d75ca5b2695e4139f", "repo": "modin", "path": "modin/core/storage_formats/base/query_compiler.py", "file_name": "query_compiler.py", "fun_name": "columnarize", "commit_message": "REFACTOR-#4796: Introduce constant for __reduced__ column name (#4799)\n\nCo-authored-by: Mahesh Vashishtha \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Jonathan Shi ", "code": "def columnarize(self):\n \n if len(self.columns) != 1 or (\n len(self.index) == 1 and self.index[0] == MODIN_UNNAMED_SERIES_LABEL\n ):\n return self.transpose()\n return self\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 70, "n_words": 20, "vocab_size": 17, "complexity": 4, "nloc": 6, "token_counts": 44, "n_ast_nodes": 72, "n_identifiers": 7, "random_cut": "def columnarize(self):\n \n ", "d_id": 35855, "documentation": { "docstring": "\n Transpose this QueryCompiler if it has a single row but multiple columns.\n\n This method should be called for QueryCompilers representing a Series object,\n i.e. 
``self.is_series_like()`` should be True.\n\n Returns\n -------\n BaseQueryCompiler\n Transposed new QueryCompiler or self.\n ", "n_words": 36, "vocab_size": 32, "n_whitespaces": 97, "language": "en" } }, { "id": 60858, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/models/wheel.py", "file_name": "wheel.py", "fun_name": "get_formatted_file_tags", "commit_message": "upd; format", "code": "def get_formatted_file_tags(self):\n # type: () -> List[str]\n \n return sorted(str(tag) for tag in self.file_tags)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 34, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 2, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 6, "random_cut": "def get_formatted_file_tags(self):\n # type: () -> List[str]\n \n return sorted(str(tag) for tag in self.", "d_id": 12305, "documentation": { "docstring": "Return the wheel's tags as a sorted list of strings.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 161892, "commit_id": "ac69488768e9c54cdef26e45b26a1b42ebf2f5d3", "repo": "rich", "path": "tests/test_syntax.py", "file_name": "test_syntax.py", "fun_name": "test_python_render", "commit_message": "fix for syntax measure", "code": "def test_python_render():\n syntax = Panel.fit(\n Syntax(\n CODE,\n lexer=\"python\",\n line_numbers=True,\n line_range=(2, 10),\n theme=\"monokai\",\n code_width=60,\n word_wrap=True,\n ),\n padding=0,\n )\n rendered_syntax = render(syntax)\n print(repr(rendered_syntax))\n expected = '╭─────────────────────────────────────────────────────────────────╮\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 2 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;230;219;116;48;2;39;40;34m\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 3 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34miter_values\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34miter\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m(\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mvalues\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m)\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 4 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mtry\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 5 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mnext\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m(\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34miter_values\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m)\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 6 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m 
\\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mexcept\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 7 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mreturn\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 8 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mfirst\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mTrue\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 9 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mfor\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mvalue\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34min\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34miter_values\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m10 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34myield\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mfirst\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m,\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mFalse\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m,\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n╰─────────────────────────────────────────────────────────────────╯\\n'\n assert rendered_syntax == expected\n\n", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 555, "n_words": 85, "vocab_size": 57, "complexity": 1, "nloc": 17, "token_counts": 69, "n_ast_nodes": 289, "n_identifiers": 18, "random_cut": "def test_python_render():\n syntax = Panel.fit(\n Syntax(\n CODE,\n lexer=\"python\",\n line_numbers=True,\n line_range=(2, 10),\n theme=\"monokai\",\n code_width=60,\n word_wrap=True,\n ),\n padding=0,\n )\n rendered_syntax = render(syntax)\n print(repr(rendered_syntax))\n expected = '╭─────────────────────────────────────────────────────────────────╮\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 2 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;230;219;116;48;2;39;40;34m\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 3 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34miter_values\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m 
\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34miter\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m(\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mvalues\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m)\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 4 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mtry\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 5 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mnext\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m(\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34miter_values\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m)\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 6 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mexcept\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 7 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39", "d_id": 39090, "documentation": { "docstring": "Iterate and generate a tuple with a flag for first \\x1b[0m\\x1b[48;2;39;40;34m \\x1b[0m│\\n│\\x1b[48;2;39;40;34m \\x1b[0m\\x1b[38;2;230;219;116;48;2;39;40;34mand last value.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 19, "language": "en" } }, { "id": 303311, "commit_id": "de2e9b6d77adb7f86c6ec4aa0a50428ec8606dc3", "repo": "core", "path": "tests/components/recorder/test_history.py", "file_name": "test_history.py", "fun_name": "test_state_changes_during_period_multiple_entities_single_test", "commit_message": "Fix state_changes_during_period history query when no entities are passed (#73139)", "code": "def test_state_changes_during_period_multiple_entities_single_test(hass_recorder):\n \n hass = hass_recorder()\n start = dt_util.utcnow()\n test_entites = {f\"sensor.{i}\": str(i) for i in range(30)}\n for entity_id, value in test_entites.items():\n hass.states.set(entity_id, value)\n\n wait_recording_done(hass)\n end = dt_util.utcnow()\n\n hist = history.state_changes_during_period(hass, start, end, None)\n for entity_id, value in test_entites.items():\n hist[entity_id][0].state == value\n\n for entity_id, value in test_entites.items():\n hist = history.state_changes_during_period(hass, start, end, entity_id)\n assert len(hist) == 1\n hist[entity_id][0].state == value\n\n hist = history.state_changes_during_period(hass, start, end, None)\n for entity_id, value in test_entites.items():\n hist[entity_id][0].state == value\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 151, "n_words": 73, "vocab_size": 32, "complexity": 6, "nloc": 18, "token_counts": 183, "n_ast_nodes": 284, "n_identifiers": 22, "random_cut": "def test_state_changes_during_period_multiple_entities_single_test(hass_recorder):\n \n hass = hass_recorder()\n start = dt_util.utcnow()\n 
test_entites = {f\"sensor.{i}\": str(i) for i in range(30)}\n for entity_id, value in test_entites.items():\n hass.states.set(entity_id, value)\n\n wait_recording_done(hass)\n end = dt_util.utcnow()\n\n hist = history.state_changes_during_period(hass, start, end, None)\n for entity_id, value in test_entites.items():\n hist[entity_id][0].state == value\n\n for entity_id, value in test_entites.items():\n hist = history.state_changes_during_period(hass, start, end, entity_id)\n assert len(hist) == 1\n hist[entity_id][0].state == value\n\n hist = history.state_change", "d_id": 102133, "documentation": { "docstring": "Test state change during period with multiple entities in the same test.\n\n This test ensures the sqlalchemy query cache does not\n generate incorrect results.\n ", "n_words": 24, "vocab_size": 23, "n_whitespaces": 33, "language": "en" } }, { "id": 206612, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/dateformat.py", "file_name": "dateformat.py", "fun_name": "O", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def O(self): # NOQA: E743, E741\n \n if self._no_timezone_or_datetime_is_ambiguous_or_imaginary:\n return \"\"\n\n seconds = self.Z()\n sign = \"-\" if seconds < 0 else \"+\"\n seconds = abs(seconds)\n return \"%s%02d%02d\" % (sign, seconds // 3600, (seconds // 60) % 60)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 91, "n_words": 37, "vocab_size": 27, "complexity": 3, "nloc": 7, "token_counts": 43, "n_ast_nodes": 93, "n_identifiers": 7, "random_cut": "def O(self): # NOQA: E743, E741\n \n if self._no_timezone_or_datetime_is_ambiguous_or_imaginary:\n return \"\"\n\n seconds = sel", "d_id": 51586, "documentation": { "docstring": "\n Difference to Greenwich time in hours; e.g. '+0200', '-0430'.\n\n If timezone information is not available, return an empty string.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 80656, "commit_id": "799968460d4794bcd9959f57a2b97846b9a00bb7", "repo": "awx", "path": "awx/main/utils/common.py", "file_name": "common.py", "fun_name": "convert_mem_str_to_bytes", "commit_message": "Fixup conversion of memory and cpu settings to support k8s resource request format (#11725)\n\nfix memory and cpu settings to suport k8s resource request format\r\n\r\n* fix conversion of memory setting to bytes\r\n\r\nThis setting has not been getting set by default, and needed some fixing\r\nup to be compatible with setting the memory in the same way as we set it\r\nin the operator, as well as with other changes from last year which\r\nassume that ansible runner is returning memory in bytes.\r\n\r\nThis way we can start setting this setting in the operator, and get a\r\nmore accurate reflection of how much memory is available to the control\r\npod in k8s.\r\n\r\nOn platforms where services are all sharing memory, we deduct a\r\npenalty from the memory available. 
On k8s we don't need to do this\r\nbecause the web, redis, and task containers each have memory\r\nallocated to them.\r\n\r\n* Support CPU setting expressed in units used by k8s\r\n\r\nThis setting has not been getting set by default, and needed some fixing\r\nup to be compatible with setting the CPU resource request/limits in the\r\nsame way as we set it in the resource requests/limits.\r\n\r\nThis way we can start setting this setting in the\r\noperator, and get a more accurate reflection of how much cpu is\r\navailable to the control pod in k8s.\r\n\r\nBecause cpu on k8s can be partial cores, migrate cpu field to decimal.\r\n\r\nk8s does not allow granularity of less than 100m (equivalent to 0.1 cores), so only\r\nstore up to 1 decimal place.\r\n\r\nfix analytics to deal with decimal cpu\r\n\r\nneed to use DjangoJSONEncoder when Decimal fields in data passed to\r\njson.dumps", "code": "def convert_mem_str_to_bytes(mem_str):\n \n # If there is no suffix, the memory sourced from the request is in bytes\n if mem_str.isdigit():\n return int(mem_str)\n\n conversions = {\n 'Ei': lambda x: x * 2**60,\n 'E': lambda x: x * 10**18,\n 'Pi': lambda x: x * 2**50,\n 'P': lambda x: x * 10**15,\n 'Ti': lambda x: x * 2**40,\n 'T': lambda x: x * 10**12,\n 'Gi': lambda x: x * 2**30,\n 'G': lambda x: x * 10**9,\n 'Mi': lambda x: x * 2**20,\n 'M': lambda x: x * 10**6,\n 'Ki': lambda x: x * 2**10,\n 'K': lambda x: x * 10**3,\n }\n mem = 0\n mem_unit = None\n for i, char in enumerate(mem_str):\n if not char.isdigit():\n mem_unit = mem_str[i:]\n mem = int(mem_str[:i])\n break\n if not mem_unit or mem_unit not in conversions.keys():\n error = f\"Unsupported value for SYSTEM_TASK_ABS_MEM: {mem_str}, memory must be expressed in bytes or with known suffix: {conversions.keys()}. 
Falling back to 1 byte\"\n logger.warning(error)\n return 1\n return max(1, conversions[mem_unit](mem))\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 337, "n_words": 155, "vocab_size": 86, "complexity": 6, "nloc": 29, "token_counts": 234, "n_ast_nodes": 400, "n_identifiers": 16, "random_cut": "def convert_mem_str_to_bytes(mem_str):\n \n # If there is no suffix, the memory sourced from the request is in bytes\n if mem_str.isdigit():\n return int(mem_str)\n\n conversions = {\n 'Ei': lambda x: x * 2**60,\n 'E': lambda x: x * 1", "d_id": 17087, "documentation": { "docstring": "Convert string with suffix indicating units to memory in bytes (base 2)\n\n Useful for dealing with memory setting that may be expressed in units compatible with\n kubernetes.\n\n See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 41, "language": "en" } }, { "id": 93732, "commit_id": "2fbf550ec05c8501cbc9eca62e73526e717dcbdf", "repo": "sentry", "path": "src/sentry/integrations/jira_server/integration.py", "file_name": "integration.py", "fun_name": "sync_status_outbound", "commit_message": "ref(Jira): Split Jira Cloud and Jira Server (#37034)\n\n* Split Jira Cloud and Jira Server", "code": "def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs):\n \n client = self.get_client()\n jira_issue = client.get_issue(external_issue.key)\n jira_project = jira_issue[\"fields\"][\"project\"]\n\n try:\n external_project = IntegrationExternalProject.objects.get(\n external_id=jira_project[\"id\"],\n organization_integration_id__in=OrganizationIntegration.objects.filter(\n organization_id=external_issue.organization_id,\n integration_id=external_issue.integration_id,\n ),\n )\n except IntegrationExternalProject.DoesNotExist:\n return\n\n jira_status = (\n external_project.resolved_status if is_resolved else external_project.unresolved_status\n )\n\n # don't bother updating if it's already the status we'd change it to\n if jira_issue[\"fields\"][\"status\"][\"id\"] == jira_status:\n return\n try:\n transitions = client.get_transitions(external_issue.key)\n except ApiHostError:\n raise IntegrationError(\"Could not reach host to get transitions.\")\n\n try:\n transition = [t for t in transitions if t.get(\"to\", {}).get(\"id\") == jira_status][0]\n except IndexError:\n # TODO(jess): Email for failure\n logger.warning(\n \"jira.status-sync-fail\",\n extra={\n \"organization_id\": external_issue.organization_id,\n \"integration_id\": external_issue.integration_id,\n \"issue_key\": external_issue.key,\n },\n )\n return\n\n client.transition_issue(external_issue.key, transition[\"id\"])\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 525, "n_words": 103, "vocab_size": 81, "complexity": 8, "nloc": 36, "token_counts": 213, "n_ast_nodes": 352, "n_identifiers": 37, "random_cut": "def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs):\n \n client = self.get_client()\n jira_issue = client.get_issue(external_issue.key)\n jira_project = jira_issue[\"fields\"][\"project\"]\n\n try:\n external_project = IntegrationExternalProject.objects.get(\n external_id=jira_project[\"id\"],\n organization_integration_id__in=OrganizationIntegration.objects.filter(\n organization_id=external_issue.organization_id,\n 
integration_id=external_issue.integration_id,\n ),\n )\n except IntegrationExternalProject.DoesNotExist:\n return\n\n ", "d_id": 19015, "documentation": { "docstring": "\n Propagate a sentry issue's status to a linked issue's status.\n ", "n_words": 10, "vocab_size": 8, "n_whitespaces": 25, "language": "en" } }, { "id": 108514, "commit_id": "032316bc6c7798fca6c82de24167c975f237687f", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "_sci", "commit_message": "Cleanup documentation generation for pyplot\n\n- remove the awkward `pyplot.plotting()` function, which only served\n as a namespace to take up the docs for pyplot and output them via\n `.. autofunction`\n- Instead generate the same information using `.. autosummary::`. We\n have to list the desired methods here explicitly. I've added a test\n that these are the same as previously auto-generated in the\n `plotting()` docstring. If we change anything in pyplot, we'll be\n notified through the test failure that we have to adapt the\n autosummary list.\n- Removed the docstring generation logic\n `_setup_pyplot_info_docstrings()`. Apart from generating the\n `plotting()` docstring, this added docstrings to the pyplot colormap\n setters. Instead, we now add these docstrings directly via\n boilerplate.py\n\nCo-authored-by: Elliott Sales de Andrade ", "code": "def _sci(self, im):\n \n _api.check_isinstance(\n (mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage),\n im=im)\n if isinstance(im, mpl.contour.ContourSet):\n if im.collections[0] not in self._children:\n raise ValueError(\"ContourSet must be in current Axes\")\n elif im not in self._children:\n raise ValueError(\"Argument must be an image, collection, or \"\n \"ContourSet in this Axes\")\n self._current_image = im\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 166, "n_words": 44, "vocab_size": 33, "complexity": 4, "nloc": 11, "token_counts": 81, "n_ast_nodes": 130, "n_identifiers": 17, "random_cut": "def _sci(self, im):\n \n _api.check_isinstance(\n (mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage),\n im=im)\n if isinstance(im, mpl.contour.ContourSet):\n if im.collections[0] not in self._children:\n raise ValueError(\"ContourSet must be in current Axes\")\n elif im ", "d_id": 23225, "documentation": { "docstring": "\n Set the current image.\n\n This image will be the target of colormap functions like\n ``pyplot.viridis``, and other functions such as `~.pyplot.clim`. The\n current image is an attribute of the current Axes.\n ", "n_words": 31, "vocab_size": 24, "n_whitespaces": 68, "language": "en" } }, { "id": 156512, "commit_id": "1e783d9a714160e968936cb22d54d085959ab09e", "repo": "dask", "path": "dask/typing.py", "file_name": "typing.py", "fun_name": "__dask_postpersist__", "commit_message": "Collection Protocol (#8674)\n\n[PEP 544](https://www.python.org/dev/peps/pep-0544/) introduces the `Protocol` class to the `typing` module in Python 3.8 (the soon be the minimum supported version, https://github.com/dask/community/issues/213). Writing new Dask collections for [dask-awkward](https://github.com/ContinuumIO/dask-awkward/) has had me thinking about working on a `DaskCollection` protocol. 
I imagine the benefits to be:\r\n\r\n- usage with static type checkers\r\n - other activity in this area at\r\n - #8295 \r\n - #8706 \r\n - #8854\r\n - Python supporting IDEs take advantage of typing\r\n- self-documenting; some improvements to [the custom collections page](https://docs.dask.org/en/latest/custom-collections.html) of the docs. The protocol docs can be autogenerated and added to that page.\r\n- purely opt-in feature\r\n\r\nThe `typing.runtime_checkable` decorator allows use of `isinstance(x, DaskCollection)` in any code base\r\nthat uses Dask collections; for example:\r\n\r\n```python\r\n>>> from dask.typing import DaskCollection\r\n>>> import dask.array as da\r\n>>> x = da.zeros((10, 3))\r\n>>> isinstance(x, DaskCollection)\r\nTrue\r\n```\r\n(though this is an order of magnitude slower than `dask.base.is_dask_collection` which only checks for `x.__dask_graph__() is not None`; static typing checking & built-in interface documentation are the core benefits IMO)\r\n\r\nSomething else that came up in the brief discussion on a call last week was having `{Scheduler,Worker,Nanny}Plugin` protocols in `distributed`; and perhaps those are better places to start introducing protocols to Dask since on the user side typically more folks would write plugins than new collections.", "code": "def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]:\n \n raise NotImplementedError(\"Inheriting class must implement this method.\")\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 21, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 5, "random_cut": "def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]:\n \n raise NotImplementedError(\"Inheriting c", "d_id": 36665, "documentation": { "docstring": "Rebuilder function and optional arguments to contruct a persisted collection.\n\n Returns\n -------\n PostPersistCallable\n Callable that rebuilds the collection. The signature\n should be\n ``rebuild(dsk: Mapping, *args: Any, rename: Mapping[str, str] | None)``.\n The callable should return an equivalent Dask collection\n with the same keys as `self`, but with results that are\n computed through a different graph. In the case of\n :py:func:`dask.persist`, the new graph will have just the\n output keys and the values already computed.\n tuple[Any, ...]\n Optional arugments passed to the rebuild callable. 
If no\n additional arguments are to be passed then this must be an\n empty tuple.\n\n ", "n_words": 98, "vocab_size": 76, "n_whitespaces": 254, "language": "en" } }, { "id": 64407, "commit_id": "c36bd7e1a6fe48c5fff4765e843571a0d6560dd1", "repo": "erpnext", "path": "erpnext/patches/v4_2/repost_reserved_qty.py", "file_name": "repost_reserved_qty.py", "fun_name": "execute", "commit_message": "fix: avoid creating bins without item-wh\n\nCo-Authored-By: Shadrak Gurupnor <30501401+shadrak98@users.noreply.github.com>\nCo-Authored-By: Saurabh ", "code": "def execute():\n\tfor doctype in (\"Sales Order Item\", \"Bin\"):\n\t\tfrappe.reload_doctype(doctype)\n\n\trepost_for = frappe.db.sql()\n\n\tfor item_code, warehouse in repost_for:\n\t\tif not (item_code and warehouse):\n\t\t\tcontinue\n\t\tupdate_bin_qty(item_code, warehouse, {\n\t\t\t\"reserved_qty\": get_reserved_qty(item_code, warehouse)\n\t\t})\n\n\tfrappe.db.sql()\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 21, "n_words": 32, "vocab_size": 29, "complexity": 5, "nloc": 30, "token_counts": 70, "n_ast_nodes": 118, "n_identifiers": 11, "random_cut": "def execute():\n\tfor doctype in (\"Sales Order Item\", \"Bin", "d_id": 13627, "documentation": { "docstring": "\n\t\tselect\n\t\t\tdistinct item_code, warehouse\n\t\tfrom\n\t\t\t(\n\t\t\t\t(\n\t\t\t\t\tselect distinct item_code, warehouse\n\t\t\t\t\t\t\t\tfrom `tabSales Order Item` where docstatus=1\n\t\t\t\t) UNION (\n\t\t\t\t\tselect distinct item_code, warehouse\n\t\t\t\t\tfrom `tabPacked Item` where docstatus=1 and parenttype='Sales Order'\n\t\t\t\t)\n\t\t\t) so_item\n\t\twhere\n\t\t\texists(select name from tabItem where name=so_item.item_code and ifnull(is_stock_item, 0)=1)\n\tdelete from tabBin\n\t\twhere exists(\n\t\t\tselect name from tabItem where name=tabBin.item_code and ifnull(is_stock_item, 0) = 0\n\t\t)\n\t", "n_words": 62, "vocab_size": 31, "n_whitespaces": 44, "language": "en" } }, { "id": 30043, "commit_id": "72c120ae8eeb34e5a3f9840fb1ab1de1fca52fb5", "repo": "saleor", "path": "saleor/account/migrations/0071_group.py", "file_name": "0071_group.py", "fun_name": "rename_group_tables_reverse", "commit_message": "Drop Djanog Auth", "code": "def rename_group_tables_reverse(apps, schema_editor):\n Group = apps.get_model(\"auth\", \"Group\")\n schema_editor.alter_db_table(\n Group,\n \"account_group\",\n \"auth_group\",\n )\n PermissionGroup = Group.permissions.through\n schema_editor.alter_db_table(\n PermissionGroup,\n \"account_group_permissions\",\n \"auth_group_permissions\",\n )\n\n\nRENAME_CONSTRAINTS_AND_INDEX_REVERSE = \n\nDROP_OLD_CONSTRAINTS_REVERSE_FROM_0072 = \n\nDROP_OLD_CONSTRAINTS_REVERSE_FROM_APP_0018 = \n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 85, "n_words": 26, "vocab_size": 20, "complexity": 1, "nloc": 13, "token_counts": 46, "n_ast_nodes": 100, "n_identifiers": 12, "random_cut": "def rename_group_tables_reverse(apps, schema_editor):\n Group = apps.get_model(\"auth\", \"Group\")\n schema_editor.alter_db_table(\n Group,\n \"account_group\",\n \"auth_group\",\n )\n PermissionGroup = Group.permissions.through\n schema_editor.alter_db_table(\n PermissionGroup,\n \"account_group_permissions\"", "d_id": 5291, "documentation": { "docstring": "\nALTER TABLE account_group RENAME CONSTRAINT account_group_pkey\n TO auth_group_pkey;\n\nALTER TABLE account_group RENAME 
CONSTRAINT account_group_name_key\n TO auth_group_name_key;\n\nALTER INDEX IF EXISTS account_group_name_034e9f3f_like\n RENAME TO auth_group_name_a6ea08ec_like;\n\nALTER TABLE auth_group_permissions\n ADD CONSTRAINT auth_group_permissions_group_id_permission_id_0cd325b0_uniq\n UNIQUE (group_id, permission_id);\n\nALTER TABLE auth_group_permissions\n ADD CONSTRAINT auth_group_permissions_group_id_b120cbf9_fk_auth_group_id\n FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE auth_group_permissions\n ADD CONSTRAINT auth_group_permissio_permission_id_84c5c92e_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE account_user_groups\n ADD CONSTRAINT userprofile_user_groups_group_id_c7eec74e_fk_auth_group_id\n FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE account_user_user_permissions\n ADD CONSTRAINT userprofile_user_use_permission_id_1caa8a71_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE app_app_permissions\n ADD CONSTRAINT account_serviceaccou_permission_id_449791f0_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE app_appextension_permissions\n ADD CONSTRAINT app_appextension_per_permission_id_cb6c3ce0_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n\nALTER TABLE app_appinstallation_permissions\n ADD CONSTRAINT app_appinstallation__permission_id_4ee9f6c8_fk_auth_perm\n FOREIGN KEY (permission_id) REFERENCES auth_permission (id)\n DEFERRABLE INITIALLY DEFERRED;\n", "n_words": 138, "vocab_size": 44, "n_whitespaces": 199, "language": "en" } }, { "id": 201197, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/auth_tests/test_context_processors.py", "file_name": "test_context_processors.py", "fun_name": "test_session_is_accessed", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_session_is_accessed(self):\n \n response = self.client.get(\"/auth_processor_attr_access/\")\n self.assertContains(response, \"Session accessed\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 45, "n_identifiers": 6, "random_cut": "def test_session_is_accessed(self):\n ", "d_id": 49897, "documentation": { "docstring": "\n The session is accessed if the auth context processor\n is used and relevant attributes accessed.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 222539, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/_msvccompiler.py", "file_name": "_msvccompiler.py", "fun_name": "_find_vc2017", "commit_message": "add python 3.10.4 for windows", "code": "def _find_vc2017():\n \n root = os.environ.get(\"ProgramFiles(x86)\") or os.environ.get(\"ProgramFiles\")\n if not root:\n return None, None\n\n try:\n path = subprocess.check_output([\n os.path.join(root, \"Microsoft Visual Studio\", \"Installer\", \"vswhere.exe\"),\n \"-latest\",\n \"-prerelease\",\n \"-requires\", \"Microsoft.VisualStudio.Component.VC.Tools.x86.x64\",\n \"-property\", \"installationPath\",\n \"-products\", \"*\",\n 
], encoding=\"mbcs\", errors=\"strict\").strip()\n except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):\n return None, None\n\n path = os.path.join(path, \"VC\", \"Auxiliary\", \"Build\")\n if os.path.isdir(path):\n return 15, path\n\n return None, None\n\nPLAT_SPEC_TO_RUNTIME = {\n 'x86' : 'x86',\n 'x86_amd64' : 'x64',\n 'x86_arm' : 'arm',\n 'x86_arm64' : 'arm64'\n}\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 206, "n_words": 71, "vocab_size": 55, "complexity": 5, "nloc": 19, "token_counts": 135, "n_ast_nodes": 275, "n_identifiers": 17, "random_cut": "def _find_vc2017():\n \n root = os.environ.get(\"ProgramFiles(x86)\") or os.environ.get(\"ProgramFiles\")\n if not root:\n return None, None\n\n try:\n path = subprocess.check_output([\n os.path.join(root, \"Micro", "d_id": 56628, "documentation": { "docstring": "Returns \"15, path\" based on the result of invoking vswhere.exe\n If no install is found, returns \"None, None\"\n\n The version is returned to avoid unnecessarily changing the function\n result. It may be ignored when the path is not None.\n\n If vswhere.exe is not available, by definition, VS 2017 is not\n installed.\n ", "n_words": 51, "vocab_size": 41, "n_whitespaces": 69, "language": "en" } }, { "id": 275868, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/hdf5_format.py", "file_name": "hdf5_format.py", "fun_name": "save_attributes_to_hdf5_group", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def save_attributes_to_hdf5_group(group, name, data):\n \n # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`\n # because in that case even chunking the array would not make the saving\n # possible.\n bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]\n\n # Expecting this to never be true.\n if bad_attributes:\n raise RuntimeError(\n \"The following attributes cannot be saved to HDF5 file because they \"\n f\"are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}\"\n )\n\n data_npy = np.asarray(data)\n\n num_chunks = 1\n chunked_data = np.array_split(data_npy, num_chunks)\n\n # This will never loop forever thanks to the test above.\n while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):\n num_chunks += 1\n chunked_data = np.array_split(data_npy, num_chunks)\n\n if num_chunks > 1:\n for chunk_id, chunk_data in enumerate(chunked_data):\n group.attrs[\"%s%d\" % (name, chunk_id)] = chunk_data\n else:\n group.attrs[name] = data\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 244, "n_words": 127, "vocab_size": 88, "complexity": 8, "nloc": 18, "token_counts": 123, "n_ast_nodes": 208, "n_identifiers": 21, "random_cut": "def save_attributes_to_hdf5_group(group, name, data):\n \n # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`\n # because in that case even chunking the array would not make the saving\n # possible.\n bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]\n\n # Expecting this to never be true.\n if bad_attributes:\n raise RuntimeError(\n \"The following attributes cannot be saved to HDF5 file because they \"\n f\"are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}\"\n )\n\n data_npy = np.asarray(data)\n\n num_chunks = 1\n chunked_data = 
np.array_split(data_npy, num_chunks)\n\n # This will never loop forever thanks to the test above.\n while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):\n num_chunks += 1\n chunked_data = np.array_split(data_npy, num_chunks)\n\n if num_chunks > 1:\n for chunk_id, chunk_data in enumerate(chunked_data):\n group.attrs[\"%s%d\" % (name, chunk_id)] = chunk_data\n else:\n group.attrs[name] = data\n\n", "d_id": 81493, "documentation": { "docstring": "Saves attributes (data) of the specified name into the HDF5 group.\n\n This method deals with an inherent problem of HDF5 file which is not\n able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\n Args:\n group: A pointer to a HDF5 group.\n name: A name of the attributes to save.\n data: Attributes data to store.\n\n Raises:\n RuntimeError: If any single attribute is too large to be saved.\n ", "n_words": 65, "vocab_size": 49, "n_whitespaces": 106, "language": "en" } }, { "id": 209197, "commit_id": "c96fbb8487051e209dfee788eff857e9ca1fed72", "repo": "scapy", "path": "scapy/layers/tls/record.py", "file_name": "record.py", "fun_name": "dispatch_hook", "commit_message": "Update the TLS13 notebook to spec", "code": "def dispatch_hook(cls, _pkt=None, *args, **kargs):\n \n if _pkt is not None:\n plen = len(_pkt)\n if plen >= 2:\n byte0, byte1 = struct.unpack(\"BB\", _pkt[:2])\n s = kargs.get(\"tls_session\", None)\n if byte0 not in _tls_type or byte1 != 3: # Unknown type\n # Check SSLv2: either the session is already SSLv2,\n # either the packet looks like one. As said above, this\n # isn't 100% reliable, but Wireshark does the same\n if s and (s.tls_version == 0x0002 or\n s.advertised_tls_version == 0x0002) or \\\n (_ssl_looks_like_sslv2(_pkt) and (not s or\n s.tls_version is None)):\n from scapy.layers.tls.record_sslv2 import SSLv2\n return SSLv2\n # Not SSLv2: continuation\n return _TLSEncryptedContent\n # Check TLS 1.3\n if s and _tls_version_check(s.tls_version, 0x0304):\n _has_cipher = lambda x: (\n x and not isinstance(x.cipher, Cipher_NULL)\n )\n if (_has_cipher(s.rcs) or _has_cipher(s.prcs)) and \\\n byte0 == 0x17:\n from scapy.layers.tls.record_tls13 import TLS13\n return TLS13\n if plen < 5:\n # Layer detected as TLS but too small to be a\n # parsed. Scapy should not try to decode them\n return _TLSEncryptedContent\n return TLS\n\n # Parsing methods\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 742, "n_words": 165, "vocab_size": 107, "complexity": 18, "nloc": 25, "token_counts": 192, "n_ast_nodes": 302, "n_identifiers": 34, "random_cut": "def dispatch_hook(cls, _pkt=None, *args, **kargs):\n \n if _pkt is not None:\n plen = len(_pkt)\n if plen >= 2:\n byte0, byte1 = struct.unpack(\"BB\", _pkt[:2])\n s = kargs.get(\"tls_", "d_id": 52639, "documentation": { "docstring": "\n If the TLS class was called on raw SSLv2 data, we want to return an\n SSLv2 record instance. 
We acknowledge the risk of SSLv2 packets with a\n msglen of 0x1403, 0x1503, 0x1603 or 0x1703 which will never be casted\n as SSLv2 records but TLS ones instead, but hey, we can't be held\n responsible for low-minded extensibility choices.\n ", "n_words": 57, "vocab_size": 48, "n_whitespaces": 100, "language": "en" } }, { "id": 66292, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/utils.py", "file_name": "utils.py", "fun_name": "get_leave_period", "commit_message": "style: format code with black", "code": "def get_leave_period(from_date, to_date, company):\n\tleave_period = frappe.db.sql(\n\t\t,\n\t\t{\"from_date\": from_date, \"to_date\": to_date, \"company\": company},\n\t\tas_dict=1,\n\t)\n\n\tif leave_period:\n\t\treturn leave_period\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 15, "token_counts": 43, "n_ast_nodes": 69, "n_identifiers": 9, "random_cut": "def get_leave_period(from_date, to_date, company):\n\tleave_period = frappe.db.sql(\n\t\t,\n\t\t{\"from_date\": from_date, \"to_date\": to_date, \"company\": company},\n\t\tas_dict=1,\n\t)", "d_id": 14164, "documentation": { "docstring": "\n\t\tselect name, from_date, to_date\n\t\tfrom `tabLeave Period`\n\t\twhere company=%(company)s and is_active=1\n\t\t\tand (from_date between %(from_date)s and %(to_date)s\n\t\t\t\tor to_date between %(from_date)s and %(to_date)s\n\t\t\t\tor (from_date < %(from_date)s and to_date > %(to_date)s))\n\t", "n_words": 31, "vocab_size": 19, "n_whitespaces": 25, "language": "en" } }, { "id": 143826, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/policy/sample_batch.py", "file_name": "sample_batch.py", "fun_name": "rows", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def rows(self) -> Iterator[Dict[str, TensorType]]:\n \n\n # Do we add seq_lens=[1] to each row?\n seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1])\n\n self_as_dict = {k: v for k, v in self.items()}\n\n for i in range(self.count):\n yield tree.map_structure_with_path(\n lambda p, v: v[i] if p[0] != self.SEQ_LENS else seq_lens,\n self_as_dict,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 136, "n_words": 49, "vocab_size": 42, "complexity": 5, "nloc": 27, "token_counts": 95, "n_ast_nodes": 144, "n_identifiers": 22, "random_cut": "def rows(self) -> Iterator[Dict[str, TensorType]]:\n \n\n # Do we add seq_lens=[1] to each row?\n seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1])\n\n self_as_dict = {k: v for k, v in self.items()}\n\n for i in range(self.count):\n yield tree.map_structure_with_path(\n lambda p, v: v[i] if p[0] != self.SEQ_LENS else seq_lens,\n self_as_dict,\n )\n", "d_id": 33065, "documentation": { "docstring": "Returns an iterator over data rows, i.e. dicts with column values.\n\n Note that if `seq_lens` is set in self, we set it to [1] in the rows.\n\n Yields:\n The column values of the row in this iteration.\n\n Examples:\n >>> batch = SampleBatch({\n ... \"a\": [1, 2, 3],\n ... \"b\": [4, 5, 6],\n ... \"seq_lens\": [1, 2]\n ... 
})\n >>> for row in batch.rows():\n print(row)\n {\"a\": 1, \"b\": 4, \"seq_lens\": [1]}\n {\"a\": 2, \"b\": 5, \"seq_lens\": [1]}\n {\"a\": 3, \"b\": 6, \"seq_lens\": [1]}\n ", "n_words": 82, "vocab_size": 58, "n_whitespaces": 247, "language": "en" } }, { "id": 270715, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_dedup_weights", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _dedup_weights(self, weights):\n \n output, seen_ids = [], set()\n for w in weights:\n if id(w) not in seen_ids:\n output.append(w)\n # Track the Variable's identity to avoid __eq__ issues.\n seen_ids.add(id(w))\n\n return output\n\n # SavedModel properties. Please see keras/saving/saved_model for details.\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 125, "n_words": 38, "vocab_size": 35, "complexity": 3, "nloc": 7, "token_counts": 49, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def _dedup_weights(self, weights):\n \n output, seen_ids = [], set()\n for w in weights:\n if id(w) not in seen_ids:\n output.append(w)\n # Track the Variable's identity to avoid __eq__ issues.\n seen_ids.add(id(w))\n\n return output\n\n # SavedModel properties. Please see keras/saving/saved_model for", "d_id": 80539, "documentation": { "docstring": "Dedupe weights while maintaining order as much as possible.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 185717, "commit_id": "e3130f95c69648916f121e779a325b6f6f87e6ba", "repo": "textual", "path": "src/textual/dom.py", "file_name": "dom.py", "fun_name": "ancestors_with_self", "commit_message": "Don't include self in DOMNode.ancestors any more\n\nAs well as dropping `self` from the list that DOMNode.ancestors provides,\nthis commit also adds DOMNode.ancestors_with_self, which maintains the\nprevious behaviour of DOMNode.ancestors.", "code": "def ancestors_with_self(self) -> list[DOMNode]:\n \n nodes: list[MessagePump | None] = []\n add_node = nodes.append\n node: MessagePump | None = self\n while node is not None:\n add_node(node)\n node = node._parent\n return cast(\"list[DOMNode]\", nodes)\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 95, "n_words": 31, "vocab_size": 26, "complexity": 2, "nloc": 12, "token_counts": 56, "n_ast_nodes": 92, "n_identifiers": 11, "random_cut": "def ancestors_with_self(self) -> list[DOMNode]:\n \n nodes: list[MessagePump | None] = []\n add_node = nodes.append\n node: MessagePump | None = self\n while node is not None:\n ", "d_id": 45126, "documentation": { "docstring": "list[DOMNode]: A list of Nodes by tracing a path all the way back to App.\n\n Note: This is inclusive of ``self``.\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 35, "language": "en" } }, { "id": 85981, "commit_id": "0099fe517a2044e70567e969f19bcf3fa3b26122", "repo": "sentry", "path": "src/sentry/search/events/datasets/metrics.py", "file_name": "metrics.py", "fun_name": "function_converter", "commit_message": "fix(mep): Include the column so its countmerge (#39005)\n\n- This was causing these results to overcount since we werent merging\r\nrows correctly. 
For the purposes of the endpoint we just needed >0 so it\r\nwasn't as noticeable", "code": "def function_converter(self) -> Mapping[str, fields.MetricsFunction]:\n \n resolve_metric_id = {\n \"name\": \"metric_id\",\n \"fn\": lambda args: self.resolve_metric(args[\"column\"]),\n }\n\n function_converter = {\n function.name: function\n for function in [\n # Note while the discover version of apdex, count_miserable, user_misery\n # accepts arguments, because this is precomputed with tags no parameters\n # are available\n fields.MetricsFunction(\n \"apdex\",\n optional_args=[fields.NullableNumberRange(\"satisfaction\", 0, None)],\n snql_distribution=self._resolve_apdex_function,\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"avg\",\n required_args=[\n fields.MetricArg(\n \"column\",\n allowed_columns=constants.METRIC_DURATION_COLUMNS,\n )\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"avgIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n args[\"metric_id\"],\n ],\n ),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_miserable\",\n required_args=[\n fields.MetricArg(\n \"column\", allowed_columns=[\"user\"], allow_custom_measurements=False\n )\n ],\n optional_args=[fields.NullableNumberRange(\"satisfaction\", 0, None)],\n calculated_args=[resolve_metric_id],\n snql_set=self._resolve_count_miserable_function,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_unparameterized_transactions\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"equals\",\n [\n self.builder.column(\"transaction\"),\n self.builder.resolve_tag_value(\"<< unparameterized >>\"),\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n # Not yet exposed, need to add far more validation around tag&value\n private=True,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_null_transactions\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"equals\",\n [\n self.builder.column(\"transaction\"),\n \"\" if self.builder.tag_values_are_strings else 0,\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n private=True,\n ),\n fields.MetricsFunction(\n \"count_has_transaction_name\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"and\",\n [\n Function(\n \"notEquals\",\n [\n self.builder.column(\"transaction\"),\n \"\"\n if self.builder.tag_values_are_strings\n else 0,\n ],\n ),\n Function(\n \"notEquals\",\n [\n self.builder.column(\"transaction\"),\n self.builder.resolve_tag_value(\n \"<< unparameterized >>\"\n ),\n ],\n ),\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n private=True,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"user_misery\",\n optional_args=[\n fields.NullableNumberRange(\"satisfaction\", 0, None),\n fields.with_default(\n constants.MISERY_ALPHA, fields.NumberRange(\"alpha\", 0, None)\n 
),\n fields.with_default(\n constants.MISERY_BETA, fields.NumberRange(\"beta\", 0, None)\n ),\n ],\n calculated_args=[],\n snql_set=self._resolve_user_misery_function,\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"p50\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.5\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p75\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.75\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p90\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.90\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p95\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.95\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p99\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.99\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p100\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"max\",\n required_args=[\n fields.MetricArg(\"column\"),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"maxIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n ),\n fields.MetricsFunction(\n \"min\",\n required_args=[\n fields.MetricArg(\"column\"),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"minIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n ),\n fields.MetricsFunction(\n \"sum\",\n required_args=[\n fields.MetricArg(\"column\"),\n 
],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"sumIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n ),\n fields.MetricsFunction(\n \"sumIf\",\n required_args=[\n fields.ColumnTagArg(\"if_col\"),\n fields.FunctionArg(\"if_val\"),\n ],\n calculated_args=[\n {\n \"name\": \"resolved_val\",\n \"fn\": lambda args: self.builder.resolve_tag_value(args[\"if_val\"]),\n }\n ],\n snql_counter=lambda args, alias: Function(\n \"sumIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [args[\"if_col\"], args[\"resolved_val\"]]),\n ],\n alias,\n ),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"percentile\",\n required_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n fields.NumberRange(\"percentile\", 0, 1),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=self._resolve_percentile,\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"count_unique\",\n required_args=[\n fields.MetricArg(\n \"column\", allowed_columns=[\"user\"], allow_custom_measurements=False\n )\n ],\n calculated_args=[resolve_metric_id],\n snql_set=lambda args, alias: Function(\n \"uniqIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"uniq\",\n snql_set=lambda args, alias: Function(\n \"uniq\",\n [Column(\"value\")],\n alias,\n ),\n ),\n fields.MetricsFunction(\n \"uniqIf\",\n required_args=[\n fields.ColumnTagArg(\"if_col\"),\n fields.FunctionArg(\"if_val\"),\n ],\n calculated_args=[\n {\n \"name\": \"resolved_val\",\n \"fn\": lambda args: self.builder.resolve_tag_value(args[\"if_val\"]),\n }\n ],\n snql_set=lambda args, alias: Function(\n \"uniqIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [args[\"if_col\"], args[\"resolved_val\"]]),\n ],\n alias,\n ),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n ],\n alias,\n ),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_web_vitals\",\n required_args=[\n fields.MetricArg(\n \"column\",\n allowed_columns=[\n \"measurements.fp\",\n \"measurements.fcp\",\n \"measurements.lcp\",\n \"measurements.fid\",\n \"measurements.cls\",\n ],\n allow_custom_measurements=False,\n ),\n fields.SnQLStringArg(\n \"quality\", allowed_strings=[\"good\", \"meh\", \"poor\", \"any\"]\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=self._resolve_web_vital_function,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"epm\",\n snql_distribution=lambda args, alias: Function(\n \"divide\",\n [\n Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n ],\n ),\n Function(\"divide\", [args[\"interval\"], 60]),\n ],\n alias,\n ),\n optional_args=[fields.IntervalDefault(\"interval\", 1, None)],\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"eps\",\n snql_distribution=lambda args, 
alias: Function(\n \"divide\",\n [\n Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n ],\n ),\n args[\"interval\"],\n ],\n alias,\n ),\n optional_args=[fields.IntervalDefault(\"interval\", 1, None)],\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"failure_count\",\n snql_distribution=self._resolve_failure_count,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"failure_rate\",\n snql_distribution=lambda args, alias: Function(\n \"divide\",\n [\n self._resolve_failure_count(args),\n Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n default_result_type=\"percentage\",\n ),\n fields.MetricsFunction(\n \"histogram\",\n required_args=[fields.MetricArg(\"column\")],\n calculated_args=[resolve_metric_id],\n snql_distribution=self._resolve_histogram_function,\n default_result_type=\"number\",\n private=True,\n ),\n ]\n }\n\n for alias, name in constants.FUNCTION_ALIASES.items():\n if name in function_converter:\n function_converter[alias] = function_converter[name].alias_as(alias)\n\n return function_converter\n\n # Field Aliases", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 28, "n_whitespaces": 14028, "n_words": 747, "vocab_size": 202, "complexity": 6, "nloc": 548, "token_counts": 2133, "n_ast_nodes": 3312, "n_identifiers": 53, "random_cut": "def function_converter(self) -> Mapping[str, fields.MetricsFunction]:\n \n resolve_metric_id = {\n \"name\": \"metric_id\",\n \"fn\": lambda args: self.resolve_metric(args[\"column\"]),\n }\n\n function_converter = {\n function.name: function\n for function in [\n # Note while the discover version of apdex, count_miserable, user_misery\n # accepts arguments, because this is precomputed with tags no parameters\n # are available\n fields.MetricsFunction(\n \"apdex\",\n optional_args=[fields.NullableNumberRange(\"satisfaction\", 0, None)],\n snql_distribution=self._resolve_apdex_function,\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"avg\",\n required_args=[\n fields.MetricArg(\n \"column\",\n allowed_columns=constants.METRIC_DURATION_COLUMNS,\n )\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"avgIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n args[\"metric_id\"],\n ],\n ),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_miserable\",\n required_args=[\n fields.MetricArg(\n \"column\", allowed_columns=[\"user\"], allow_custom_measurements=False\n )\n ],\n optional_args=[fields.NullableNumberRange(\"satisfaction\", 0, None)],\n calculated_args=[resolve_metric_id],\n snql_set=self._resolve_count_miserable_function,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_unparameterized_transactions\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"equals\",\n [\n self.builder.column(\"transaction\"),\n self.builder.resolve_tag_value(\"<< unparameterized >>\"),\n ],\n 
),\n ],\n ),\n ],\n alias,\n ),\n # Not yet exposed, need to add far more validation around tag&value\n private=True,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_null_transactions\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"equals\",\n [\n self.builder.column(\"transaction\"),\n \"\" if self.builder.tag_values_are_strings else 0,\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n private=True,\n ),\n fields.MetricsFunction(\n \"count_has_transaction_name\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"and\",\n [\n Function(\n \"notEquals\",\n [\n self.builder.column(\"transaction\"),\n \"\"\n if self.builder.tag_values_are_strings\n else 0,\n ],\n ),\n Function(\n \"notEquals\",\n [\n self.builder.column(\"transaction\"),\n self.builder.resolve_tag_value(\n \"<< unparameterized >>\"\n ),\n ],\n ),\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n private=True,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"user_misery\",\n optional_args=[\n fields.NullableNumberRange(\"satisfaction\", 0, None),\n fields.with_default(\n constants.MISERY_ALPHA, fields.NumberRange(\"alpha\", 0, None)\n ),\n fields.with_default(\n constants.MISERY_BETA, fields.NumberRange(\"beta\", 0, None)\n ),\n ],\n calculated_args=[],\n snql_set=self._resolve_user_misery_function,\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"p50\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.5\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p75\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.75\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p90\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.90\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p95\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.95\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p99\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n 
fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.99\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p100\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"max\",\n required_args=[\n fields.MetricArg(\"column\"),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"maxIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n ),\n fields.MetricsFunction(\n \"min\",\n required_args=[\n fields.MetricArg(\"column\"),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"minIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n ),\n fields.MetricsFunction(\n \"sum\",\n required_args=[\n fields.MetricArg(\"column\"),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"sumIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ", "d_id": 18067, "documentation": { "docstring": "While the final functions in clickhouse must have their -Merge combinators in order to function, we don't\n need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions\n like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles)\n Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here, can't be a dynamic list since the\n Metric Layer will actually handle which dataset each function goes to\n ", "n_words": 68, "vocab_size": 57, "n_whitespaces": 103, "language": "en" } }, { "id": 178391, "commit_id": "2c20b90946a8aa5ad4ee39ad365ff1b83f182770", "repo": "Nuitka", "path": "nuitka/freezer/Standalone.py", "file_name": "Standalone.py", "fun_name": "copyUsedDLLs", "commit_message": "UI: In case of PermissionError, allow uses to retry\n\n* Esp. on Windows it happens a lot that running programs cannot be\n updated by Nuitka, this avoids the cryptic error somewhere ranomly.", "code": "def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points):\n # This is terribly complex, because we check the list of used DLLs\n # trying to avoid duplicates, and detecting errors with them not\n # being binary identical, so we can report them. 
And then of course\n # we also need to handle OS specifics.\n # pylint: disable=too-many-branches,too-many-locals,too-many-statements\n\n used_dlls = detectUsedDLLs(\n source_dir=source_dir,\n standalone_entry_points=standalone_entry_points,\n use_cache=not Options.shallNotUseDependsExeCachedResults()\n and not Options.getWindowsDependencyTool() == \"depends.exe\",\n update_cache=not Options.shallNotStoreDependsExeCachedResults()\n and not Options.getWindowsDependencyTool() == \"depends.exe\",\n )\n\n removed_dlls = set()\n warned_about = set()\n\n # Fist make checks and remove some.\n for dll_filename1, sources1 in tuple(iterItems(used_dlls)):\n if dll_filename1 in removed_dlls:\n continue\n\n for dll_filename2, sources2 in tuple(iterItems(used_dlls)):\n if dll_filename1 == dll_filename2:\n continue\n\n if dll_filename2 in removed_dlls:\n continue\n\n # Colliding basenames are an issue to us.\n if os.path.basename(dll_filename1) != os.path.basename(dll_filename2):\n continue\n\n # May already have been removed earlier\n if dll_filename1 not in used_dlls:\n continue\n\n if dll_filename2 not in used_dlls:\n continue\n\n dll_name = os.path.basename(dll_filename1)\n\n if Options.isShowInclusion():\n inclusion_logger.info(\n \n % (dll_name, dll_filename1, dll_filename2)\n )\n\n # Check that if a DLL has the same name, if it's identical, then it's easy.\n if haveSameFileContents(dll_filename1, dll_filename2):\n del used_dlls[dll_filename2]\n removed_dlls.add(dll_filename2)\n\n continue\n\n # For Win32 we can check out file versions.\n if Utils.isWin32Windows():\n dll_version1 = getWindowsDLLVersion(dll_filename1)\n dll_version2 = getWindowsDLLVersion(dll_filename2)\n\n if dll_version2 < dll_version1:\n del used_dlls[dll_filename2]\n removed_dlls.add(dll_filename2)\n\n solved = True\n elif dll_version1 < dll_version2:\n del used_dlls[dll_filename1]\n removed_dlls.add(dll_filename1)\n\n solved = True\n else:\n solved = False\n\n if solved:\n if dll_name not in warned_about and dll_name not in ms_runtime_dlls:\n warned_about.add(dll_name)\n\n inclusion_logger.warning(\n \"Conflicting DLLs for '%s' in your installation, newest file version used, hoping for the best.\"\n % dll_name\n )\n\n continue\n\n # So we have conflicting DLLs, in which case we do report the fact.\n inclusion_logger.warning(\n \n % (\n dll_name,\n dll_filename1,\n \"\\n \".join(sources1),\n dll_filename2,\n \"\\n \".join(sources2),\n )\n )\n\n del used_dlls[dll_filename2]\n removed_dlls.add(dll_filename2)\n\n dll_map = []\n\n for dll_filename, sources in iterItems(used_dlls):\n dll_name = os.path.basename(dll_filename)\n\n target_path = os.path.join(dist_dir, dll_name)\n\n # Sometimes DLL dependencies were copied there already.\n if not os.path.exists(target_path):\n copyFile(dll_filename, target_path)\n\n dll_map.append((dll_filename, dll_name))\n\n if Options.isShowInclusion():\n inclusion_logger.info(\n \"Included used shared library '%s' (used by %s).\"\n % (dll_filename, \", \".join(sources))\n )\n\n if Utils.isMacOS():\n # For macOS, the binary and the DLLs needs to be changed to reflect\n # the relative DLL location in the \".dist\" folder.\n for standalone_entry_point in standalone_entry_points:\n fixupBinaryDLLPathsMacOS(\n binary_filename=standalone_entry_point.dest_path,\n dll_map=dll_map,\n original_location=standalone_entry_point.source_path,\n )\n\n for original_path, dll_filename in dll_map:\n fixupBinaryDLLPathsMacOS(\n binary_filename=os.path.join(dist_dir, dll_filename),\n dll_map=dll_map,\n 
original_location=original_path,\n )\n\n # Remove code signature from CPython installed library\n candidate = os.path.join(\n dist_dir,\n \"Python\",\n )\n\n if os.path.exists(candidate):\n removeMacOSCodeSignature(candidate)\n\n # Remove or update rpath settings.\n if Utils.getOS() in (\"Linux\", \"Darwin\"):\n # For Linux, the \"rpath\" of libraries may be an issue and must be\n # removed.\n if Utils.isMacOS():\n start = 0\n else:\n start = 1\n\n for standalone_entry_point in standalone_entry_points[start:]:\n count = relpath(\n path=standalone_entry_point.dest_path, start=dist_dir\n ).count(os.path.sep)\n\n rpath = os.path.join(\"$ORIGIN\", *([\"..\"] * count))\n setSharedLibraryRPATH(standalone_entry_point.dest_path, rpath)\n\n for _original_path, dll_filename in dll_map:\n setSharedLibraryRPATH(os.path.join(dist_dir, dll_filename), \"$ORIGIN\")\n\n if Utils.isWin32Windows():\n if python_version < 0x300:\n # For Win32, we might have to remove SXS paths\n for standalone_entry_point in standalone_entry_points[1:]:\n removeSxsFromDLL(standalone_entry_point.dest_path)\n\n for _original_path, dll_filename in dll_map:\n removeSxsFromDLL(os.path.join(dist_dir, dll_filename))\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 2042, "n_words": 477, "vocab_size": 262, "complexity": 34, "nloc": 125, "token_counts": 660, "n_ast_nodes": 1077, "n_identifiers": 66, "random_cut": "def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points):\n # This is terribly complex, because we check the list of used DLLs\n # trying to avoid duplicates, and detecting errors with them not\n # being binary identical, so we can report them. And then of course\n # we also need to handle OS specifics.\n # pylint: disable=too-many-branches,too-many-locals,too-many-statements\n\n used_dlls = detectUsedDLLs(\n source_dir=source_dir,\n standalone_entry_points=standalone_entry_points,\n use_cache=not Options.shallNotUseDependsExeCachedResults()\n and not Options.getWindowsDependencyTool() == \"depends.exe\",\n update_cache=not Options.shallNotStoreDependsExeCachedResults()\n and not Options.getWindowsDependencyTool() == \"depends.exe\",\n )\n\n removed_dlls = set()\n warned_about = set()\n\n # Fist make checks and remove some.\n for dll_filename1, sources1 in tuple(iterItems(used_dlls)):\n if dll_filename1 in removed_dlls:\n continue\n\n for dll_filename2, sources2 in tuple(iterItems(used_dlls)):\n if dll_fi", "d_id": 42682, "documentation": { "docstring": "Colliding DLL names for %s, checking identity of \\\n'%s' <-> '%s'.\\\nIgnoring non-identical DLLs for '%s'.\n%s used by:\n %s\ndifferent from\n%s used by\n %s", "n_words": 27, "vocab_size": 22, "n_whitespaces": 25, "language": "en" } }, { "id": 197718, "commit_id": "392c40aceadd4c7cdeed0fceb93a763927dc0ca1", "repo": "sympy", "path": "sympy/integrals/transforms.py", "file_name": "transforms.py", "fun_name": "_laplace_rule_diff", "commit_message": "include the coefficient in L(A*x')", "code": "def _laplace_rule_diff(f, t, s, doit=True, **hints):\n \n hints.pop('simplify', True)\n a = Wild('a', exclude=[t])\n y = Wild('y')\n n = Wild('n', exclude=[t])\n g = WildFunction('g', nargs=1)\n ma1 = f.match(a*Derivative(g, (t, n)))\n if ma1 and ma1[g].args[0] == t and ma1[n].is_integer:\n debug('_laplace_apply_rules match:')\n debug(' f: %s'%(f,))\n debug(' rule: time derivative (1.11, 1.12)')\n d = []\n for k in range(ma1[n]):\n if k==0:\n y = ma1[g].func(t).subs(t, 0)\n else:\n y = 
Derivative(ma1[g].func(t), (t, k)).subs(t, 0)\n d.append(s**(ma1[n]-k-1)*y)\n r = s**ma1[n]*_laplace_apply_rules(ma1[g].func(t), t, s, doit=doit,\n **hints)\n return ma1[a]*(r - Add(*d))\n return None\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 275, "n_words": 81, "vocab_size": 61, "complexity": 6, "nloc": 22, "token_counts": 258, "n_ast_nodes": 404, "n_identifiers": 30, "random_cut": "def _laplace_rule_diff(f, t, s, doit=True, **hints):\n \n hints.pop('simplify', True)\n a = Wild('a', exclude=[t])\n y = Wild('y')\n n = Wild('n', exclude=[t])\n g = WildFunction('g', nargs=1)\n ma1 = f.match(a*Derivative(g, (t, n)))\n if ma1 and ma1[g].args[0] == t and ma1[n].is_integer:\n debug('_laplace_apply_rules match:')\n debug(' f: %s'%(f,))\n debug(' rule: time derivative (1.11, 1.12)')\n d = []\n for k in range(ma1[n]):\n if k==0:\n y = ma1[g].func(t).subs(t, 0)\n else:\n y = Derivative(ma1[g].func(t), (t, k)).subs(t, 0)\n d.append(s**(ma1[n]-k", "d_id": 48671, "documentation": { "docstring": "\n This internal helper function tries to transform an expression containing\n a derivative of an undefined function and returns `None` if it cannot\n do it.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 37, "language": "en" } }, { "id": 81376, "commit_id": "782667a34ee45bfe825b29db39c67d4465391bdb", "repo": "awx", "path": "awx/sso/pipeline.py", "file_name": "pipeline.py", "fun_name": "_check_flag", "commit_message": "Allow multiple values in SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR.is_*_[value|role] settings (#12558)", "code": "def _check_flag(user, flag, attributes, user_flags_settings):\n \n new_flag = False\n is_role_key = \"is_%s_role\" % (flag)\n is_attr_key = \"is_%s_attr\" % (flag)\n is_value_key = \"is_%s_value\" % (flag)\n remove_setting = \"remove_%ss\" % (flag)\n\n # Check to see if we are respecting a role and, if so, does our user have that role?\n required_roles = user_flags_settings.get(is_role_key, None)\n if required_roles:\n matching_roles = _get_matches(required_roles, attributes.get('Role', []))\n\n # We do a 2 layer check here so that we don't spit out the else message if there is no role defined\n if matching_roles:\n logger.debug(\"User %s has %s role(s) %s\" % (user.username, flag, ', '.join(matching_roles)))\n new_flag = True\n else:\n logger.debug(\"User %s is missing the %s role(s) %s\" % (user.username, flag, ', '.join(required_roles)))\n\n # Next, check to see if we are respecting an attribute; this will take priority over the role if its defined\n attr_setting = user_flags_settings.get(is_attr_key, None)\n if attr_setting and attributes.get(attr_setting, None):\n # Do we have a required value for the attribute\n required_value = user_flags_settings.get(is_value_key, None)\n if required_value:\n # If so, check and see if the value of the attr matches the required value\n saml_user_attribute_value = attributes.get(attr_setting, None)\n matching_values = _get_matches(required_value, saml_user_attribute_value)\n\n if matching_values:\n logger.debug(\"Giving %s %s from attribute %s with matching values %s\" % (user.username, flag, attr_setting, ', '.join(matching_values)))\n new_flag = True\n # if they don't match make sure that new_flag is false\n else:\n logger.debug(\n \"Refusing %s for %s because attr %s (%s) did not match value(s) %s\"\n % (flag, user.username, attr_setting, \", \".join(saml_user_attribute_value), ', '.join(required_value))\n )\n new_flag = False\n # If there 
was no required value then we can just allow them in because of the attribute\n else:\n logger.debug(\"Giving %s %s from attribute %s\" % (user.username, flag, attr_setting))\n new_flag = True\n\n # Get the users old flag\n old_value = getattr(user, \"is_%s\" % (flag))\n\n # If we are not removing the flag and they were a system admin and now we don't want them to be just return\n remove_flag = user_flags_settings.get(remove_setting, True)\n if not remove_flag and (old_value and not new_flag):\n logger.debug(\"Remove flag %s preventing removal of %s for %s\" % (remove_flag, flag, user.username))\n return old_value, False\n\n # If the user was flagged and we are going to make them not flagged make sure there is a message\n if old_value and not new_flag:\n logger.debug(\"Revoking %s from %s\" % (flag, user.username))\n\n return new_flag, old_value != new_flag\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 752, "n_words": 374, "vocab_size": 171, "complexity": 12, "nloc": 40, "token_counts": 339, "n_ast_nodes": 561, "n_identifiers": 25, "random_cut": "def _check_flag(user, flag, attributes, user_flags_settings):\n \n new_flag = False\n is_role_key = \"is_%s_role\" % (flag)\n is_attr_key = \"is_%s_attr\" % (flag)\n is_value_key = \"is_%s_value\" % (flag)\n remove_setting = \"remove_%ss\" % (flag)\n\n # Check to see if we are respecting a role and, if so, does our user have that role?\n required_roles = user_flags_settings.get(is_role_key, None)\n if required_roles:\n matching_roles = _get_matches(required_roles, attributes.get('Role', []))\n\n # We do a 2 layer check here so that we don't spit out the else message if there is no role defined\n if matching_roles:\n logger.debug(\"User %s has %s role(s) %s\" % (user.username, flag, ', '.join(matching_roles)))\n new_flag = True\n else:\n logger.debug(\"User %s is missing the %s role(s) %s\" % (user.username, flag, ', '.join(required_roles)))\n\n # Next, check to see if we are respecting an attribute; this will take priority over the role if its defined\n attr_setting = user_flags_settings.get(is_attr_key, None)\n if attr_setting and attributes.get(attr_setting, None):\n # Do we have a required value for the attribute\n required_value = user_flags_settings.get(is_value_key, None)\n if required_value:\n ", "d_id": 17193, "documentation": { "docstring": "\n Helper function to set the is_superuser is_system_auditor flags for the SAML adapter\n Returns the new flag and whether or not it changed the flag\n ", "n_words": 24, "vocab_size": 20, "n_whitespaces": 34, "language": "en" } }, { "id": 249543, "commit_id": "ac1a31740b6d0dfda4d57a25762aaddfde981caf", "repo": "synapse", "path": "tests/storage/test_event_federation.py", "file_name": "test_event_federation.py", "fun_name": "test_get_backfill_points_in_room", "commit_message": "Only try to backfill event if we haven't tried before recently (#13635)\n\nOnly try to backfill event if we haven't tried before recently (exponential backoff). 
No need to keep trying the same backfill point that fails over and over.\r\n\r\nFix https://github.com/matrix-org/synapse/issues/13622\r\nFix https://github.com/matrix-org/synapse/issues/8451\r\n\r\nFollow-up to https://github.com/matrix-org/synapse/pull/13589\r\n\r\nPart of https://github.com/matrix-org/synapse/issues/13356", "code": "def test_get_backfill_points_in_room(self):\n \n setup_info = self._setup_room_for_backfill_tests()\n room_id = setup_info.room_id\n\n backfill_points = self.get_success(\n self.store.get_backfill_points_in_room(room_id)\n )\n backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]\n self.assertListEqual(\n backfill_event_ids, [\"b6\", \"b5\", \"b4\", \"2\", \"b3\", \"b2\", \"b1\"]\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 108, "n_words": 30, "vocab_size": 26, "complexity": 2, "nloc": 10, "token_counts": 67, "n_ast_nodes": 115, "n_identifiers": 12, "random_cut": "def test_get_backfill_points_in_room(self):\n \n setup_info = self._setup_room_for_backfill_tests()\n room_id = setup_info.room_id\n\n backfill_points = self.get_success(\n self.store.get_backfill_points_in_room(room_id)\n )\n backfill_event_ids = [backfill_point[0] for backfi", "d_id": 72983, "documentation": { "docstring": "\n Test to make sure we get some backfill points\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 176448, "commit_id": "cc1db275efc709cb964ce88abbfa877798d58c10", "repo": "networkx", "path": "networkx/algorithms/asteroidal.py", "file_name": "asteroidal.py", "fun_name": "find_asteroidal_triple", "commit_message": "Minor improvements from general code readthrough (#5414)\n\n* Add deprecated directive to reversed docstring.\r\n\r\n* Add missing dep directives to shpfiles.\r\n\r\n* Remove defn of INF sentinel.\r\n\r\n* typo.\r\n\r\n* str -> comment in forloop.\r\n\r\n* STY: appropriate casing for var name.", "code": "def find_asteroidal_triple(G):\n r\n V = set(G.nodes)\n\n if len(V) < 6:\n # An asteroidal triple cannot exist in a graph with 5 or less vertices.\n return None\n\n component_structure = create_component_structure(G)\n E_complement = set(nx.complement(G).edges)\n\n for e in E_complement:\n u = e[0]\n v = e[1]\n u_neighborhood = set(G[u]).union([u])\n v_neighborhood = set(G[v]).union([v])\n union_of_neighborhoods = u_neighborhood.union(v_neighborhood)\n for w in V - union_of_neighborhoods:\n # Check for each pair of vertices whether they belong to the\n # same connected component when the closed neighborhood of the\n # third is removed.\n if (\n component_structure[u][v] == component_structure[u][w]\n and component_structure[v][u] == component_structure[v][w]\n and component_structure[w][u] == component_structure[w][v]\n ):\n return [u, v, w]\n return None\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 294, "n_words": 105, "vocab_size": 80, "complexity": 7, "nloc": 61, "token_counts": 169, "n_ast_nodes": 280, "n_identifiers": 21, "random_cut": "def find_asteroidal_triple(G):\n r\n V = set(G.nodes)\n\n if len(V) < 6:\n # An asteroidal triple cannot exist in a graph with 5 or less vertices.\n return None\n\n component_structure = 
create_component_structure(G)\n E_complement = set(nx.complement(G).edges)\n\n for e in E_complement:\n u = e[0]\n v = e[1]\n u_neighborhood = set(G[u]).union([u])\n v_neighborhood = set(G[v]).union([v])\n union_of_neighborhoods = u_neighborhood.union(v_neighborhood)\n ", "d_id": 41909, "documentation": { "docstring": "Find an asteroidal triple in the given graph.\n\n An asteroidal triple is a triple of non-adjacent vertices such that\n there exists a path between any two of them which avoids the closed\n neighborhood of the third. It checks all independent triples of vertices\n and whether they are an asteroidal triple or not. This is done with the\n help of a data structure called a component structure.\n A component structure encodes information about which vertices belongs to\n the same connected component when the closed neighborhood of a given vertex\n is removed from the graph. The algorithm used to check is the trivial\n one, outlined in [1]_, which has a runtime of\n :math:`O(|V||\\overline{E} + |V||E|)`, where the second term is the\n creation of the component structure.\n\n Parameters\n ----------\n G : NetworkX Graph\n The graph to check whether is AT-free or not\n\n Returns\n -------\n list or None\n An asteroidal triple is returned as a list of nodes. If no asteroidal\n triple exists, i.e. the graph is AT-free, then None is returned.\n The returned value depends on the certificate parameter. The default\n option is a bool which is True if the graph is AT-free, i.e. the\n given graph contains no asteroidal triples, and False otherwise, i.e.\n if the graph contains at least one asteroidal triple.\n\n Notes\n -----\n The component structure and the algorithm is described in [1]_. The current\n implementation implements the trivial algorithm for simple graphs.\n\n References\n ----------\n .. 
[1] Ekkehard Köhler,\n \"Recognizing Graphs without asteroidal triples\",\n Journal of Discrete Algorithms 2, pages 439-452, 2004.\n https://www.sciencedirect.com/science/article/pii/S157086670400019X\n ", "n_words": 253, "vocab_size": 145, "n_whitespaces": 395, "language": "en" } }, { "id": 66155, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/leave_application/leave_application.py", "file_name": "leave_application.py", "fun_name": "get_leave_entries", "commit_message": "style: format code with black", "code": "def get_leave_entries(employee, leave_type, from_date, to_date):\n\t\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"from_date\": from_date, \"to_date\": to_date, \"employee\": employee, \"leave_type\": leave_type},\n\t\tas_dict=1,\n\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 12, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 18, "token_counts": 44, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def get_leave_entries(employee, leave_type, from_date, to_date):\n\t\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"from_date\": from_date, \"to_date\": to_date, \"employee\": employee, \"leav", "d_id": 14114, "documentation": { "docstring": "Returns leave entries between from_date and to_date.\n\t\tSELECT\n\t\t\temployee, leave_type, from_date, to_date, leaves, transaction_name, transaction_type, holiday_list,\n\t\t\tis_carry_forward, is_expired\n\t\tFROM `tabLeave Ledger Entry`\n\t\tWHERE employee=%(employee)s AND leave_type=%(leave_type)s\n\t\t\tAND docstatus=1\n\t\t\tAND (leaves<0\n\t\t\t\tOR is_expired=1)\n\t\t\tAND (from_date between %(from_date)s AND %(to_date)s\n\t\t\t\tOR to_date between %(from_date)s AND %(to_date)s\n\t\t\t\tOR (from_date < %(from_date)s AND to_date > %(to_date)s))\n\t", "n_words": 52, "vocab_size": 37, "n_whitespaces": 40, "language": "en" } }, { "id": 206236, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/defaultfilters.py", "file_name": "defaultfilters.py", "fun_name": "addslashes", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def addslashes(value):\n \n return value.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\")\n\n\n@register.filter(is_safe=True)\n@stringfilter", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@register.filter(is_safe=True)\n@stringfilter", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 13, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 29, "n_ast_nodes": 81, "n_identifiers": 7, "random_cut": "def addslashes(value):\n \n return value.replace(", "d_id": 51427, "documentation": { "docstring": "\n Add slashes before quotes. Useful for escaping strings in CSV, for\n example. 
Less useful for escaping JavaScript; use the ``escapejs``\n filter instead.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 35, "language": "en" } }, { "id": 154083, "commit_id": "02363589aa5105e091fa3d790b29cddf94cc8118", "repo": "modin", "path": "modin/config/envvars.py", "file_name": "envvars.py", "fun_name": "_get_raw_from_config", "commit_message": "REFACTOR-#4629: Add type annotations to `modin/config` (#4685)\n\nSigned-off-by: Karthik Velayutham ", "code": "def _get_raw_from_config(cls) -> str:\n \n if cls.varname is None:\n raise TypeError(\"varname should not be None\")\n return os.environ[cls.varname]\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 19, "token_counts": 29, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "def _get_raw_from_config(cls) -> str:\n \n ", "d_id": 35765, "documentation": { "docstring": "\n Read the value from environment variable.\n\n Returns\n -------\n str\n Config raw value.\n\n Raises\n ------\n TypeError\n If `varname` is None.\n KeyError\n If value is absent.\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 121, "language": "en" } }, { "id": 153821, "commit_id": "57e29bc5d82348006c5170ef9ac0a9eedcd9acf9", "repo": "modin", "path": "modin/core/storage_formats/base/query_compiler.py", "file_name": "query_compiler.py", "fun_name": "is_monotonic_decreasing", "commit_message": "REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514)\n\nCo-authored-by: Rehan Sohail Durrani \r\nSigned-off-by: jeffreykennethli ", "code": "def is_monotonic_decreasing(self):\n \n return SeriesDefault.register(pandas.Series.is_monotonic_decreasing)(self)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 6, "random_cut": "def is_monotonic_decreasing(self):\n \n return SeriesDefault.register(pandas.Series.is_monotonic_decreasing)(self)\n", "d_id": 35636, "documentation": { "docstring": "\n Return boolean if values in the object are monotonically decreasing.\n\n Returns\n -------\n bool\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 49, "language": "en" } }, { "id": 96147, "commit_id": "09726d7fc95e53bb516e328fc1811fc9a0704cac", "repo": "sentry", "path": "src/sentry/buffer/redis.py", "file_name": "redis.py", "fun_name": "get", "commit_message": "fix(post_process): Fetch buffered `times_seen` values and add them to `Group.times_seen` (#31624)\n\nIn `post_process_group` we process issue alert rules and also ignored groups. Both of these can have\r\nconditions that read from the `times_seen` value on the `Group`.\r\n\r\nThe problem here is that updates to `times_seen` are buffered and only written every 45s or so. This\r\nmeans that most of the time when a `Group` goes through `post_process_group` it has an out of date\r\n`times_seen` value. For infrequently updated groups, this can just mean that the count is -1. But\r\nfor high volume groups this could mean that we're considerably below the count.\r\n\r\nTo improve this, we read the current value from buffers and store it as pending updates on the group.\r\nWe then use this pending value when checking rules and snoozes in post process. 
There's a potential \r\nrace condition here where we fetch the `Group`, and before we fetch the value from buffers it is \r\ncleared, and so we miss out on the update. This should be infrequent enough that it's not a problem, \r\nand either way we will be considerably more accurate most of the time.", "code": "def get(self, model, columns, filters):\n \n key = self._make_key(model, filters)\n conn = self.cluster.get_local_client_for_key(key)\n pipe = conn.pipeline()\n\n for col in columns:\n pipe.hget(key, f\"i+{col}\")\n results = pipe.execute()\n\n return {\n col: (int(results[i]) if results[i] is not None else 0) for i, col in enumerate(columns)\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 119, "n_words": 41, "vocab_size": 35, "complexity": 4, "nloc": 10, "token_counts": 93, "n_ast_nodes": 146, "n_identifiers": 19, "random_cut": "def get(self, model, columns, filters):\n \n key = self._make_key(model, filters)\n conn = self.cluster.get_local_client_for_key(key)\n pipe = conn.pipeline()\n\n for col in columns:\n ", "d_id": 19281, "documentation": { "docstring": "\n Fetches buffered values for a model/filter. Passed columns must be integer columns.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 83385, "commit_id": "4b9770e270823b7ed2bbbeda0e4450f0ba6a288b", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_non_ascii_subscription_for_principal", "commit_message": "stream_settings: Show stream privacy & description in stream events.\n\nProvide stream privacy and description in stream notification events\nwhen stream is created.\nIn function \"send_messages_for_new_subscribers\" for when stream is\ncreated, put policy name and description of the stream.\n\nFixes #21004", "code": "def test_non_ascii_subscription_for_principal(self) -> None:\n \n iago = self.example_user(\"iago\")\n self.assert_adding_subscriptions_for_principal(\n iago.id, get_realm(\"zulip\"), [\"hümbüǵ\"], policy_name=\"Public\"\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 52, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 9, "token_counts": 37, "n_ast_nodes": 67, "n_identifiers": 8, "random_cut": "def test_non_ascii_subscription_for_principal(self) -> None:\n \n iago = self.example_user(\"iago\")\n self.assert_adding_subscriptions_for_principal(\n iago.id, get_realm(\"zulip\"), [\"hümbüǵ\"], policy_name=\"Public\"", "d_id": 17671, "documentation": { "docstring": "\n You can subscribe other people to streams even if they containing\n non-ASCII characters.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 42453, "commit_id": "8ffd0d8190552d45f8b92e18da3fc41639e5185d", "repo": "nltk", "path": "nltk/corpus/reader/wordnet.py", "file_name": "wordnet.py", "fun_name": "add_provs", "commit_message": "Initialize empty provenance for default English", "code": "def add_provs(self, reader):\n \n fileids = reader.fileids()\n for fileid in fileids:\n prov, langfile = os.path.split(fileid)\n file_name, file_extension = os.path.splitext(langfile)\n if file_extension == \".tab\":\n lang = file_name.split(\"-\")[-1]\n if lang in self.provenances.keys():\n # We already have another resource for this lang,\n # so we need to further specify the lang id:\n lang = f\"{lang}_{prov}\"\n 
self.provenances[lang] = prov\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 210, "n_words": 54, "vocab_size": 41, "complexity": 4, "nloc": 10, "token_counts": 84, "n_ast_nodes": 150, "n_identifiers": 16, "random_cut": "def add_provs(self, reader):\n \n fileids = reader.fileids()\n for fileid in fileids:\n prov, langfile = os.path.split(fileid)\n file_name, file_extension = os.path.splitext(langfile)\n if file_extension == \".tab\":\n lang = file_name.split(\"-\")[-1]\n if lang in self.provenance", "d_id": 7546, "documentation": { "docstring": "Add languages from Multilingual Wordnet to the provenance dictionary", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 248149, "commit_id": "75dff3dc980974960f55fa21fc8e672201f63045", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "test_thread_with_bundled_aggregations_for_latest", "commit_message": "Include bundled aggregations for the latest event in a thread. (#12273)\n\nThe `latest_event` field of the bundled aggregations for `m.thread` relations\r\ndid not include bundled aggregations itself. This resulted in clients needing to\r\nimmediately request the event from the server (and thus making it useless that\r\nthe latest event itself was serialized instead of just including an event ID).", "code": "def test_thread_with_bundled_aggregations_for_latest(self) -> None:\n \n self._send_relation(RelationTypes.THREAD, \"m.room.test\")\n channel = self._send_relation(RelationTypes.THREAD, \"m.room.test\")\n thread_2 = channel.json_body[\"event_id\"]\n\n self._send_relation(\n RelationTypes.ANNOTATION, \"m.reaction\", \"a\", parent_id=thread_2\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 72, "n_words": 19, "vocab_size": 16, "complexity": 1, "nloc": 12, "token_counts": 68, "n_ast_nodes": 93, "n_identifiers": 10, "random_cut": "def test_thread_with_bundled_aggregations_for_latest(self) -> None:\n \n self._send_relation(Rel", "d_id": 72127, "documentation": { "docstring": "\n Bundled aggregations should get applied to the latest thread event.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 56933, "commit_id": "574d10ff7612661b37801c811862f18998521d58", "repo": "prefect", "path": "src/prefect/blocks/kubernetes.py", "file_name": "kubernetes.py", "fun_name": "from_environment", "commit_message": "organizational changes for the KubernetesClusterConfig and add from_environment classmethod", "code": "def from_environment(cls):\n \n\n return cls.from_file(path=KUBE_CONFIG_DEFAULT_LOCATION)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 5, "random_cut": "def from_environment(cls):\n \n\n return cls.from_file(path=KUBE_CONFI", "d_id": 11592, "documentation": { "docstring": "\n Factory method to produce an instance of this class using the default kube config location\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 30, "language": "en" } }, { "id": 247511, "commit_id": "32c828d0f760492711a98b11376e229d795fd1b3", "repo": "synapse", "path": "tests/rest/media/v1/test_media_storage.py", "file_name": 
"test_media_storage.py", "fun_name": "test_thumbnail_repeated_thumbnail", "commit_message": "Add type hints to `tests/rest`. (#12208)\n\nCo-authored-by: Patrick Cloke ", "code": "def test_thumbnail_repeated_thumbnail(self) -> None:\n \n self._test_thumbnail(\n \"scale\", self.test_image.expected_scaled, self.test_image.expected_found\n )\n\n if not self.test_image.expected_found:\n return\n\n # Fetching again should work, without re-requesting the image from the\n # remote.\n params = \"?width=32&height=32&method=scale\"\n channel = make_request(\n self.reactor,\n FakeSite(self.thumbnail_resource, self.reactor),\n \"GET\",\n self.media_id + params,\n shorthand=False,\n await_result=False,\n )\n self.pump()\n\n self.assertEqual(channel.code, 200)\n if self.test_image.expected_scaled:\n self.assertEqual(\n channel.result[\"body\"],\n self.test_image.expected_scaled,\n channel.result[\"body\"],\n )\n\n # Deleting the thumbnail on disk then re-requesting it should work as\n # Synapse should regenerate missing thumbnails.\n origin, media_id = self.media_id.split(\"/\")\n info = self.get_success(self.store.get_cached_remote_media(origin, media_id))\n file_id = info[\"filesystem_id\"]\n\n thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir(\n origin, file_id\n )\n shutil.rmtree(thumbnail_dir, ignore_errors=True)\n\n channel = make_request(\n self.reactor,\n FakeSite(self.thumbnail_resource, self.reactor),\n \"GET\",\n self.media_id + params,\n shorthand=False,\n await_result=False,\n )\n self.pump()\n\n self.assertEqual(channel.code, 200)\n if self.test_image.expected_scaled:\n self.assertEqual(\n channel.result[\"body\"],\n self.test_image.expected_scaled,\n channel.result[\"body\"],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 586, "n_words": 112, "vocab_size": 68, "complexity": 4, "nloc": 49, "token_counts": 263, "n_ast_nodes": 414, "n_identifiers": 33, "random_cut": "def test_thumbnail_repeated_thumbnail(self) -> None:\n \n self._test_thumbnail(\n \"scale\", self.test_image.expected_scaled, self.test_image.expected_found\n )\n\n if not self.test_image.expected_found:\n return\n\n # Fetching again should work, without re-requesting the image from the\n # remote.\n params = \"?width=32&height=32&method=scale\"\n channel = make_request(\n self.reactor,\n FakeSite(self.thumbnail_resource, self.reactor),\n \"GET\",\n self.media_id + params,\n shorthand=False,\n await_result=False,\n )\n self.pump()\n\n self.assertEqual(channel.code, 200)\n if self.test_image.expected_scaled:\n self.assertEqual(\n channel.result[\"body\"],\n self.test_image.expected_scaled,\n channel.result[\"body\"],\n )\n\n # Deleting the thumbnail on disk then re-requesting it should work as\n # Synapse should regenerate missing thumbnails.\n origin, media_id = self.media_id.split(\"/\")\n info = self.get_success(self.store.get_cached_remote_media(origin, media_id))\n file_id = info[\"filesystem_id\"]\n\n thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir(\n origin, file_id\n )\n shutil.rmtree(thumbnail_dir, ignore_errors=True)\n\n ch", "d_id": 71705, "documentation": { "docstring": "Test that fetching the same thumbnail works, and deleting the on disk\n thumbnail regenerates it.\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 306647, "commit_id": "a6b6949793e2571bf46cdca2e541ddf64cb1fc71", "repo": "core", "path": 
"homeassistant/components/wake_on_lan/switch.py", "file_name": "switch.py", "fun_name": "update", "commit_message": "Improve entity type hints [w] (#77886)", "code": "def update(self) -> None:\n \n ping_cmd = [\n \"ping\",\n \"-c\",\n \"1\",\n \"-W\",\n str(DEFAULT_PING_TIMEOUT),\n str(self._host),\n ]\n\n status = sp.call(ping_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n self._state = not bool(status)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 124, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 61, "n_ast_nodes": 100, "n_identifiers": 14, "random_cut": "def update(self) -> None:\n \n ping_cmd = [\n \"ping\",\n \"-c\",\n \"1\",\n \"-W\",\n str(DEFAULT_PING_TIMEOUT),\n str(self._host),\n ]\n\n status = sp.call(ping_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n self._state = not bool(status)\n", "d_id": 105431, "documentation": { "docstring": "Check if device is on and update the state. Only called if assumed state is false.", "n_words": 16, "vocab_size": 14, "n_whitespaces": 15, "language": "en" } }, { "id": 286538, "commit_id": "8e9e6bd57f4bc5d57ccedfacccda6342d5881266", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/portfolio_model.py", "file_name": "portfolio_model.py", "fun_name": "get_transactions", "commit_message": "Incorporate portfolio class into SDK (#3401)\n\n* create functions to interact with portfolio\r\n\r\n* fix some docstrings\r\n\r\n* view docstrings\r\n\r\n* make portfolio loading available in sdk\r\n\r\n* reorder some methods\r\n\r\n* fix bug\r\n\r\n* update controller\r\n\r\n* update website\r\n\r\n* remove import\r\n\r\n* change input name\r\n\r\n* regenerate website\r\n\r\n* change portfolio arg name\r\n\r\n* fix metrics bugs\r\n\r\n* fix report\r\n\r\n* refactor assets alloc\r\n\r\n* refactor assets sectors alloc\r\n\r\n* remove unecessary attributes\r\n\r\n* refactor allocaasset sector\r\n\r\n* reorganize class\r\n\r\n* first refactor alloc\r\n\r\n* refactor portfolio alloc\r\n\r\n* black\r\n\r\n* fix alloc bug\r\n\r\n* regenerate sdk website\r\n\r\n* fix alloc bugs\r\n\r\n* forgot this exception\r\n\r\n* some refactor on portfolio alloc country region\r\n\r\n* fix some allocation bugs\r\n\r\n* add examples\r\n\r\n* regenerate website\r\n\r\nCo-authored-by: James Maslek ", "code": "def get_transactions(self):\n \n\n df = self.__transactions[\n [\n \"Date\",\n \"Type\",\n \"Ticker\",\n \"Side\",\n \"Price\",\n \"Quantity\",\n \"Fees\",\n \"Investment\",\n \"Currency\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n ]\n ]\n df = df.replace(np.nan, \"-\")\n df[\"Date\"] = df[\"Date\"].dt.strftime(\"%Y-%m-%d\")\n df.sort_values(by=\"Date\", ascending=False, inplace=True)\n return df\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 299, "n_words": 33, "vocab_size": 28, "complexity": 1, "nloc": 22, "token_counts": 87, "n_ast_nodes": 157, "n_identifiers": 13, "random_cut": "def get_transactions(self):\n \n\n df = self.__transactions[\n [\n \"Date\",\n \"Type\",\n \"Ticker\",\n \"Side\",\n \"Price\",\n \"Quantity\",\n \"Fees\",\n \"Investment\",\n \"Currency\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n ", "d_id": 85861, "documentation": { "docstring": "Get formatted transactions\n\n Returns\n -------\n pd.DataFrame: formatted transactions\n ", "n_words": 8, "vocab_size": 6, 
"n_whitespaces": 40, "language": "en" } }, { "id": 83055, "commit_id": "dd1c9c45c778dc5280c2b02c3b9fb327d2507cc1", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_pick_colors", "commit_message": "stream colors: Try harder to avoid collisions.\n\nWe now use recipient_id % 24 for new stream colors\nwhen users have already used all 24 of our canned\ncolors.\n\nThis fix doesn't address the scenario that somebody\ndislikes one of our current canned colors, so if a\nuser continually changes canned color N to some other\ncolor for new streams, their new streams will continue\nto include color N (and the user will still need to\nchange them).\n\nThis fix doesn't address the fact that it can be expensive\nduring bulk-add situations to query for all the colors\nthat users have already used up.\n\nSee https://chat.zulip.org/#narrow/stream/3-backend/topic/assigning.20stream.20colors\nfor more discussion.", "code": "def test_pick_colors(self) -> None:\n used_colors: Set[str] = set()\n color_map: Dict[int, str] = {}\n recipient_ids = list(range(30))\n user_color_map = pick_colors(used_colors, color_map, recipient_ids)\n self.assertEqual(\n user_color_map,\n {\n 0: \"#76ce90\",\n 1: \"#fae589\",\n 2: \"#a6c7e5\",\n 3: \"#e79ab5\",\n 4: \"#bfd56f\",\n 5: \"#f4ae55\",\n 6: \"#b0a5fd\",\n 7: \"#addfe5\",\n 8: \"#f5ce6e\",\n 9: \"#c2726a\",\n 10: \"#94c849\",\n 11: \"#bd86e5\",\n 12: \"#ee7e4a\",\n 13: \"#a6dcbf\",\n 14: \"#95a5fd\",\n 15: \"#53a063\",\n 16: \"#9987e1\",\n 17: \"#e4523d\",\n 18: \"#c2c2c2\",\n 19: \"#4f8de4\",\n 20: \"#c6a8ad\",\n 21: \"#e7cc4d\",\n 22: \"#c8bebf\",\n 23: \"#a47462\",\n # start repeating\n 24: \"#76ce90\",\n 25: \"#fae589\",\n 26: \"#a6c7e5\",\n 27: \"#e79ab5\",\n 28: \"#bfd56f\",\n 29: \"#f4ae55\",\n },\n )\n\n color_map = {98: \"color98\", 99: \"color99\"}\n used_colors = set(STREAM_ASSIGNMENT_COLORS) - {\"#c6a8ad\", \"#9987e1\"}\n recipient_ids = [99, 98, 1, 2, 3, 4]\n user_color_map = pick_colors(used_colors, color_map, recipient_ids)\n self.assertEqual(\n user_color_map,\n {98: \"color98\", 99: \"color99\", 1: \"#9987e1\", 2: \"#c6a8ad\", 3: \"#e79ab5\", 4: \"#bfd56f\"},\n )\n\n \n used_colors = set(STREAM_ASSIGNMENT_COLORS)\n color_map = {}\n recipient_ids = [2, 26, 50, 74]\n user_color_map = pick_colors(used_colors, color_map, recipient_ids)\n self.assertEqual(\n user_color_map,\n {2: \"#a6c7e5\", 26: \"#a6c7e5\", 50: \"#a6c7e5\", 74: \"#a6c7e5\"},\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 832, "n_words": 157, "vocab_size": 106, "complexity": 1, "nloc": 70, "token_counts": 315, "n_ast_nodes": 520, "n_identifiers": 16, "random_cut": "def test_pick_colors(self) -> None:\n used_colors: Set[str] = set()\n color_map: Dict[int, str] = {}\n recipient_ids = list(range(30))\n user_color_map = pick_colors(used_colors, color_map, recipient_ids)\n self.assertEqual(\n user_color_map,\n {\n 0: \"#76ce90\",\n 1: \"#fae589\",\n 2: \"#a6c7e5\",\n 3: \"#e79ab5\",\n 4: \"#bfd56f\",\n 5: \"#f4ae55\",\n 6: \"#b0a5fd\",\n 7: \"#addfe5\",\n 8: \"#f5ce6e\",\n 9: \"#c2726a\",\n 10: \"#94c849\",\n 11: \"#bd86e5\",\n 12: \"#ee7e4a\",\n 13: \"#a6dcbf\",\n 14: \"#95a5fd\",\n 15: \"#53a063\",\n 16: \"#9987e1\",\n 17: \"#e4523d\",\n 18: \"#c2c2c2\",\n 19: \"#4f8de4\",\n 20: \"#c6a8ad\",\n 21: \"#e7cc4d\",\n 22: \"#c8bebf\",\n 23: \"#a47462\",\n # start repeating\n 24: \"#76ce90\",\n 25: \"#fae589\",\n 26: \"#a6c7e5\",\n 27: \"#e79ab5\",\n ", 
"d_id": 17589, "documentation": { "docstring": "\n If we are assigning colors to a user with 24+ streams, we have to start\n re-using old colors. Our algorithm basically uses recipient_id % 24, so\n the following code reflects the worse case scenario that our new\n streams have recipient ids spaced out by exact multiples of 24. We\n don't try to work around this edge case, since users who really depend\n on the stream colors can always just assign themselves custom colors\n for the streams that they really want to stand out.\n\n Even if recipient_ids were completely random, the odds of collisions\n are low, but it's often the case that bulk-adds are done for streams\n that either were or are being created at roughly the same time, so the\n recipient_ids tend to have even fewer collisions.\n ", "n_words": 127, "vocab_size": 96, "n_whitespaces": 214, "language": "en" } }, { "id": 264221, "commit_id": "7421e5f7d7e579ed1a0acf840c39ae61fd851504", "repo": "netbox", "path": "netbox/extras/tests/test_customfields.py", "file_name": "test_customfields.py", "fun_name": "test_import", "commit_message": "Fixes #8317: Fix CSV import of multi-select custom field values", "code": "def test_import(self):\n \n data = (\n ('name', 'slug', 'status', 'cf_text', 'cf_longtext', 'cf_integer', 'cf_boolean', 'cf_date', 'cf_url', 'cf_json', 'cf_select', 'cf_multiselect'),\n ('Site 1', 'site-1', 'active', 'ABC', 'Foo', '123', 'True', '2020-01-01', 'http://example.com/1', '{\"foo\": 123}', 'Choice A', '\"Choice A,Choice B\"'),\n ('Site 2', 'site-2', 'active', 'DEF', 'Bar', '456', 'False', '2020-01-02', 'http://example.com/2', '{\"bar\": 456}', 'Choice B', '\"Choice B,Choice C\"'),\n ('Site 3', 'site-3', 'active', '', '', '', '', '', '', '', '', ''),\n )\n csv_data = '\\n'.join(','.join(row) for row in data)\n\n response = self.client.post(reverse('dcim:site_import'), {'csv': csv_data})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(Site.objects.count(), 3)\n\n # Validate data for site 1\n site1 = Site.objects.get(name='Site 1')\n self.assertEqual(len(site1.custom_field_data), 9)\n self.assertEqual(site1.custom_field_data['text'], 'ABC')\n self.assertEqual(site1.custom_field_data['longtext'], 'Foo')\n self.assertEqual(site1.custom_field_data['integer'], 123)\n self.assertEqual(site1.custom_field_data['boolean'], True)\n self.assertEqual(site1.custom_field_data['date'], '2020-01-01')\n self.assertEqual(site1.custom_field_data['url'], 'http://example.com/1')\n self.assertEqual(site1.custom_field_data['json'], {\"foo\": 123})\n self.assertEqual(site1.custom_field_data['select'], 'Choice A')\n self.assertEqual(site1.custom_field_data['multiselect'], ['Choice A', 'Choice B'])\n\n # Validate data for site 2\n site2 = Site.objects.get(name='Site 2')\n self.assertEqual(len(site2.custom_field_data), 9)\n self.assertEqual(site2.custom_field_data['text'], 'DEF')\n self.assertEqual(site2.custom_field_data['longtext'], 'Bar')\n self.assertEqual(site2.custom_field_data['integer'], 456)\n self.assertEqual(site2.custom_field_data['boolean'], False)\n self.assertEqual(site2.custom_field_data['date'], '2020-01-02')\n self.assertEqual(site2.custom_field_data['url'], 'http://example.com/2')\n self.assertEqual(site2.custom_field_data['json'], {\"bar\": 456})\n self.assertEqual(site2.custom_field_data['select'], 'Choice B')\n self.assertEqual(site2.custom_field_data['multiselect'], ['Choice B', 'Choice C'])\n\n # No custom field data should be set for site 3\n site3 = Site.objects.get(name='Site 3')\n 
self.assertFalse(any(site3.custom_field_data.values()))\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 449, "n_words": 167, "vocab_size": 128, "complexity": 2, "nloc": 35, "token_counts": 501, "n_ast_nodes": 888, "n_identifiers": 25, "random_cut": "def test_import(self):\n \n data = (\n ('name', 'slug', 'status', 'cf_text', 'cf_longtext', 'cf_integer', 'cf_boolean', 'cf_date', 'cf_url', 'cf_json', 'cf_select', 'cf_multiselect'),\n ('Site 1', 'site-1', 'active', 'ABC', 'Foo', '123', 'True', '2020-01-01', 'http://example.com/1', '{\"foo\": 123}', 'Choice A', '\"Choice A,Choice B\"'),\n ('Site 2', 'site-2', 'active', 'DEF', 'Bar', '456', 'False', '2020-01-02', 'http://example.com/2', '{\"bar\": 456}', 'Choice B', '\"Choice B,Choice C\"'),\n ('Site 3', 'site-3', 'active', '', '', '', '', '', '', '', '', ''),\n )\n csv_data = '\\n'.join(','.join(row) for row in data)\n\n response = self.client.post(reverse('dcim:site_import'), {'csv': csv_data})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(Site.objects.count(), 3)\n\n # Validate data for site 1\n site1 = Site.objects.get(name='Site 1')\n self.assertEqual(len(site1.custom_field_data), 9)\n self.assertEqual(site1.custom_field_data['text'], 'ABC')\n self.assertEqual(site1.custom_field_data['longtext'], 'Foo')\n self.assertEqual(site1.custom_field_data['integer'], 123)\n self.assertEqual(site1.custom_field_data['boolean'], True)\n self.assertEqual(site1.custom_field_data['date'], '2020-01-01')\n self.assertEqual(site1.custom_field_data['url'], 'http://example.com/1')\n self.assertEqual(site1.custom_field_data['json'], {\"foo\": 123})\n self.assertEqual(site1.custom_field_data['select'], 'Choice A')\n self.assertEqual(site1.custom_field_data['multiselect'], ['Choice A', 'Choice B'])\n\n # Validate data for site 2\n site2 = Site", "d_id": 77645, "documentation": { "docstring": "\n Import a Site in CSV format, including a value for each CustomField.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 218720, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/fixes/fix_renames.py", "file_name": "fix_renames.py", "fun_name": "build_pattern", "commit_message": "add python 3.10.4 for windows", "code": "def build_pattern():\n #bare = set()\n for module, replace in list(MAPPING.items()):\n for old_attr, new_attr in list(replace.items()):\n LOOKUP[(module, old_attr)] = new_attr\n #bare.add(module)\n #bare.add(old_attr)\n #yield % (module, module)\n yield % (module, old_attr, old_attr)\n yield % (module, old_attr)\n #yield % alternates(bare)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 122, "n_words": 37, "vocab_size": 24, "complexity": 3, "nloc": 11, "token_counts": 60, "n_ast_nodes": 104, "n_identifiers": 9, "random_cut": "def build_pattern():\n #bare = set()\n for module, replace in list(MAPPING.items()):\n for old_attr, new_attr in list(replace.items()):\n LOOKUP[(module, old_attr)] = new_attr\n #bare.add(module)\n #bare.add(old_attr)\n #yield", "d_id": 55450, "documentation": { "docstring": "\n # import_name< 'import' (module=%r\n # | dotted_as_names< any* module=%r any* >) >\n # \n import_from< 'from' module_name=%r 'import'\n ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >\n \n power< module_name=%r trailer< '.' 
attr_name=%r > any* >\n bare_name=%s", "n_words": 35, "vocab_size": 22, "n_whitespaces": 178, "language": "en" } }, { "id": 305878, "commit_id": "474844744bdd2b0dcba46b82d9d3fcd8e3dbad24", "repo": "core", "path": "homeassistant/components/plex/sensor.py", "file_name": "sensor.py", "fun_name": "async_refresh_sensor", "commit_message": "Improve entity type hints [p] (#77871)", "code": "async def async_refresh_sensor(self) -> None:\n \n _LOGGER.debug(\"Refreshing library sensor for '%s'\", self.name)\n try:\n await self.hass.async_add_executor_job(self._update_state_and_attrs)\n self._attr_available = True\n except NotFound:\n self._attr_available = False\n except requests.exceptions.RequestException as err:\n _LOGGER.error(\n \"Could not update library sensor for '%s': %s\",\n self.library_section.title,\n err,\n )\n self._attr_available = False\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 195, "n_words": 42, "vocab_size": 33, "complexity": 3, "nloc": 16, "token_counts": 78, "n_ast_nodes": 132, "n_identifiers": 18, "random_cut": "async def async_refresh_sensor(self) -> None:\n \n _LOGGER.debug(\"Refreshing library sensor for '%s'\", self.name)\n try:\n await self.hass.async_add_executor_job(self._update_state_and_attrs)\n self._attr_available = True\n except NotFound:\n self._attr_available = False\n except requests.exceptions.RequestException as err:\n _LOGGER.error(\n \"Could not update library sensor for '%s': %s\",\n sel", "d_id": 104662, "documentation": { "docstring": "Update state and attributes for the library sensor.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 61941, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "__hash__", "commit_message": "upd; format", "code": "def __hash__(self):\n \n return hash(self.name) + hash(self.version) + hash(self.source_url)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 27, "n_ast_nodes": 46, "n_identifiers": 6, "random_cut": "def __hash__(self):\n \n return hash(self", "d_id": 12765, "documentation": { "docstring": "\n Compute hash in a way which matches the equality test.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 100746, "commit_id": "a99049711f289b435e710d5b15f9c0e45c4251c3", "repo": "faceswap", "path": "plugins/train/model/phaze_a.py", "file_name": "phaze_a.py", "fun_name": "_min_nodes", "commit_message": "Model updates\n- Increase model summary width\n- Phaze A updates\n - Update some min/max values\n - Add Decoder Filter Slope Mode\n - Add additional arguments for Upsampling2D\n - Adjust upsampling method for multiple upsamples in FC layers\n - Typing", "code": "def _min_nodes(self) -> int:\n \n if self._side == \"gblock\":\n return self._config[\"fc_gblock_min_nodes\"]\n retval = self._scale_filters(self._config[\"fc_min_filters\"])\n retval = int(retval * self._config[\"fc_dimensions\"] ** 2)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 68, "n_words": 22, 
"vocab_size": 18, "complexity": 2, "nloc": 9, "token_counts": 52, "n_ast_nodes": 91, "n_identifiers": 7, "random_cut": "def _min_nodes(self) -> int:\n \n if self._side == \"gblock\":\n ", "d_id": 20199, "documentation": { "docstring": " int: The number of nodes for the first Dense. For non g-block layers this will be the\n given minimum filters multiplied by the dimensions squared. For g-block layers, this is the\n given value ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 48, "language": "en" } }, { "id": 155114, "commit_id": "c51ab405efec920dbb4baa2e2389409df04e8d43", "repo": "modin", "path": "modin/config/envvars.py", "file_name": "envvars.py", "fun_name": "_get", "commit_message": "FIX-#5187: Fixed RecursionError in OmnisciLaunchParameters.get() (#5199)\n\nSigned-off-by: Andrey Pavlenko ", "code": "def _get(cls) -> dict:\n \n custom_parameters = super().get()\n result = cls.default.copy()\n result.update(\n {key.replace(\"-\", \"_\"): value for key, value in custom_parameters.items()}\n )\n return result\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 75, "n_words": 22, "vocab_size": 19, "complexity": 2, "nloc": 15, "token_counts": 55, "n_ast_nodes": 95, "n_identifiers": 14, "random_cut": "def _get(cls) -> dict:\n \n custom_parameters = super().get()\n result = cls.default.copy()\n result.update(\n {key.replace(\"-\", \"_\"): value for key, value in custom_parameters.items()}", "d_id": 36242, "documentation": { "docstring": "\n Get the resulted command-line options.\n\n Returns\n -------\n dict\n Decoded and verified config value.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 60, "language": "en" } }, { "id": 217597, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/graphlib.py", "file_name": "graphlib.py", "fun_name": "add", "commit_message": "add python 3.10.4 for windows", "code": "def add(self, node, *predecessors):\n \n if self._ready_nodes is not None:\n raise ValueError(\"Nodes cannot be added after a call to prepare()\")\n\n # Create the node -> predecessor edges\n nodeinfo = self._get_nodeinfo(node)\n nodeinfo.npredecessors += len(predecessors)\n\n # Create the predecessor -> node edges\n for pred in predecessors:\n pred_info = self._get_nodeinfo(pred)\n pred_info.successors.append(node)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 129, "n_words": 47, "vocab_size": 39, "complexity": 3, "nloc": 8, "token_counts": 61, "n_ast_nodes": 102, "n_identifiers": 14, "random_cut": "def add(self, node, *predecessors):\n \n ", "d_id": 54846, "documentation": { "docstring": "Add a new node and its predecessors to the graph.\n\n Both the *node* and all elements in *predecessors* must be hashable.\n\n If called multiple times with the same node argument, the set of dependencies\n will be the union of all dependencies passed in.\n\n It is possible to add a node with no dependencies (*predecessors* is not provided)\n as well as provide a dependency twice. 
If a node that has not been provided before\n is included among *predecessors* it will be automatically added to the graph with\n no predecessors of its own.\n\n Raises ValueError if called after \"prepare\".\n ", "n_words": 97, "vocab_size": 63, "n_whitespaces": 160, "language": "en" } }, { "id": 3840, "commit_id": "a3aae8017a0a40ff2006e2567f71dccb04c997a5", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py", "file_name": "test_base_insight_streams.py", "fun_name": "test_stream_slices_with_state_and_slices", "commit_message": "🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)\n\n* Facebook Marketing performance improvement\r\n\r\n* add comments and little refactoring\r\n\r\n* fix integration tests with the new config\r\n\r\n* improve job status handling, limit concurrency to 10\r\n\r\n* fix campaign jobs, refactor manager\r\n\r\n* big refactoring of async jobs, support random order of slices\r\n\r\n* update source _read_incremental to hook new state logic\r\n\r\n* fix issues with timeout\r\n\r\n* remove debugging and clean up, improve retry logic\r\n\r\n* merge changes from #8234\r\n\r\n* fix call super _read_increment\r\n\r\n* generalize batch execution, add use_batch flag\r\n\r\n* improve coverage, do some refactoring of spec\r\n\r\n* update test, remove overrides of source\r\n\r\n* add split by AdSet\r\n\r\n* add smaller insights\r\n\r\n* fix end_date < start_date case\r\n\r\n* add account_id to PK\r\n\r\n* add notes\r\n\r\n* fix new streams\r\n\r\n* fix reversed incremental stream\r\n\r\n* update spec.json for SAT\r\n\r\n* upgrade CDK and bump version\r\n\r\nCo-authored-by: Dmytro Rezchykov \r\nCo-authored-by: Eugene Kulak ", "code": "def test_stream_slices_with_state_and_slices(self, api, async_manager_mock, start_date):\n \n end_date = start_date + duration(days=10)\n cursor_value = start_date + duration(days=5)\n state = {\n AdsInsights.cursor_field: cursor_value.date().isoformat(),\n \"slices\": [(cursor_value + duration(days=1)).date().isoformat(), (cursor_value + duration(days=3)).date().isoformat()],\n }\n stream = AdsInsights(api=api, start_date=start_date, end_date=end_date)\n async_manager_mock.completed_jobs.return_value = [1, 2, 3]\n\n slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental))\n\n assert slices == [{\"insight_job\": 1}, {\"insight_job\": 2}, {\"insight_job\": 3}]\n async_manager_mock.assert_called_once()\n args, kwargs = async_manager_mock.call_args\n generated_jobs = list(kwargs[\"jobs\"])\n assert len(generated_jobs) == (end_date - cursor_value).days - 2, \"should be 2 slices short because of state\"\n assert generated_jobs[0].interval.start == cursor_value.date() + duration(days=2)\n assert generated_jobs[1].interval.start == cursor_value.date() + duration(days=4)\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 214, "n_words": 87, "vocab_size": 62, "complexity": 1, "nloc": 17, "token_counts": 244, "n_ast_nodes": 386, "n_identifiers": 32, "random_cut": "def test_stream_slices_with_state_and_slices(self, api, async_manager_mock, start_date):\n \n end_date = start_date + duration(days=10)\n cursor_value = start_date + duration(days=5)\n state = {\n AdsInsights.cursor_field: cursor_value.date().isoformat(),\n \"slices\": [(cursor_value + duration(days=1)).date().isoformat(), (cursor_value + duration(days=3)).date().isoformat()],\n }\n 
stream = AdsInsights(api=api, start_date=start_date, end_date=end_date)\n async_manager_mock.completed_jobs.return_value = [1, 2, 3]\n\n slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental))\n\n assert slices == [{\"insight_job\": 1}, {\"insight_job\": 2}, {\"insight_job", "d_id": 577, "documentation": { "docstring": "Stream will use cursor_value from state, but will skip saved slices", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 207776, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "_test_readonly_foreignkey_links", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _test_readonly_foreignkey_links(self, admin_site):\n \n chapter = Chapter.objects.create(\n title=\"Chapter 1\",\n content=\"content\",\n book=Book.objects.create(name=\"Book 1\"),\n )\n language = Language.objects.create(iso=\"_40\", name=\"Test\")\n obj = ReadOnlyRelatedField.objects.create(\n chapter=chapter,\n language=language,\n user=self.superuser,\n )\n response = self.client.get(\n reverse(\n f\"{admin_site}:admin_views_readonlyrelatedfield_change\", args=(obj.pk,)\n ),\n )\n # Related ForeignKey object registered in admin.\n user_url = reverse(f\"{admin_site}:auth_user_change\", args=(self.superuser.pk,))\n self.assertContains(\n response,\n '' % user_url,\n html=True,\n )\n # Related ForeignKey with the string primary key registered in admin.\n language_url = reverse(\n f\"{admin_site}:admin_views_language_change\",\n args=(quote(language.pk),),\n )\n self.assertContains(\n response,\n '' % language_url,\n html=True,\n )\n # Related ForeignKey object not registered in admin.\n self.assertContains(\n response, '
<div class="readonly">Chapter 1</div>
    ', html=True\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 434, "n_words": 92, "vocab_size": 58, "complexity": 1, "nloc": 35, "token_counts": 181, "n_ast_nodes": 299, "n_identifiers": 30, "random_cut": "def _test_readonly_foreignkey_links(self, admin_site):\n \n chapter = Chapter.objects.create(\n title=\"Chapter 1\",\n content=\"content\",\n book=Book.objects.create(name=\"Book 1\"),\n )\n language = Language.objects.create(iso=\"_40\", name=\"Test\")\n obj = ReadOnlyRelatedField.objects.create(\n chapter=chapter,\n language=language,\n user=self.superuser,\n )\n response = self.client.get(\n reverse(\n f\"{admin_site}:admin_views_readonlyrelatedfield_change\", args=(obj.pk,)\n ),\n )\n # Related ForeignKey object registered in admin.\n user_url = reverse(f\"{admin_site}:auth_user_change\", args=(self.superuser.pk,))\n self.assertContains(\n response,\n '' % user_url,\n html=True,\n )\n # Related ForeignKey with the string primary key registered in admin.\n language_url = reverse(\n f\"{admin_site}:admin_views_language_change\",\n args=(quote(language.pk),),\n )\n self.assertContains(\n response,\n '' % language_url,\n html=True,\n )\n # Related Forei", "d_id": 52095, "documentation": { "docstring": "\n ForeignKey readonly fields render as links if the target model is\n registered in admin.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 66616, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v12_0/fix_percent_complete_for_projects.py", "file_name": "fix_percent_complete_for_projects.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfor project in frappe.get_all(\"Project\", fields=[\"name\", \"percent_complete_method\"]):\n\t\ttotal = frappe.db.count(\"Task\", dict(project=project.name))\n\t\tif project.percent_complete_method == \"Task Completion\" and total > 0:\n\t\t\tcompleted = frappe.db.sql(\n\t\t\t\t,\n\t\t\t\tproject.name,\n\t\t\t)[0][0]\n\t\t\tpercent_complete = flt(flt(completed) / total * 100, 2)\n\t\t\tif project.percent_complete != percent_complete:\n\t\t\t\tfrappe.db.set_value(\"Project\", project.name, \"percent_complete\", percent_complete)\n\t\t\t\tif percent_complete == 100:\n\t\t\t\t\tfrappe.db.set_value(\"Project\", project.name, \"status\", \"Completed\")\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 38, "n_words": 51, "vocab_size": 40, "complexity": 6, "nloc": 14, "token_counts": 132, "n_ast_nodes": 217, "n_identifiers": 16, "random_cut": "def execute():\n\tfor project in frappe.get_all(\"Project\", fields=[\"name\", \"percent_complete_method\"]):\n\t\ttotal = frappe.db.count(\"Task\", dict(project=project.name))\n\t\tif project.percent_complete_method == \"Task Completion\" and total > 0:\n\t\t\tcompleted = frappe.db.sql(\n\t\t\t\t,\n\t\t\t\tproject.name,\n\t\t\t)[0][0]\n\t\t\tper", "d_id": 14247, "documentation": { "docstring": "select count(name) from tabTask where\n\t\t\t\t\tproject=%s and status in ('Cancelled', 'Completed')", "n_words": 11, "vocab_size": 11, "n_whitespaces": 9, "language": "en" } }, { "id": 196972, "commit_id": "35a158ece2bec4d77d78a193fcafa4dd5fd5f691", "repo": "sympy", "path": "sympy/parsing/mathematica.py", "file_name": "mathematica.py", "fun_name": "mathematica", "commit_message": "Support 
parsing functions and some more Mathematica nodes. Commented Mathematica code is now parsed correctly.", "code": "def mathematica(s, additional_translations=None):\n \n parser = MathematicaParser(additional_translations)\n\n if additional_translations is not None:\n SymPyDeprecationWarning(\n feature=\"additional_translations parameter for the Mathematica parser\",\n last_supported_version=\"1.9\",\n useinstead=\"Use SymPy's .replace( ) or .subs( ) methods on the output expression\",\n issue=\"23042\",\n ).warn()\n return sympify(parser._parse_old(s))\n\n return parser.parse(s)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 114, "n_words": 37, "vocab_size": 34, "complexity": 2, "nloc": 11, "token_counts": 62, "n_ast_nodes": 105, "n_identifiers": 14, "random_cut": "def mathematica(s, additional_translations=None):\n \n parser = MathematicaParser(additional_translations)\n\n if additional_translations is not None:\n SymPyDeprecationWarning(\n feature=\"additional_translations parameter for the Mathematica parser\",\n last_supported_version=\"1.9\",\n useinstead=\"Use SymPy's .replace( ) or .subs( ) methods on the output express", "d_id": 48268, "documentation": { "docstring": "\n Translate a string containing a Wolfram Mathematica expression to a SymPy\n expression.\n\n If the translator is unable to find a suitable SymPy expression, the\n ``FullForm`` of the Mathematica expression will be output, using SymPy\n ``Function`` objects as nodes of the syntax tree.\n\n Examples\n ========\n\n >>> from sympy.parsing.mathematica import mathematica\n >>> mathematica(\"Sin[x]^2 Tan[y]\")\n sin(x)**2*tan(y)\n >>> e = mathematica(\"F[7,5,3]\")\n >>> e\n F(7, 5, 3)\n >>> from sympy import Function, Max, Min\n >>> e.replace(Function(\"F\"), lambda *x: Max(*x)*Min(*x))\n 21\n\n Both standard input form and Mathematica full form are supported:\n\n >>> mathematica(\"x*(a + b)\")\n x*(a + b)\n >>> mathematica(\"Times[x, Plus[a, b]]\")\n x*(a + b)\n\n To get a matrix from Wolfram's code:\n\n >>> m = mathematica(\"{{a, b}, {c, d}}\")\n >>> m\n ((a, b), (c, d))\n >>> from sympy import Matrix\n >>> Matrix(m)\n Matrix([\n [a, b],\n [c, d]])\n\n If the translation into equivalent SymPy expressions fails, an SymPy\n expression equivalent to Wolfram Mathematica's \"FullForm\" will be created:\n\n >>> mathematica(\"x_.\")\n Optional(Pattern(x, Blank()))\n >>> mathematica(\"Plus @@ {x, y, z}\")\n Apply(Plus, (x, y, z))\n >>> mathematica(\"f[x_, 3] := x^3 /; x > 0\")\n SetDelayed(f(Pattern(x, Blank()), 3), Condition(x**3, x > 0))\n ", "n_words": 180, "vocab_size": 125, "n_whitespaces": 298, "language": "en" } }, { "id": 67468, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/setup/doctype/company/company.py", "file_name": "company.py", "fun_name": "update_company_current_month_sales", "commit_message": "style: format code with black", "code": "def update_company_current_month_sales(company):\n\tcurrent_month_year = formatdate(today(), \"MM-yyyy\")\n\n\tresults = frappe.db.sql(\n\t\t.format(\n\t\t\tcurrent_month_year=current_month_year, company=frappe.db.escape(company)\n\t\t),\n\t\tas_dict=True,\n\t)\n\n\tmonthly_total = results[0][\"total\"] if len(results) > 0 else 0\n\n\tfrappe.db.set_value(\"Company\", company, \"total_monthly_sales\", monthly_total)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 
14, "n_whitespaces": 18, "n_words": 28, "vocab_size": 25, "complexity": 2, "nloc": 22, "token_counts": 80, "n_ast_nodes": 129, "n_identifiers": 15, "random_cut": "def update_company_current_month_sales(company):\n\tcurrent_month_year = formatdate(today(), \"MM-yyyy\")\n\n\tresults = frappe.db.sql(\n\t\t.format(\n\t\t\tcurrent_month_year=current_month_year, company=frappe.db.escape(company)\n\t\t),\n\t\tas_dict=True,\n\t)\n\n\tmonthly_total = results[0][\"total\"] if ", "d_id": 14535, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\tSUM(base_grand_total) AS total,\n\t\t\tDATE_FORMAT(`posting_date`, '%m-%Y') AS month_year\n\t\tFROM\n\t\t\t`tabSales Invoice`\n\t\tWHERE\n\t\t\tDATE_FORMAT(`posting_date`, '%m-%Y') = '{current_month_year}'\n\t\t\tAND docstatus = 1\n\t\t\tAND company = {company}\n\t\tGROUP BY\n\t\t\tmonth_year\n\t", "n_words": 27, "vocab_size": 20, "n_whitespaces": 16, "language": "en" } }, { "id": 60778, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/metadata/base.py", "file_name": "base.py", "fun_name": "metadata_version", "commit_message": "upd; format", "code": "def metadata_version(self):\n # type: () -> Optional[str]\n \n raise NotImplementedError()\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 21, "n_identifiers": 3, "random_cut": "def metadata_version(self):\n # type: () -> Optional[str]\n \n raise NotImplementedErro", "d_id": 12290, "documentation": { "docstring": "Value of \"Metadata-Version:\" in the distribution, if available.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 281280, "commit_id": "006b3570b795215a17c64841110b649b03db9a98", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/options/screener_controller.py", "file_name": "screener_controller.py", "fun_name": "call_ca", "commit_message": "Baseclass (#1141)\n\n* A working decorator\r\n\r\n* Basic intro\r\n\r\n* Added more\r\n\r\n* Refactor\r\n\r\n* Refactor\r\n\r\n* Cleaned code\r\n\r\n* Simplified function (thanks Chavi)\r\n\r\n* Small change\r\n\r\n* Updating tests : fix issue with mock\r\n\r\n* Updating tests : fix remaining mocks after merging\r\n\r\n* Updating tests : black\r\n\r\n* Cleaned up\r\n\r\n* Finished base cases\r\n\r\n* Notes\r\n\r\n* Slight changes\r\n\r\n* Added dynamic options handling, error persists\r\n\r\n* Fixed pylint issues\r\n\r\n* Fixed mock\r\n\r\n* fix decorator with dynamic dictionary of args\r\n\r\n* move choices from dynamic to const in crypto/ov\r\n\r\n* Updated var names\r\n\r\n* Check\r\n\r\n* Moved decorators\r\n\r\n* Fixed import issues\r\n\r\n* Fixed tests, update payoff controller\r\n\r\n* Fixed tests\r\n\r\n* Fixed pylint\r\n\r\n* Updated files\r\n\r\n* Added base class\r\n\r\n* Added reset\r\n\r\n* Improved base class\r\n\r\n* For James\r\n\r\n* More menues converted\r\n\r\n* Added contexts\r\n\r\n* 24 controllers left\r\n\r\n* 18 Controllers left\r\n\r\n* Changes choices\r\n\r\n* 9 controllers left\r\n\r\n* Added all controllers\r\n\r\n* Fixed glitch\r\n\r\n* Replaced all improper callings of class\r\n\r\n* Removed menu decorator\r\n\r\n* refactored try_except\r\n\r\n* Last commit\r\n\r\n* Black fix\r\n\r\n* Bug fix\r\n\r\n* Added James' new menus\r\n\r\n* Fixed tests\r\n\r\n* Fixed 8 tests\r\n\r\n* Fixing mypy 
issue\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Fixed options\r\n\r\n* Fixed tests\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Fixed tests\r\n\r\n* More test fixes\r\n\r\n* Updating tests : stocks/ba\r\n\r\n* Fixed options test\r\n\r\n* More bug fixes\r\n\r\n* Fixed tests\r\n\r\n* fixed pylint\r\n\r\n* Skipped test_call_load\r\n\r\n* Add typings to base class\r\n\r\n* Fix issue with appending auto completer options + bugfixes\r\n\r\n* Add typings to base class\r\n\r\n* Terminal throws error for bad path\r\n\r\n* sexy solution to auto completer in runtime\r\n\r\n* more sexy reset with reset_level stored\r\n\r\n* no so sexy jump between indirect menus\r\n\r\n* Removing choices argument\r\n\r\n* refactor custom_reset\r\n\r\n* Fixed tests\r\n\r\n* Theo fixes\r\n\r\n* Added back function\r\n\r\n* Fixed tests\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: DidierRLopes ", "code": "def call_ca(self, _):\n \n if self.screen_tickers:\n self.queue = ca_controller.ComparisonAnalysisController(\n self.screen_tickers, self.queue\n ).menu(custom_path_menu_above=\"/stocks/\")\n else:\n print(\"Some tickers must be screened first through one of the presets!\\n\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 92, "n_words": 23, "vocab_size": 22, "complexity": 2, "nloc": 7, "token_counts": 42, "n_ast_nodes": 74, "n_identifiers": 10, "random_cut": "def call_ca(self, _):\n \n if self.screen_tickers:\n self.queue = ca_controller.ComparisonAnalysisController(\n self.screen_tickers, self.queue\n ).menu(custom_path_menu_above=", "d_id": 83683, "documentation": { "docstring": "Call the comparison analysis menu with selected tickers", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 177607, "commit_id": "1c4328c5a8b10ee20ac4328ce30612d106350699", "repo": "label-studio", "path": "label_studio/data_manager/actions/basic.py", "file_name": "basic.py", "fun_name": "delete_tasks_predictions", "commit_message": "feat: DEV-1205: Add task.updated_at column (#1784)\n\n* Update task.updated_at on annotation update (DEV-1205)\r\n\r\n* Fix set updated_at on annotation delete (DEV-1205)\r\n\r\n* Set update_at for every dm action (DEV-1205)\r\n\r\n* Stop changing updated_at on actions (DEV-1205)\r\n\r\n* Update experimental.py\r\n\r\nCo-authored-by: Max Tkachenko \r\nCo-authored-by: niklub ", "code": "def delete_tasks_predictions(project, queryset, **kwargs):\n \n task_ids = queryset.values_list('id', flat=True)\n predictions = Prediction.objects.filter(task__id__in=task_ids)\n count = predictions.count()\n predictions.delete()\n queryset.update(updated_at=datetime.now())\n return {'processed_items': count, 'detail': 'Deleted ' + str(count) + ' predictions'}\n\n\nactions = [\n {\n 'entry_point': retrieve_tasks_predictions,\n 'permission': all_permissions.predictions_any,\n 'title': 'Retrieve Predictions',\n 'order': 90,\n 'dialog': {\n 'text': 'Send the selected tasks to all ML backends connected to the project.'\n 'This operation might be abruptly interrupted due to a timeout. 
'\n 'The recommended way to get predictions is to update tasks using the Label Studio API.'\n 'See more in the documentation.'\n 'Please confirm your action.',\n 'type': 'confirm'\n }\n },\n {\n 'entry_point': delete_tasks,\n 'permission': all_permissions.tasks_delete,\n 'title': 'Delete Tasks',\n 'order': 100,\n 'reload': True,\n 'dialog': {\n 'text': 'You are going to delete the selected tasks. Please confirm your action.',\n 'type': 'confirm'\n }\n },\n {\n 'entry_point': delete_tasks_annotations,\n 'permission': all_permissions.tasks_delete,\n 'title': 'Delete Annotations',\n 'order': 101,\n 'dialog': {\n 'text': 'You are going to delete all annotations from the selected tasks. Please confirm your action.',\n 'type': 'confirm'\n }\n },\n {\n 'entry_point': delete_tasks_predictions,\n 'permission': all_permissions.predictions_any,\n 'title': 'Delete Predictions',\n 'order': 102,\n 'dialog': {\n 'text': 'You are going to delete all predictions from the selected tasks. Please confirm your action.',\n 'type': 'confirm'\n }\n }\n]\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 573, "n_words": 191, "vocab_size": 100, "complexity": 1, "nloc": 7, "token_counts": 76, "n_ast_nodes": 406, "n_identifiers": 26, "random_cut": "def delete_tasks_predictions(project, queryset, **kwargs):\n \n task_ids = queryset.values_list('id', flat=True)\n predictions = Prediction.objects.filter(task__id__in=task_ids)\n count = predictions.count()\n predictions.delete()\n queryset.update(updated_at=datetime.now())\n return {'processed_items': count, 'detail': 'Deleted ' + str(count) + ' predictions'}\n\n\nactions = [\n {\n 'e", "d_id": 42460, "documentation": { "docstring": " Delete all predictions by tasks ids\n\n :param project: project instance\n :param queryset: filtered tasks db queryset\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 26, "language": "en" } }, { "id": 113577, "commit_id": "d68c786ff81bad19c04619d6a999ff34aaa724e7", "repo": "nni", "path": "nni/compression/pytorch/base/scheduler.py", "file_name": "scheduler.py", "fun_name": "clean_up", "commit_message": "[Compression] remove pruning v1 & refactor directory (#5228)", "code": "def clean_up(self):\n \n if not self._cleaned:\n for ref in self.referenced_paths():\n self._reference_counter[ref] -= 1\n if self._reference_counter[ref] <= 0:\n os.remove(ref)\n if self._reference_counter[ref] < 0:\n _logger.warning('Referance counter error, the number of %s is %d',\n ref, self._reference_counter[ref])\n self._cleaned = True\n else:\n _logger.warning('Already clean up task %d', self.task_id)\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 226, "n_words": 42, "vocab_size": 36, "complexity": 5, "nloc": 12, "token_counts": 87, "n_ast_nodes": 141, "n_identifiers": 11, "random_cut": "def clean_up(self):\n \n if not self._cleaned:\n for ref in self.referenced_paths():\n self._reference_counter[ref] -= 1\n if self._reference_counter[ref] <= 0:\n os.remove(ref)\n if s", "d_id": 24962, "documentation": { "docstring": "\n Counter of referenced file paths subtract 1. 
If the counter reach 0, then delete the file.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 205911, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/utils.py", "file_name": "utils.py", "fun_name": "load_backend", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def load_backend(backend_name):\n \n # This backend was renamed in Django 1.9.\n if backend_name == \"django.db.backends.postgresql_psycopg2\":\n backend_name = \"django.db.backends.postgresql\"\n\n try:\n return import_module(\"%s.base\" % backend_name)\n except ImportError as e_user:\n # The database backend wasn't found. Display a helpful error message\n # listing all built-in database backends.\n import django.db.backends\n\n builtin_backends = [\n name\n for _, name, ispkg in pkgutil.iter_modules(django.db.backends.__path__)\n if ispkg and name not in {\"base\", \"dummy\"}\n ]\n if backend_name not in [\"django.db.backends.%s\" % b for b in builtin_backends]:\n backend_reprs = map(repr, sorted(builtin_backends))\n raise ImproperlyConfigured(\n \"%r isn't an available database backend or couldn't be \"\n \"imported. Check the above exception. To use one of the \"\n \"built-in backends, use 'django.db.backends.XXX', where XXX \"\n \"is one of:\\n\"\n \" %s\" % (backend_name, \", \".join(backend_reprs))\n ) from e_user\n else:\n # If there's some other error, this must be an error in Django\n raise\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 380, "n_words": 136, "vocab_size": 100, "complexity": 8, "nloc": 23, "token_counts": 119, "n_ast_nodes": 211, "n_identifiers": 22, "random_cut": "def load_backend(backend_name):\n \n # This backend was renamed in Django 1.9.\n ", "d_id": 51277, "documentation": { "docstring": "\n Return a database backend's \"base\" module given a fully qualified database\n backend name, or raise an error if it doesn't exist.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 31, "language": "en" } }, { "id": 122870, "commit_id": "4b587fa1f0049db5366fd04812ab940d80a71a22", "repo": "jax", "path": "jax/_src/pjit.py", "file_name": "pjit.py", "fun_name": "explode_superdims", "commit_message": "Move `pjit.py` to `jax/_src` in preparation for merging the `jit` and `pjit` frontend APIs\n\nPiperOrigin-RevId: 495944279", "code": "def explode_superdims(sizes, dims):\n \n strides_to_sizes = {stride: size for size, stride in zip(sizes, strides_for_sizes(sizes))}\n dims = list(reversed(dims))\n final_dims = []\n for size, stride in dims:\n target_size = strides_to_sizes[stride]\n new_dims = []\n while size > target_size:\n assert target_size > 1 # Ensure progress\n assert size % target_size == 0\n new_dims.append((target_size, stride))\n size //= target_size\n stride *= target_size\n target_size = strides_to_sizes[stride]\n assert size == target_size\n new_dims.append((size, stride))\n final_dims += reversed(new_dims)\n return final_dims\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 124, "n_words": 69, "vocab_size": 40, "complexity": 4, "nloc": 18, "token_counts": 118, "n_ast_nodes": 186, "n_identifiers": 14, "random_cut": "def explode_superdims(sizes, dims):\n \n strides_to_sizes = {stride: size for size, stride in zip(sizes, strides_for_sizes(sizes))}\n dims = list(reversed(dims))\n final_dims = []\n for size, 
stride in ", "d_id": 27260, "documentation": { "docstring": "Explode superdims to fit a known shape.\n\n The unflattening process might mistakenly generate too few too large dimensions.\n For example, ``unflatten_superdims(np.arange(n))`` always returns ``[(n, 1)]``.\n This function takes a list of such contiguous super-dimensions and splits them\n into smaller dimensions such that::\n\n set(map(fst, explode_superdims(sizes, dims))) == set(sizes)\n ", "n_words": 47, "vocab_size": 44, "n_whitespaces": 55, "language": "en" } }, { "id": 176421, "commit_id": "eb22e121816896ec0664c41a0232e2f80a259b96", "repo": "networkx", "path": "networkx/classes/function.py", "file_name": "function.py", "fun_name": "path_weight", "commit_message": "Correct typo in docstring (int -> float) (#5398)\n\n* Correct typo in docstring (int -> float)\r\n\r\nThis is based on https://stackoverflow.com/q/71494698/10693596\r\n\r\n* Update function.py\r\n\r\n* Update function.py", "code": "def path_weight(G, path, weight):\n \n multigraph = G.is_multigraph()\n cost = 0\n\n if not nx.is_path(G, path):\n raise nx.NetworkXNoPath(\"path does not exist\")\n for node, nbr in nx.utils.pairwise(path):\n if multigraph:\n cost += min(v[weight] for v in G[node][nbr].values())\n else:\n cost += G[node][nbr][weight]\n return cost\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 100, "n_words": 39, "vocab_size": 30, "complexity": 5, "nloc": 11, "token_counts": 94, "n_ast_nodes": 148, "n_identifiers": 17, "random_cut": "def path_weight(G, path, weight):\n \n multigraph = G.is_multigraph()\n cost = 0\n\n if not nx.is_path(G, path):\n raise nx.NetworkXNoPath(\"path does not exist\")\n for node, nbr in nx.utils.pairwise(path):\n if multigraph:\n cost += min(v[weight] for v in G[", "d_id": 41886, "documentation": { "docstring": "Returns total cost associated with specified path and weight\n\n Parameters\n ----------\n G : graph\n A NetworkX graph.\n\n path: list\n A list of node labels which defines the path to traverse\n\n weight: string\n A string indicating which edge attribute to use for path cost\n\n Returns\n -------\n cost: int or float\n An integer or a float representing the total cost with respect to the\n specified weight of the specified path\n\n Raises\n ------\n NetworkXNoPath\n If the specified edge does not exist.\n ", "n_words": 78, "vocab_size": 51, "n_whitespaces": 156, "language": "en" } }, { "id": 208718, "commit_id": "dc5bcc1c50892a5128fcf128af28887226144927", "repo": "ipython", "path": "IPython/core/history.py", "file_name": "history.py", "fun_name": "_run_sql", "commit_message": "This fixed the mixing of multiple history seen in #13631\n\nIt forces get_tail to put the current session last in the returned\nresults.", "code": "def _run_sql(self, sql, params, raw=True, output=False, latest=False):\n \n toget = 'source_raw' if raw else 'source'\n sqlfrom = \"history\"\n if output:\n sqlfrom = \"history LEFT JOIN output_history USING (session, line)\"\n toget = \"history.%s, output_history.output\" % toget\n if latest:\n toget += \", MAX(session * 128 * 1024 + line)\"\n this_querry = \"SELECT session, line, %s FROM %s \" % (toget, sqlfrom) + sql\n cur = self.db.execute(this_querry, params)\n if latest:\n cur = (row[:-1] for row in cur)\n if output: # Regroup into 3-tuples, and parse JSON\n return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)\n return cur\n", "url": "https://github.com/ipython/ipython.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 224, "n_words": 96, "vocab_size": 68, "complexity": 8, "nloc": 15, "token_counts": 118, "n_ast_nodes": 188, "n_identifiers": 18, "random_cut": "def _run_sql(self, sql, params, raw=True, output=False, latest=False):\n \n toget = 'source_raw' if raw else 'source'\n sqlfrom = \"history\"\n if output:\n sqlfrom = \"history LEFT JOIN output_history USING (session, line)\"\n toget = \"history.%s, output_history.output\" % toget\n if latest:\n toget += \", MAX(session * 128 * 1024 + line)\"\n this_querry = \"SELECT session, line, %s FROM %s \" % (toget, sqlfrom) + sql\n cur = self.db.execute(this_querry, params)\n if latest:\n cur = (row[:-1] for row in cur)\n if output: # Regroup into 3-t", "d_id": 52477, "documentation": { "docstring": "Prepares and runs an SQL query for the history database.\n\n Parameters\n ----------\n sql : str\n Any filtering expressions to go after SELECT ... FROM ...\n params : tuple\n Parameters passed to the SQL query (to replace \"?\")\n raw, output : bool\n See :meth:`get_range`\n latest : bool\n Select rows with max (session, line)\n\n Returns\n -------\n Tuples as :meth:`get_range`\n ", "n_words": 57, "vocab_size": 46, "n_whitespaces": 171, "language": "en" } }, { "id": 130247, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/pathspec.py", "file_name": "pathspec.py", "fun_name": "__add__", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def __add__(self, other):\n \n if isinstance(other, PathSpec):\n return PathSpec(self.patterns + other.patterns)\n else:\n return NotImplemented\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 56, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 31, "n_ast_nodes": 51, "n_identifiers": 7, "random_cut": "def __add__(self, other):\n \n ", "d_id": 29172, "documentation": { "docstring": "\n Combines the :attr:`Pathspec.patterns` patterns from two\n :class:`PathSpec` instances.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 30, "language": "en" } }, { "id": 129004, "commit_id": "70db5c5592d94b611fee0a334414f1f4f5cc151a", "repo": "ray", "path": "python/ray/node.py", "file_name": "node.py", "fun_name": "address", "commit_message": "[GCS][Bootstrap n/n] Do not start Redis in GCS bootstrapping mode (#21232)\n\nAfter this change in GCS bootstrapping mode, Redis no longer starts and `address` is treated as the GCS address of the Ray cluster.\r\n\r\nCo-authored-by: Yi Cheng \r\nCo-authored-by: Yi Cheng <74173148+iycheng@users.noreply.github.com>", "code": "def address(self):\n \n if use_gcs_for_bootstrap():\n return self._gcs_address\n return self._redis_address\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 8, "vocab_size": 7, "complexity": 2, "nloc": 4, "token_counts": 19, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def address(self):\n \n if use_gcs_for_bootstrap():\n return self._gcs_address\n return self._redis_address\n", "d_id": 28871, "documentation": { "docstring": "Get the address for bootstrapping, e.g. 
the address to pass to\n `ray start` or `ray.int()` to start worker nodes, that has been\n converted to ip:port format.\n ", "n_words": 26, "vocab_size": 21, "n_whitespaces": 47, "language": "en" } }, { "id": 49675, "commit_id": "8468e1ac6cfe165aa1e3cf4f77ab6fb66ce98614", "repo": "PaddleHub", "path": "modules/text/language_model/simnet_bow/module.py", "file_name": "module.py", "fun_name": "similarity", "commit_message": "Remove fluid api in modules and pkg. (#1906)", "code": "def similarity(self, texts=[], data={}, use_gpu=False, batch_size=1):\n \n if use_gpu:\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n except:\n raise RuntimeError(\n \"Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id.\"\n )\n\n data = self.check_data(texts, data)\n\n start_idx = 0\n iteration = int(math.ceil(len(data['text_1']) / batch_size))\n results = []\n for i in range(iteration):\n batch_data = {'text_1': [], 'text_2': []}\n if i < (iteration - 1):\n batch_data['text_1'] = data['text_1'][start_idx:(start_idx + batch_size)]\n batch_data['text_2'] = data['text_2'][start_idx:(start_idx + batch_size)]\n else:\n batch_data['text_1'] = data['text_1'][start_idx:(start_idx + batch_size)]\n batch_data['text_2'] = data['text_2'][start_idx:(start_idx + batch_size)]\n start_idx = start_idx + batch_size\n processed_results = preprocess(self.word_seg_module, self.vocab, batch_data, use_gpu, batch_size)\n\n data_1, lod_1, shape_1 = self._texts_process(processed_results[\"text_1\"])\n data_2, lod_2, shape_2 = self._texts_process(processed_results[\"text_2\"])\n\n predictor = self.gpu_predictor if use_gpu else self.cpu_predictor\n\n input_names = predictor.get_input_names()\n input_handle = predictor.get_input_handle(input_names[0])\n input_handle.copy_from_cpu(data_1)\n input_handle.set_lod(lod_1)\n input_handle.reshape(shape_1)\n\n input_handle = predictor.get_input_handle(input_names[1])\n input_handle.copy_from_cpu(data_2)\n input_handle.set_lod(lod_2)\n input_handle.reshape(shape_2)\n\n predictor.run()\n output_names = predictor.get_output_names()\n output_handle = predictor.get_output_handle(output_names[1])\n batch_out = output_handle.copy_to_cpu()\n\n batch_result = postprocess(batch_out, processed_results)\n results += batch_result\n return results\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 619, "n_words": 149, "vocab_size": 106, "complexity": 6, "nloc": 42, "token_counts": 363, "n_ast_nodes": 590, "n_identifiers": 51, "random_cut": "def similarity(self, texts=[], data={}, use_gpu=False, batch_size=1):\n \n if use_gpu:\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n except:\n raise RuntimeError(\n \"Environment Variable CUDA_VISIBLE_DEVICES", "d_id": 9863, "documentation": { "docstring": "\n Get the sentiment prediction results results with the texts as input\n Args:\n texts(list): the input texts to be predicted which the first element is text_1(list)\n and the second element is text_2(list), such as [['这道题很难'], ['这道题不简单']]\n if texts not data.\n data(dict): key must be 'text_1' and 'text_2', value is the texts(list) to be predicted\n use_gpu(bool): whether use gpu to predict or not\n batch_size(int): the program deals once with one batch\n Returns:\n results(list): the word segmentation results\n ", "n_words": 75, "vocab_size": 51, "n_whitespaces": 214, "language": 
"en" } }, { "id": 8699, "commit_id": "6ee67ef2d2098d236e06d1d7672d92fc192c55b0", "repo": "ludwig", "path": "ludwig/collect.py", "file_name": "collect.py", "fun_name": "cli_collect_weights", "commit_message": "[Annotations] Logging Level Registry (#2814)\n\n* Add DeveloperAPI annotations to some utils\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* remove annotations for private methods\r\n\r\n* [Annotations] Logging Level Registry\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def cli_collect_weights(sys_argv):\n \n parser = argparse.ArgumentParser(\n description=\"This script loads a pretrained model \" \"and uses it collect weights.\",\n prog=\"ludwig collect_weights\",\n usage=\"%(prog)s [options]\",\n )\n\n # ----------------\n # Model parameters\n # ----------------\n parser.add_argument(\"-m\", \"--model_path\", help=\"model to load\", required=True)\n parser.add_argument(\"-t\", \"--tensors\", help=\"tensors to collect\", nargs=\"+\", required=True)\n\n # -------------------------\n # Output results parameters\n # -------------------------\n parser.add_argument(\n \"-od\", \"--output_directory\", type=str, default=\"results\", help=\"directory that contains the results\"\n )\n\n # ------------------\n # Runtime parameters\n # ------------------\n parser.add_argument(\n \"-l\",\n \"--logging_level\",\n default=\"info\",\n help=\"the level of logging to use\",\n choices=[\"critical\", \"error\", \"warning\", \"info\", \"debug\", \"notset\"],\n )\n\n add_contrib_callback_args(parser)\n args = parser.parse_args(sys_argv)\n\n args.callbacks = args.callbacks or []\n for callback in args.callbacks:\n callback.on_cmdline(\"collect_weights\", *sys_argv)\n\n args.logging_level = get_logging_level_registry()[args.logging_level]\n logging.getLogger(\"ludwig\").setLevel(args.logging_level)\n global logger\n logger = logging.getLogger(\"ludwig.collect\")\n\n print_ludwig(\"Collect Weights\", LUDWIG_VERSION)\n\n collect_weights(**vars(args))\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 267, "n_words": 113, "vocab_size": 88, "complexity": 3, "nloc": 29, "token_counts": 202, "n_ast_nodes": 365, "n_identifiers": 32, "random_cut": "def cli_collect_weights(sys_argv):\n \n parser = argparse.ArgumentParser(\n description=\"This script loads a pretrained model \" \"and uses it collect weights.\",\n prog=\"ludwig collect_weights\",\n usage=\"%(prog)s [options]\",\n )\n\n # ----------------\n # Model parameters\n # ----------------\n parser.add_argument(\"-m\", \"--model_path\", help=\"model to load\", required=True)\n parser.add_argument(\"-t\", \"--tensors\", help=\"tensors to collect\", nargs=\"+\", required=True)\n\n # -------------------------\n # Output results parameters\n # -------------------------\n parser.add_argument(\n \"-od\", \"--output_directory\", type=str, default=\"results\", help=\"directory that contains the results\"\n )\n\n # ------------", "d_id": 1484, "documentation": { "docstring": "Command Line Interface to collecting the weights for the model.\n\n --m: Input model that is necessary to collect to the tensors, this is a\n required *option*\n --t: Tensors to collect\n --od: Output directory of the model, defaults to results\n --v: Verbose: Defines the logging level that the user will be exposed to\n ", "n_words": 52, "vocab_size": 39, 
"n_whitespaces": 75, "language": "en" } }, { "id": 66051, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/daily_work_summary/daily_work_summary.py", "file_name": "daily_work_summary.py", "fun_name": "get_user_emails_from_group", "commit_message": "style: format code with black", "code": "def get_user_emails_from_group(group):\n\t\n\tgroup_doc = group\n\tif isinstance(group_doc, str):\n\t\tgroup_doc = frappe.get_doc(\"Daily Work Summary Group\", group)\n\n\temails = get_users_email(group_doc)\n\n\treturn emails\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 14, "n_words": 20, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 35, "n_ast_nodes": 60, "n_identifiers": 9, "random_cut": "def get_user_emails_from_group(group):\n\t\n\tgroup_doc = group\n\tif isinstance(group_doc, str):\n\t\tgroup_doc = frappe.g", "d_id": 14093, "documentation": { "docstring": "Returns list of email of enabled users from the given group\n\n\t:param group: Daily Work Summary Group `name`", "n_words": 18, "vocab_size": 17, "n_whitespaces": 16, "language": "en" } }, { "id": 204859, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/operations.py", "file_name": "operations.py", "fun_name": "adapt_unknown_value", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def adapt_unknown_value(self, value):\n \n if isinstance(value, datetime.datetime): # must be before date\n return self.adapt_datetimefield_value(value)\n elif isinstance(value, datetime.date):\n return self.adapt_datefield_value(value)\n elif isinstance(value, datetime.time):\n return self.adapt_timefield_value(value)\n elif isinstance(value, decimal.Decimal):\n return self.adapt_decimalfield_value(value)\n else:\n return value\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 129, "n_words": 31, "vocab_size": 22, "complexity": 5, "nloc": 11, "token_counts": 80, "n_ast_nodes": 127, "n_identifiers": 13, "random_cut": "def adapt_unknown_value(self, value):\n \n if isinstance(value, datetime.datetime): #", "d_id": 50936, "documentation": { "docstring": "\n Transform a value to something compatible with the backend driver.\n\n This method only depends on the type of the value. 
It's designed for\n cases where the target type isn't known, such as .raw() SQL queries.\n As a consequence it may not work perfectly in all circumstances.\n ", "n_words": 46, "vocab_size": 41, "n_whitespaces": 82, "language": "en" } }, { "id": 73532, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/tests/test_templates.py", "file_name": "test_templates.py", "fun_name": "test_get_settings_variable_assignment_request_context", "commit_message": "Reformat with black", "code": "def test_get_settings_variable_assignment_request_context(self):\n \n request = self.get_request(site=self.other_site)\n context = Context({\"request\": request})\n template = Template(\n \"{% load wagtailsettings_tags %}\"\n \"{% get_settings as wagtail_settings %}\"\n \"{{ wagtail_settings.tests.testsetting.title}}\"\n )\n\n self.assertEqual(template.render(context), self.other_site_settings.title)\n # Also check that the default 'settings' variable hasn't been set\n template = Template(\n \"{% load wagtailsettings_tags %}\"\n \"{% get_settings as wagtail_settings %}\"\n \"{{ settings.tests.testsetting.title}}\"\n )\n\n self.assertEqual(template.render(context), \"\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 190, "n_words": 54, "vocab_size": 35, "complexity": 1, "nloc": 15, "token_counts": 74, "n_ast_nodes": 137, "n_identifiers": 14, "random_cut": "def test_get_settings_variable_assignment_request_context(self):\n \n request = self.get_request(site=self.other_site)\n context = Context({\"request\": request})\n template = Template(\n \"{% load wagtailsettings_tags %}\"\n \"{% get_settings as wagtai", "d_id": 16049, "documentation": { "docstring": "\n Check that assigning the setting to a context variable with\n {% get_settings as wagtail_settings %} works.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 154275, "commit_id": "b240370bf83c88589d293b76b4a2409294e06f90", "repo": "modin", "path": "modin/core/io/column_stores/parquet_dispatcher.py", "file_name": "parquet_dispatcher.py", "fun_name": "get_dataset", "commit_message": "FEAT-#4733: Support fastparquet as engine for `read_parquet` (#4807)\n\nSigned-off-by: Karthik Velayutham ", "code": "def get_dataset(cls, path, engine, storage_options):\n \n if engine == \"auto\":\n # We follow in concordance with pandas\n engine_classes = [PyArrowDataset, FastParquetDataset]\n\n error_msgs = \"\"\n for engine_class in engine_classes:\n try:\n return engine_class(path, storage_options)\n except ImportError as err:\n error_msgs += \"\\n - \" + str(err)\n\n raise ImportError(\n \"Unable to find a usable engine; \"\n + \"tried using: 'pyarrow', 'fastparquet'.\\n\"\n + \"A suitable version of \"\n + \"pyarrow or fastparquet is required for parquet \"\n + \"support.\\n\"\n + \"Trying to import the above resulted in these errors:\"\n + f\"{error_msgs}\"\n )\n elif engine == \"pyarrow\":\n return PyArrowDataset(path, storage_options)\n elif engine == \"fastparquet\":\n return FastParquetDataset(path, storage_options)\n else:\n raise ValueError(\"engine must be one of 'pyarrow', 'fastparquet'\")\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 416, "n_words": 109, "vocab_size": 82, "complexity": 6, "nloc": 24, "token_counts": 103, "n_ast_nodes": 193, "n_identifiers": 14, 
"random_cut": "def get_dataset(cls, path, engine, storage_options):\n \n if engine == \"auto\":\n # We follow in concordance with pandas\n engine_classes = ", "d_id": 35894, "documentation": { "docstring": "\n Retrieve Parquet engine specific Dataset implementation.\n\n Parameters\n ----------\n path : str, path object or file-like object\n The filepath of the parquet file in local filesystem or hdfs.\n engine : str\n Parquet library to use (only 'PyArrow' is supported for now).\n storage_options : dict\n Parameters for specific storage engine.\n\n Returns\n -------\n Dataset\n Either a PyArrowDataset or FastParquetDataset object.\n ", "n_words": 57, "vocab_size": 45, "n_whitespaces": 172, "language": "en" } }, { "id": 35529, "commit_id": "8635407bc724c45142c1f91dbc9ef3ea681e1a56", "repo": "transformers", "path": "templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py", "file_name": "test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py", "fun_name": "test_causal_lm_model_past_with_attn_mask", "commit_message": "Fix tf.concatenate + test past_key_values for TF models (#15774)\n\n* fix wrong method name tf.concatenate\r\n\r\n* add tests related to causal LM / decoder\r\n\r\n* make style and quality\r\n\r\n* clean-up\r\n\r\n* Fix TFBertModel's extended_attention_mask when past_key_values is provided\r\n\r\n* Fix tests\r\n\r\n* fix copies\r\n\r\n* More tf.int8 -> tf.int32 in TF test template\r\n\r\n* clean-up\r\n\r\n* Update TF test template\r\n\r\n* revert the previous commit + update the TF test template\r\n\r\n* Fix TF template extended_attention_mask when past_key_values is provided\r\n\r\n* Fix some styles manually\r\n\r\n* clean-up\r\n\r\n* Fix ValueError: too many values to unpack in the test\r\n\r\n* Fix more: too many values to unpack in the test\r\n\r\n* Add a comment for extended_attention_mask when there is past_key_values\r\n\r\n* Fix TFElectra extended_attention_mask when past_key_values is provided\r\n\r\n* Add tests to other TF models\r\n\r\n* Fix for TF Electra test: add prepare_config_and_inputs_for_decoder\r\n\r\n* Fix not passing training arg to lm_head in TFRobertaForCausalLM\r\n\r\n* Fix tests (with past) for TF Roberta\r\n\r\n* add testing for pask_key_values for TFElectra model\r\n\r\nCo-authored-by: ydshieh ", "code": "def test_causal_lm_model_past_with_attn_mask(self):\n \n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def test_causal_lm_model_past_with_attn_mask(self):\n \n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_causal_lm_model_past", "d_id": 6472, "documentation": { "docstring": "Test the causal LM model with `past_key_values` and `attention_mask`", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 223838, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/mime/audio.py", "file_name": "audio.py", "fun_name": "_whatsnd", "commit_message": "add python 3.10.4 for windows", "code": "def _whatsnd(data):\n \n hdr = data[:512]\n fakefile = 
BytesIO(hdr)\n for testfn in sndhdr.tests:\n res = testfn(hdr, fakefile)\n if res is not None:\n return _sndhdr_MIMEmap.get(res[0])\n return None", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 65, "n_words": 25, "vocab_size": 21, "complexity": 3, "nloc": 8, "token_counts": 52, "n_ast_nodes": 83, "n_identifiers": 11, "random_cut": "def _whatsnd(data):\n \n hdr = data[:512]\n fakefile = BytesIO(hdr)\n for testfn in sndhdr.tests:\n res = testfn(hdr, fakefile)\n if res is not None:\n ", "d_id": 57097, "documentation": { "docstring": "Try to identify a sound file type.\n\n sndhdr.what() has a pretty cruddy interface, unfortunately. This is why\n we re-do it here. It would be easier to reverse engineer the Unix 'file'\n command and use the standard 'magic' file, as shipped with a modern Unix.\n ", "n_words": 44, "vocab_size": 40, "n_whitespaces": 58, "language": "en" } }, { "id": 311921, "commit_id": "5a34feb7de440e0df748c9db500facc72a4c2646", "repo": "core", "path": "tests/util/test_async.py", "file_name": "test_async.py", "fun_name": "test_protect_loop_debugger_sleep", "commit_message": "Don't warn on time.sleep injected by the debugger (#65420)", "code": "async def test_protect_loop_debugger_sleep(caplog):\n \n block_async_io.enable()\n\n with patch(\n \"homeassistant.util.async_.extract_stack\",\n return_value=[\n Mock(\n filename=\"/home/paulus/homeassistant/.venv/blah/pydevd.py\",\n lineno=\"23\",\n line=\"do_something()\",\n ),\n Mock(\n filename=\"/home/paulus/homeassistant/util/async.py\",\n lineno=\"123\",\n line=\"protected_loop_func\",\n ),\n Mock(\n filename=\"/home/paulus/homeassistant/util/async.py\",\n lineno=\"123\",\n line=\"check_loop()\",\n ),\n ],\n ):\n time.sleep(0)\n assert \"Detected blocking call inside the event loop\" not in caplog.text\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 281, "n_words": 37, "vocab_size": 31, "complexity": 1, "nloc": 24, "token_counts": 84, "n_ast_nodes": 148, "n_identifiers": 13, "random_cut": "async def test_protect_loop_debugger_sleep(caplog):\n \n block_async_io.enable()\n\n with patch(\n \"homeassistant.util.async_.extract_stack\",\n return_value=[\n Mock(\n filename=\"/home/paulus/homeassistant/.venv/blah/pydevd.py\",\n lineno=\"23\",\n ", "d_id": 110574, "documentation": { "docstring": "Test time.sleep injected by the debugger is not reported.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 9894, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/peapods/pods/__init__.py", "file_name": "__init__.py", "fun_name": "update_worker_pea_args", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable 
sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less 
flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make 
rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): 
daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* 
test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def update_worker_pea_args(self):\n \n self.peas_args['peas'] = self._set_peas_args(self.args)\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 38, "n_identifiers": 5, "random_cut": "def update_worker_pea_args(self):\n \n self.peas_args['peas'] = self._set_peas_args(self.args)\n", "d_id": 1761, "documentation": { "docstring": " Update args of all its worker peas based on Pod args. 
Does not touch head and tail", "n_words": 17, "vocab_size": 17, "n_whitespaces": 17, "language": "en" } }, { "id": 197040, "commit_id": "e0dc14eca132f37c5f49369eb4051eae37c9b119", "repo": "sympy", "path": "sympy/ntheory/generate.py", "file_name": "generate.py", "fun_name": "prevprime", "commit_message": "Refactored import ordering in functions", "code": "def prevprime(n):\n \n n = _as_int_ceiling(n)\n if n < 3:\n raise ValueError(\"no preceding primes\")\n if n < 8:\n return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n]\n if n <= sieve._list[-1]:\n l, u = sieve.search(n)\n if l == u:\n return sieve[l-1]\n else:\n return sieve[l]\n nn = 6*(n//6)\n if n - nn <= 1:\n n = nn - 1\n if isprime(n):\n return n\n n -= 4\n else:\n n = nn + 1\n while 1:\n if isprime(n):\n return n\n n -= 2\n if isprime(n):\n return n\n n -= 4\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 261, "n_words": 88, "vocab_size": 45, "complexity": 10, "nloc": 27, "token_counts": 154, "n_ast_nodes": 248, "n_identifiers": 11, "random_cut": "def prevprime(n):\n \n n = _as_int_ceiling(n)\n if n < 3:\n raise ValueError(\"no preceding primes\")\n if n < 8:\n return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n]\n if n <= sieve._list[-1]:\n l, u = sieve.search(n)\n if l == u:\n return sieve[l-1]\n else:\n return sieve[l]\n nn = 6*(n//6)\n if n - nn <= 1:\n n = nn - 1\n if isprime(n):\n return n\n n -= 4\n else:\n n = nn + 1\n while 1:\n if isprime(n):\n return n\n n -= 2\n if isprime(n):\n ", "d_id": 48297, "documentation": { "docstring": " Return the largest prime smaller than n.\n\n Notes\n =====\n\n Potential primes are located at 6*j +/- 1. This\n property is used during searching.\n\n >>> from sympy import prevprime\n >>> [(i, prevprime(i)) for i in range(10, 15)]\n [(10, 7), (11, 7), (12, 11), (13, 11), (14, 13)]\n\n See Also\n ========\n\n nextprime : Return the ith prime greater than n\n primerange : Generates all primes in a given range\n ", "n_words": 67, "vocab_size": 57, "n_whitespaces": 148, "language": "en" } }, { "id": 307759, "commit_id": "dd20a7ea62fc003748c5f0cf99be25c69c9b5a05", "repo": "core", "path": "tests/components/recorder/test_statistics.py", "file_name": "test_statistics.py", "fun_name": "test_duplicate_statistics_handle_integrity_error", "commit_message": "Display statistics in the source's unit (#78031)", "code": "def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog):\n \n hass = hass_recorder()\n wait_recording_done(hass)\n\n period1 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-01 00:00:00\"))\n period2 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-30 23:00:00\"))\n\n external_energy_metadata_1 = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"test\",\n \"state_unit_of_measurement\": \"kWh\",\n \"statistic_id\": \"test:total_energy_import_tariff_1\",\n \"unit_of_measurement\": \"kWh\",\n }\n external_energy_statistics_1 = [\n {\n \"start\": period1,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 5,\n },\n ]\n external_energy_statistics_2 = [\n {\n \"start\": period2,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 6,\n }\n ]\n\n with patch.object(\n statistics, \"_statistics_exists\", return_value=False\n ), patch.object(\n statistics, \"_insert_statistics\", wraps=statistics._insert_statistics\n ) as insert_statistics_mock:\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_1\n )\n 
async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_1\n )\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_2\n )\n wait_recording_done(hass)\n assert insert_statistics_mock.call_count == 3\n\n with session_scope(hass=hass) as session:\n tmp = session.query(recorder.db_schema.Statistics).all()\n assert len(tmp) == 2\n\n assert \"Blocked attempt to insert duplicated statistic rows\" in caplog.text\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 447, "n_words": 117, "vocab_size": 79, "complexity": 1, "nloc": 50, "token_counts": 224, "n_ast_nodes": 387, "n_identifiers": 32, "random_cut": "def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog):\n \n hass = hass_recorder()\n wait_recording_done(hass)\n\n period1 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-01 00:00:00\"))\n period2 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-30 23:00:00\"))\n\n external_energy_metadata_1 = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"test\",\n \"state_unit_of_measurement\": \"kWh\",\n \"statistic_id\": \"test:total_energy_import_tariff_1\",\n \"unit_of_measurement\": \"kWh\",\n }\n external_energy_statistics_1 = [\n {\n \"start\": period1,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 5,\n },\n ]\n external_energy_statistics_2 = [\n {\n \"start\": period2,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 6,\n }\n ]\n\n with patch.object(\n statistics, \"_statistics_exists\", return_value=False\n ), patch.object(\n statistics, \"_insert_statistics\", wraps=statistics._insert_statistics\n ) as insert_statistics_mock:\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_1\n )\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_1\n )\n as", "d_id": 106525, "documentation": { "docstring": "Test the recorder does not blow up if statistics is duplicated.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 131786, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_resource_demand_scheduler.py", "file_name": "test_resource_demand_scheduler.py", "fun_name": "testRequestResourcesRaceConditionWithResourceDemands", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def testRequestResourcesRaceConditionWithResourceDemands(self):\n \n config = copy.deepcopy(MULTI_WORKER_CLUSTER)\n config[\"available_node_types\"].update(\n {\n \"empty_node\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2, \"GPU\": 1},\n \"max_workers\": 1,\n },\n \"def_worker\": {\n \"node_config\": {},\n \"resources\": {\"CPU\": 2, \"GPU\": 1, \"WORKER\": 1},\n \"max_workers\": 3,\n },\n }\n )\n config[\"idle_timeout_minutes\"] = 0\n\n config_path = self.write_config(config)\n self.provider = MockProvider()\n self.provider.create_node(\n {},\n {\n TAG_RAY_NODE_KIND: \"head\",\n TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,\n TAG_RAY_USER_NODE_TYPE: \"empty_node\",\n },\n 1,\n )\n\n runner = MockProcessRunner()\n runner.respond_to_call(\"json .Config.Env\", [\"[]\" for i in range(2)])\n lm = LoadMetrics()\n autoscaler = MockAutoscaler(\n config_path,\n lm,\n MockNodeInfoStub(),\n max_failures=0,\n 
process_runner=runner,\n update_interval_s=0,\n )\n lm.update(\n \"127.0.0.0\",\n mock_raylet_id(),\n {\"CPU\": 2, \"GPU\": 1},\n {\"CPU\": 2},\n {},\n waiting_bundles=[{\"CPU\": 2}],\n )\n autoscaler.load_metrics.set_resource_requests([{\"CPU\": 2, \"GPU\": 1}] * 2)\n autoscaler.update()\n # 1 head, 1 worker.\n self.waitForNodes(2)\n lm.update(\n \"127.0.0.0\",\n mock_raylet_id(),\n {\"CPU\": 2, \"GPU\": 1},\n {\"CPU\": 2},\n {},\n waiting_bundles=[{\"CPU\": 2}],\n )\n # make sure it stays consistent.\n for _ in range(10):\n autoscaler.update()\n self.waitForNodes(2)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 799, "n_words": 130, "vocab_size": 78, "complexity": 3, "nloc": 61, "token_counts": 310, "n_ast_nodes": 521, "n_identifiers": 35, "random_cut": "def testRequestResourcesRaceConditionWithResourceDemands(self):\n \n config = copy.deepcopy(MULTI_WORKER_CLUSTER)\n config[\"available_node_types\"].update(\n {\n \"empty_node\":", "d_id": 29587, "documentation": { "docstring": "Test request_resources() with resource_demands.\n\n Tests when request_resources() is called simultaneously with resource\n demands in multiple orders.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 82431, "commit_id": "c1290c9ff89cb00caa5469129fd527e9d82cd820", "repo": "django-cms", "path": "cms/tests/test_sitemap.py", "file_name": "test_sitemap.py", "fun_name": "test_sitemap_unpublished_titles", "commit_message": "ci: Added codespell (#7355)\n\nCo-authored-by: Christian Clauss \r\n\r\n* ci: codespell config taken from #7292", "code": "def test_sitemap_unpublished_titles(self):\n \n sitemap = CMSSitemap()\n locations = []\n urlset = sitemap.get_urls()\n unpublished_titles = set()\n for item in urlset:\n locations.append(item['location'])\n for page in Page.objects.drafts():\n if page.get_public_object():\n set1 = set(page.get_public_object().title_set.values_list('path', flat=True))\n set2 = set(page.title_set.values_list('path', flat=True))\n unpublished_titles.update(set2.difference(set1))\n else:\n unpublished_titles.update(page.title_set.values_list('path', flat=True))\n\n for path in unpublished_titles:\n title = Title.objects.get(path=path)\n if title.path:\n url = f'http://example.com/{title.language}/{title.path}/'\n else:\n url = f'http://example.com/{title.language}/{title.path}'\n self.assertFalse(url in locations)\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 279, "n_words": 56, "vocab_size": 38, "complexity": 6, "nloc": 21, "token_counts": 167, "n_ast_nodes": 308, "n_identifiers": 30, "random_cut": "def test_sitemap_unpublished_titles(self):\n \n sitemap = CMSSitemap()\n locations = []\n urlset = sitemap.get_urls()\n unpublished_titles = set()\n for item in urlset:\n locations.append(item['location'])\n for page in Page.objects.drafts():\n if page.get_public_object():\n set1 = set(page.get_public_object().title_set.values_list('path', flat=True))\n set2 = set(page.title_set.values_list('path', flat=True))\n unpublished_titles.update(set2.difference(set1))\n else:\n unpublished_titles.update(page.title_set.values_list('path', flat=True))\n\n for path in unpublished_titles:\n title = Title.objects.get(path=path)\n if title.path:\n url = f'http://example.com/{title.language}/{title.path}/'\n else:\n ", "d_id": 17397, "documentation": { "docstring": "\n 
Check that titles attached to unpublished pages are not in the urlset.\n As titles are 'published' depending on their attached page, we create a\n set of unpublished titles by checking titles attached to the draft and\n public version of each page\n ", "n_words": 41, "vocab_size": 31, "n_whitespaces": 77, "language": "en" } }, { "id": 13120, "commit_id": "cdaf7f87ececf9e13b517379ca183b17f0d7b007", "repo": "jina", "path": "jina/parsers/orchestrate/runtimes/remote.py", "file_name": "remote.py", "fun_name": "mixin_gateway_parser", "commit_message": "feat: allow passing custom gateway in Flow (#5189)", "code": "def mixin_gateway_parser(parser):\n \n gp = add_arg_group(parser, title='Gateway')\n _add_host(gp)\n _add_proxy(gp)\n\n gp.add_argument(\n '--uses',\n type=str,\n default=None,\n # TODO: add Jina Hub Gateway\n help=,\n )\n\n gp.add_argument(\n '--uses-with',\n action=KVAppendAction,\n metavar='KEY: VALUE',\n nargs='*',\n help=,\n )\n\n gp.add_argument(\n '--py-modules',\n type=str,\n nargs='*',\n metavar='PATH',\n help=,\n )\n\n mixin_base_runtime_parser(gp)\n\n gp.add_argument(\n '--port-expose',\n type=int,\n dest='port',\n default=helper.random_port(),\n help='The port that the gateway exposes for clients for GRPC connections.',\n )\n\n parser.add_argument(\n '--graph-description',\n type=str,\n help='Routing graph for the gateway',\n default='{}',\n )\n\n parser.add_argument(\n '--graph-conditions',\n type=str,\n help='Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.',\n default='{}',\n )\n\n parser.add_argument(\n '--deployments-addresses',\n type=str,\n help='dictionary JSON with the input addresses of each Deployment',\n default='{}',\n )\n\n parser.add_argument(\n '--deployments-disable-reduce',\n type=str,\n help='list JSON disabling the built-in merging mechanism for each Deployment listed',\n default='[]',\n )\n\n gp.add_argument(\n '--compression',\n choices=['NoCompression', 'Deflate', 'Gzip'],\n help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. 
For more details, '\n 'check https://grpc.github.io/grpc/python/grpc.html#compression.',\n )\n\n gp.add_argument(\n '--timeout-send',\n type=int,\n default=None,\n help='The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default',\n )\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 543, "n_words": 160, "vocab_size": 108, "complexity": 1, "nloc": 87, "token_counts": 237, "n_ast_nodes": 404, "n_identifiers": 22, "random_cut": "def mixin_gateway_parser(parser):\n \n gp = add_arg_group(parser, title='Gateway')\n _add_host(gp)\n _add_proxy(gp)\n\n gp.add_argument(\n '--uses',\n type=str,\n default=None,\n # TODO: add Jina Hub Gateway\n help=,\n )\n\n gp.add_argument(\n '--uses-with',\n action=KVAppendAction,\n metavar='KEY: VALUE',\n nargs='*',\n help=,\n )\n\n gp.add_argument(\n '--py-modules',\n type=str,\n nargs='*',\n metavar='PATH',\n help=,\n )\n\n mixin_base_runtime_parser(gp)\n\n gp.add_argument(\n '--port-expose',\n type=int,\n dest='port'", "d_id": 2555, "documentation": { "docstring": "Add the options for remote expose at the Gateway\n :param parser: the parser\n \n The config of the gateway, it could be one of the followings:\n * the string literal of an Gateway class name\n * a Gateway YAML file (.yml, .yaml, .jaml)\n * a docker image (must start with `docker://`)\n * the string literal of a YAML config (must start with `!` or `jtype: `)\n * the string literal of a JSON config\n\n When use it under Python, one can use the following values additionally:\n - a Python dict that represents the config\n - a text file stream has `.read()` interface\n \n Dictionary of keyword arguments that will override the `with` configuration in `uses`\n \nThe customized python modules need to be imported before loading the gateway\n\nNote that the recommended way is to only import a single module - a simple python file, if your\ngateway can be defined in a single file, or an ``__init__.py`` file if you have multiple files,\nwhich should be structured as a python package.\n", "n_words": 169, "vocab_size": 102, "n_whitespaces": 249, "language": "en" } }, { "id": 222649, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/bdist_rpm.py", "file_name": "bdist_rpm.py", "fun_name": "_format_changelog", "commit_message": "add python 3.10.4 for windows", "code": "def _format_changelog(self, changelog):\n \n if not changelog:\n return changelog\n new_changelog = []\n for line in changelog.strip().split('\\n'):\n line = line.strip()\n if line[0] == '*':\n new_changelog.extend(['', line])\n elif line[0] == '-':\n new_changelog.append(line)\n else:\n new_changelog.append(' ' + line)\n\n # strip trailing newline inserted by first changelog entry\n if not new_changelog[0]:\n del new_changelog[0]\n\n return new_changelog\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 211, "n_words": 50, "vocab_size": 40, "complexity": 6, "nloc": 15, "token_counts": 95, "n_ast_nodes": 165, "n_identifiers": 9, "random_cut": "def _format_changelog(self, changelog):\n \n if not changelog:\n return changelog\n new_changelog = []\n for line in changelog.strip().split('\\n'):\n line = line.strip()\n if line[0] == '*':\n new_changelog.extend(['', line])\n elif line[0] == '-':\n new_changelog.append(line)\n else:\n 
new_changelog.append(' ' + line)\n\n # strip trailing newline inserted by first changelog entry\n if not new_changelog[0]:\n del new_changelog[0]\n\n return ne", "d_id": 56687, "documentation": { "docstring": "Format the changelog correctly and convert it to a list of strings\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 183499, "commit_id": "15df75919744fbea824bbf029cfb56029a3d0dc8", "repo": "textual", "path": "src/textual/_animator.py", "file_name": "_animator.py", "fun_name": "_get_time", "commit_message": "[App] Finally, time mocking in tests seems to be working! 😅\n\nI had to add a flag in the `_timer` module that allows us to completely disable the \"skip\" feature of Timers, though - but it shouldn't cause too much trouble 🤞", "code": "def _get_time(self) -> float:\n \n # N.B. We could remove this method and always call `self._timer.get_time()` internally,\n # but it's handy to have in mocking situations\n return self._timer.get_time()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 55, "n_words": 27, "vocab_size": 26, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def _get_time(self) -> float:\n \n # N.B. We could remove this method and always call `self._timer.get_time()", "d_id": 44221, "documentation": { "docstring": "Get the current wall clock time, via the internal Timer.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 181829, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/base.py", "file_name": "base.py", "fun_name": "_generate", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def _generate(self, pset, min_, max_, condition, type_=None):\n \n if type_ is None:\n type_ = pset.ret\n expr = []\n height = np.random.randint(min_, max_)\n stack = [(0, type_)]\n while len(stack) != 0:\n depth, type_ = stack.pop()\n\n # We've added a type_ parameter to the condition function\n if condition(height, depth, type_):\n try:\n term = np.random.choice(pset.terminals[type_])\n except IndexError:\n _, _, traceback = sys.exc_info()\n raise IndexError(\n \"The gp.generate function tried to add \"\n \"a terminal of type {}, but there is\"\n \"none available. {}\".format(type_, traceback)\n )\n if inspect.isclass(term):\n term = term()\n expr.append(term)\n else:\n try:\n prim = np.random.choice(pset.primitives[type_])\n except IndexError:\n _, _, traceback = sys.exc_info()\n raise IndexError(\n \"The gp.generate function tried to add \"\n \"a primitive of type {}, but there is\"\n \"none available. 
{}\".format(type_, traceback)\n )\n expr.append(prim)\n for arg in reversed(prim.args):\n stack.append((depth + 1, arg))\n return expr\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 683, "n_words": 131, "vocab_size": 83, "complexity": 8, "nloc": 35, "token_counts": 221, "n_ast_nodes": 357, "n_identifiers": 34, "random_cut": "def _generate(self, pset, min_, max_, condition, type_=None):\n \n if type_ is None:\n type_ = pset.ret\n expr = []\n height = np.random.randint(min_, max_)\n stack = [(0, type_)]\n while len(stack) != 0:\n depth, type_ = stack.pop()\n\n # We've added a type_ parameter to the condition function\n ", "d_id": 43607, "documentation": { "docstring": "Generate a Tree as a list of lists.\n\n The tree is build from the root to the leaves, and it stop growing when\n the condition is fulfilled.\n\n Parameters\n ----------\n pset: PrimitiveSetTyped\n Primitive set from which primitives are selected.\n min_: int\n Minimum height of the produced trees.\n max_: int\n Maximum height of the produced trees.\n condition: function\n The condition is a function that takes two arguments,\n the height of the tree to build and the current\n depth in the tree.\n type_: class\n The type that should return the tree when called, when\n :obj:None (default) no return type is enforced.\n\n Returns\n -------\n individual: list\n A grown tree with leaves at possibly different depths\n depending on the condition function.\n ", "n_words": 116, "vocab_size": 75, "n_whitespaces": 317, "language": "en" } }, { "id": 259230, "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", "repo": "scikit-learn", "path": "sklearn/preprocessing/tests/test_encoders.py", "file_name": "test_encoders.py", "fun_name": "test_ohe_infrequent_multiple_categories_dtypes", "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC 
Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_ohe_infrequent_multiple_categories_dtypes():\n \n\n pd = pytest.importorskip(\"pandas\")\n X = pd.DataFrame(\n {\n \"str\": [\"a\", \"f\", \"c\", \"f\", \"f\", \"a\", \"c\", \"b\", \"b\"],\n \"int\": [5, 3, 0, 10, 10, 12, 0, 3, 5],\n },\n columns=[\"str\", \"int\"],\n )\n\n ohe = OneHotEncoder(\n categories=\"auto\", max_categories=3, handle_unknown=\"infrequent_if_exist\"\n )\n # X[:, 0] 'a', 'b', 'c' have the same frequency. 
'a' and 'b' will be\n # considered infrequent because they are greater\n\n # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1.\n # 0, 3, 12 will be considered infrequent\n\n X_trans = ohe.fit_transform(X).toarray()\n assert_array_equal(ohe.infrequent_categories_[0], [\"a\", \"b\"])\n assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12])\n\n expected = [\n [0, 0, 1, 1, 0, 0],\n [0, 1, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 1],\n [1, 0, 0, 0, 0, 1],\n [0, 0, 1, 0, 0, 1],\n [0, 0, 1, 1, 0, 0],\n ]\n\n assert_allclose(expected, X_trans)\n\n X_test = pd.DataFrame({\"str\": [\"b\", \"f\"], \"int\": [14, 12]}, columns=[\"str\", \"int\"])\n\n expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]]\n X_test_trans = ohe.transform(X_test)\n assert_allclose(expected, X_test_trans.toarray())\n\n X_inv = ohe.inverse_transform(X_test_trans)\n expected_inv = np.array(\n [[\"infrequent_sklearn\", \"infrequent_sklearn\"], [\"f\", \"infrequent_sklearn\"]],\n dtype=object,\n )\n assert_array_equal(expected_inv, X_inv)\n\n # only infrequent or known categories\n X_test = pd.DataFrame({\"str\": [\"c\", \"b\"], \"int\": [12, 5]}, columns=[\"str\", \"int\"])\n X_test_trans = ohe.transform(X_test).toarray()\n expected = [[1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 0, 0]]\n assert_allclose(expected, X_test_trans)\n\n X_inv = ohe.inverse_transform(X_test_trans)\n expected_inv = np.array(\n [[\"c\", \"infrequent_sklearn\"], [\"infrequent_sklearn\", 5]], dtype=object\n )\n assert_array_equal(expected_inv, X_inv)\n\n\n@pytest.mark.parametrize(\"kwargs\", [{\"min_frequency\": 21, \"max_categories\": 1}])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"kwargs\", [{\"min_frequency\": 21, \"max_categories\": 1}])", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 484, "n_words": 252, "vocab_size": 119, "complexity": 1, "nloc": 46, "token_counts": 510, "n_ast_nodes": 782, "n_identifiers": 31, "random_cut": "def test_ohe_infrequent_multiple_categories_dtypes():\n \n\n pd = pytest.importorskip(\"pandas\")\n X = pd.DataFrame(\n {\n \"str\": [\"a\", \"f\", \"c\", \"f\", \"f\", \"a\", \"c\", \"b\", \"b\"],\n \"int\": [5, 3, 0, 10, 10, 12, 0, 3, 5],\n },\n columns=[\"str\", \"int\"],\n )\n\n ohe = OneHotEncoder(\n categories=\"auto\", max_categories=3, handle_unknown=\"infrequent_if_exist\"\n )\n # X[:, 0] 'a', 'b', 'c' have the same frequency. 
'a' and 'b' will be\n # considered infrequent because they are greater\n\n # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1.\n # 0, 3, 12 will be considered infrequent\n\n X_trans = ohe.fit_transform(X).toarray()\n assert_array_equal(ohe.infrequent_categories_[0], [\"a\", \"b\"])\n assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12])\n\n expected = [\n [0, 0, 1, 1, 0, 0],\n [0, 1, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 1],\n [1, 0, 0, 0, 0, 1],\n [0, 0, 1, 0, 0, 1],\n [0, 0, 1, 1, 0, 0],\n ]\n\n assert_allclose(expected, X_trans)\n\n X_test = pd.DataFrame({\"str\": [\"b\", \"f\"], \"int\": [14, 12]}, columns=[\"str\", \"int\"])\n\n expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]]\n X_test_trans = ohe.transform(X_test)\n assert_allclose(expected, X_test_trans.toarray())\n\n X_inv = ohe.inverse_transform(X_test_trans)\n expected_inv ", "d_id": 75664, "documentation": { "docstring": "Test infrequent categories with a pandas dataframe with multiple dtypes.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 39805, "commit_id": "41e322bd17bcbaa34e315b27b8f33f07e6671142", "repo": "dash", "path": "dash/development/base_component.py", "file_name": "base_component.py", "fun_name": "_set_random_id", "commit_message": "error when autogenerated IDs are used with persistence or snapshots\nalso give set_random_id a leading underscore so it doesn't\nneed to become a reserved word (disallowed prop name)", "code": "def _set_random_id(self):\n if getattr(self, \"persistence\", False):\n raise RuntimeError(\n \n )\n if \"dash_snapshots\" in sys.modules:\n raise RuntimeError(\n \n )\n\n if not hasattr(self, \"id\"):\n v = str(uuid.UUID(int=rd.randint(0, 2 ** 128)))\n setattr(self, \"id\", v)\n return getattr(self, \"id\")\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 157, "n_words": 32, "vocab_size": 26, "complexity": 4, "nloc": 26, "token_counts": 78, "n_ast_nodes": 133, "n_identifiers": 15, "random_cut": "def _set_random_id(self):\n if getattr(self, \"persistence\", False):\n raise RuntimeError(\n \n )\n if \"dash_snapshots\" in sys.modules:\n raise RuntimeError(\n \n )\n\n if not hasattr(self, \"id\"):\n v = str(uuid.UUID(int=rd.randint(0, 2 ** 128)))\n setattr(self, \"id\", v)\n return getattr(self, \"id\")\n", "d_id": 7261, "documentation": { "docstring": "\n Attempting to use an auto-generated ID with the `persistence` prop.\n This is prohibited because persistence is tied to component IDs and\n auto-generated IDs can easily change.\n\n Please assign an explicit ID to this component.\n \n Attempting to use an auto-generated ID in an app with `dash_snapshots`.\n This is prohibited because snapshots saves the whole app layout,\n including component IDs, and auto-generated IDs can easily change.\n Callbacks referencing the new IDs will not work old snapshots.\n\n Please assign an explicit ID to this component.\n ", "n_words": 82, "vocab_size": 44, "n_whitespaces": 241, "language": "en" } }, { "id": 38638, "commit_id": "adc0ff25028d29af30386f2d7d3f85e290fbef57", "repo": "transformers", "path": "src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py", "file_name": "convert_cvt_original_pytorch_checkpoint_to_pytorch.py", "fun_name": "final", "commit_message": "Add CvT (#17299)\n\n* Adding cvt files\r\n\r\n* Adding cvt files\r\n\r\n* changes in init file\r\n\r\n* Adding cvt 
files\r\n\r\n* changes in init file\r\n\r\n* Style fixes\r\n\r\n* Address comments from code review\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Format lists in docstring\r\n\r\n* Fix copies\r\n\r\n* Apply suggestion from code review\r\n\r\nCo-authored-by: AnugunjNaman \r\nCo-authored-by: Ayushman Singh \r\nCo-authored-by: Niels Rogge \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def final():\n \n head = []\n head.append((\"layernorm.weight\", \"norm.weight\"))\n head.append((\"layernorm.bias\", \"norm.bias\"))\n head.append((\"classifier.weight\", \"head.weight\"))\n head.append((\"classifier.bias\", \"head.bias\"))\n return head\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 36, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 98, "n_identifiers": 3, "random_cut": "def final():\n \n head = []\n head.append((\"layernorm.weight\", \"norm.weight\"))\n head.append((\"layernorm.bias\", \"norm.bias\"))\n head.append((\"cl", "d_id": 7004, "documentation": { "docstring": "\n Function helps in renaming final classification layer\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 258142, "commit_id": "2bb81331b75aec68de0d45c4cb116170d265f1fe", "repo": "haystack", "path": "test/document_stores/test_sql.py", "file_name": "test_sql.py", "fun_name": "test_delete_index", "commit_message": "feat: add SQLDocumentStore tests (#3517)\n\n* port SQL tests\r\n\r\n* cleanup document_store_tests.py from sql tests\r\n\r\n* leftover\r\n\r\n* Update .github/workflows/tests.yml\r\n\r\nCo-authored-by: Sara Zan \r\n\r\n* review comments\r\n\r\n* Update test/document_stores/test_base.py\r\n\r\nCo-authored-by: bogdankostic \r\n\r\nCo-authored-by: Sara Zan \r\nCo-authored-by: bogdankostic ", "code": "def test_delete_index(self, ds, documents):\n \n ds.write_documents(documents, index=\"custom_index\")\n assert ds.get_document_count(index=\"custom_index\") == len(documents)\n ds.delete_index(index=\"custom_index\")\n assert ds.get_document_count(index=\"custom_index\") == 0\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 15, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 53, "n_ast_nodes": 92, "n_identifiers": 9, "random_cut": "def test_delete_index(self, ds, documents):\n \n ds.write_documents(documents, index=\"custom_index\")\n assert ds.get_document_count(index=\"custom_index\") == len(documents)\n ds.delete_index(index=\"custom_index\")\n assert ds.get_document_count(index=\"custom_index\") == 0\n", "d_id": 75191, "documentation": { "docstring": "Contrary to other Document Stores, SQLDocumentStore doesn't raise if the index is empty", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 66161, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/leave_application/leave_application.py", "file_name": "leave_application.py", "fun_name": "add_holidays", "commit_message": "style: format code with black", "code": "def add_holidays(events, start, end, employee, company):\n\tapplicable_holiday_list = get_holiday_list_for_employee(employee, company)\n\tif not 
applicable_holiday_list:\n\t\treturn\n\n\tfor holiday in frappe.db.sql(\n\t\t,\n\t\t(applicable_holiday_list, start, end),\n\t\tas_dict=True,\n\t):\n\t\tevents.append(\n\t\t\t{\n\t\t\t\t\"doctype\": \"Holiday\",\n\t\t\t\t\"from_date\": holiday.holiday_date,\n\t\t\t\t\"to_date\": holiday.holiday_date,\n\t\t\t\t\"title\": _(\"Holiday\") + \": \" + cstr(holiday.description),\n\t\t\t\t\"name\": holiday.name,\n\t\t\t}\n\t\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 25, "n_words": 44, "vocab_size": 41, "complexity": 3, "nloc": 19, "token_counts": 96, "n_ast_nodes": 165, "n_identifiers": 20, "random_cut": "def add_holidays(events, start, end, employee, company):\n\tapplicable_holiday_list = get_holiday_list_for_employee(employee, company)\n\tif not applicable_holiday_list:\n\t\treturn\n\n\tfor holiday in ", "d_id": 14118, "documentation": { "docstring": "select name, holiday_date, description\n\t\tfrom `tabHoliday` where parent=%s and holiday_date between %s and %s", "n_words": 14, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 179091, "commit_id": "2be32787538f1b0ef83f648ee60d2d4d4868d3fd", "repo": "DeepFaceLive", "path": "xlib/api/win32/dshow/helper.py", "file_name": "helper.py", "fun_name": "get_video_input_devices_names", "commit_message": "update xlib.api.win32", "code": "def get_video_input_devices_names() -> List[str]:\n \n # based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device\n\n names = []\n sys_dev_enum = strmif.ICreateDevEnum()\n if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmif.ICreateDevEnum.IID, sys_dev_enum) == wintypes.ERROR.SUCCESS:\n pEnumCat = objidl.IEnumMoniker()\n\n if sys_dev_enum.CreateClassEnumerator(uuids.CLSID_VideoInputDeviceCategory, pEnumCat, 0) == wintypes.ERROR.SUCCESS:\n\n moniker = objidl.IMoniker()\n\n while pEnumCat.Next(1, moniker, None) == wintypes.ERROR.SUCCESS:\n\n prop_bag = oaidl.IPropertyBag()\n if moniker.BindToStorage(None, None, oaidl.IPropertyBag.IID, prop_bag) == wintypes.ERROR.SUCCESS:\n var = wintypes.VARIANT()\n\n hr = prop_bag.Read(wintypes.LPCOLESTR('Description'), var, None )\n if hr != wintypes.ERROR.SUCCESS:\n hr = prop_bag.Read(wintypes.LPCOLESTR('FriendlyName'), var, None )\n\n names.append(var.value.bstrVal.value if hr == wintypes.ERROR.SUCCESS else 'unnamed')\n\n prop_bag.Release()\n moniker.Release()\n pEnumCat.Release()\n sys_dev_enum.Release()\n\n return names", "url": "https://github.com/iperov/DeepFaceLive.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 317, "n_words": 82, "vocab_size": 55, "complexity": 7, "nloc": 25, "token_counts": 230, "n_ast_nodes": 363, "n_identifiers": 38, "random_cut": "def get_video_input_devices_names() -> List[str]:\n \n # based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device\n\n names = []\n sys_dev_enum = strmif.ICreateDevEnum()\n if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmif.ICreateDevEnum.IID, sys_dev_enum) == wintypes.ERROR.SUCCESS:\n pEnumCat = objidl.IEnumMoniker()\n\n if sys_dev_enum.CreateClassEnumerator(uuids.CLSID_VideoInputDeviceCategory, pEnumCat, 0) == wintypes.ERROR.SUCCESS:\n\n moniker = objidl.IMoniker()\n\n while pEnumCat.Next(1, moniker, None) == wintypes.ERROR.SUCCESS:\n\n 
prop_bag = oaidl.IPropertyBag()\n if moniker.BindToStorage(None, None, oaidl.IPropertyBag.IID, prop_bag) == wintypes.ERROR.SUCCESS:\n var = wintypes.VARIANT()\n\n hr = prop_bag.Read(wintypes.LPCOLESTR('Description'), var, None )\n if hr != wintypes.ERROR.SUCCESS:\n hr = prop_bag.Read(wintypes.LPCOLESTR('FriendlyName'), var, None )\n\n names.append(var.value.bstrVal.value if hr == wintypes.ERROR.SU", "d_id": 42899, "documentation": { "docstring": "\n returns a list of available names of VideoInputDevice's\n\n ole32 should be initialized before use\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 24, "language": "en" } }, { "id": 30421, "commit_id": "deca40c2e26afed62e1f9ec4be14aff9e125929b", "repo": "spotify-downloader", "path": "spotdl/utils/console.py", "file_name": "console.py", "fun_name": "check_for_updates", "commit_message": "moved console actions to a new file", "code": "def check_for_updates():\n \n\n version_message = get_update_status()\n\n print(version_message)\n\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 15, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 28, "n_identifiers": 4, "random_cut": "def check_for_updates():\n \n\n version_message = get_update_status()\n\n print(version_message)\n\n", "d_id": 5565, "documentation": { "docstring": "\n Check for updates to the current version.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 259052, "commit_id": "34f9dbf54164e3c62d68765fe45f27f067a45562", "repo": "scikit-learn", "path": "sklearn/preprocessing/_polynomial.py", "file_name": "_polynomial.py", "fun_name": "_get_base_knot_positions", "commit_message": "MNT Clean fixes and compat for old versions of our dependencies (#22642)\n\nCo-authored-by: Olivier Grisel ", "code": "def _get_base_knot_positions(X, n_knots=10, knots=\"uniform\", sample_weight=None):\n \n if knots == \"quantile\":\n percentiles = 100 * np.linspace(\n start=0, stop=1, num=n_knots, dtype=np.float64\n )\n\n if sample_weight is None:\n knots = np.percentile(X, percentiles, axis=0)\n else:\n knots = np.array(\n [\n _weighted_percentile(X, sample_weight, percentile)\n for percentile in percentiles\n ]\n )\n\n else:\n # knots == 'uniform':\n # Note that the variable `knots` has already been validated and\n # `else` is therefore safe.\n # Disregard observations with zero weight.\n mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0\n x_min = np.amin(X[mask], axis=0)\n x_max = np.amax(X[mask], axis=0)\n\n knots = np.linspace(\n start=x_min,\n stop=x_max,\n num=n_knots,\n endpoint=True,\n dtype=np.float64,\n )\n\n return knots\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 491, "n_words": 101, "vocab_size": 72, "complexity": 5, "nloc": 26, "token_counts": 172, "n_ast_nodes": 259, "n_identifiers": 24, "random_cut": "def _get_base_knot_positions(X, n_knots=10, knots=\"uniform\", sample_weight=None):\n \n if knots == \"quantile\":\n percentiles = 100 * np.linspace(\n start=0, stop=1, num=n_knots, dtype=np.float64\n )\n\n if sample_weight is None:\n knots = np.percentile(X, percentiles, axis=0)\n else:\n knots = np.array(\n [\n _weighted_percentile(X, sample_weight, percentile)\n for percentile in percentiles\n ]\n )\n\n else:\n # knots == 'uniform':\n # Note that the variable `knots` has 
already been validated an", "d_id": 75544, "documentation": { "docstring": "Calculate base knot positions.\n\n Base knots such that first knot <= feature <= last knot. For the\n B-spline construction with scipy.interpolate.BSpline, 2*degree knots\n beyond the base interval are added.\n\n Returns\n -------\n knots : ndarray of shape (n_knots, n_features), dtype=np.float64\n Knot positions (points) of base interval.\n ", "n_words": 45, "vocab_size": 37, "n_whitespaces": 105, "language": "en" } }, { "id": 273161, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/preprocessing/index_lookup.py", "file_name": "index_lookup.py", "fun_name": "_num_tokens", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _num_tokens(self, data):\n \n if tf_utils.is_sparse(data):\n flat_values = data.values\n elif tf_utils.is_ragged(data):\n flat_values = data.flat_values\n else:\n flat_values = tf.reshape(data, [-1])\n tokens, _, counts = tf.unique_with_counts(flat_values, out_idx=tf.int64)\n return tokens, counts\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 102, "n_words": 27, "vocab_size": 20, "complexity": 3, "nloc": 9, "token_counts": 71, "n_ast_nodes": 113, "n_identifiers": 16, "random_cut": "def _num_tokens(self, data):\n ", "d_id": 81090, "documentation": { "docstring": "Count the number of tokens in a ragged, sparse or dense tensor.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 303762, "commit_id": "ebbff7b60e43f17d65ead811d314602b9daddfc4", "repo": "core", "path": "tests/components/awair/conftest.py", "file_name": "conftest.py", "fun_name": "no_devicess_fixture", "commit_message": "Add Awair Local API support (#75535)", "code": "def no_devicess_fixture():\n \n return json.loads(load_fixture(\"awair/no_devices.json\"))\n\n\n@pytest.fixture(name=\"awair_offline\", scope=\"session\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"awair_offline\", scope=\"session\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def no_devicess_fixture():\n \n return jso", "d_id": 102571, "documentation": { "docstring": "Fixture representing when no devices are found in Awair's cloud API.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 166944, "commit_id": "89be1f053b695c4ce1c0569f737caf3f03c12128", "repo": "pandas", "path": "pandas/tests/arrays/boolean/test_reduction.py", "file_name": "test_reduction.py", "fun_name": "data", "commit_message": "DOC: Added docstrings to fixtures defined in array module (#47211)", "code": "def data():\n \n return pd.array(\n [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],\n dtype=\"boolean\",\n )\n\n\n@pytest.mark.parametrize(\n \"values, exp_any, exp_all, exp_any_noskip, exp_all_noskip\",\n [\n ([True, pd.NA], True, True, True, pd.NA),\n ([False, pd.NA], False, False, pd.NA, False),\n ([pd.NA], False, True, pd.NA, pd.NA),\n ([], False, True, False, True),\n # GH-33253: all True / all False values buggy with skipna=False\n ([True, True], True, True, True, True),\n ([False, False], False, False, False, False),\n ],\n)", "url": "https://github.com/pandas-dev/pandas.git", 
"language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"values, exp_any, exp_all, exp_any_noskip, exp_all_noskip\",\n [\n ([True, pd.NA], True, True, True, pd.NA),\n ([False, pd.NA], False, False, pd.NA, False),\n ([pd.NA], False, True, pd.NA, pd.NA),\n ([], False, True, False, True),\n # GH-33253: all True / all False values buggy with skipna=False\n ([True, True], True, True, True, True),\n ([False, False], False, False, False, False),\n ],\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 155, "n_words": 76, "vocab_size": 44, "complexity": 1, "nloc": 5, "token_counts": 49, "n_ast_nodes": 223, "n_identifiers": 10, "random_cut": "def data():\n \n return pd.array(\n [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],\n dtyp", "d_id": 39876, "documentation": { "docstring": "Fixture returning boolean array, with valid and missing values.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 101038, "commit_id": "7b9fc0454d982a2425ec44e90e5b05a87d149953", "repo": "faceswap", "path": "scripts/train.py", "file_name": "train.py", "fun_name": "should_toggle_mask", "commit_message": "Live Preview - Replace cv2 with matplotlib viewer", "code": "def should_toggle_mask(self) -> bool:\n \n with self._lock:\n retval = self._toggle_mask\n if retval:\n logger.debug(\"Sending toggle mask\")\n self._toggle_mask = False\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 92, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 14, "token_counts": 34, "n_ast_nodes": 62, "n_identifiers": 8, "random_cut": "def should_toggle_mask(self) -> bool:\n \n with self._lock:\n retval = self._toggle_mask\n if retval:\n logger.debug(\"Sending toggle mask\")\n self._toggle_mask = False\n ", "d_id": 20477, "documentation": { "docstring": " Check whether the mask should be toggled and return the value. If ``True`` is returned\n then resets :attr:`_toggle_mask` back to ``False``\n\n Returns\n -------\n bool\n ``True`` if the mask should be toggled otherwise ``False``. ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 73, "language": "en" } }, { "id": 259877, "commit_id": "a47d569e670fd4102af37c3165c9b1ddf6fd3005", "repo": "scikit-learn", "path": "examples/linear_model/plot_tweedie_regression_insurance_claims.py", "file_name": "plot_tweedie_regression_insurance_claims.py", "fun_name": "load_mtpl2", "commit_message": "ENH improve ARFF parser using pandas (#21938)\n\nCo-authored-by: Thomas J. 
Fan \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Adrin Jalali ", "code": "def load_mtpl2(n_samples=100000):\n \n # freMTPL2freq dataset from https://www.openml.org/d/41214\n df_freq = fetch_openml(data_id=41214, as_frame=True, parser=\"pandas\").data\n df_freq[\"IDpol\"] = df_freq[\"IDpol\"].astype(int)\n df_freq.set_index(\"IDpol\", inplace=True)\n\n # freMTPL2sev dataset from https://www.openml.org/d/41215\n df_sev = fetch_openml(data_id=41215, as_frame=True, parser=\"pandas\").data\n\n # sum ClaimAmount over identical IDs\n df_sev = df_sev.groupby(\"IDpol\").sum()\n\n df = df_freq.join(df_sev, how=\"left\")\n df[\"ClaimAmount\"].fillna(0, inplace=True)\n\n # unquote string fields\n for column_name in df.columns[df.dtypes.values == object]:\n df[column_name] = df[column_name].str.strip(\"'\")\n return df.iloc[:n_samples]\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 106, "n_words": 57, "vocab_size": 43, "complexity": 2, "nloc": 11, "token_counts": 145, "n_ast_nodes": 242, "n_identifiers": 27, "random_cut": "def load_mtpl2(n_samples=100000):\n \n # freMTPL2freq dataset ", "d_id": 75964, "documentation": { "docstring": "Fetch the French Motor Third-Party Liability Claims dataset.\n\n Parameters\n ----------\n n_samples: int, default=100000\n number of samples to select (for faster run time). Full dataset has\n 678013 samples.\n ", "n_words": 27, "vocab_size": 27, "n_whitespaces": 49, "language": "en" } }, { "id": 100579, "commit_id": "bdbbad4d310fb606b6f412aa81e9f57ccd994e97", "repo": "faceswap", "path": "lib/gpu_stats/nvidia.py", "file_name": "nvidia.py", "fun_name": "_get_driver", "commit_message": "Refactor lib.gpu_stats (#1218)\n\n* inital gpu_stats refactor\r\n\r\n* Add dummy CPU Backend\r\n\r\n* Update Sphinx documentation", "code": "def _get_driver(self) -> str:\n \n try:\n driver = pynvml.nvmlSystemGetDriverVersion().decode(\"utf-8\")\n except pynvml.NVMLError as err:\n self._log(\"debug\", f\"Unable to obtain driver. Original error: {str(err)}\")\n driver = \"No Nvidia driver found\"\n self._log(\"debug\", f\"GPU Driver: {driver}\")\n return driver\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 100, "n_words": 32, "vocab_size": 27, "complexity": 2, "nloc": 15, "token_counts": 52, "n_ast_nodes": 109, "n_identifiers": 10, "random_cut": "def _get_driver(self) -> str:\n \n try:\n driver = pynvml.nvmlSystemGetDriverVersion().decode(\"utf-8\")\n except pynvml.NVMLError as err:\n self._log(\"debug\", f\"Unable to obtain driver. 
Original error: {str(err)}\")\n driver = \"No Nvidia driver found\"\n self._log(\"debug\", f\"GPU Driver: {driver}\")\n return driver\n", "d_id": 20043, "documentation": { "docstring": " Obtain the Nvidia driver version currently in use.\n\n Returns\n -------\n str\n The current GPU driver version\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 56, "language": "en" } }, { "id": 204734, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/serializers/__init__.py", "file_name": "__init__.py", "fun_name": "_load_serializers", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _load_serializers():\n \n global _serializers\n serializers = {}\n for format in BUILTIN_SERIALIZERS:\n register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)\n if hasattr(settings, \"SERIALIZATION_MODULES\"):\n for format in settings.SERIALIZATION_MODULES:\n register_serializer(\n format, settings.SERIALIZATION_MODULES[format], serializers\n )\n _serializers = serializers\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 98, "n_words": 29, "vocab_size": 22, "complexity": 4, "nloc": 11, "token_counts": 58, "n_ast_nodes": 91, "n_identifiers": 9, "random_cut": "def _load_serializers():\n \n global _serializers\n serializers = {}\n for forma", "d_id": 50862, "documentation": { "docstring": "\n Register built-in and settings-defined serializers. This is done lazily so\n that user code has a chance to (e.g.) set up custom settings without\n needing to be careful of import order.\n ", "n_words": 30, "vocab_size": 29, "n_whitespaces": 43, "language": "en" } }, { "id": 213075, "commit_id": "a5db070f446b7cfebdaa6ad2e3dcf78f6105a272", "repo": "serverless-application-model", "path": "samtranslator/utils/py27hash_fix.py", "file_name": "py27hash_fix.py", "fun_name": "pop", "commit_message": "fix: Py27hash fix (#2182)\n\n* Add third party py27hash code\r\n\r\n* Add Py27UniStr and unit tests\r\n\r\n* Add py27hash_fix utils and tests\r\n\r\n* Add to_py27_compatible_template and tests\r\n\r\n* Apply py27hash fix to wherever it is needed\r\n\r\n* Apply py27hash fix, all tests pass except api_with_any_method_in_swagger\r\n\r\n* apply py27hash fix in openapi + run black\r\n\r\n* remove py27 testing\r\n\r\n* remove other py27 references\r\n\r\n* black fixes\r\n\r\n* fixes/typos\r\n\r\n* remove py27 from tox.ini\r\n\r\n* refactoring\r\n\r\n* third party notice\r\n\r\n* black\r\n\r\n* Fix py27hash fix to deal with null events\r\n\r\n* Fix Py27UniStr repr for unicode literals\r\n\r\n* black reformat\r\n\r\n* Update _template_has_api_resource to check data type more defensively\r\n\r\n* Apply py27Dict in _get_authorizers\r\n\r\n* Apply Py27Dict to authorizers and gateway responses which will go into swagger\r\n\r\n* Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class\r\n\r\n* Rename _convert_to_py27_dict to _convert_to_py27_type\r\n\r\n* Apply Py27UniStr to path param name\r\n\r\n* Handle HttpApi resource under to_py27_compatible_template\r\n\r\n* Fix InvalidDocumentException to not sort different exceptions\r\n\r\n* black reformat\r\n\r\n* Remove unnecessary test files\r\n\r\nCo-authored-by: Wing Fung Lau <4760060+hawflau@users.noreply.github.com>", "code": "def pop(self):\n \n if self.keyorder:\n value = self.keys()[0]\n self.remove(value)\n return value\n return None\n\n", "url": 
"https://github.com/aws/serverless-application-model.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 12, "vocab_size": 10, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 53, "n_identifiers": 6, "random_cut": "def pop(self):\n \n if self.keyorder:\n value = self.keys()[0]\n self.remove(value)\n return value\n return N", "d_id": 53626, "documentation": { "docstring": "\n Pops the top element from the sorted keys if it exists. Returns None otherwise.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 173550, "commit_id": "52828dc160781f422e670d414406ffe91c30066b", "repo": "magenta", "path": "magenta/models/onsets_frames_transcription/infer_util.py", "file_name": "infer_util.py", "fun_name": "probs_to_pianoroll_viterbi", "commit_message": "[NumPy] Remove references to deprecated NumPy type aliases.\n\nThis change replaces references to a number of deprecated NumPy type aliases (np.bool, np.int, np.float, np.complex, np.object, np.str) with their recommended replacement (bool, int, float, complex, object, str).\n\nNumPy 1.24 drops the deprecated aliases, so we must remove uses before updating NumPy.\n\nPiperOrigin-RevId: 497026048", "code": "def probs_to_pianoroll_viterbi(frame_probs, onset_probs, alpha=0.5):\n \n n, d = onset_probs.shape\n\n loss_matrix = np.zeros([n, d, 2], dtype=float)\n path_matrix = np.zeros([n, d, 2], dtype=bool)\n\n frame_losses = (1 - alpha) * -np.log(np.stack([1 - frame_probs,\n frame_probs], axis=-1))\n onset_losses = alpha * -np.log(np.stack([1 - onset_probs,\n onset_probs], axis=-1))\n\n loss_matrix[0, :, :] = frame_losses[0, :, :] + onset_losses[0, :, :]\n\n for i in range(1, n):\n transition_loss = np.tile(loss_matrix[i - 1, :, :][:, :, np.newaxis],\n [1, 1, 2])\n\n transition_loss[:, 0, 0] += onset_losses[i, :, 0]\n transition_loss[:, 0, 1] += onset_losses[i, :, 1]\n transition_loss[:, 1, 0] += onset_losses[i, :, 0]\n transition_loss[:, 1, 1] += onset_losses[i, :, 0]\n\n path_matrix[i, :, :] = np.argmin(transition_loss, axis=1)\n\n loss_matrix[i, :, 0] = transition_loss[\n np.arange(d), path_matrix[i, :, 0].astype(int), 0]\n loss_matrix[i, :, 1] = transition_loss[\n np.arange(d), path_matrix[i, :, 1].astype(int), 1]\n\n loss_matrix[i, :, :] += frame_losses[i, :, :]\n\n pianoroll = np.zeros([n, d], dtype=bool)\n pianoroll[n - 1, :] = np.argmin(loss_matrix[n - 1, :, :], axis=-1)\n for i in range(n - 2, -1, -1):\n pianoroll[i, :] = path_matrix[\n i + 1, np.arange(d), pianoroll[i + 1, :].astype(int)]\n\n return pianoroll\n\n", "url": "https://github.com/magenta/magenta.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 349, "n_words": 167, "vocab_size": 75, "complexity": 3, "nloc": 28, "token_counts": 454, "n_ast_nodes": 649, "n_identifiers": 29, "random_cut": "def probs_to_pianoroll_viterbi(frame_probs, onset_probs, alpha=0.5):\n \n n, d = onset_probs.shape\n\n loss_matrix = np.zeros([n, d, 2], dtype=float)\n path_matrix = np.zeros([n, d, 2], dtype=bool)\n\n frame_losses = (1 - alpha) * -np.log(np.stack([1 - frame_probs,\n frame_probs], axis=-1))\n onset_losses = alpha * -np.log(np.stack([1 - onset_probs,\n onset_probs], axis=-1))\n\n loss_matrix[0, :, :] = frame_losses[0, :, :] + onset_losses[0, :, :]\n\n for i in range(1, n):\n transition_loss = np.tile(loss_matrix[i - 1, :, :][:, :, np.newaxis],\n [1, 1, 2])\n\n transition_loss[:, 0, 0] += onset_losses[i, :, 0]\n transition_loss[:, 0, 1] += 
onset_losses[i, :, 1]\n transition_loss[:, 1, 0] += onset_losses[i, :, 0]\n transition_loss[:, 1, 1] += onset_losses[i, :, 0]\n\n path_matrix[i, :, :] = np.argmin(transition_loss, axis=1)\n\n loss_matrix[i, :, 0] = transition_loss[\n np.arange(d), path_matrix[i, :, 0].astype(int), 0]\n loss_matrix[i, :, 1] = transition_loss[\n np.arange(d), path_matrix[i, :, 1].astype(int), 1]\n\n loss_matrix[i, :, :] += frame_losses[i, :, :]\n\n pianoroll = np.zeros([n, d], dtype=bool)\n pianoroll[n - 1, :] = np.argmin(loss_matrix[n - 1, :, :], axis=-1)\n for i in range(n - 2, -1, -1):\n pianoroll[i, :] = path_matrix[\n i + 1, np.arange(d), pi", "d_id": 40862, "documentation": { "docstring": "Viterbi decoding of frame & onset probabilities to pianoroll.\n\n Args:\n frame_probs: A numpy array (num-frames-by-num-pitches) of frame\n probabilities.\n onset_probs: A numpy array (num-frames-by-num-pitches) of onset\n probabilities.\n alpha: Relative weight of onset and frame loss, a float between 0 and 1.\n With alpha = 0, onset probabilities will be ignored. With alpha = 1, frame\n probabilities will be ignored.\n\n Returns:\n A numpy array (num-frames-by-num-pitches) representing the boolean-valued\n pianoroll.\n ", "n_words": 67, "vocab_size": 39, "n_whitespaces": 105, "language": "en" } }, { "id": 22040, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/_internal_utils.py", "file_name": "_internal_utils.py", "fun_name": "to_native_string", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def to_native_string(string, encoding=\"ascii\"):\n \n if isinstance(string, builtin_str):\n out = string\n else:\n out = string.decode(encoding)\n\n return out\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 41, "n_words": 15, "vocab_size": 12, "complexity": 2, "nloc": 6, "token_counts": 33, "n_ast_nodes": 57, "n_identifiers": 7, "random_cut": "def to_native_string(string, encoding=\"ascii\"):\n \n if isinstance(string, builtin_str):\n out = string\n else:\n out = string.decode(encodin", "d_id": 4129, "documentation": { "docstring": "Given a string object, regardless of type, returns a representation of\n that string in the native string type, encoding and decoding where\n necessary. 
This assumes ASCII unless told otherwise.\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 38, "language": "en" } }, { "id": 197177, "commit_id": "cddb6451ed54ab1f84cffb5313cbff709bbaf8e5", "repo": "sympy", "path": "sympy/parsing/mathematica.py", "file_name": "mathematica.py", "fun_name": "mathematica", "commit_message": "Adapt to new deprecation policy", "code": "def mathematica(s, additional_translations=None):\n \n parser = MathematicaParser(additional_translations)\n\n if additional_translations is not None:\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.11\",\n active_deprecations_target=\"mathematica-parser-additional-translations\",\n )\n return sympify(parser._parse_old(s))\n\n return parser.parse(s)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 86, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 11, "token_counts": 52, "n_ast_nodes": 88, "n_identifiers": 11, "random_cut": "def mathematica(s, additional_translations=None):\n \n parser = MathematicaParser(additional_translations)\n\n if additional_translations", "d_id": 48365, "documentation": { "docstring": "\n Translate a string containing a Wolfram Mathematica expression to a SymPy\n expression.\n\n If the translator is unable to find a suitable SymPy expression, the\n ``FullForm`` of the Mathematica expression will be output, using SymPy\n ``Function`` objects as nodes of the syntax tree.\n\n Examples\n ========\n\n >>> from sympy.parsing.mathematica import mathematica\n >>> mathematica(\"Sin[x]^2 Tan[y]\")\n sin(x)**2*tan(y)\n >>> e = mathematica(\"F[7,5,3]\")\n >>> e\n F(7, 5, 3)\n >>> from sympy import Function, Max, Min\n >>> e.replace(Function(\"F\"), lambda *x: Max(*x)*Min(*x))\n 21\n\n Both standard input form and Mathematica full form are supported:\n\n >>> mathematica(\"x*(a + b)\")\n x*(a + b)\n >>> mathematica(\"Times[x, Plus[a, b]]\")\n x*(a + b)\n\n To get a matrix from Wolfram's code:\n\n >>> m = mathematica(\"{{a, b}, {c, d}}\")\n >>> m\n ((a, b), (c, d))\n >>> from sympy import Matrix\n >>> Matrix(m)\n Matrix([\n [a, b],\n [c, d]])\n\n If the translation into equivalent SymPy expressions fails, an SymPy\n expression equivalent to Wolfram Mathematica's \"FullForm\" will be created:\n\n >>> mathematica(\"x_.\")\n Optional(Pattern(x, Blank()))\n >>> mathematica(\"Plus @@ {x, y, z}\")\n Apply(Plus, (x, y, z))\n >>> mathematica(\"f[x_, 3] := x^3 /; x > 0\")\n SetDelayed(f(Pattern(x, Blank()), 3), Condition(x**3, x > 0))\n The ``additional_translations`` parameter for the Mathematica parser is now deprecated.\nUse SymPy's .replace( ) or .subs( ) methods on the output expression instead.", "n_words": 203, "vocab_size": 142, "n_whitespaces": 319, "language": "en" } }, { "id": 194779, "commit_id": "81f722d29045a7a5841d0931a082ded1d1f13863", "repo": "ParlAI", "path": "parlai/scripts/generate_model_card.py", "file_name": "generate_model_card.py", "fun_name": "evaluation", "commit_message": "autoformat (#4378)", "code": "def evaluation(self):\n \n # adding info about the eval tasks\n if self.eval_tasks == self.train_tasks:\n msg = \"For evalution, we used the same training datasets; check the [Datasets Used](#datasets-used) section for more information\"\n eval_list = ''\n else:\n msg = f\"This model was evaluated on the datasets below (use the `parlai display_data` commands to show data). 
Visit the {make_link('task (dataset) list', task_site)} for more details about the datasets.\\n\"\n eval_list = get_dataset_info(self.eval_tasks)\n eval_list = '\\n' + '\\n'.join(eval_list)\n content = [msg + eval_list]\n\n # validation metric info: getting metric name and description\n splitted = re.sub(r'_+', ' ', self.valid_metric).split()\n key = splitted[-1]\n if extra_metric_info.get(key):\n mname, description = extra_metric_info[key]\n elif METRICS_DISPLAY_DATA.get(key):\n mname = METRICS_DISPLAY_DATA[key].title\n description = METRICS_DISPLAY_DATA[key].description\n else:\n description, mname = (None, None)\n\n # adding description for validation metric and re-wording it:\n msg = f\"\\n\\nWe used the metric {metric_format(self.valid_metric)}\"\n if len(splitted) == 3 and splitted[0] == 'class' and mname:\n msg += f\", the {mname.lower()} scores for the class {splitted[1]}\"\n content.append(msg + ' as the validation metric. ')\n if description:\n description = description[0].lower() + description[1:]\n content[-1] += f\"Recall that `{self.valid_metric}` is {description}.\"\n\n # evaluation table\n # getting list of subtasks and making columns\n eval_tasks = self.eval_tasks\n if len(self.eval_tasks) > 1:\n eval_tasks.insert(0, 'All')\n columns = [' '] + [taskname(subtask) for subtask in eval_tasks]\n # only one row: validation\n row = [metric_format(self.valid_metric)]\n for subtask in eval_tasks:\n # creating the key to get metric and formatting\n pre = '' if subtask == 'All' or len(eval_tasks) == 1 else subtask + '/'\n key = pre + self.valid_metric\n fmt = '{:.4f}' if self.valid_metric in not_percent else '{:.2%}'\n row.append(fmt.format(self.eval_results[key]))\n return '\\n'.join(content) + '\\n\\n' + '\\n'.join(make_md_table([row], columns))\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 635, "n_words": 262, "vocab_size": 159, "complexity": 14, "nloc": 36, "token_counts": 318, "n_ast_nodes": 605, "n_identifiers": 38, "random_cut": "def evaluation(self):\n \n # adding info about the eval tasks\n if self.eval_tasks == self.train_tasks:\n msg = \"For evalution, we used the same training datasets; check the [Datasets Used](#datasets-used) section for more information\"\n eval_list = ''\n else:\n msg = f\"This model was evaluated on the datasets below (use the `parlai display_data` commands to show data). Visit the {make_link('task (dataset) list', task_site)} for more details about the datasets.\\n\"\n eval_list = get_dataset_info(self.eval_tasks)\n eval_list = '\\n' + '\\n'.join(eval_list)\n content = [msg + eval_list]\n\n # validation metric info: getting metric name and description\n splitted = re.sub(r'_+', ' ', self.valid_metric).split()\n key = splitted[-1]\n if extra_metric_info.get(key):\n mname, description = extra_metric_info[key]\n elif METRICS_DISPLAY_DATA.get(key):\n mname = METRICS_DISPLAY_DATA[key].title\n description = METRICS_DISPLAY_DATA[key].description\n else:\n description, mname = (None, None)\n\n # adding description for validation metric and re-wording it:\n msg = f\"\\n\\nWe used the metric {metric_format(self.valid_metric)}\"\n if len(splitted) == 3 and splitted[0] == 'class' and mname:\n msg += f\", the {mname.lower()} scores for the class {splitted[1]}\"\n content.append(msg + ' as the validation metric. 
')\n if description:\n description = description[0].lower() + description[1:]\n content[-1] += f\"Recall that `{self.valid_metric}` is {description}.\"\n\n # evaluation table\n # getting list of subtasks and making columns\n eval_tasks = self.eval_tasks\n if len(self.eval_tasks) > 1:\n ", "d_id": 47072, "documentation": { "docstring": "\n returns a section with dataset info about the eval tasks if they exist,\n information about the validation metric if it exists, and create a table with\n the validation metric.\n ", "n_words": 29, "vocab_size": 22, "n_whitespaces": 58, "language": "en" } }, { "id": 272207, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/integration_test/gradient_checkpoint_test.py", "file_name": "gradient_checkpoint_test.py", "fun_name": "_train_with_recompute", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _train_with_recompute(n_steps):\n \n img_dim, n_channels, batch_size = 256, 1, 4\n x, y = _get_dummy_data(img_dim, n_channels, batch_size)\n # This model is the same model as _get_big_cnn_model but split into 3 parts.\n models = _get_split_cnn_model(\n img_dim, n_channels, num_partitions=3, blocks_per_partition=2\n )\n model1, model2, model3 = models\n # Apply gradient checkpointing to the submodels using tf.recompute_grad.\n model1_re = tf.recompute_grad(model1)\n model2_re = tf.recompute_grad(model2)\n model3_re = tf.recompute_grad(model3)\n optimizer = optimizers.SGD()\n tr_vars = (\n model1.trainable_variables\n + model2.trainable_variables\n + model3.trainable_variables\n )\n losses = []\n for _ in range(n_steps):\n with tf.GradientTape() as tape:\n logits1 = model1_re(x)\n logits2 = model2_re(logits1)\n logits3 = model3_re(logits2)\n loss = _compute_loss(logits3, y)\n losses.append(loss)\n grads = tape.gradient(loss, tr_vars) # tr_vars\n optimizer.apply_gradients(zip(grads, tr_vars))\n del grads\n return losses\n\n\n@tf_test_utils.with_eager_op_as_function", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@tf_test_utils.with_eager_op_as_function", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 284, "n_words": 110, "vocab_size": 82, "complexity": 2, "nloc": 28, "token_counts": 176, "n_ast_nodes": 288, "n_identifiers": 42, "random_cut": "def _train_with_recompute(n_steps):\n \n img_dim, n_channels, batch_size = 256, 1, 4\n x, y = _get_dummy_data(img_dim, n_channels, batch_size)\n # This model is the same model as _get_big_cnn_model but split into 3 parts.\n models = _get_split_cnn_model(\n img_dim, n_channels, num_partitions=3, blocks_per_partition=2\n )\n model1, model2, model3 = models\n # Appl", "d_id": 80977, "documentation": { "docstring": "Trains a single large model with gradient checkpointing using tf.recompute_grad.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 134124, "commit_id": "fc9f8e458c4dad7a51e0d781917b1a003cb55cd7", "repo": "ray", "path": "python/ray/tune/tests/test_syncer_callback.py", "file_name": "test_syncer_callback.py", "fun_name": "test_syncer_callback_dead_node_log_error", "commit_message": "[Tune] Catch SyncerCallback failure with dead node (#29438)\n\n### Context\r\nThis issue was uncovered by this long running test: `long_running_distributed_pytorch_pbt_failure`. This test randomly kills nodes via `FailureInjectorCallback`, and the test failure happens when:\r\n\r\n1. A trial result comes in and is processed\r\n2. 
The node this trial is running on is requested to be killed by the failure injector\r\n3. The driver's syncer callback runs on the on_trial_result event\r\n4. The node dies\r\n5. The driver is in the middle of syncing, trying to access the node ip, which errors\r\n\r\n### What's in this PR?\r\n1. Gracefully handle this race condition by catching the error thrown by the sync operation on a dead node\r\n2. Log an error to the user\r\n3. Adds a test for this sync with dead node scenario\r\n\r\nSigned-off-by: Justin Yu ", "code": "def test_syncer_callback_dead_node_log_error(caplog, ray_start_2_cpus, temp_data_dirs):\n \n caplog.set_level(logging.ERROR, logger=\"ray.tune.syncer\")\n\n tmp_source, tmp_target = temp_data_dirs\n\n syncer_callback = TestSyncerCallback(\n sync_period=0,\n local_logdir_override=tmp_target,\n )\n\n trial1 = MockTrial(trial_id=\"a\", logdir=tmp_source, on_dead_node=True)\n\n syncer_callback.on_trial_result(iteration=1, trials=[], trial=trial1, result={})\n\n assert (\n \"An error occurred when trying to get the node ip where this trial is running\"\n in caplog.text\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 100, "n_words": 45, "vocab_size": 42, "complexity": 1, "nloc": 13, "token_counts": 86, "n_ast_nodes": 135, "n_identifiers": 25, "random_cut": "def test_syncer_callback_dead_node_log_error(caplog, ray_start_2_cpus, temp_data_dirs):\n \n caplog.set_level(logging.ERROR, logger=\"ray.tune.syncer\")\n\n tmp_source, tmp_target = temp_data_dirs\n\n syncer_callback = TestSyncerCallback(\n sync_period=0,\n local_logdir_override=tmp_target,\n )\n\n trial1 = MockTrial(trial_id=\"a\", logdir=tmp_source, on_dead_node=True)\n\n syncer_callback.on_trial_result(iteration=1, trials=[], trial=trial1, result={})\n\n assert (\n \"An erro", "d_id": 30200, "documentation": { "docstring": "Check that we catch + log errors when trying syncing with a dead remote node.", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 177335, "commit_id": "8a325d26aa7fdd3a72580c4720fa97f971bbefcb", "repo": "networkx", "path": "networkx/linalg/modularitymatrix.py", "file_name": "modularitymatrix.py", "fun_name": "directed_modularity_matrix", "commit_message": "Use scipy.sparse array datastructure (#6037)\n\n* Use scipy.sparse array datastructure\r\n\r\n* Add reminder to rm wrapper when scipy adds creation fns.\r\n\r\n* Rm mention of np matrix from code comment.\r\n\r\n* Update networkx/algorithms/bipartite/matrix.py\r\n\r\nCo-authored-by: Stefan van der Walt \r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Stefan van der Walt ", "code": "def directed_modularity_matrix(G, nodelist=None, weight=None):\n \n import numpy as np\n\n if nodelist is None:\n nodelist = list(G)\n A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=\"csr\")\n k_in = A.sum(axis=0)\n k_out = A.sum(axis=1)\n m = k_in.sum()\n # Expected adjacency matrix\n X = np.outer(k_out, k_in) / m\n\n return A - X\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 81, "n_words": 44, "vocab_size": 35, "complexity": 2, "nloc": 10, "token_counts": 92, "n_ast_nodes": 147, "n_identifiers": 18, "random_cut": "def directed_modularity_matrix(G, nodelist=None, weight=None):\n \n import numpy as np\n\n if nodelist is None:\n nodelist = list(G)\n ", "d_id": 42354, "documentation": { 
"docstring": "Returns the directed modularity matrix of G.\n\n The modularity matrix is the matrix B = A - , where A is the adjacency\n matrix and is the expected adjacency matrix, assuming that the graph\n is described by the configuration model.\n\n More specifically, the element B_ij of B is defined as\n\n .. math::\n B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m\n\n where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree\n of node j, with m the number of edges in the graph. When weight is set\n to a name of an attribute edge, Aij, k_i, k_j and m are computed using\n its value.\n\n Parameters\n ----------\n G : DiGraph\n A NetworkX DiGraph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default=None)\n The edge attribute that holds the numerical value used for\n the edge weight. If None then all edge weights are 1.\n\n Returns\n -------\n B : Numpy array\n The modularity matrix of G.\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_edges_from(\n ... (\n ... (1, 2),\n ... (1, 3),\n ... (3, 1),\n ... (3, 2),\n ... (3, 5),\n ... (4, 5),\n ... (4, 6),\n ... (5, 4),\n ... (5, 6),\n ... (6, 4),\n ... )\n ... )\n >>> B = nx.directed_modularity_matrix(G)\n\n\n Notes\n -----\n NetworkX defines the element A_ij of the adjacency matrix as 1 if there\n is a link going from node i to node j. Leicht and Newman use the opposite\n definition. This explains the different expression for B_ij.\n\n See Also\n --------\n to_numpy_array\n modularity_spectrum\n adjacency_matrix\n modularity_matrix\n\n References\n ----------\n .. [1] E. A. Leicht, M. E. J. Newman,\n \"Community structure in directed networks\",\n Phys. Rev Lett., vol. 100, no. 11, p. 
118703, 2008.\n ", "n_words": 303, "vocab_size": 177, "n_whitespaces": 598, "language": "en" } }, { "id": 155706, "commit_id": "d98c1dd63e0d7f6a003e3ff70eca796c19b81d42", "repo": "dask", "path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "fun_name": "project_columns", "commit_message": "Use map_partitions (Blockwise) in to_parquet (#8487)", "code": "def project_columns(self, columns):\n \n if columns == self.columns:\n return self\n return ParquetFunctionWrapper(\n self.engine,\n self.fs,\n self.meta,\n columns,\n self.index,\n None, # Already merged into common_kwargs\n self.common_kwargs,\n )\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 141, "n_words": 24, "vocab_size": 23, "complexity": 2, "nloc": 12, "token_counts": 45, "n_ast_nodes": 66, "n_identifiers": 9, "random_cut": "def project_columns(self, columns):\n \n if columns == se", "d_id": 36452, "documentation": { "docstring": "Return a new ParquetFunctionWrapper object\n with a sub-column projection.\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 198198, "commit_id": "a69c49bec6caf2cb460dc4eedf0fec184db92f0e", "repo": "sympy", "path": "sympy/tensor/array/expressions/array_expressions.py", "file_name": "array_expressions.py", "fun_name": "sort_args_by_name", "commit_message": "Rename files for array expression conversions in order to avoid naming conflicts in TAB-completion of the corresponding functions", "code": "def sort_args_by_name(self):\n \n expr = self.expr\n if not isinstance(expr, ArrayTensorProduct):\n return self\n args = expr.args\n sorted_data = sorted(enumerate(args), key=lambda x: default_sort_key(x[1]))\n pos_sorted, args_sorted = zip(*sorted_data)\n reordering_map = {i: pos_sorted.index(i) for i, arg in enumerate(args)}\n contraction_tuples = self._get_contraction_tuples()\n contraction_tuples = [[(reordering_map[j], k) for j, k in i] for i in contraction_tuples]\n c_tp = _array_tensor_product(*args_sorted)\n new_contr_indices = self._contraction_tuples_to_contraction_indices(\n c_tp,\n contraction_tuples\n )\n return _array_contraction(c_tp, *new_contr_indices)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 193, "n_words": 61, "vocab_size": 46, "complexity": 5, "nloc": 16, "token_counts": 135, "n_ast_nodes": 211, "n_identifiers": 28, "random_cut": "def sort_args_by_name(self):\n \n expr = self.expr\n if not isinstance(expr, ArrayTensorProduct):\n return self\n arg", "d_id": 48818, "documentation": { "docstring": "\n Sort arguments in the tensor product so that their order is lexicographical.\n\n Examples\n ========\n\n >>> from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array\n >>> from sympy import MatrixSymbol\n >>> from sympy.abc import N\n >>> A = MatrixSymbol(\"A\", N, N)\n >>> B = MatrixSymbol(\"B\", N, N)\n >>> C = MatrixSymbol(\"C\", N, N)\n >>> D = MatrixSymbol(\"D\", N, N)\n\n >>> cg = convert_matrix_to_array(C*D*A*B)\n >>> cg\n ArrayContraction(ArrayTensorProduct(A, D, C, B), (0, 3), (1, 6), (2, 5))\n >>> cg.sort_args_by_name()\n ArrayContraction(ArrayTensorProduct(A, D, B, C), (0, 3), (1, 4), (2, 7))\n ", "n_words": 81, "vocab_size": 51, "n_whitespaces": 194, "language": "en" } }, { "id": 133158, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/iter.py", "file_name": "iter.py", 
"fun_name": "gather_async", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def gather_async(self, batch_ms=0, num_async=1) -> \"LocalIterator[T]\":\n \n\n if num_async < 1:\n raise ValueError(\"queue depth must be positive\")\n if batch_ms < 0:\n raise ValueError(\"batch time must be positive\")\n\n # Forward reference to the returned iterator.\n local_iter = None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 93, "n_words": 36, "vocab_size": 30, "complexity": 3, "nloc": 33, "token_counts": 61, "n_ast_nodes": 70, "n_identifiers": 6, "random_cut": "def gather_async(self, batch_ms=0, num_async=1) -> \"LocalIterator[T]\":\n \n\n if num_async < 1:\n raise ValueError(\"queue depth must be positive\")\n if batch_ms < 0:\n", "d_id": 29944, "documentation": { "docstring": "Returns a local iterable for asynchronous iteration.\n\n New items will be fetched from the shards asynchronously as soon as\n the previous one is computed. Items arrive in non-deterministic order.\n\n Arguments:\n batch_ms (int): Batches items for batch_ms milliseconds\n on each shard before retrieving it.\n Increasing batch_ms increases latency but improves throughput.\n If this value is 0, then items are returned immediately.\n num_async (int): The max number of async requests in flight\n per actor. Increasing this improves the amount of pipeline\n parallelism in the iterator.\n\n Examples:\n >>> it = from_range(100, 1).gather_async()\n >>> next(it)\n ... 3\n >>> next(it)\n ... 0\n >>> next(it)\n ... 1\n ", "n_words": 101, "vocab_size": 77, "n_whitespaces": 310, "language": "en" } }, { "id": 146010, "commit_id": "b267be475863a66e9feedb2be5f0a30a2ed8c493", "repo": "ray", "path": "python/ray/ml/tests/test_checkpoints.py", "file_name": "test_checkpoints.py", "fun_name": "test_dict_checkpoint_dict", "commit_message": "[ml] Add Ray ML / AIR checkpoint implementation (#22691)\n\nThis PR splits up the changes in #22393 and introduces an implementation of the ML Checkpoint interface used by Ray Tune.\r\n\r\nThis means, the TuneCheckpoint class implements the to/from_[bytes|dict|directory|object_ref|uri] conversion functions, as well as more high-level functions to transition between the different TuneCheckpoint classes. It also includes test cases for Tune's main conversion modes, i.e. 
dict - intermediate - dict and fs - intermediate - fs.\r\n\r\nThese changes will be the basis for refactoring the tune interface to use TuneCheckpoint objects instead of TrialCheckpoints (externally) and instead of paths/objects (internally).", "code": "def test_dict_checkpoint_dict(self):\n \n checkpoint = self._prepare_dict_checkpoint()\n\n # Convert into dict checkpoint\n data_dict = checkpoint.to_dict()\n self.assertIsInstance(data_dict, dict)\n\n # Create from dict\n checkpoint = Checkpoint.from_dict(data_dict)\n self.assertTrue(checkpoint._data_dict)\n\n self._assert_dict_checkpoint(checkpoint)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 87, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 50, "n_ast_nodes": 87, "n_identifiers": 13, "random_cut": "def test_dict_checkpoint_dict(self):\n \n checkpoint = self._prepare_dict_checkpoint()\n\n # Convert into dict checkpoint\n data_dict = checkpoint.to_dict()\n self.assertIsInstance(data_dict, dict)\n\n # Create from dict\n checkpoint = Checkpoint.from_dict(da", "d_id": 33585, "documentation": { "docstring": "Test conversion from dict to dict checkpoint and back.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 117189, "commit_id": "7c02e15aa403a4ca1fa34489dd2df9136d6c961c", "repo": "mindsdb", "path": "tests/integration_tests/flows/test_company_independent.py", "file_name": "test_company_independent.py", "fun_name": "test_5_model", "commit_message": "Projects structure (#3532)\n\nProjects structure", "code": "def test_5_model(self):\n query = \n\n predict_query = \n\n for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:\n self.sql_via_http(\n query.format(char, char),\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.OK\n )\n\n response = self.sql_via_http(\n predict_query.format(char),\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.TABLE\n )\n self.assertTrue(len(response['data']), 1)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 196, "n_words": 29, "vocab_size": 24, "complexity": 2, "nloc": 23, "token_counts": 90, "n_ast_nodes": 142, "n_identifiers": 18, "random_cut": "def test_5_model(self):\n query = \n\n predict_query = \n\n for ", "d_id": 25918, "documentation": { "docstring": "\n CREATE MODEL mindsdb.model_{}\n FROM test_integration_{} (\n select * from test_data.home_rentals limit 50\n ) PREDICT rental_price\n USING join_learn_process=true, time_aim=5\n \n select * from mindsdb.model_{} where sqft = 100\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 112, "language": "en" } }, { "id": 242342, "commit_id": "9cdb0508b6cbd3a3061017760a5eab4d13c3924a", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "putpalette", "commit_message": "Attach RGBA palettes from putpalette() when suitable", "code": "def putpalette(self, data, rawmode=\"RGB\"):\n \n from . 
import ImagePalette\n\n if self.mode not in (\"L\", \"LA\", \"P\", \"PA\"):\n raise ValueError(\"illegal image mode\")\n if isinstance(data, ImagePalette.ImagePalette):\n palette = ImagePalette.raw(data.rawmode, data.palette)\n else:\n if not isinstance(data, bytes):\n data = bytes(data)\n palette = ImagePalette.raw(rawmode, data)\n self.mode = \"PA\" if \"A\" in self.mode else \"P\"\n self.palette = palette\n self.palette.mode = \"RGB\"\n self.load() # install new palette\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 182, "n_words": 59, "vocab_size": 43, "complexity": 5, "nloc": 14, "token_counts": 118, "n_ast_nodes": 204, "n_identifiers": 12, "random_cut": "def putpalette(self, data, rawmode=\"RGB\"):\n \n from . import ImagePalette\n\n if self.mode not in (\"L\", \"LA\", \"P\", \"PA\"):\n raise ValueError(\"illegal image mode\")\n if isinstance(data, ImagePalette.ImagePalette):\n palette = ImagePalette.raw(data.rawmode, data.palette)\n else:\n if not isinstance(data, bytes):\n data = bytes(data)\n palette = ImagePalette.raw(rawmode, data)\n self.mode = \"PA\" if \"A\" in self.mode else \"P\"\n self.palette = palette\n self.palette.mode =", "d_id": 69840, "documentation": { "docstring": "\n Attaches a palette to this image. The image must be a \"P\", \"PA\", \"L\"\n or \"LA\" image.\n\n The palette sequence must contain at most 256 colors, made up of one\n integer value for each channel in the raw mode.\n For example, if the raw mode is \"RGB\", then it can contain at most 768\n values, made up of red, green and blue values for the corresponding pixel\n index in the 256 colors.\n If the raw mode is \"RGBA\", then it can contain at most 1024 values,\n containing red, green, blue and alpha values.\n\n Alternatively, an 8-bit string may be used instead of an integer sequence.\n\n :param data: A palette sequence (either a list or a string).\n :param rawmode: The raw mode of the palette. Either \"RGB\", \"RGBA\", or a mode\n that can be transformed to \"RGB\" or \"RGBA\" (e.g. \"R\", \"BGR;15\", \"RGBA;L\").\n ", "n_words": 142, "vocab_size": 86, "n_whitespaces": 245, "language": "en" } }, { "id": 268055, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/timeout.py", "file_name": "timeout.py", "fun_name": "get_timeout", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def get_timeout() -> t.Optional[t.Dict[str, t.Any]]:\n \n if not os.path.exists(TIMEOUT_PATH):\n return None\n\n data = read_json_file(TIMEOUT_PATH)\n data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ')\n\n return data\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 7, "token_counts": 60, "n_ast_nodes": 100, "n_identifiers": 14, "random_cut": "def get_timeout() -> t.Optional[t.Dict[str, t.Any]]:\n \n if not os.path.exists(TIMEOUT_PATH):\n return None\n\n data = read_json_file(TIMEOUT_PATH)\n data['deadline'] = datetime.datetime.strptime(data['deadline']", "d_id": 79329, "documentation": { "docstring": "Return details about the currently set timeout, if any, otherwise return None.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 280504, "commit_id": "5a105aadbdc6fde2c2529280c4789864adbb81c7", "repo": "keras", "path": "keras/saving/experimental/saving_lib_test.py", "file_name": "saving_lib_test.py", "fun_name": "my_mean_squared_error", "commit_message": "Move new optimizer out of optimizer_experimental/ directory.\n\nPiperOrigin-RevId: 488998585", "code": "def my_mean_squared_error(y_true, y_pred):\n \n return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1)\n\n\nmodule_my_mean_squared_error = my_mean_squared_error\n\n\n@test_utils.run_v2_only", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@test_utils.run_v2_only", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 15, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 29, "n_ast_nodes": 58, "n_identifiers": 12, "random_cut": "def my_mean_squared_error(y_true, y_pred):\n \n return backend.mean(tf.math.squared_dif", "d_id": 83360, "documentation": { "docstring": "Identical to built-in `mean_squared_error`, added here as a custom\n\n func.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 100438, "commit_id": "aa39234538a8f83e6aa2b60b8275a570e8876ac2", "repo": "faceswap", "path": "lib/model/session.py", "file_name": "session.py", "fun_name": "_amd_predict_with_optimized_batchsizes", "commit_message": "Update all Keras Imports to be conditional (#1214)\n\n* Remove custom keras importer\r\n\r\n* first round keras imports fix\r\n\r\n* launcher.py: Remove KerasFinder references\r\n\r\n* 2nd round keras imports update (lib and extract)\r\n\r\n* 3rd round keras imports update (train)\r\n\r\n* remove KerasFinder from tests\r\n\r\n* 4th round keras imports update (tests)", "code": "def _amd_predict_with_optimized_batchsizes(self, feed, batch_size):\n \n if isinstance(feed, np.ndarray):\n feed = [feed]\n items = feed[0].shape[0]\n done_items = 0\n results = 
[]\n while done_items < items:\n if batch_size < 4: # Not much difference in BS < 4\n batch_size = 1\n batch_items = ((items - done_items) // batch_size) * batch_size\n if batch_items:\n pred_data = [x[done_items:done_items + batch_items] for x in feed]\n pred = self._model.predict(pred_data, batch_size=batch_size)\n done_items += batch_items\n results.append(pred)\n batch_size //= 2\n if isinstance(results[0], np.ndarray):\n return np.concatenate(results)\n return [np.concatenate(x) for x in zip(*results)]\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 278, "n_words": 80, "vocab_size": 56, "complexity": 8, "nloc": 19, "token_counts": 146, "n_ast_nodes": 235, "n_identifiers": 20, "random_cut": "def _amd_predict_with_optimized_batchsizes(self, feed, batch_size):\n \n if isinstance(feed, np.ndarray):\n feed = [feed]\n items = feed[0].shape[0]\n done_items = 0\n results = []\n while done_items < items:\n if batch_size < 4: # Not much difference in BS < 4\n batch_size = 1\n batch_items = ((items - done_items) // batch_size) * batch_size\n if batch_items:\n pred_data = [x[done_items:done_items + batch_items] for x in feed]\n pred = self._model.predict(pred_data, batch_size=batch_size)\n done_items += batch_items\n results.append(pred)\n batch_size //= 2\n if isinstance(results[0], np.ndarray):\n return np.concatenate(results)\n return ", "d_id": 19916, "documentation": { "docstring": " Minimizes the amount of kernels to be compiled when using the ``amd`` backend with\n varying batch sizes while trying to keep the batchsize as high as possible.\n\n Parameters\n ----------\n feed: numpy.ndarray or list\n The feed to be provided to the model as input. This should be a ``numpy.ndarray``\n for single inputs or a ``list`` of ``numpy.ndarray`` objects for multiple inputs.\n batch_size: int\n The upper batchsize to use.\n ", "n_words": 67, "vocab_size": 49, "n_whitespaces": 143, "language": "en" } }, { "id": 110304, "commit_id": "03a0b5ea238014ba87f74ef766928287726aa00a", "repo": "matplotlib", "path": "lib/matplotlib/patches.py", "file_name": "patches.py", "fun_name": "set_positions", "commit_message": "Doc: Fix grammar and spelling", "code": "def set_positions(self, posA, posB):\n \n if posA is not None:\n self._posA_posB[0] = posA\n if posB is not None:\n self._posA_posB[1] = posB\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 23, "vocab_size": 15, "complexity": 3, "nloc": 6, "token_counts": 43, "n_ast_nodes": 67, "n_identifiers": 6, "random_cut": "def set_positions(self, posA, posB):\n \n if posA is not None:\n ", "d_id": 24044, "documentation": { "docstring": "\n Set the start and end positions of the connecting path.\n\n Parameters\n ----------\n posA, posB : None, tuple\n (x, y) coordinates of arrow tail and arrow head respectively. 
If\n `None` use current value.\n ", "n_words": 32, "vocab_size": 28, "n_whitespaces": 90, "language": "en" } }, { "id": 86579, "commit_id": "c67c560f667e6fc7fee2c6d62ac3987ba54f89d5", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data", "commit_message": "feat(metrics): Standardize tests and fix overall flakiness [TET-437] (#39660)", "code": "def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data(self):\n \n for tag, value, numbers in (\n (\"transaction\", \"/foo/\", [10, 11, 12]),\n (\"transaction\", \"/bar/\", [4, 5, 6]),\n ):\n for subvalue in numbers:\n self.store_performance_metric(\n name=TransactionMRI.MEASUREMENTS_LCP.value,\n tags={tag: value},\n value=subvalue,\n )\n\n response = self.get_success_response(\n self.organization.slug,\n field=[\n f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n f\"count_unique({TransactionMetricKey.USER.value})\",\n ],\n statsPeriod=\"1h\",\n interval=\"1h\",\n groupBy=[\"project_id\", \"transaction\"],\n orderBy=f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n useCase=\"performance\",\n )\n groups = response.data[\"groups\"]\n assert len(groups) == 2\n\n expected = [\n (\"/bar/\", 5.0, 5),\n (\"/foo/\", 11.0, 1),\n ]\n for (expected_tag_value, expected_lcp_count, users), group in zip(expected, groups):\n # With orderBy, you only get totals:\n assert group[\"by\"] == {\"transaction\": expected_tag_value, \"project_id\": self.project.id}\n assert group[\"totals\"] == {\n f\"count_unique({TransactionMetricKey.USER.value})\": 0,\n f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\": expected_lcp_count,\n }\n assert group[\"series\"] == {\n f\"count_unique({TransactionMetricKey.USER.value})\": [0],\n f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\": [expected_lcp_count],\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 557, "n_words": 101, "vocab_size": 82, "complexity": 4, "nloc": 39, "token_counts": 239, "n_ast_nodes": 431, "n_identifiers": 34, "random_cut": "def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data(self):\n \n for tag, value, numbers in (\n (\"transaction\", \"/foo/\", [10, 11, 12]),\n (\"transaction\", \"/bar/\", [4, 5, 6]),\n ):\n for subvalue in numbers:\n self.store_performance_metric(\n name=TransactionMRI.MEASUREMENTS_LCP.value,\n tags={tag: value},\n value=subvalue,\n )\n\n response = self.get_success_response(\n self.organization.slug,\n field=[\n f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n f\"count_unique({TransactionMetricKey.USER.value})\",\n ],\n statsPeriod=\"1h\",\n interval=\"1h\",\n groupBy=[\"project_id\", \"transaction\"],\n orderBy=f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n useCase=\"performance\",\n )\n groups = response.data[\"groups\"]\n assert len(groups) == 2\n\n expected = [\n (\"/bar/\", 5.", "d_id": 18130, "documentation": { "docstring": "\n Test that ensures when transactions table has null values for some fields (i.e. 
fields\n with a different entity than the entity of the field in the order by), then the table gets\n populated accordingly\n ", "n_words": 34, "vocab_size": 28, "n_whitespaces": 63, "language": "en" } }, { "id": 66873, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/payroll/doctype/employee_benefit_application/employee_benefit_application.py", "file_name": "employee_benefit_application.py", "fun_name": "get_earning_components_max_benefits", "commit_message": "style: format code with black", "code": "def get_earning_components_max_benefits(employee, date, earning_component):\n\tsalary_structure = get_assigned_salary_structure(employee, date)\n\tamount = frappe.db.sql(\n\t\t,\n\t\tsalary_structure,\n\t\tearning_component,\n\t)\n\n\treturn amount if amount else 0\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 13, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 14, "token_counts": 38, "n_ast_nodes": 56, "n_identifiers": 10, "random_cut": "def get_earning_components_max_benefits(employee, date, earning_component):\n\t", "d_id": 14369, "documentation": { "docstring": "\n\t\t\tselect amount\n\t\t\tfrom `tabSalary Detail`\n\t\t\twhere parent = %s and is_flexible_benefit = 1\n\t\t\tand salary_component = %s\n\t\t\torder by name\n\t\t", "n_words": 20, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 270731, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "get_input_mask_at", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_input_mask_at(self, node_index):\n \n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, \"_keras_mask\", None) for x in inputs]\n else:\n return getattr(inputs, \"_keras_mask\", None)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 72, "n_words": 22, "vocab_size": 19, "complexity": 3, "nloc": 6, "token_counts": 50, "n_ast_nodes": 80, "n_identifiers": 9, "random_cut": "def get_input_mask_at(self, node_index):\n \n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, \"_keras_mask\", None) for x in inputs]\n else:\n ", "d_id": 80549, "documentation": { "docstring": "Retrieves the input mask tensor(s) of a layer at a given node.\n\n Args:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. 
`node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple inputs).\n ", "n_words": 51, "vocab_size": 39, "n_whitespaces": 150, "language": "en" } }, { "id": 9844, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/peapods/networking.py", "file_name": "networking.py", "fun_name": "get_default_grpc_options", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* 
fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace 
quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli 
autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky 
test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def get_default_grpc_options():\n \n return [\n ('grpc.max_send_message_length', -1),\n ('grpc.max_receive_message_length', -1),\n ]\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 52, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 22, "n_ast_nodes": 39, "n_identifiers": 1, "random_cut": "def get_default_grpc_options():\n \n retu", "d_id": 1726, "documentation": { "docstring": "\n Returns a list of default options used for creating grpc channels.\n Documentation is here https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h\n :returns: list of tuples defining grpc parameters\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 51, "language": "en" } }, { "id": 30582, "commit_id": "6b425aaebe33703bd44b1b15571e4af8533b851a", "repo": "OCRmyPDF", "path": "src/ocrmypdf/builtin_plugins/concurrency.py", "file_name": "concurrency.py", "fun_name": 
"_cancel_futures_kwargs", "commit_message": "Add shim for cancel_futures in older Pythons\n\nThanks @hfwittmann\n\nCloses #993\n\nCo-authored-by: H. Felix Wittmann ", "code": "def _cancel_futures_kwargs(self):\n \n if sys.version_info[:2] < (3, 9):\n return {}\n return dict(cancel_futures=True)\n", "url": "https://github.com/ocrmypdf/OCRmyPDF.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 43, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 4, "token_counts": 31, "n_ast_nodes": 51, "n_identifiers": 6, "random_cut": "def _cancel_futures_kwargs(self):\n \n if sys.version_info[:2] < (3, 9):\n return {}\n return dict(cancel_futures=Tru", "d_id": 5634, "documentation": { "docstring": "Shim older Pythons that do not have Executor.shutdown(...cancel_futures=).\n\n Remove this code when support for Python 3.8 is dropped.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 32, "language": "en" } }, { "id": 124984, "commit_id": "ef091c382eea427783ea75531fe9d5a5f008107c", "repo": "ray", "path": "python/ray/data/_internal/util.py", "file_name": "util.py", "fun_name": "_estimate_available_parallelism", "commit_message": "[data] Add warnings when DatasetPipelines are under-parallelized or using too much memory (#26592)\n\nCurrently, it's not very easy to figure out why a DatasetPipeline may be underperforming. Add some warnings to help guide the user. As a next step, we can try to default to a good pipeline setting based on these constraints.", "code": "def _estimate_available_parallelism() -> int:\n \n cur_pg = ray.util.get_current_placement_group()\n return _estimate_avail_cpus(cur_pg)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 5, "token_counts": 21, "n_ast_nodes": 38, "n_identifiers": 7, "random_cut": "def _estimate_available_parallelism() -> int:\n \n cur_pg = ray.util.get_current_placement_", "d_id": 27734, "documentation": { "docstring": "Estimates the available CPU parallelism for this Dataset in the cluster.\n If we are currently in a placement group, take that into account.", "n_words": 23, "vocab_size": 21, "n_whitespaces": 25, "language": "en" } }, { "id": 175299, "commit_id": "acf7403f9baea3ae1119fc6b4a3298522188bf96", "repo": "cpython", "path": "Lib/enum.py", "file_name": "enum.py", "fun_name": "bin", "commit_message": "bpo-40066: [Enum] update str() and format() output (GH-30582)\n\nUndo rejected PEP-663 changes:\r\n\r\n- restore `repr()` to its 3.10 status\r\n- restore `str()` to its 3.10 status\r\n\r\nNew changes:\r\n\r\n- `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result\r\n- zero-valued flags without a name have a slightly changed repr(), e.g. 
`repr(Color(0)) == ''`\r\n- update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type\r\n- added `_numeric_repr_` to `Flag` to control display of unnamed values\r\n- enums without doc strings have a more comprehensive doc string added\r\n- `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`", "code": "def bin(num, max_bits=None):\n \n\n ceiling = 2 ** (num).bit_length()\n if num >= 0:\n s = bltns.bin(num + ceiling).replace('1', '0', 1)\n else:\n s = bltns.bin(~num ^ (ceiling - 1) + ceiling)\n sign = s[:3]\n digits = s[3:]\n if max_bits is not None:\n if len(digits) < max_bits:\n digits = (sign[-1] * max_bits + digits)[-max_bits:]\n return \"%s %s\" % (sign, digits)\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 113, "n_words": 57, "vocab_size": 44, "complexity": 4, "nloc": 12, "token_counts": 118, "n_ast_nodes": 192, "n_identifiers": 11, "random_cut": "def bin(num, max_bits=None):\n \n\n ceiling = 2 ** (num).bit_length()\n if num >= 0:\n s = bltns.bin(num + ceiling).replace('1', '0', 1)\n else:", "d_id": 41588, "documentation": { "docstring": "\n Like built-in bin(), except negative values are represented in\n twos-compliment, and the leading bit always indicates sign\n (0=positive, 1=negative).\n\n >>> bin(10)\n '0b0 1010'\n >>> bin(~10) # ~10 is -11\n '0b1 0101'\n ", "n_words": 31, "vocab_size": 30, "n_whitespaces": 58, "language": "en" } }, { "id": 261218, "commit_id": "537c325f2927895449ce418b3a77750135c0ba7b", "repo": "scikit-learn", "path": "sklearn/utils/__init__.py", "file_name": "__init__.py", "fun_name": "axis0_safe_slice", "commit_message": "DOC Ensure that sklearn.utils.axis0_safe_slice passes numpydoc (#24561)", "code": "def axis0_safe_slice(X, mask, len_mask):\n \n if len_mask != 0:\n return X[safe_mask(X, mask), :]\n return np.zeros(shape=(0, X.shape[1]))\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 31, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 4, "token_counts": 45, "n_ast_nodes": 68, "n_identifiers": 8, "random_cut": "def axis0_safe_slice(X, mask, len_mask):\n \n if len_mask ", "d_id": 76691, "documentation": { "docstring": "Return a mask which is safer to use on X than safe_mask.\n\n This mask is safer than safe_mask since it returns an\n empty array, when a sparse matrix is sliced with a boolean mask\n with all False, instead of raising an unhelpful error in older\n versions of SciPy.\n\n See: https://github.com/scipy/scipy/issues/5361\n\n Also note that we can avoid doing the dot product by checking if\n the len_mask is not zero in _huber_loss_and_gradient but this\n is not going to be the bottleneck, since the number of outliers\n and non_outliers are typically non-zero and it makes the code\n tougher to follow.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n Data on which to apply mask.\n\n mask : ndarray\n Mask to be used on X.\n\n len_mask : int\n The length of the mask.\n\n Returns\n -------\n mask : ndarray\n Array that is safe to use on X.\n ", "n_words": 140, "vocab_size": 91, "n_whitespaces": 225, "language": "en" } }, { "id": 260662, "commit_id": "02a4b342181e5ff0226081691308414e53c3107b", "repo": "scikit-learn", "path": "sklearn/impute/_base.py", 
"file_name": "_base.py", "fun_name": "_most_frequent", "commit_message": "MAINT fix the way to call stats.mode (#23633)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Meekail Zain <34613774+Micky774@users.noreply.github.com>\r\nCo-authored-by: Thomas J. Fan ", "code": "def _most_frequent(array, extra_value, n_repeat):\n \n # Compute the most frequent value in array only\n if array.size > 0:\n if array.dtype == object:\n # scipy.stats.mode is slow with object dtype array.\n # Python Counter is more efficient\n counter = Counter(array)\n most_frequent_count = counter.most_common(1)[0][1]\n # tie breaking similarly to scipy.stats.mode\n most_frequent_value = min(\n value\n for value, count in counter.items()\n if count == most_frequent_count\n )\n else:\n mode = _mode(array)\n most_frequent_value = mode[0][0]\n most_frequent_count = mode[1][0]\n else:\n most_frequent_value = 0\n most_frequent_count = 0\n\n # Compare to array + [extra_value] * n_repeat\n if most_frequent_count == 0 and n_repeat == 0:\n return np.nan\n elif most_frequent_count < n_repeat:\n return extra_value\n elif most_frequent_count > n_repeat:\n return most_frequent_value\n elif most_frequent_count == n_repeat:\n # tie breaking similarly to scipy.stats.mode\n return min(most_frequent_value, extra_value)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 366, "n_words": 121, "vocab_size": 67, "complexity": 10, "nloc": 25, "token_counts": 137, "n_ast_nodes": 221, "n_identifiers": 20, "random_cut": "def _most_frequent(array, extra_value, n_repeat):\n \n # Compute the most frequent value in array only\n if array.size > 0:\n if array.dtype == object:\n # scipy.stats.mode is slow with object dtype array.\n # Python Counter is more efficient\n counter = Counter(array)\n most_frequent_count = counter.most_common(1)[0][1]\n # tie breaking similarly to scipy.stats.mode\n most_frequent_value = min(\n value\n for value, count in counter.items()\n if count == most_frequent_count\n )\n else:\n mode = _mode(array)\n most_frequent_value = mode[0][0]\n most_frequent_count = mode[1][0]\n else:\n most_frequent_value = 0\n ", "d_id": 76401, "documentation": { "docstring": "Compute the most frequent value in a 1d array extended with\n [extra_value] * n_repeat, where extra_value is assumed to be not part\n of the array.", "n_words": 25, "vocab_size": 24, "n_whitespaces": 30, "language": "en" } }, { "id": 259737, "commit_id": "3f1833d5805a99894f1fc6b858a9ac663e175997", "repo": "scikit-learn", "path": "sklearn/discriminant_analysis.py", "file_name": "discriminant_analysis.py", "fun_name": "_cov", "commit_message": "MNT Combine multiple `isinstance` call (#23204)", "code": "def _cov(X, shrinkage=None, covariance_estimator=None):\n \n if covariance_estimator is None:\n shrinkage = \"empirical\" if shrinkage is None else shrinkage\n if isinstance(shrinkage, str):\n if shrinkage == \"auto\":\n sc = StandardScaler() # standardize features\n X = sc.fit_transform(X)\n s = ledoit_wolf(X)[0]\n # rescale\n s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]\n elif shrinkage == \"empirical\":\n s = empirical_covariance(X)\n else:\n raise ValueError(\"unknown shrinkage parameter\")\n elif isinstance(shrinkage, Real):\n if shrinkage < 0 or shrinkage > 1:\n raise ValueError(\"shrinkage parameter must be between 0 and 1\")\n s = shrunk_covariance(empirical_covariance(X), shrinkage)\n else:\n raise TypeError(\"shrinkage must be a float or a 
string\")\n else:\n if shrinkage is not None and shrinkage != 0:\n raise ValueError(\n \"covariance_estimator and shrinkage parameters \"\n \"are not None. Only one of the two can be set.\"\n )\n covariance_estimator.fit(X)\n if not hasattr(covariance_estimator, \"covariance_\"):\n raise ValueError(\n \"%s does not have a covariance_ attribute\"\n % covariance_estimator.__class__.__name__\n )\n s = covariance_estimator.covariance_\n return s\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 503, "n_words": 144, "vocab_size": 87, "complexity": 12, "nloc": 33, "token_counts": 197, "n_ast_nodes": 333, "n_identifiers": 24, "random_cut": "def _cov(X, shrinkage=None, covariance_estimator=None):\n \n if covariance_estimator is None:\n shrinkage = \"empirical\" if shrinkage is None else shrinkage\n if isinstance(shrinkage, str):\n if shrinkage == \"auto\":\n sc = StandardScaler() # standardize features\n X = sc.fit_transform(X)\n s = ledoit_wolf(X)[0]\n # rescale\n s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]\n elif shrinkage == \"empirical\":\n s = empirical_covariance(X)\n else:\n raise ValueError(\"unknown shrinkage parameter\")\n elif isinstance(shrinkage, Real):\n if shrinkage < 0 or shrinkage > 1:\n raise ValueError(\"shrinkage parameter must be between 0 and 1\")\n s = shrunk_covariance(empirical_covariance(X), shrinkage)\n else:\n raise TypeError(\"shrinkage must be a float or a string\")\n else:\n if shrinkage is not None and shrinkage != 0:\n raise ValueError(\n \"covariance_estimator and shrinkage parameters \"\n \"are not None. Only one of the two can be set.\"\n )\n covariance_estimator.fit(X)\n if not hasattr(covariance_estimator, \"covariance_\"):\n raise ValueError(\n \"%s does not have a covariance_ attribute\"\n % covariance_estimator.__class__.__name__\n )\n s = covariance_estimator.covariance_\n return s\n\n", "d_id": 75886, "documentation": { "docstring": "Estimate covariance matrix (using optional covariance_estimator).\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n shrinkage : {'empirical', 'auto'} or float, default=None\n Shrinkage parameter, possible values:\n - None or 'empirical': no shrinkage (default).\n - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n - float between 0 and 1: fixed shrinkage parameter.\n\n Shrinkage parameter is ignored if `covariance_estimator`\n is not None.\n\n covariance_estimator : estimator, default=None\n If not None, `covariance_estimator` is used to estimate\n the covariance matrices instead of relying on the empirical\n covariance estimator (with potential shrinkage).\n The object should have a fit method and a ``covariance_`` attribute\n like the estimators in :mod:`sklearn.covariance``.\n if None the shrinkage parameter drives the estimate.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n s : ndarray of shape (n_features, n_features)\n Estimated covariance matrix.\n ", "n_words": 126, "vocab_size": 93, "n_whitespaces": 265, "language": "en" } }, { "id": 22139, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "from_key_val_list", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def from_key_val_list(value):\n \n if value is None:\n return None\n\n if isinstance(value, (str, bytes, bool, int)):\n raise ValueError(\"cannot encode objects that are not 2-tuples\")\n\n return OrderedDict(value)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 24, "vocab_size": 22, "complexity": 3, "nloc": 6, "token_counts": 39, "n_ast_nodes": 63, "n_identifiers": 9, "random_cut": "def from_key_val_list(value):\n \n if value is None:\n return None\n\n if isinstance(value, (str, bytes, bool, int)):\n raise ValueError(\"cannot encode objects tha", "d_id": 4211, "documentation": { "docstring": "Take an object and test to see if it can be represented as a\n dictionary. Unless it can not be represented as such, return an\n OrderedDict, e.g.,\n\n ::\n\n >>> from_key_val_list([('key', 'val')])\n OrderedDict([('key', 'val')])\n >>> from_key_val_list('string')\n Traceback (most recent call last):\n ...\n ValueError: cannot encode objects that are not 2-tuples\n >>> from_key_val_list({'key': 'val'})\n OrderedDict([('key', 'val')])\n\n :rtype: OrderedDict\n ", "n_words": 56, "vocab_size": 44, "n_whitespaces": 127, "language": "en" } }, { "id": 20445, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/lexers/__init__.py", "file_name": "__init__.py", "fun_name": "load_lexer_from_file", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def load_lexer_from_file(filename, lexername=\"CustomLexer\", **options):\n \n try:\n # This empty dict will contain the namespace for the exec'd file\n custom_namespace = {}\n with open(filename, 'rb') as f:\n exec(f.read(), custom_namespace)\n # Retrieve the class `lexername` from that namespace\n if lexername not in custom_namespace:\n raise ClassNotFound('no valid %s class found in %s' %\n (lexername, filename))\n lexer_class = custom_namespace[lexername]\n # And finally instantiate it with the options\n return lexer_class(**options)\n except OSError as err:\n raise ClassNotFound('cannot read %s: %s' % (filename, err))\n except ClassNotFound:\n raise\n except Exception as err:\n raise ClassNotFound('error when loading custom lexer: %s' % err)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 237, "n_words": 92, "vocab_size": 70, "complexity": 5, "nloc": 16, "token_counts": 100, "n_ast_nodes": 176, "n_identifiers": 14, "random_cut": "def load_lexer_from_file(filename, lexername=\"CustomLexer\", **options):\n \n try:\n # This empty dict will contain the namespace for the exec'd file\n custom_namespace = {}\n with open(filename, 'rb') as f:\n exec(f.read(), custom_namespace)\n #", "d_id": 3373, "documentation": { "docstring": "Load a lexer from a file.\n\n This 
method expects a file located relative to the current working\n directory, which contains a Lexer class. By default, it expects the\n Lexer to be name CustomLexer; you can specify your own class name\n as the second argument to this function.\n\n Users should be very careful with the input, because this method\n is equivalent to running eval on the input file.\n\n Raises ClassNotFound if there are any problems importing the Lexer.\n\n .. versionadded:: 2.2\n ", "n_words": 80, "vocab_size": 62, "n_whitespaces": 107, "language": "en" } }, { "id": 64228, "commit_id": "282fbf4b07740e14566f16d749b549239d7253a7", "repo": "erpnext", "path": "erpnext/utilities/product.py", "file_name": "product.py", "fun_name": "get_price", "commit_message": "fix: fetch correct selling price.", "code": "def get_price(item_code, price_list, customer_group, company, qty=1):\n\tfrom erpnext.e_commerce.shopping_cart.cart import get_party\n\n\ttemplate_item_code = frappe.db.get_value(\"Item\", item_code, \"variant_of\")\n\n\tif price_list:\n\t\tprice = frappe.get_all(\"Item Price\", fields=[\"price_list_rate\", \"currency\"],\n\t\t\tfilters={\"price_list\": price_list, \"item_code\": item_code})\n\n\t\tif template_item_code and not price:\n\t\t\tprice = frappe.get_all(\"Item Price\", fields=[\"price_list_rate\", \"currency\"],\n\t\t\t\tfilters={\"price_list\": price_list, \"item_code\": template_item_code})\n\n\t\tif price:\n\t\t\tparty = get_party()\n\t\t\tpricing_rule_dict = frappe._dict({\n\t\t\t\t\"item_code\": item_code,\n\t\t\t\t\"qty\": qty,\n\t\t\t\t\"stock_qty\": qty,\n\t\t\t\t\"transaction_type\": \"selling\",\n\t\t\t\t\"price_list\": price_list,\n\t\t\t\t\"customer_group\": customer_group,\n\t\t\t\t\"company\": company,\n\t\t\t\t\"conversion_rate\": 1,\n\t\t\t\t\"for_shopping_cart\": True,\n\t\t\t\t\"currency\": frappe.db.get_value(\"Price List\", price_list, \"currency\")\n\t\t\t})\n\n\t\t\tif party and party.doctype == \"Customer\":\n\t\t\t\tpricing_rule_dict.update({\"customer\": party.name})\n\n\t\t\tpricing_rule = get_pricing_rule_for_item(pricing_rule_dict)\n\t\t\tprice_obj = price[0]\n\n\t\t\tif pricing_rule:\n\t\t\t\t# price without any rules applied\n\t\t\t\tmrp = price_obj.price_list_rate or 0\n\n\t\t\t\tif pricing_rule.pricing_rule_for == \"Discount Percentage\":\n\t\t\t\t\tprice_obj.discount_percent = pricing_rule.discount_percentage\n\t\t\t\t\tprice_obj.formatted_discount_percent = str(flt(pricing_rule.discount_percentage, 0)) + \"%\"\n\t\t\t\t\tprice_obj.price_list_rate = flt(price_obj.price_list_rate * (1.0 - (flt(pricing_rule.discount_percentage) / 100.0)))\n\n\t\t\t\tif pricing_rule.pricing_rule_for == \"Rate\":\n\t\t\t\t\trate_discount = flt(mrp) - flt(pricing_rule.price_list_rate)\n\t\t\t\t\tif rate_discount > 0:\n\t\t\t\t\t\tprice_obj.formatted_discount_rate = fmt_money(rate_discount, currency=price_obj[\"currency\"])\n\t\t\t\t\tprice_obj.price_list_rate = pricing_rule.price_list_rate or 0\n\n\t\t\tif price_obj:\n\t\t\t\tprice_obj[\"formatted_price\"] = fmt_money(price_obj[\"price_list_rate\"], currency=price_obj[\"currency\"])\n\t\t\t\tif mrp != price_obj[\"price_list_rate\"]:\n\t\t\t\t\tprice_obj[\"formatted_mrp\"] = fmt_money(mrp, currency=price_obj[\"currency\"])\n\n\t\t\t\tprice_obj[\"currency_symbol\"] = not cint(frappe.db.get_default(\"hide_currency_symbol\")) \\\n\t\t\t\t\tand (frappe.db.get_value(\"Currency\", price_obj.currency, \"symbol\", cache=True) or price_obj.currency) \\\n\t\t\t\t\tor \"\"\n\n\t\t\t\tuom_conversion_factor = frappe.db.sql(, 
item_code)\n\n\t\t\t\tuom_conversion_factor = uom_conversion_factor[0][0] if uom_conversion_factor else 1\n\t\t\t\tprice_obj[\"formatted_price_sales_uom\"] = fmt_money(price_obj[\"price_list_rate\"] * uom_conversion_factor, currency=price_obj[\"currency\"])\n\n\t\t\t\tif not price_obj[\"price_list_rate\"]:\n\t\t\t\t\tprice_obj[\"price_list_rate\"] = 0\n\n\t\t\t\tif not price_obj[\"currency\"]:\n\t\t\t\t\tprice_obj[\"currency\"] = \"\"\n\n\t\t\t\tif not price_obj[\"formatted_price\"]:\n\t\t\t\t\tprice_obj[\"formatted_price\"], price_obj[\"formatted_mrp\"] = \"\", \"\"\n\n\t\t\treturn price_obj\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 158, "n_words": 214, "vocab_size": 128, "complexity": 22, "nloc": 58, "token_counts": 509, "n_ast_nodes": 854, "n_identifiers": 45, "random_cut": "def get_price(item_code, price_list, customer_group, company, qty=1):\n\tfrom erpnext.e_commerce.shopping_cart.cart import get_party\n\n\ttemplate_item_code = frappe.db.get_value(\"Item\", item_code, \"variant_of\")\n\n\tif price_list:\n\t\tprice = frappe.get_all(\"Item Price\", fields=[\"price_list_rate\", \"currency\"],\n\t\t\tfilters={\"price_list\": price_list, \"item_code\": item_code})\n\n\t\tif template_item_code and not price:\n\t\t\tprice = frappe.get_all(\"Item Price\", fields=[\"price_list_rate\", \"currency\"],\n\t\t\t\tfilters={\"price_list\": price_list, \"item_code\": template_item_code})\n\n\t\tif price:\n\t\t\tparty = get_party()\n\t\t\tpricing_rule_dict = frappe._dict({\n\t\t\t\t\"item_code\": item_code,\n\t\t\t\t\"qty\": qty,\n\t\t\t\t\"stock_qty\": qty,\n\t\t\t\t\"transaction_type\": \"selling\",\n\t\t\t\t\"price_list\": price_list,\n\t\t\t\t\"customer_group\": customer_group,\n\t\t\t\t\"company\": company,\n\t\t\t\t\"conversion_rate\": 1,\n\t\t\t\t\"for_shopping_cart\": True,\n\t\t\t\t\"currency\": frappe.db.get_value(\"Price List\", price_list, \"currency\")\n\t\t\t})\n\n\t\t\tif party and party.doctype == \"Customer\":\n\t\t\t\tpricing_rule_dict.update({\"customer\": party.name})\n\n\t\t\tpricing_rule = get_pricing_rule_for_item(pricing_rule_dict)\n\t\t\tprice_obj = price[0]\n\n\t\t\tif pricing_rule:\n\t\t\t\t# price without any rules applied\n\t\t\t\tmrp = price_obj.price_list_rate or 0\n\n\t\t\t\tif pricing_rule.pricing_rule_for == \"Discount Percentage\":\n\t\t\t\t\tprice_obj.discount_percent = pricing_rule.discount_percentage\n\t\t\t\t\tprice_obj.formatted_discount_percent = str(flt(pricing_rule.discount_percentage, 0)) + \"%\"\n\t\t\t\t\tprice_obj.price_list_rate = flt(price_obj.price_list_rate * (1.0 - (flt(pricing_rule.discount_percentage) / 100.0)))\n\n\t\t\t\tif pricing_rule.pricing_rule_for == \"Rate\":\n\t\t\t\t\trate_discount = flt(mrp) - flt(pricing_rule.price_list_rate)\n\t\t\t\t\tif rate_discount > 0:\n\t\t\t\t\t\tprice_obj.formatted_discount_rate = fmt_money(rate_discount, currency=price_obj[\"currency\"])\n\t\t\t\t\tprice_obj.price_list_rate = pricing_rule.price_list_rate or 0\n\n\t\t\tif price_obj:\n\t\t\t\tprice_obj[\"formatted_price\"] = fmt_money(price_obj[\"price_list_rate\"], currency=price_obj[\"currency\"])\n\t\t\t\tif mrp != price_obj[\"price_list_rate\"]:\n\t\t\t\t\tprice_obj[\"formatted_mrp\"] = fmt_money(mrp, currency=price_obj[\"currency\"])\n\n", "d_id": 13580, "documentation": { "docstring": "select\tC.conversion_factor\n\t\t\t\t\tfrom `tabUOM Conversion Detail` C\n\t\t\t\t\tinner join `tabItem` I on C.parent = I.name and C.uom = I.sales_uom\n\t\t\t\t\twhere 
I.name = %s", "n_words": 23, "vocab_size": 20, "n_whitespaces": 18, "language": "en" } }, { "id": 161042, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg2mel/train/solver.py", "file_name": "solver.py", "fun_name": "progress", "commit_message": "Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update __init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def progress(self, msg):\n \n if self.paras.verbose:\n sys.stdout.write(\"\\033[K\") # Clear line\n print('[{}] {}'.format(human_format(self.step), msg), end='\\r')\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 50, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 43, "n_ast_nodes": 78, "n_identifiers": 13, "random_cut": "def progress(self, msg):\n \n if self.paras.verbose:\n sys.stdout.write(\"\\033[K\") # Clear line\n print('[{}] {}'.format(human", "d_id": 38865, "documentation": { "docstring": " Verbose function for updating progress on stdout (do not include newline) ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 223317, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/test_sysconfig.py", "file_name": "test_sysconfig.py", "fun_name": "test_customize_compiler_before_get_config_vars", "commit_message": "add python 3.10.4 for windows", "code": "def test_customize_compiler_before_get_config_vars(self):\n # Issue #21923: test that a Distribution compiler\n # instance can be called without an explicit call to\n # get_config_vars().\n with open(TESTFN, 'w') as f:\n f.writelines(textwrap.dedent())\n p = subprocess.Popen([str(sys.executable), TESTFN],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n outs, errs = p.communicate()\n self.assertEqual(0, p.returncode, \"Subprocess failed: \" + outs)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 150, "n_words": 46, "vocab_size": 43, "complexity": 1, "nloc": 15, "token_counts": 82, "n_ast_nodes": 137, "n_identifiers": 24, "random_cut": "def test_customize_compiler_before_get_config_vars(self):\n # Issue #21923: test that a Distribution compiler\n # instance can be called without an explicit call to\n # get_config_vars().\n with open(TESTFN, 'w') as f:\n f.writelines(textwrap.dedent())\n p = subprocess.Popen", "d_id": 56870, "documentation": { "docstring": "\\\n from distutils.core import Distribution\n config = Distribution().get_command_obj('config')\n # try_compile may pass or it may fail if no compiler\n # is found but it should not raise an exception.\n rc = config.try_compile('int x;')\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 123, "language": "en" } }, { "id": 313986, "commit_id": "4bc5d7bfed07c20d6f3438ab91c734a620505a33", "repo": "core", "path": "tests/components/zha/test_siren.py", "file_name": "test_siren.py", "fun_name": "siren_platform_only", "commit_message": "Speed up zha tests (#73627)", "code": "def siren_platform_only():\n \n with patch(\n \"homeassistant.components.zha.PLATFORMS\",\n (\n Platform.DEVICE_TRACKER,\n Platform.NUMBER,\n Platform.SENSOR,\n 
Platform.SELECT,\n Platform.SIREN,\n ),\n ):\n yield\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 106, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 12, "token_counts": 36, "n_ast_nodes": 66, "n_identifiers": 10, "random_cut": "def siren_platform_only():\n \n with patch(\n \"homeassistant.components.zha.PLATFORMS\",\n (\n Platform.DEVICE_TRACKER,\n Platform.NUMBER,\n Platform.SENSOR,\n Platform.SELECT,\n ", "d_id": 112597, "documentation": { "docstring": "Only setup the siren and required base platforms to speed up tests.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 248119, "commit_id": "78b99de7c206b106340e12cdee0af9aa246bd5ad", "repo": "synapse", "path": "tests/server_notices/test_resource_limits_server_notices.py", "file_name": "test_resource_limits_server_notices.py", "fun_name": "test_maybe_send_server_notice_when_alerting_suppressed_room_blocked", "commit_message": "Prefer `make_awaitable` over `defer.succeed` in tests (#12505)\n\nWhen configuring the return values of mocks, prefer awaitables from\r\n`make_awaitable` over `defer.succeed`. `Deferred`s are only awaitable\r\nonce, so it is inappropriate for a mock to return the same `Deferred`\r\nmultiple times.\r\n\r\nAlso update `run_in_background` to support functions that return\r\narbitrary awaitables.\r\n\r\nSigned-off-by: Sean Quah ", "code": "def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):\n \n self._rlsn._auth.check_auth_blocking = Mock(\n return_value=make_awaitable(None),\n side_effect=ResourceLimitError(\n 403, \"foo\", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER\n ),\n )\n\n self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock(\n return_value=make_awaitable((True, []))\n )\n\n mock_event = Mock(\n type=EventTypes.Message, content={\"msgtype\": ServerNoticeMsgType}\n )\n self._rlsn._store.get_events = Mock(\n return_value=make_awaitable({\"123\": mock_event})\n )\n self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))\n\n self._send_notice.assert_called_once()\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 191, "n_words": 33, "vocab_size": 24, "complexity": 1, "nloc": 18, "token_counts": 122, "n_ast_nodes": 197, "n_identifiers": 28, "random_cut": "def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):\n \n self._rlsn._auth.check_auth_blocking = Mock(\n return_va", "d_id": 72113, "documentation": { "docstring": "\n When the room is already in a blocked state, test that when alerting\n is suppressed that the room is returned to an unblocked state.\n ", "n_words": 24, "vocab_size": 19, "n_whitespaces": 46, "language": "en" } }, { "id": 215066, "commit_id": "374723c3abedee9ea5a399b566b460497b3885f6", "repo": "salt", "path": "tests/pytests/integration/modules/test_event.py", "file_name": "test_event.py", "fun_name": "test_send", "commit_message": "Fix salt-call event.send call with grains and pillar", "code": "def test_send(event_listener, salt_master, salt_minion, salt_call_cli):\n \n event_tag = random_string(\"salt/test/event/\")\n data = {\"event.fire\": \"just test it!!!!\"}\n start_time = time.time()\n ret = salt_call_cli.run(\n \"event.send\",\n event_tag,\n data=data,\n with_grains=True,\n with_pillar=True,\n preload={\"foo\": 
\"bar\"},\n )\n assert ret.exitcode == 0\n assert ret.json\n assert ret.json is True\n\n event_pattern = (salt_master.id, event_tag)\n matched_events = event_listener.wait_for_events(\n [event_pattern], after_time=start_time, timeout=30\n )\n assert matched_events.found_all_events\n for event in matched_events:\n assert event.data[\"id\"] == salt_minion.id\n assert event.data[\"cmd\"] == \"_minion_event\"\n assert \"event.fire\" in event.data[\"data\"]\n assert event.data[\"foo\"] == \"bar\"\n assert event.data[\"data\"][\"grains\"][\"test_grain\"] == \"cheese\"\n assert event.data[\"data\"][\"pillar\"][\"ext_spam\"] == \"eggs\"\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 212, "n_words": 79, "vocab_size": 57, "complexity": 2, "nloc": 27, "token_counts": 183, "n_ast_nodes": 309, "n_identifiers": 25, "random_cut": "def test_send(event_listener, salt_master, salt_minion, salt_call_cli):\n \n event_tag = random_string(\"salt/test/event/\")\n data = {\"event.fire\": \"just test it!!!!\"}\n start_time = time.time()\n ret = salt_call_cli.run(\n \"event.send\",\n event_tag,\n data=data,\n with_grains=True,\n with_pillar=True,\n ", "d_id": 53786, "documentation": { "docstring": "\n Test sending an event to the master event bus\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 16, "language": "en" } }, { "id": 119997, "commit_id": "df1ceaeeb11efc7c5af1ad2dd102857128c23b26", "repo": "jax", "path": "jax/_src/lax/control_flow.py", "file_name": "control_flow.py", "fun_name": "_check_tree_and_avals", "commit_message": "Deprecate jax.tree_util.tree_multimap", "code": "def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):\n \n if tree1 != tree2:\n raise TypeError(\n f\"{what} must have same type structure, got {tree1} and {tree2}.\")\n if not all(_map(core.typematch, avals1, avals2)):\n diff = tree_map(_show_diff, tree_unflatten(tree1, avals1),\n tree_unflatten(tree2, avals2))\n raise TypeError(f\"{what} must have identical types, got\\n{diff}.\")\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 79, "n_words": 41, "vocab_size": 36, "complexity": 3, "nloc": 8, "token_counts": 67, "n_ast_nodes": 122, "n_identifiers": 15, "random_cut": "def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):\n \n if tree1 != tree2:\n raise TypeError(\n f\"{what} must have same type structure, got {tree1} and {tree2}.\")\n if not all(_map(core.typematch, avals1, avals2)):\n diff = tree_map(_show_diff, tree_unflatten(tree1, avals1),\n tree_unflatten(tree2, avals2))\n raise TypeError(f\"{what} mus", "d_id": 26740, "documentation": { "docstring": "Raises TypeError if (tree1, avals1) does not match (tree2, avals2).\n\n Corresponding `tree` and `avals` must match in the sense that the number of\n leaves in `tree` must be equal to the length of `avals`. `what` will be\n prepended to details of the mismatch in TypeError.\n ", "n_words": 45, "vocab_size": 33, "n_whitespaces": 49, "language": "en" } }, { "id": 118684, "commit_id": "dd9084523e365e637443ea351eaaaa25f52d8412", "repo": "streamlit", "path": "lib/tests/streamlit/config_test.py", "file_name": "config_test.py", "fun_name": "test_load_global_local_flag_config", "commit_message": "Report sharing removal (#4260)\n\nThe report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. 
The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. This commit removes that code to make the remaining code easier to navigate and understand.", "code": "def test_load_global_local_flag_config(self):\n \n\n global_config = \n\n local_config = \n\n global_config_path = \"/mock/home/folder/.streamlit/config.toml\"\n local_config_path = os.path.join(os.getcwd(), \".streamlit/config.toml\")\n\n global_open = mock_open(read_data=global_config)\n local_open = mock_open(read_data=local_config)\n open = mock_open()\n open.side_effect = [global_open.return_value, local_open.return_value]\n\n open_patch = patch(\"streamlit.config.open\", open)\n # patch streamlit.*.os.* instead of os.* for py35 compat\n makedirs_patch = patch(\"streamlit.config.os.makedirs\")\n makedirs_patch.return_value = True\n pathexists_patch = patch(\"streamlit.config.os.path.exists\")\n pathexists_patch.side_effect = lambda path: path in [\n global_config_path,\n local_config_path,\n ]\n\n with open_patch, makedirs_patch, pathexists_patch:\n config.get_config_options(options_from_flags={\"theme.font\": \"monospace\"})\n\n self.assertEqual(\"light\", config.get_option(\"theme.base\"))\n self.assertEqual(\"#FFFFFF\", config.get_option(\"theme.textColor\"))\n self.assertEqual(\"monospace\", config.get_option(\"theme.font\"))\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 257, "n_words": 70, "vocab_size": 58, "complexity": 1, "nloc": 31, "token_counts": 163, "n_ast_nodes": 292, "n_identifiers": 26, "random_cut": "def test_load_global_local_flag_config(self):\n \n\n global_config = \n\n local_config = \n\n global_config_path = \"/mock/home/folder/.streamlit/config.toml\"\n local_config_path = os.path.join(os.getcwd(), \".streamlit/config.toml\")\n\n global_open = mock_open(read_data=global_config)\n local_open = mock_open(read_data=local_config)\n open = mock_open()\n open.side_effect = [global_open.return_value, local_open.return_value]\n\n open_patch = patch(\"streamlit.config.open\", open)\n # patch streamlit.*.os.* instead of os.* for py35 compat\n makedirs_patch = patch(\"streamlit.config.os.makedirs\")\n makedirs_patch.return_value = True\n pathexists_patch = patch(\"streamlit.config.os.path.exists\")\n pathexists_patch.side_effect =", "d_id": 26359, "documentation": { "docstring": "Test that CLI flags have higher priority than both\n ~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time.\n \n [theme]\n base = \"dark\"\n font = \"sans serif\"\n textColor = \"#FFFFFF\"\n \n [theme]\n base = \"light\"\n font = \"serif\"\n ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 112, "language": "en" } }, { "id": 206738, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/text.py", "file_name": "text.py", "fun_name": "get_valid_filename", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_valid_filename(name):\n \n s = str(name).strip().replace(\" \", \"_\")\n s = re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n if s in {\"\", \".\", \"..\"}:\n raise SuspiciousFileOperation(\"Could not derive file name from '%s'\" % name)\n return s\n\n\n@keep_lazy_text", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@keep_lazy_text", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 52, "n_words": 31, 
"vocab_size": 27, "complexity": 2, "nloc": 6, "token_counts": 56, "n_ast_nodes": 106, "n_identifiers": 10, "random_cut": "def get_valid_filename(name):\n \n s = str(name).strip().replace(\" \", \"_\")\n s = re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n if s in {\"\", \".\", \"..\"}:\n raise SuspiciousFileOperation(\"Could not derive file name from '%s'\" % name)\n return s\n\n\n@keep_lazy_text", "d_id": 51670, "documentation": { "docstring": "\n Return the given string converted to a string that can be used for a clean\n filename. Remove leading and trailing spaces; convert other spaces to\n underscores; and remove anything that is not an alphanumeric, dash,\n underscore, or dot.\n >>> get_valid_filename(\"john's portrait in 2004.jpg\")\n 'johns_portrait_in_2004.jpg'\n ", "n_words": 44, "vocab_size": 39, "n_whitespaces": 66, "language": "en" } }, { "id": 206991, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_changelist/tests.py", "file_name": "tests.py", "fun_name": "test_no_duplicates_for_m2m_in_list_filter", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_no_duplicates_for_m2m_in_list_filter(self):\n \n blues = Genre.objects.create(name=\"Blues\")\n band = Band.objects.create(name=\"B.B. King Review\", nr_of_members=11)\n\n band.genres.add(blues)\n band.genres.add(blues)\n\n m = BandAdmin(Band, custom_site)\n request = self.factory.get(\"/band/\", data={\"genres\": blues.pk})\n request.user = self.superuser\n\n cl = m.get_changelist_instance(request)\n cl.get_results(request)\n\n # There's only one Group instance\n self.assertEqual(cl.result_count, 1)\n # Queryset must be deletable.\n self.assertIs(cl.queryset.query.distinct, False)\n cl.queryset.delete()\n self.assertEqual(cl.queryset.count(), 0)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 159, "n_words": 47, "vocab_size": 40, "complexity": 1, "nloc": 14, "token_counts": 144, "n_ast_nodes": 238, "n_identifiers": 33, "random_cut": "def test_no_duplicates_for_m2m_in_list_filter(self):\n \n blues = Genre.objects.create(name=\"Blues\")\n band = Band.objects.create(name=\"B.B. King Review\", nr_of_members=11)\n\n band.genres.add(blues)\n band.genres.add(blues)\n\n m = BandAdmin(Band, custom_site)\n request = self.factory.get(\"/band/\", data={\"genres\": blues.pk})\n request.user = self.superuser\n\n cl = m.get_changelist_instance(request)\n cl.get_results(request)\n\n # There's only one Group instance\n self.assertEqual(c", "d_id": 51826, "documentation": { "docstring": "\n Regression test for #13902: When using a ManyToMany in list_filter,\n results shouldn't appear more than once. 
Basic ManyToMany.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 40, "language": "en" } }, { "id": 167798, "commit_id": "f65417656ba8c59438d832b6e2a431f78d40c21c", "repo": "pandas", "path": "pandas/core/ops/methods.py", "file_name": "methods.py", "fun_name": "add_flex_arithmetic_methods", "commit_message": "TYP: more return annotations in core/ (#47618)\n\n* TYP: more return annotations in core/\r\n\r\n* from __future__ import annotations\r\n\r\n* more __future__", "code": "def add_flex_arithmetic_methods(cls) -> None:\n \n flex_arith_method, flex_comp_method = _get_method_wrappers(cls)\n new_methods = _create_methods(cls, flex_arith_method, flex_comp_method)\n new_methods.update(\n {\n \"multiply\": new_methods[\"mul\"],\n \"subtract\": new_methods[\"sub\"],\n \"divide\": new_methods[\"div\"],\n }\n )\n # opt out of bool flex methods for now\n assert not any(kname in new_methods for kname in (\"ror_\", \"rxor\", \"rand_\"))\n\n _add_methods(cls, new_methods=new_methods)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 116, "n_words": 45, "vocab_size": 40, "complexity": 2, "nloc": 21, "token_counts": 80, "n_ast_nodes": 138, "n_identifiers": 11, "random_cut": "def add_flex_arithmetic_methods(cls) -> None:\n \n flex_arith_method, flex_comp_method = _get_method_wrappers(cls)\n new_methods = _create_methods(cls, flex_arith_method, flex_comp_method)\n new_methods.update(\n {\n \"multiply\": new_methods[\"mul\"],\n \"subtract\": new_methods[\"sub\"],\n \"divide\": new_methods[\"div\"],\n }\n )\n # opt out of bool flex methods for now\n assert not any", "d_id": 40126, "documentation": { "docstring": "\n Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)\n to the class.\n\n Parameters\n ----------\n cls : class\n flex methods will be defined and pinned to this class\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 55, "language": "en" } }, { "id": 210309, "commit_id": "7018dad10757b6d414f1b00a547244bced596d68", "repo": "PaddleDetection", "path": "deploy/python/action_utils.py", "file_name": "action_utils.py", "fun_name": "get_collected_keypoint", "commit_message": "Pipeline with kpt and act (#5399)\n\n* add keypoint infer and visualize into Pipeline\r\n\r\n* add independent action model inference\r\n\r\n* add action inference into pipeline, still in working\r\n\r\n* test different display frames and normalization methods\r\n\r\n* use bbox and scale normalization\r\n\r\n* Remove debug info and Optimize code structure\r\n\r\n* remove useless visual param\r\n\r\n* make action parameters configurable", "code": "def get_collected_keypoint(self):\n \n output = []\n for tracker_id in self.id_to_pop:\n output.append([tracker_id, self.keypoint_saver[tracker_id]])\n del (self.keypoint_saver[tracker_id])\n self.flag_to_pop = False\n self.id_to_pop.clear()\n return output\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 83, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 8, "token_counts": 55, "n_ast_nodes": 88, "n_identifiers": 9, "random_cut": "def get_collected_keypoint(self):\n \n output = []\n for tracker_id in self.id_to_pop:\n output.append([tracker_id, self.keypoint_saver[tracker_id]])\n del (self.keypoint_saver[tracker_id])\n self.flag_to_pop = False\n self.id_to", "d_id": 52889, "documentation": { "docstring": "\n Output (List): List of 
keypoint results for Action Recognition task, where \n the format of each element is [tracker_id, KeyPointSequence of tracker_id]\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 67, "language": "en" } }, { "id": 63987, "commit_id": "8b5827ed6db1041526b6440ca8e4fde19c646e1e", "repo": "erpnext", "path": "erpnext/education/api.py", "file_name": "api.py", "fun_name": "get_course_schedule_events", "commit_message": "fix: from time and to time not updated in drag and drop action #29114\n\nfix: from time and to time not updated in drag and drop action", "code": "def get_course_schedule_events(start, end, filters=None):\n\t\n\tfrom frappe.desk.calendar import get_event_conditions\n\tconditions = get_event_conditions(\"Course Schedule\", filters)\n\n\tdata = frappe.db.sql(.format(conditions=conditions), {\n\t\t\t\"start\": start,\n\t\t\t\"end\": end\n\t\t\t}, as_dict=True, update={\"allDay\": 0})\n\n\treturn data\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 19, "n_words": 28, "vocab_size": 26, "complexity": 1, "nloc": 14, "token_counts": 69, "n_ast_nodes": 123, "n_identifiers": 16, "random_cut": "def get_course_schedule_events(start, end, filters=None):\n\t\n\tfrom frappe.desk.calendar import get_event_conditions\n\tconditions = get_event_conditions(\"Course Schedule\", filters)\n\n\tdata = frappe.db.sql(.format(conditions=conditions), {\n\t\t\t\"start\": start,\n\t\t", "d_id": 13546, "documentation": { "docstring": "Returns events for Course Schedule Calendar view rendering.\n\n\t:param start: Start date-time.\n\t:param end: End date-time.\n\t:param filters: Filters (JSON).\n\tselect name, course, color,\n\t\t\ttimestamp(schedule_date, from_time) as from_time,\n\t\t\ttimestamp(schedule_date, to_time) as to_time,\n\t\t\troom, student_group, 0 as 'allDay'\n\t\tfrom `tabCourse Schedule`\n\t\twhere ( schedule_date between %(start)s and %(end)s )\n\t\t{conditions}", "n_words": 49, "vocab_size": 43, "n_whitespaces": 38, "language": "en" } }, { "id": 167425, "commit_id": "734db4f1fde2566a02b3c7ff661a479b0a71633c", "repo": "pandas", "path": "pandas/io/json/_json.py", "file_name": "_json.py", "fun_name": "check_keys_split", "commit_message": "TYP: Return annotations for io/{formats,json} (#47516)\n\n* TYP: Return annotations for io/{formats,json}\r\n\r\n* flake8\r\n\r\n* explicitly check whether width is None", "code": "def check_keys_split(self, decoded) -> None:\n \n bad_keys = set(decoded.keys()).difference(set(self._split_keys))\n if bad_keys:\n bad_keys_joined = \", \".join(bad_keys)\n raise ValueError(f\"JSON data had unexpected key(s): {bad_keys_joined}\")\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 64, "n_words": 21, "vocab_size": 20, "complexity": 2, "nloc": 8, "token_counts": 47, "n_ast_nodes": 85, "n_identifiers": 11, "random_cut": "def check_keys_split(self, decoded) -> None:\n \n bad_keys = set(decoded.keys()).difference(set(self._split_", "d_id": 40008, "documentation": { "docstring": "\n Checks that dict has only the appropriate keys for orient='split'.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 205092, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/oracle/operations.py", "file_name": "operations.py", "fun_name": "adapt_datetimefield_value", 
"commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def adapt_datetimefield_value(self, value):\n \n\n if value is None:\n return None\n\n # Expression values are adapted by the database.\n if hasattr(value, \"resolve_expression\"):\n return value\n\n # cx_Oracle doesn't support tz-aware datetimes\n if timezone.is_aware(value):\n if settings.USE_TZ:\n value = timezone.make_naive(value, self.connection.timezone)\n else:\n raise ValueError(\n \"Oracle backend does not support timezone-aware datetimes when USE_TZ is False.\"\n )\n\n return Oracle_datetime.from_datetime(value)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 210, "n_words": 53, "vocab_size": 42, "complexity": 5, "nloc": 13, "token_counts": 66, "n_ast_nodes": 112, "n_identifiers": 13, "random_cut": "def adapt_datetimefield_value(self, value):\n \n\n if value is None:\n return None\n\n # Expression values are adapted by the database.\n if hasattr(value", "d_id": 51013, "documentation": { "docstring": "\n Transform a datetime value to an object compatible with what is expected\n by the backend driver for datetime columns.\n\n If naive datetime is passed assumes that is in UTC. Normally Django\n models.DateTimeField makes sure that if USE_TZ is True passed datetime\n is timezone aware.\n ", "n_words": 44, "vocab_size": 35, "n_whitespaces": 87, "language": "en" } }, { "id": 217412, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/fractions.py", "file_name": "fractions.py", "fun_name": "__pow__", "commit_message": "add python 3.10.4 for windows", "code": "def __pow__(a, b):\n \n if isinstance(b, numbers.Rational):\n if b.denominator == 1:\n power = b.numerator\n if power >= 0:\n return Fraction(a._numerator ** power,\n a._denominator ** power,\n _normalize=False)\n elif a._numerator >= 0:\n return Fraction(a._denominator ** -power,\n a._numerator ** -power,\n _normalize=False)\n else:\n return Fraction((-a._denominator) ** -power,\n (-a._numerator) ** -power,\n _normalize=False)\n else:\n # A fractional power will generally produce an\n # irrational number.\n return float(a) ** float(b)\n else:\n return float(a) ** b\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 493, "n_words": 67, "vocab_size": 39, "complexity": 5, "nloc": 20, "token_counts": 132, "n_ast_nodes": 208, "n_identifiers": 14, "random_cut": "def __pow__(a, b):\n \n if isinstance(b, numbers.Rational):\n if b.denominator == 1:\n power = b.numerator\n if power >= 0:\n return Fraction(a._numerator ** power,\n a._denominator ** power,\n ", "d_id": 54764, "documentation": { "docstring": "a ** b\n\n If b is not an integer, the result will be a float or complex\n since roots are generally irrational. 
If b is an integer, the\n result will be rational.\n\n ", "n_words": 32, "vocab_size": 21, "n_whitespaces": 60, "language": "en" } }, { "id": 260777, "commit_id": "d593606a8267a325d98b1e9a57de6b7b87442f55", "repo": "scikit-learn", "path": "sklearn/linear_model/_ridge.py", "file_name": "_ridge.py", "fun_name": "fit", "commit_message": "MAINT Parameters validation for RidgeCV and RidgeClassifierCV (#24184)\n\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def fit(self, X, y, sample_weight=None):\n \n self._validate_params()\n\n super().fit(X, y, sample_weight=sample_weight)\n return self\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 39, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def fit(self, X, y, sample_weight=None):\n \n self._validate_params()\n\n super().fit(X, y, sa", "d_id": 76483, "documentation": { "docstring": "Fit Ridge regression model with cv.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Training data. If using GCV, will be cast to float64\n if necessary.\n\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n Target values. Will be cast to X's dtype if necessary.\n\n sample_weight : float or ndarray of shape (n_samples,), default=None\n Individual weights for each sample. If given a float, every sample\n will have the same weight.\n\n Returns\n -------\n self : object\n Fitted estimator.\n\n Notes\n -----\n When sample_weight is provided, the selected hyperparameter may depend\n on whether we use leave-one-out cross-validation (cv=None or cv='auto')\n or another form of cross-validation, because only leave-one-out\n cross-validation takes the sample weights into account when computing\n the validation score.\n ", "n_words": 118, "vocab_size": 89, "n_whitespaces": 296, "language": "en" } }, { "id": 19355, "commit_id": "def289b723e9216830c2a7b2577cb31b55710167", "repo": "PythonRobotics", "path": "PathPlanning/CubicSpline/cubic_spline_planner.py", "file_name": "cubic_spline_planner.py", "fun_name": "calc_position", "commit_message": "enhance cubic spline path doc (#698)\n\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc", "code": "def calc_position(self, x):\n \n if x < self.x[0]:\n return None\n elif x > self.x[-1]:\n return None\n\n i = self.__search_index(x)\n dx = x - self.x[i]\n position = self.a[i] + self.b[i] * dx + \\\n self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0\n\n 
return position\n", "url": "https://github.com/AtsushiSakai/PythonRobotics.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 127, "n_words": 45, "vocab_size": 29, "complexity": 3, "nloc": 10, "token_counts": 97, "n_ast_nodes": 141, "n_identifiers": 11, "random_cut": "def calc_position(self, x):\n \n if x < self.x[0]:\n return None\n elif x > self.x[-1]:\n ", "d_id": 2943, "documentation": { "docstring": "\n Calc `y` position for given `x`.\n\n if `x` is outside the data point's `x` range, return None.\n\n Returns\n -------\n y : float\n y position for given x.\n ", "n_words": 27, "vocab_size": 22, "n_whitespaces": 81, "language": "en" } }, { "id": 286612, "commit_id": "59d8b36bb0467a1a99513b10e8b8471afaa56fd6", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/attribution_model.py", "file_name": "attribution_model.py", "fun_name": "get_daily_sector_prices", "commit_message": "[IMPROVE] Fix Docstring formatting/Fix missing, incomplete type hints (#3412)\n\n* Fixes\r\n\r\n* Update stocks_helper.py\r\n\r\n* update git-actions set-output to new format\r\n\r\n* Update stocks_helper.py\r\n\r\n* Update terminal_helper.py\r\n\r\n* removed LineAnnotateDrawer from qa_view\r\n\r\n* lint\r\n\r\n* few changes\r\n\r\n* updates\r\n\r\n* sdk auto gen modules done\r\n\r\n* Update stocks_helper.py\r\n\r\n* updates to changed imports, and remove first sdk_modules\r\n\r\n* Update generate_sdk.py\r\n\r\n* Update generate_sdk.py\r\n\r\n* pylint\r\n\r\n* revert stocks_helper\r\n\r\n* Update generate_sdk.py\r\n\r\n* Update sdk.py\r\n\r\n* Update generate_sdk.py\r\n\r\n* full auto generation, added sdk.py/controllers creation\r\n\r\n* missed enable forecasting\r\n\r\n* added running black in subprocess after sdk files generation completes\r\n\r\n* removed deleted sdk_arg_logger\r\n\r\n* comment out tests\r\n\r\n* property doc fix\r\n\r\n* clean up\r\n\r\n* Update generate_sdk.py\r\n\r\n* make trailmap classes useable for doc generation\r\n\r\n* Update generate_sdk.py\r\n\r\n* added lineon to trailmap class for linking to func in markdown\r\n\r\n* changed lineon to dict\r\n\r\n* added full_path to trailmap for linking in docs\r\n\r\n* updated portfolio\r\n\r\n* feat: initial files\r\n\r\n* feat: added meta head\r\n\r\n* feat: added funcdef\r\n\r\n* added func_def to trailmap attributes for markdown in docs, added missing type hints to covid functions\r\n\r\n* feat: added view and merged with jaun\r\n\r\n* Update generate_sdk.py\r\n\r\n* Update generate_sdk.py\r\n\r\n* Update generate_sdk.py\r\n\r\n* Update generate_sdk.py\r\n\r\n* init\r\n\r\n* fix returns\r\n\r\n* fix: random stuff\r\n\r\n* fix: random\r\n\r\n* fixed encoding issue on windows\r\n\r\n* fix: generate tabs\r\n\r\n* update\r\n\r\n* Update generate_sdk_markdown.py\r\n\r\n* Create .pydocstyle.ini\r\n\r\n* added type hint classes for views\r\n\r\n* fixes\r\n\r\n* alt, ba\r\n\r\n* alt-economy\r\n\r\n* Update finviz_compare_model.py\r\n\r\n* fixs\r\n\r\n* Update substack_model.py\r\n\r\n* Update generate_sdk.py\r\n\r\n* last of my section\r\n\r\n* porfolio\r\n\r\n* po\r\n\r\n* Update optimizer_model.py\r\n\r\n* fixing more things\r\n\r\n* few more\r\n\r\n* keys done\r\n\r\n* update\r\n\r\n* fixes\r\n\r\n* Update generate_sdk_markdown.py\r\n\r\n* Update generate_sdk_markdown.py\r\n\r\n* mypy forecast fix\r\n\r\n* Update generate_sdk_markdown.py\r\n\r\n* Update generate_sdk_markdown.py\r\n\r\n* Update generate_sdk_markdown.py\r\n\r\n* fixes\r\n\r\n* forecast fixes\r\n\r\n* one more fix\r\n\r\n* 
Update coinbase_model.py\r\n\r\n* Update generate_sdk_markdown.py\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: James Maslek \r\nCo-authored-by: jose-donato \r\nCo-authored-by: andrewkenreich ", "code": "def get_daily_sector_prices(start_date, end_date) -> dict:\n \n # sector ticker information\n sp500_tickers = {\n \"S&P 500 Materials (Sector)\": \"^SP500-15\",\n \"S&P 500 Industrials (Sector)\": \"^SP500-20\",\n \"S&P 500 Consumer Discretionary (Sector)\": \"^SP500-25\",\n \"S&P 500 Consumer Staples (Sector)\": \"^SP500-30\",\n \"S&P 500 Health Care (Sector)\": \"^SP500-35\",\n \"S&P 500 Financials (Sector)\": \"^SP500-40\",\n \"S&P 500 Information Technology (Sector)\": \"^SP500-45\",\n \"S&P 500 Telecommunication Services (Sector)\": \"^SP500-50\",\n \"S&P 500 Utilities (Sector)\": \"^SP500-55\",\n \"S&P 500 Real Estate (Sector)\": \"^SP500-60\",\n \"S&P 500 Energy (Sector)\": \"^GSPE\",\n }\n\n sp500_tickers_data = {} # to store data\n\n for (\n sector,\n sector_ticker,\n ) in sp500_tickers.items(): # iterate thru the sectors\n # load the data required from yfinance\n sp500_tickers_data[\n sector\n ] = { # builds a dictionary entry for the sector with adj close data\n \"sector_data\": yf.download(\n sector_ticker,\n start=start_date,\n end=end_date,\n progress=False,\n )[\"Adj Close\"]\n } # stores the data here\n\n return sp500_tickers_data\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 373, "n_words": 133, "vocab_size": 82, "complexity": 2, "nloc": 45, "token_counts": 109, "n_ast_nodes": 207, "n_identifiers": 14, "random_cut": "def get_daily_sector_prices(start_date, end_date) -> dict:\n \n # sector ticker information\n sp500_tickers = {\n \"S&P 500 Materials (Sector)\": \"^SP500-15\",\n \"S&P 500 Industrials (Sector)\": \"^SP500-20\",\n \"S&P 500 Consumer Discretionary (Sector)\": \"^SP500-25\",\n \"S&P 500 Consumer Staples (Sector)\": \"^SP500-30\",\n \"S&P 500 Health Care (Sector)\": \"^SP500-35\",\n \"S&P 500 Financials (Sector)\": \"^SP500-40\",\n \"S&P 500 Information Technology (Sector)\": \"^SP500-45\",\n \"S&P 500 Telecommunication Services (Sector)\": \"^SP500-50\",\n", "d_id": 85928, "documentation": { "docstring": "\n fetches daily sector prices for S&P500 for a fixed time period\n\n Parameters\n ----------\n start_date : str ('yyyy-mm-dd') or datetime.date\n start date for fetching data\n end_date : str ('yyyy-mm-dd') or datetime.date\n end date for fetching data\n\n Returns\n -------\n sp500_tickers_data : dict\n dictionary of dataframes with SPY daily sector prices\n ", "n_words": 48, "vocab_size": 33, "n_whitespaces": 97, "language": "en" } }, { "id": 189675, "commit_id": "e040bcacd38378386749db18aeba575b93f4ebca", "repo": "manim", "path": "manim/mobject/geometry/arc.py", "file_name": "arc.py", "fun_name": "get_unpositioned_tip", "commit_message": "Improved structure of the :mod:`.mobject` module (#2476)\n\n* group graphing and update its references\r\n\r\n* group text and update its references\r\n\r\n* group opengl and update its references\r\n\r\n* group three_d and update its references\r\n\r\n* group geometry and update (most) references\r\n\r\n* move some chaning.py + updater files into animation\r\n\r\n* refactor arc.py\r\n\r\n* refactor line.py\r\n\r\n* refactor polygram.py\r\n\r\n* refactor tips.py\r\n\r\n* black + isort\r\n\r\n* import new files in __init__.py\r\n\r\n* refactor places where 
geometry was used\r\n\r\n* black + isort again\r\n\r\n* remove unused imports\r\n\r\n* update reference.rst\r\n\r\n* add descriptions to files\r\n\r\n* fix circular imports\r\n\r\n* forgot ArrowTip\r\n\r\n* fix tests\r\n\r\n* fix doctests\r\n\r\n* satisfy mypy?\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix ALL merge conflicts\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* one VMobject import slipped through\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* re-add imports to `manim/opengl/__init__.py`\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix reference manual\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* ignore unknown directive type\r\n\r\n* fix arrow tip imports in docstrings\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Benjamin Hackl ", "code": "def get_unpositioned_tip(self, tip_shape=None, tip_length=None):\n \n from manim.mobject.geometry.tips import ArrowTriangleFilledTip\n\n if tip_shape is None:\n tip_shape = ArrowTriangleFilledTip\n if tip_length is None:\n tip_length = self.get_default_tip_length()\n color = self.get_color()\n style = {\"fill_color\": color, \"stroke_color\": color}\n style.update(self.tip_style)\n tip = tip_shape(length=tip_length, **style)\n return tip\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 123, "n_words": 38, "vocab_size": 27, "complexity": 3, "nloc": 11, "token_counts": 83, "n_ast_nodes": 134, "n_identifiers": 17, "random_cut": "def get_unpositioned_tip(self, tip_shape=None, tip_length=None):\n \n from manim.mobject.geometry.tips import ArrowTriangleFilledTip\n\n if tip_shape is None:\n tip_shape = ArrowTriangleFilledTip\n if tip_length is None:\n tip_length = self.get_default_tip_length()\n color = self.get_color()\n style = {\"fill_c", "d_id": 46162, "documentation": { "docstring": "\n Returns a tip that has been stylistically configured,\n but has not yet been given a position in space.\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 40, "language": "en" } }, { "id": 293309, "commit_id": "d302b0d14e9df9cc46e7e035a0d2be5290182b40", "repo": "core", "path": "tests/components/todoist/test_calendar.py", "file_name": "test_calendar.py", "fun_name": "test_parse_due_date_without_timezone_uses_offset", "commit_message": "Fix todoist parsing due dates for calendar events (#65403)", "code": "def test_parse_due_date_without_timezone_uses_offset():\n \n data: DueDate = {\n \"date\": \"2022-02-02T14:00:00\",\n \"is_recurring\": False,\n \"lang\": \"en\",\n \"string\": \"Feb 2 2:00 PM\",\n \"timezone\": None,\n }\n actual = _parse_due_date(data, timezone_offset=-8)\n assert datetime(2022, 2, 2, 22, 0, 0, tzinfo=dt.UTC) == actual\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 10, "n_whitespaces": 84, "n_words": 34, "vocab_size": 30, "complexity": 1, "nloc": 10, "token_counts": 65, "n_ast_nodes": 109, "n_identifiers": 10, "random_cut": "def test_parse_due_date_without_timezone_uses_offset():\n \n data: DueDate = {\n \"date\": \"2022-02-02T14:00:00\",\n \"is_recurring\": False,\n \"lang\": \"en\",\n \"string\": \"Feb 2 2:00 PM\",\n \"timezone\": None,\n }\n actual = _parse_due_date(data, timezone_offset=-8)\n assert dat", "d_id": 92372, "documentation": { "docstring": "Test due date uses user local timezone offset when it has no timezone.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 32426, "commit_id": "8e8384663d716d4b5a4f510070ff954fc0ba4a52", "repo": "transformers", "path": "src/transformers/models/hubert/modeling_tf_hubert.py", "file_name": "modeling_tf_hubert.py", "fun_name": "serving_output", "commit_message": "Update serving code to enable `saved_model=True` (#18153)\n\n* Add serving_output and serving methods to some vision models\r\n\r\n* Add serving outputs for DeiT\r\n\r\n* Don't convert hidden states - differing shapes\r\n\r\n* Make saveable\r\n\r\n* Fix up\r\n\r\n* Make swin saveable\r\n\r\n* Add in tests\r\n\r\n* Fix funnel tests (can't convert to tensor)\r\n\r\n* Fix numpy call\r\n\r\n* Tidy up a bit\r\n\r\n* Add in hidden states - resnet\r\n\r\n* Remove numpy\r\n\r\n* Fix failing tests - tensor shape and skipping tests\r\n\r\n* Remove duplicated function\r\n\r\n* PR comments - formatting and var names\r\n\r\n* PR comments\r\nAdd suggestions made by Joao Gante:\r\n* Use tf.shape instead of shape_list\r\n* Use @tooslow decorator on tests\r\n* Simplify some of the logic\r\n\r\n* PR comments\r\nAddress Yih-Dar Sheih comments - making tensor names consistent and make types float\r\n\r\n* Types consistent with docs; disable test on swin (slow)\r\n\r\n* CI trigger\r\n\r\n* Change input_features to float32\r\n\r\n* Add serving_output for segformer\r\n\r\n* Fixup\r\n\r\nCo-authored-by: Amy Roberts ", "code": "def serving_output(self, output):\n hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n return TFBaseModelOutput(\n last_hidden_state=output.last_hidden_state, hidden_states=hidden_states, attentions=attentions\n )\n\n\n@add_start_docstrings(\n ,\n HUBERT_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"\"\"TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\"\"\",\n HUBERT_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 69, "n_words": 27, "vocab_size": 22, "complexity": 3, "nloc": 6, "token_counts": 60, "n_ast_nodes": 103, "n_identifiers": 14, "random_cut": "def serving_output(self, output):\n hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states e", "d_id": 5924, "documentation": { "docstring": "TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 259240, "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", "repo": "scikit-learn", "path": "sklearn/utils/_encode.py", "file_name": "_encode.py", "fun_name": "_unique_np", "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* 
ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def _unique_np(values, return_inverse=False, return_counts=False):\n \n uniques = np.unique(\n values, return_inverse=return_inverse, return_counts=return_counts\n )\n\n inverse, 
counts = None, None\n\n if return_counts:\n *uniques, counts = uniques\n\n if return_inverse:\n *uniques, inverse = uniques\n\n if return_counts or return_inverse:\n uniques = uniques[0]\n\n # np.unique will have duplicate missing values at the end of `uniques`\n # here we clip the nans and remove it from uniques\n if uniques.size and is_scalar_nan(uniques[-1]):\n nan_idx = np.searchsorted(uniques, np.nan)\n uniques = uniques[: nan_idx + 1]\n if return_inverse:\n inverse[inverse > nan_idx] = nan_idx\n\n if return_counts:\n counts[nan_idx] = np.sum(counts[nan_idx:])\n counts = counts[: nan_idx + 1]\n\n ret = (uniques,)\n\n if return_inverse:\n ret += (inverse,)\n\n if return_counts:\n ret += (counts,)\n\n return ret[0] if len(ret) == 1 else ret\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 256, "n_words": 111, "vocab_size": 68, "complexity": 12, "nloc": 25, "token_counts": 177, "n_ast_nodes": 276, "n_identifiers": 17, "random_cut": "def _unique_np(values, return_inverse=False, return_counts=False):\n \n uniques = np.unique(\n values, return_inverse=return_inverse, return_counts=return_counts\n )\n\n inverse, counts = None, None\n\n if return_counts:\n *uniques, counts = uniques\n\n if return_inverse:\n *uniques, inverse = uniques\n\n if return_counts or return_inverse:\n uniques = uniques[0]\n\n # np.unique will have duplicate missing values at the end of `uniques`\n # here we clip the nans and remove it from uniques\n if uniques.size and is_scalar_nan(uniques[-1]):\n nan_idx = np.searchsorted(uniques, np.nan)\n uniques = uniques[: nan_idx + 1]\n if return_inverse:\n inverse[inverse > nan_", "d_id": 75671, "documentation": { "docstring": "Helper function to find unique values for numpy arrays that correctly\n accounts for nans. 
See `_unique` documentation for details.", "n_words": 19, "vocab_size": 17, "n_whitespaces": 21, "language": "en" } }, { "id": 301158, "commit_id": "9c3f9491651f409e8b4d0d645115b55b14f06165", "repo": "core", "path": "homeassistant/components/logbook/processor.py", "file_name": "processor.py", "fun_name": "switch_to_live", "commit_message": "Add live streaming logbook websocket endpoint (#72258)\n\nCo-authored-by: Paulus Schoutsen ", "code": "def switch_to_live(self) -> None:\n \n self.logbook_run.event_cache.clear()\n self.logbook_run.context_lookup.clear()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 26, "n_ast_nodes": 46, "n_identifiers": 6, "random_cut": "def switch_to_live(self) -> None:\n \n self.logbook_run.event_cache.clear()\n self.logbook_run.context_lookup.clear()\n", "d_id": 100006, "documentation": { "docstring": "Switch to live stream.\n\n Clear caches so we can reduce memory pressure.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 26, "language": "en" } }, { "id": 337460, "commit_id": "e5c17f36a8b5bf8b9478d416c4a80841a353fb19", "repo": "accelerate", "path": "src/accelerate/test_utils/testing.py", "file_name": "testing.py", "fun_name": "require_comet_ml", "commit_message": "Clean up tests + fix import (#330)", "code": "def require_comet_ml(test_case):\n \n return unittest.skipUnless(is_comet_ml_available(), \"test requires comet_ml\")(test_case)\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def require_comet_ml(test_case):\n \n retu", "d_id": 121062, "documentation": { "docstring": "\n Decorator marking a test that requires comet_ml installed. 
These tests are skipped when comet_ml isn't installed\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 23, "language": "en" } }, { "id": 39972, "commit_id": "67f56d09d70e77701d2ae9a002aa330202da118b", "repo": "dash", "path": "dash/_callback_context.py", "file_name": "_callback_context.py", "fun_name": "triggered_id", "commit_message": "added docstrings", "code": "def triggered_id(self):\n \n component_id = None\n if self.triggered:\n prop_id = self.triggered_prop_ids.first()\n component_id = self.triggered_prop_ids[prop_id]\n return component_id\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 15, "vocab_size": 11, "complexity": 2, "nloc": 6, "token_counts": 33, "n_ast_nodes": 56, "n_identifiers": 7, "random_cut": "def triggered_id(self):\n \n component_id = None\n if self.triggered:\n prop_id = self.triggered_prop_ids.first()\n ", "d_id": 7303, "documentation": { "docstring": "\n Returns the component id (str or dict) of the Input component that triggered the callback.\n\n Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if\n multiple Inputs triggered the callback.\n\n Example usage:\n `if \"btn-1\" == ctx.triggered_id:\n do_something()`\n\n ", "n_words": 47, "vocab_size": 32, "n_whitespaces": 101, "language": "en" } }, { "id": 259703, "commit_id": "69132ebbd39f070590ca01813340b5b12c0d02ab", "repo": "scikit-learn", "path": "sklearn/decomposition/_nmf.py", "file_name": "_nmf.py", "fun_name": "_solve_W", "commit_message": "FEA Online implementation of non-negative matrix factorization (#16948)\n\nCo-authored-by: Tom Dupré la Tour \r\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def _solve_W(self, X, H, max_iter):\n \n avg = np.sqrt(X.mean() / self._n_components)\n W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)\n W_buffer = W.copy()\n\n # Get scaled regularization terms. Done for each minibatch to take into account\n # variable sizes of minibatches.\n l1_reg_W, _, l2_reg_W, _ = self._scale_regularization(X)\n\n for _ in range(max_iter):\n W, *_ = _multiplicative_update_w(\n X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma\n )\n\n W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)\n if self.tol > 0 and W_diff <= self.tol:\n break\n\n W_buffer[:] = W\n\n return W\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 228, "n_words": 80, "vocab_size": 62, "complexity": 4, "nloc": 14, "token_counts": 148, "n_ast_nodes": 224, "n_identifiers": 28, "random_cut": "def _solve_W(self, X, H, max_iter):\n \n avg = np.sqrt(X.mean() / self._n_components)\n W = np.full((X.shape[0], self._n_components), avg", "d_id": 75878, "documentation": { "docstring": "Minimize the objective function w.r.t W.\n\n Update W with H being fixed, until convergence. 
This is the heart\n of `transform` but it's also used during `fit` when doing fresh restarts.\n ", "n_words": 30, "vocab_size": 29, "n_whitespaces": 51, "language": "en" } }, { "id": 211032, "commit_id": "c84153a355d9855fe55cf51d203b8b24e7d884e5", "repo": "PaddleDetection", "path": "deploy/pptracking/python/mot/tracker/ocsort_tracker.py", "file_name": "ocsort_tracker.py", "fun_name": "convert_bbox_to_z", "commit_message": "[MOT] Add OC_SORT tracker (#6272)\n\n* add ocsort tracker\r\n\r\n* add ocsort deploy\r\n\r\n* merge develop\r\n\r\n* fix ocsort tracker codes\r\n\r\n* fix doc, test=document_fix\r\n\r\n* fix doc, test=document_fix", "code": "def convert_bbox_to_z(bbox):\n \n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w / 2.\n y = bbox[1] + h / 2.\n s = w * h # scale is just area\n r = w / float(h + 1e-6)\n return np.array([x, y, s, r]).reshape((4, 1))\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 74, "n_words": 49, "vocab_size": 31, "complexity": 1, "nloc": 8, "token_counts": 91, "n_ast_nodes": 133, "n_identifiers": 12, "random_cut": "def convert_bbox_to_z(bbox):\n \n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w / 2.\n y = bbox[1] + h / 2.\n s = w * h # scale is just area\n ", "d_id": 53006, "documentation": { "docstring": "\n Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form\n [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is\n the aspect ratio\n ", "n_words": 34, "vocab_size": 22, "n_whitespaces": 51, "language": "en" } }, { "id": 24770, "commit_id": "b7d99acd2e06945c789312cda70d60b7c8a5b0d0", "repo": "PaddleOCR", "path": "ppstructure/recovery/table_process.py", "file_name": "table_process.py", "fun_name": "remove_whitespace", "commit_message": "update recovery (#7259)\n\n* update recovery\r\n\r\n* update recovery\r\n\r\n* update recovery\r\n\r\n* update recovery\r\n\r\n* update recovery", "code": "def remove_whitespace(string, leading=False, trailing=False):\n \n # Remove any leading new line characters along with any surrounding white space\n if leading:\n string = re.sub(r'^\\s*\\n+\\s*', '', string)\n\n # Remove any trailing new line characters along with any surrounding white space\n if trailing:\n string = re.sub(r'\\s*\\n+\\s*$', '', string)\n\n # Replace new line characters and absorb any surrounding space.\n string = re.sub(r'\\s*\\n\\s*', ' ', string)\n # TODO need some way to get rid of extra spaces in e.g. 
text text\n return re.sub(r'\\s+', ' ', string)\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 126, "n_words": 82, "vocab_size": 50, "complexity": 3, "nloc": 7, "token_counts": 71, "n_ast_nodes": 136, "n_identifiers": 6, "random_cut": "def remove_whitespace(string, leading=False, trailing=False):\n \n # Remove any leading new line characters along with any surrounding white space\n if leading:\n string = re.sub(r'^\\s*\\n+\\s*', '', string)\n\n # Remove an", "d_id": 4786, "documentation": { "docstring": "Remove white space from a string.\n Args:\n string(str): The string to remove white space from.\n leading(bool, optional): Remove leading new lines when True.\n trailing(bool, optional): Remove trailing new lines when False.\n Returns:\n str: The input string with new line characters removed and white space squashed.\n Examples:\n Single or multiple new line characters are replaced with space.\n >>> remove_whitespace(\"abc\\\\ndef\")\n 'abc def'\n >>> remove_whitespace(\"abc\\\\n\\\\n\\\\ndef\")\n 'abc def'\n New line characters surrounded by white space are replaced with a single space.\n >>> remove_whitespace(\"abc \\\\n \\\\n \\\\n def\")\n 'abc def'\n >>> remove_whitespace(\"abc \\\\n \\\\n \\\\n def\")\n 'abc def'\n Leading and trailing new lines are replaced with a single space.\n >>> remove_whitespace(\"\\\\nabc\")\n ' abc'\n >>> remove_whitespace(\" \\\\n abc\")\n ' abc'\n >>> remove_whitespace(\"abc\\\\n\")\n 'abc '\n >>> remove_whitespace(\"abc \\\\n \")\n 'abc '\n Use ``leading=True`` to remove leading new line characters, including any surrounding\n white space:\n >>> remove_whitespace(\"\\\\nabc\", leading=True)\n 'abc'\n >>> remove_whitespace(\" \\\\n abc\", leading=True)\n 'abc'\n Use ``trailing=True`` to remove trailing new line characters, including any surrounding\n white space:\n >>> remove_whitespace(\"abc \\\\n \", trailing=True)\n 'abc'\n ", "n_words": 166, "vocab_size": 73, "n_whitespaces": 509, "language": "en" } }, { "id": 320780, "commit_id": "a20bb67a878b2e68abf8268c1b0a27f018d01352", "repo": "qutebrowser", "path": "qutebrowser/completion/completionwidget.py", "file_name": "completionwidget.py", "fun_name": "selectionChanged", "commit_message": "mypy: Upgrade to PyQt5-stubs 5.15.6.0\n\nFor some unknown reason, those new stubs cause a *lot* of things now to be\nchecked by mypy which formerly probably got skipped due to Any being implied\nsomewhere.\n\nThe stubs themselves mainly improved, with a couple of regressions too.\n\nIn total, there were some 337 (!) new mypy errors. This commit fixes almost all\nof them, and the next commit improves a fix to get things down to 0 errors\nagain.\n\nOverview of the changes:\n\n==== qutebrowser/app.py\n\n- Drop type ignore due to improved stubs.\n\n==== qutebrowser/browser/browsertab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n This is debatable: I suppose the abstract stuff shouldn't need to know\n anything about the concrete backends at all. But it seems like we cut some\n corners when initially implementing things, and put some code in browsertab.py\n just because the APIs of both backends happened to be compatible. Perhaps\n something to reconsider once we drop QtWebKit and hopefully implement a dummy\n backend.\n\n- Add an additional assertion in AbstractAction.run_string. 
This is already\n covered by the isinstance(member, self.action_base) above it, but that's too\n dynamic for mypy to understand.\n\n- Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x\n and y components), not a single int.\n\n- Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x\n and y components), not a single int.\n\n- Fix the argument types of AbstractScroller.to_perc, as it's possible to pass\n fractional percentages too.\n\n- Specify the type for AbstractHistoryPrivate._history. See above (_widget) re\n this being debatable.\n\n- Fix the return type of AbstractTabPrivate.event_target(), which can be None\n (see #3888).\n\n- Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS\n return value), not None.\n\n- Fix the argument type for AbstractTabPrivate.toggle_inspector: position can\n be None to use the last used position.\n\n- Declare the type of sub-objects of AbstractTab.\n\n- Fix the return value of AbstractTab.icon(), which is the QIcon, not None.\n\n==== qutebrowser/browser/commands.py\n\n- Make sure the active window is a MainWindow (with a .win_id attribute).\n\n==== qutebrowser/browser/downloadview.py\n\n- Add _model() which makes sure that self.model() is a DownloadModel, not None\n or any other model. This is needed because other methods access a variety of\n custom attributes on it, e.g. last_index().\n\n==== qutebrowser/browser/greasemonkey.py\n\n- Add an ignore for AbstractDownload.requested_url which we patch onto the\n downloads. Probably would be nicer to add it as a proper attribute which always\n gets set by the DownloadManager.\n\n==== qutebrowser/browser/hints.py\n\n- Remove type ignores for QUrl.toString().\n- Add a new type ignore for combining different URL flags (which works, but is\n not exactly type safe... still probably a regression in the stubs).\n- Make sure the things we get back from self._get_keyparser are what we actually\n expect. Probably should introduce a TypedDict (and/or overloads for\n _get_keyparser with typing.Literal) to teach mypy about the exact return value.\n See #7098.\n This is needed because we access Hint/NormalKeyParser-specific attributes such\n as .set_inhibited_timout() or .update_bindings().\n\n==== qutebrowser/browser/inspector.py\n\n- Similar changes than in browsertab.py to make some types where we share API\n (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next\n commit.\n\n==== qutebrowser/browser/network/pac.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/qtnetworkdownloads.py\n\n- Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an\n AbstractDownload), so that we can call ._uses_nam() on it.\n\n==== qutebrowser/browser/qutescheme.py\n\n- Remove now unneeded type ignore for QUrl flags.\n\n==== qutebrowser/browser/urlmarks.py\n\n- Specify the type of UrlMarkManager._lineparser, as those only get initialized\n in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist.\n\n==== qutebrowser/browser/webelem.py\n\n- New casts to turn single KeyboardModifier (enum) entries into\n KeyboardModifiers (flags). 
Might not be needed anymore with Qt 6.\n- With that, casting the final value is now unneeded.\n\n==== qutebrowser/browser/webengine/notification.py\n\n- Remove now unneeded type ignore for signal.\n- Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished()\n is a QProcess, not just any QObject.\n\n==== qutebrowser/browser/webengine/webenginedownloads.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webengine/webengineelem.py\n\n- Specify the type of WebEngineElement._tab.\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webengineinspector.py\n\n- See changes to inspector.py and next commit.\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/webengine/webenginequtescheme.py\n\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webenginesettings.py\n\n- Ignore access of .setter attribute which we patch onto QWebEngineProfile.\n Would be nice to have a subclass or wrapper-class instead.\n\n==== qutebrowser/browser/webengine/webenginetab.py\n\n- Specified the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Remove some now-unneeded type ignores for creating FindFlags.\n- Specify more concrete types for WebEngineTab members where we actually need to\n access WebEngine-specific attributes.\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webengine/webview.py\n\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webkit/network/networkreply.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webkit/webkitinspector.py\n\n- See changes to inspector.py and next commit.\n\n==== qutebrowser/browser/webkit/webkittab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Add a type ignore for WebKitAction because our workaround needs to\n treat them as ints (which is allowed by PyQt, even if not type-safe).\n- Add new ignores for findText calls: The text is a QString and can be None; the\n flags are valid despite mypy thinking they aren't (stubs regression?).\n- Specify the type for WebKitHistoryPrivate._history, because we access\n WebKit-specific attributes. See above (_widget) re this being debatable.\n- Make mypy aware that .currentFrame() and .frameAt() can return None (stubs\n regression?).\n- Make sure the .page() and .page().networkAccessManager() are our subclasses\n rather than the more generic QtWebKit objects, as we use custom attributes.\n- Add new type ignores for signals (stubs regression!)\n\n==== qutebrowser/browser/webkit/webpage.py\n\n- Make sure the .networkAccessManager() is our subclass rather than the more\n generic QtWebKit object, as we use custom attributes.\n- Replace a cast by a type ignore. 
The cast didn't work anymore.\n\n==== qutebrowser/browser/webkit/webview.py\n\n- Make sure the .page() is our subclass rather than the more generic QtWebKit\n object, as we use custom attributes.\n\n==== qutebrowser/commands/userscripts.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/completion/completer.py\n\n- Add a new _completion() getter (which ensures it actually gets the completion\n view) rather than accessing the .parent() directly (which could be any QObject).\n\n==== qutebrowser/completion/completiondelegate.py\n\n- Make sure self.parent() is a CompletionView (no helper method as there is only\n one instance).\n- Remove a now-unneeded type ignore for adding QSizes.\n\n==== qutebrowser/completion/completionwidget.py\n\n- Add a ._model() getter which ensures that we get a CompletionModel (with\n custom attributes) rather than Qt's .model() which can be any QAbstractItemModel\n (or None).\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/completion/models/completionmodel.py\n\n- Remove now unneeded type ignores for signals.\n- Ignore a complaint about .set_pattern() not being defined. Completion\n categories don't share any common parent class, so it would be good to introduce\n a typing.Protocol for this. See #7098.\n\n==== qutebrowser/components/misccommands.py\n\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/components/readlinecommands.py\n\n- Make sure QApplication.instance() is a QApplication (and not just a\n QCoreApplication). This includes the former \"not None\" check.\n\n==== qutebrowser/components/scrollcommands.py\n\n- Add basic annotation for \"funcs\" dict. Could have a callable protocol to\n specify it needs a count kwarg, see #7098.\n\n==== qutebrowser/config/stylesheet.py\n\n- Correctly specify that stylesheet apply to QWidgets, not any QObject.\n- Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy\n about this with overloads and protocols (stylesheet for set_register being None\n => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not\n worth the troble. See #7098.\n\n==== qutebrowser/keyinput/keyutils.py\n\n- Remove some now-unneeded type ignores and add a cast for using a single enum\n value as flags. Might need to look at this again with Qt 6 support.\n\n==== qutebrowser/keyinput/modeman.py\n\n- Add a FIXME for using a TypedDict, see comments for hints.py above.\n\n==== qutebrowser/mainwindow/mainwindow.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n- Improve where we cast from WindowType to WindowFlags, no int needed\n- Use new .tab_bar() getter, see below.\n\n==== qutebrowser/mainwindow/prompt.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n\n==== qutebrowser/mainwindow/statusbar/bar.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/statusbar/command.py\n\n- Fix type for setText() override (from QLineEdit): text can be None\n (QString in C++).\n\n==== qutebrowser/mainwindow/statusbar/url.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/tabbedbrowser.py\n\n- Specify that TabDeque manages browser tabs, not any QWidgets. 
It accesses\n AbstractTab-specific attributes.\n- Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access\n .maybe_hide.\n- Fix the annotations for stored marks: Scroll positions are a QPoint, not int.\n- Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and\n .widget(), which ensures that the return values are valid AbstractTabs (or None\n for _tab_by_idx). This is needed because we access AbstractTab-specific\n attributes.\n- For some places, where the tab can be None, continue using .currentTab() but\n add asserts.\n- Remove some now-unneeded [unreachable] ignores, as mypy knows about the None\n possibility now.\n\n==== qutebrowser/mainwindow/tabwidget.py\n\n- Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and\n .widget() are of type TabBar and AbstractTab, respectively.\n- Add additional assertions where we expect ._tab_by_idx() to never be None.\n- Remove dead code in get_tab_fields for handling a None y scroll position. I\n was unable to find any place in the code where this could be set to None.\n- Remove some now-unneeded type ignores and casts, as mypy now knows that\n _type_by_idx() could be None.\n- Work around a strange instance where mypy complains about not being able to\n find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility,\n despite it clearly being shown as a bool *inside* that class without any\n annotation.\n- Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in\n fact a TabWidget.\n\n==== qutebrowser/misc/crashsignal.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/editor.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/ipc.py\n\n- Remove now unneeded type ignores for signals.\n- Add new type ignores for .error() which is both a signal and a getter\n (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was\n renamed to errorOccurred in 5.15.\n\n==== qutebrowser/misc/objects.py\n\n- Make sure mypy knows that objects.app is our custom Application (with custom\n attributes) rather than any QApplication.\n\n==== qutebrowser/utils/objreg.py\n\n- Ignore attr-defined for .win_id attributes. 
Maybe could add a typing.Protocol,\n but ideally, the whole objreg stuff should die one day anyways.\n\n==== tests/unit/completion/test_completer.py\n\n- Make CompletionWidgetStub inherit from CompletionView so that it passes the\n new isinstance() asserts in completer.py (see above).", "code": "def selectionChanged(self, selected, deselected):\n \n if not self._active:\n return\n super().selectionChanged(selected, deselected)\n indexes = selected.indexes()\n if not indexes:\n return\n data = str(self._model().data(indexes[0]))\n self.selection_changed.emit(data)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 92, "n_words": 21, "vocab_size": 17, "complexity": 3, "nloc": 9, "token_counts": 65, "n_ast_nodes": 108, "n_identifiers": 12, "random_cut": "def selectionChanged(self, selected, deselected):\n \n if not self._active:\n return\n super().selectionChanged(selected, deselected)\n", "d_id": 117347, "documentation": { "docstring": "Extend selectionChanged to call completers selection_changed.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 198300, "commit_id": "2a1afca9477eb781f16d5d6b63fa37abed7740a3", "repo": "sympy", "path": "sympy/physics/vector/vector.py", "file_name": "vector.py", "fun_name": "__mul__", "commit_message": "Use sympify less", "code": "def __mul__(self, other):\n \n\n newlist = [v for v in self.args]\n other = sympify(other)\n for i, v in enumerate(newlist):\n newlist[i] = (other * newlist[i][0], newlist[i][1])\n return Vector(newlist)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 72, "n_words": 26, "vocab_size": 21, "complexity": 3, "nloc": 6, "token_counts": 64, "n_ast_nodes": 97, "n_identifiers": 10, "random_cut": "def __mul__(self, other):\n \n\n newlist = [v for v in self.args]\n other = sympif", "d_id": 48862, "documentation": { "docstring": "Multiplies the Vector by a sympifyable expression.\n\n Parameters\n ==========\n\n other : Sympifyable\n The scalar to multiply this Vector with\n\n Examples\n ========\n\n >>> from sympy.physics.vector import ReferenceFrame\n >>> from sympy import Symbol\n >>> N = ReferenceFrame('N')\n >>> b = Symbol('b')\n >>> V = 10 * b * N.x\n >>> print(V)\n 10*b*N.x\n\n ", "n_words": 50, "vocab_size": 38, "n_whitespaces": 152, "language": "en" } }, { "id": 146201, "commit_id": "1100c982223757f697a410a0d0c3d8bf3ff9c805", "repo": "ray", "path": "python/ray/serve/application.py", "file_name": "application.py", "fun_name": "to_dict", "commit_message": "[serve] Implement Serve Application object (#22917)\n\nThe concept of a Serve Application, a data structure containing all information needed to deploy Serve on a Ray cluster, has surfaced during recent design discussions. 
This change introduces a formal Application data structure and refactors existing code to use it.", "code": "def to_dict(self) -> Dict:\n \n\n return serve_application_to_schema(self._deployments.values()).dict()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 10, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 7, "random_cut": "def to_dict(self) -> Dict:\n \n\n return serve_application_to_schema(self._deployments.values()).dict()\n", "d_id": 33631, "documentation": { "docstring": "Returns this Application's deployments as a dictionary.\n\n This dictionary adheres to the Serve REST API schema. It can be deployed\n via the Serve REST API.\n\n Returns:\n Dict: The Application's deployments formatted in a dictionary.\n ", "n_words": 34, "vocab_size": 27, "n_whitespaces": 73, "language": "en" } }, { "id": 68207, "commit_id": "62e72752dce92792166f9b734c2306adb4b41147", "repo": "erpnext", "path": "erpnext/hr/doctype/shift_assignment/shift_assignment.py", "file_name": "shift_assignment.py", "fun_name": "get_shift_details", "commit_message": "refactor: handle shifts spanning over 2 different days", "code": "def get_shift_details(shift_type_name, for_timestamp=None):\n\t\n\tif not shift_type_name:\n\t\treturn None\n\tif not for_timestamp:\n\t\tfor_timestamp = now_datetime()\n\n\tshift_type = frappe.get_doc('Shift Type', shift_type_name)\n\tshift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)\n\n\tif shift_type.start_time > shift_type.end_time:\n\t\t# shift spans accross 2 different days\n\t\tif get_time(for_timestamp.time()) >= get_time(shift_actual_start):\n\t\t\t# if for_timestamp is greater than start time, its in the first day\n\t\t\tstart_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time\n\t\t\tfor_timestamp = for_timestamp + timedelta(days=1)\n\t\t\tend_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time\n\t\telif get_time(for_timestamp.time()) < get_time(shift_actual_start):\n\t\t\t# if for_timestamp is less than start time, its in the second day\n\t\t\tend_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time\n\t\t\tfor_timestamp = for_timestamp + timedelta(days=-1)\n\t\t\tstart_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time\n\telse:\n\t\t# start and end times fall on the same day\n\t\tstart_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time\n\t\tend_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time\n\n\tactual_start = start_datetime - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)\n\tactual_end = end_datetime + timedelta(minutes=shift_type.allow_check_out_after_shift_end_time)\n\n\treturn frappe._dict({\n\t\t'shift_type': shift_type,\n\t\t'start_datetime': start_datetime,\n\t\t'end_datetime': end_datetime,\n\t\t'actual_start': actual_start,\n\t\t'actual_end': actual_end\n\t})\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 117, "n_words": 149, "vocab_size": 75, "complexity": 6, "nloc": 28, "token_counts": 282, "n_ast_nodes": 460, "n_identifiers": 25, "random_cut": "def get_shift_details(shift_type_name, for_timestamp=None):\n\t\n\tif 
not shift_type_name:\n\t\treturn None\n\tif not for_timestamp:\n\t\tfor_timestamp = now_datetime()\n\n\tshift_type = frappe.get_doc('Shift Type', shift_type_name)\n\tshift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)\n\n\tif shift_type.start_time > shift_type.end_time:\n\t\t# shift spans accross 2 different days\n\t\tif get_time(for_timestamp.time()) >= get_time(shift_actual_start):\n\t\t\t# if for_timestamp is greater than start time, its in the first day\n\t\t\tstart_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time\n\t\t\tfor_timestamp = for_timestamp + timedelta(days=1)\n\t\t\tend_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time\n\t\telif get_time(for_timestamp.time()) < get_time(shift_actual_start):\n\t\t\t# if for_timestamp is less than start time, its in the second day\n\t\t\tend_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time\n\t\t\tfor_timestamp = for_timestamp + timedelta(days=-1)\n\t\t\tstart_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time\n\telse:\n\t\t# start and end times fall on the same day\n\t\tstart_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time\n\t\tend_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time\n\n\tactual_start = start_datetime - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)\n\tactual_end = end_datetime + timedelta(minutes=shift_type.allow_check_out_after_shift_end_time)\n\n\treturn frappe._dict({\n\t\t'shift_type': shift_type,\n\t\t'start_datetime': start_datetime,\n\t\t'end_datetime': end_da", "d_id": 14740, "documentation": { "docstring": "Returns Shift Details which contain some additional information as described below.\n\t'shift_details' contains the following keys:\n\t 'shift_type' - Object of DocType Shift Type,\n\t 'start_datetime' - Date and Time of shift start on given date,\n\t 'end_datetime' - Date and Time of shift end on given date,\n\t 'actual_start' - datetime of shift start after adding 'begin_check_in_before_shift_start_time',\n\t 'actual_end' - datetime of shift end after adding 'allow_check_out_after_shift_end_time'(None is returned if this is zero)\n\n\t:param shift_type_name: shift type name for which shift_details is required.\n\t:param for_timestamp: DateTime value on which shift_details are required\n\t", "n_words": 88, "vocab_size": 57, "n_whitespaces": 119, "language": "en" } }, { "id": 108485, "commit_id": "031093e6f05496f55616a1fa2f39e573fea02828", "repo": "matplotlib", "path": "lib/matplotlib/testing/__init__.py", "file_name": "__init__.py", "fun_name": "subprocess_run_helper", "commit_message": "Tweak subprocess_run_helper.\n\nOn general grounds, an API like\n`subprocess_run_helper(func, *args, timeout, **extra_env)`\nis problematic because it prevents one from passing an environment\nvariable called \"timeout\".\n\nInstead, pass the extra environment variables as a dict, without\nunpacking.\n\n(Technically this has been released in 3.5.2 as public API, but 1) I'm\nnot really sure it should have been a public API to start with (should\nwe deprecate it and make it private?), and 2) hopefully tweaking that in\n3.5.3 with no deprecation is not going to disrupt anyone... 
I can still\nput in a changelog entry if that's preferred.)", "code": "def subprocess_run_helper(func, *args, timeout, extra_env=None):\n \n target = func.__name__\n module = func.__module__\n proc = subprocess.run(\n [sys.executable,\n \"-c\",\n f\"from {module} import {target}; {target}()\",\n *args],\n env={**os.environ, \"SOURCE_DATE_EPOCH\": \"0\", **(extra_env or {})},\n timeout=timeout, check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n return proc\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 116, "n_words": 35, "vocab_size": 32, "complexity": 2, "nloc": 14, "token_counts": 92, "n_ast_nodes": 151, "n_identifiers": 22, "random_cut": "def subprocess_run_helper(func, *args, timeout, extra_env=None):\n \n target = func.__name__\n module = func.__module__\n proc = subprocess.run(\n [sys.executable,\n \"-c\",\n f\"from {module} import {target}; {target}()\",\n *args],\n env={**os.environ, \"SOURCE_DATE_EPOCH\": \"0\", **(extra_env or {})},\n timeout=timeout, check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n return proc\n\n", "d_id": 23212, "documentation": { "docstring": "\n Run a function in a sub-process.\n\n Parameters\n ----------\n func : function\n The function to be run. It must be in a module that is importable.\n *args : str\n Any additional command line arguments to be passed in\n the first argument to ``subprocess.run``.\n extra_env : dict[str, str]\n Any additional environment variables to be set for the subprocess.\n ", "n_words": 56, "vocab_size": 39, "n_whitespaces": 107, "language": "en" } }, { "id": 118567, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/streamlit/server/server.py", "file_name": "server.py", "fun_name": "add_preheated_app_session", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def add_preheated_app_session(self) -> None:\n \n session = self._create_or_reuse_app_session(ws=None)\n session.handle_rerun_script_request(is_preheat=True)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 8, "token_counts": 26, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def add_preheated_app_session(self) -> None:\n \n session = self._create_or_reuse_app_session(ws=None)\n session.handle_rerun_script_request(is_preheat=True)\n", "d_id": 26297, "documentation": { "docstring": "Register a fake browser with the server and run the script.\n\n This is used to start running the user's script even before the first\n browser connects.\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 47, "language": "en" } }, { "id": 181815, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/base.py", "file_name": "base.py", "fun_name": "score", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def score(self, testing_features, testing_target):\n \n if self.fitted_pipeline_ is None:\n raise RuntimeError(\n \"A pipeline has not yet been 
optimized. Please call fit() first.\"\n )\n\n testing_features, testing_target = self._check_dataset(\n testing_features, testing_target, sample_weight=None\n )\n\n # If the scoring function is a string, we must adjust to use the sklearn\n # scoring interface\n if isinstance(self.scoring_function, str):\n scorer = SCORERS[self.scoring_function]\n elif callable(self.scoring_function):\n scorer = self.scoring_function\n else:\n raise RuntimeError(\n \"The scoring function should either be the name of a scikit-learn scorer or a scorer object\"\n )\n score = scorer(\n self.fitted_pipeline_,\n testing_features.astype(np.float64),\n testing_target.astype(np.float64),\n )\n return score\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 312, "n_words": 88, "vocab_size": 64, "complexity": 4, "nloc": 22, "token_counts": 105, "n_ast_nodes": 168, "n_identifiers": 17, "random_cut": "def score(self, testing_features, testing_target):\n \n if self.fitted_pipeline_ is None:\n raise RuntimeError(\n \"A pipeline has not yet been optimized. Please call fit() first.\"\n )\n\n testing_feature", "d_id": 43599, "documentation": { "docstring": "Return the score on the given testing data using the user-specified scoring function.\n\n Parameters\n ----------\n testing_features: array-like {n_samples, n_features}\n Feature matrix of the testing set\n testing_target: array-like {n_samples}\n List of class labels for prediction in the testing set\n\n Returns\n -------\n accuracy_score: float\n The estimated test set accuracy\n\n ", "n_words": 47, "vocab_size": 37, "n_whitespaces": 136, "language": "en" } }, { "id": 289912, "commit_id": "2a2e097e174204e3710161898b4302e1bceca1e5", "repo": "core", "path": "tests/util/test_unit_system.py", "file_name": "test_unit_system.py", "fun_name": "test_as_dict", "commit_message": "Use unit enums in unit utilities (#81030)", "code": "def test_as_dict():\n \n expected = {\n LENGTH: UnitOfLength.KILOMETERS,\n WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND,\n TEMPERATURE: UnitOfTemperature.CELSIUS,\n VOLUME: UnitOfVolume.LITERS,\n MASS: UnitOfMass.GRAMS,\n PRESSURE: UnitOfPressure.PA,\n ACCUMULATED_PRECIPITATION: UnitOfLength.MILLIMETERS,\n }\n\n assert expected == METRIC_SYSTEM.as_dict()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 85, "n_words": 24, "vocab_size": 23, "complexity": 1, "nloc": 11, "token_counts": 59, "n_ast_nodes": 88, "n_identifiers": 24, "random_cut": "def test_as_dict():\n \n expected = {\n LENGTH: UnitOfLength.KILOMETERS,\n WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND,\n TEMPERATURE: UnitOfTemperature.CELSIUS,\n VOLUME: UnitOfVolume.LITERS,\n MASS: U", "d_id": 89038, "documentation": { "docstring": "Test that the as_dict() method returns the expected dictionary.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 100384, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "plugins/train/model/_base.py", "file_name": "_base.py", "fun_name": "_get_inputs", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - 
Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _get_inputs(self):\n \n logger.debug(\"Getting inputs\")\n if len(self.input_shape) == 3:\n input_shapes = [self.input_shape, self.input_shape]\n else:\n input_shapes = self.input_shape\n inputs = [Input(shape=shape, name=f\"face_in_{side}\")\n for side, shape in zip((\"a\", \"b\"), input_shapes)]\n logger.debug(\"inputs: %s\", inputs)\n return inputs\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 120, "n_words": 32, "vocab_size": 28, "complexity": 3, "nloc": 10, "token_counts": 80, "n_ast_nodes": 137, "n_identifiers": 13, "random_cut": "def _get_inputs(self):\n \n logger.debug(\"Getting inputs\")\n if len(self.input_shape) == 3:\n input_shapes = [self.input_shape, self.input_shape]\n else:\n input_shapes = self.in", "d_id": 19870, "documentation": { "docstring": " Obtain the standardized inputs for the model.\n\n The inputs will be returned for the \"A\" and \"B\" sides in the shape as defined by\n :attr:`input_shape`.\n\n Returns\n -------\n list\n A list of :class:`keras.layers.Input` tensors. This will be a list of 2 tensors (one\n for each side) each of shapes :attr:`input_shape`.\n ", "n_words": 49, "vocab_size": 35, "n_whitespaces": 114, "language": "en" } }, { "id": 268037, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/python_requirements.py", "file_name": "python_requirements.py", "fun_name": "collect_units_install", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def collect_units_install() -> t.List[PipInstall]:\n \n requirements_paths = [] # type: t.List[t.Tuple[str, str]]\n constraints_paths = [] # type: t.List[t.Tuple[str, str]]\n\n path = os.path.join(data_context().content.unit_path, 'requirements.txt')\n requirements_paths.append((data_context().content.root, path))\n\n path = os.path.join(data_context().content.unit_path, 'constraints.txt')\n constraints_paths.append((data_context().content.root, path))\n\n return collect_install(requirements_paths, constraints_paths)\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 59, "n_words": 33, "vocab_size": 22, "complexity": 1, "nloc": 9, "token_counts": 95, "n_ast_nodes": 158, "n_identifiers": 15, "random_cut": "def collect_units_install() -> t.List[PipInstall]:\n \n requirements_paths = [] # type: t.List[t.Tuple[str, str]]\n constraints_paths = [] # type: t.List[t.Tuple[str, str]]\n\n path = os.path.join(data_context().content.unit_path, 'requirements.txt')\n requirements_paths.append((data_context().content.root, path))\n\n path = os.path.join(data_context().content.unit_path, 'constraints.txt')\n constraints_paths.append((data_context().content.root, path))\n\n return collect_install(requirements_paths, constraints_paths)\n\n", "d_id": 79311, "documentation": { "docstring": "Return details necessary for the specified units pip install(s).", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 34527, "commit_id": "99a2771189321c826ff55d161a7cfedadd4023c7", "repo": "transformers", "path": "src/transformers/models/yoso/modeling_yoso.py", "file_name": "modeling_yoso.py", "fun_name": "forward", "commit_message": "Add YOSO (#15091)\n\n* Add cookiecutter files\r\n\r\n* Add cuda kernels and cpp files\r\n\r\n* Update modeling_yoso.py\r\n\r\n* Add .h files\r\n\r\n* Update configuration_yoso.py\r\n\r\n* Updates\r\n\r\n* Remove tokenizer\r\n\r\n* Code quality\r\n\r\n* Update modeling_yoso.py\r\n\r\n* Update modeling_yoso.py\r\n\r\n* Fix failing test\r\n\r\n* Update modeling_yoso.py\r\n\r\n* Fix code quality\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review and fix integration tests\r\n\r\n* Update src/transformers/models/yoso/modeling_yoso.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Apply suggestions from code review\r\n\r\n* Fix copied from statement\r\n\r\n* Fix docstring\r\n\r\n* Fix code quality\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply 
suggestions and fix mask\r\n\r\n* Apply suggestions from code review\r\n\r\n* Fix code quality\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Fix docstrings\r\n\r\n* Fix code quality\r\n\r\n* Remove trailing whitespace\r\n\r\n* Update yoso.mdx\r\n\r\n* Move kernel loading to YosoEncoder\r\n\r\n* make style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/yoso/modeling_yoso.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Add short summary to docs\r\n\r\n* Update docs/source/model_doc/yoso.mdx\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update yoso.mdx\r\n\r\n* Update docs/source/model_doc/yoso.mdx\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Remove CausalLM model and add copied from\r\n\r\n* Remove autoregressive code\r\n\r\n* Remove unused imports\r\n\r\n* add copied from for embeddings\r\n\r\n* Fix code quality\r\n\r\n* Update docs/source/model_doc/yoso.mdx\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Apply suggestion from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Patrick von Platen ", "code": "def forward(self, features, **kwargs):\n x = features[:, 0, :] # take token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = ACT2FN[self.config.hidden_act](x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n ,\n YOSO_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"\"\"YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.\"\"\",\n YOSO_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 90, "n_words": 37, "vocab_size": 25, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 120, "n_identifiers": 13, "random_cut": "def forward(self, features, **kwargs):\n x = features[:, 0, :] # take token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = ACT2FN[self.config.hidden_act](x)\n x = self.dropout(x)\n x = self.out_proj(x)\n ", "d_id": 6289, "documentation": { "docstring": "YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. 
for GLUE tasks.", "n_words": 23, "vocab_size": 21, "n_whitespaces": 25, "language": "en" } }, { "id": 205896, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/subqueries.py", "file_name": "subqueries.py", "fun_name": "delete_batch", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def delete_batch(self, pk_list, using):\n \n # number of objects deleted\n num_deleted = 0\n field = self.get_meta().pk\n for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):\n self.clear_where()\n self.add_filter(\n f\"{field.attname}__in\",\n pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE],\n )\n num_deleted += self.do_query(\n self.get_meta().db_table, self.where, using=using\n )\n return num_deleted\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 181, "n_words": 39, "vocab_size": 34, "complexity": 2, "nloc": 13, "token_counts": 83, "n_ast_nodes": 136, "n_identifiers": 18, "random_cut": "def delete_batch(self, pk_list, using):\n \n # number of objects deleted\n num_deleted = 0\n field = self.get_meta().pk\n for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):\n self.clear_where()\n self.add_filter(\n f\"{field.attname}__in\",\n pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE],\n )\n num_deleted += self.do_query(\n self.get_meta().db_table, self.where, using=using\n ", "d_id": 51270, "documentation": { "docstring": "\n Set up and execute delete queries for all the objects in pk_list.\n\n More than one physical query may be executed if there are a\n lot of values in pk_list.\n ", "n_words": 29, "vocab_size": 27, "n_whitespaces": 58, "language": "en" } }, { "id": 267959, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/coverage_util.py", "file_name": "coverage_util.py", "fun_name": "generate_ansible_coverage_config", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def generate_ansible_coverage_config() -> str:\n \n coverage_config = \n\n return coverage_config\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 18, "token_counts": 12, "n_ast_nodes": 25, "n_identifiers": 3, "random_cut": "def generate_ansible_coverage_config() -> str:\n \n coverage_config = \n\n return coverage_config\n\n", "d_id": 79234, "documentation": { "docstring": "Generate code coverage configuration for Ansible tests.\n[run]\nbranch = True\nconcurrency = multiprocessing\nparallel = True\n\nomit =\n */python*/dist-packages/*\n */python*/site-packages/*\n */python*/distutils/*\n */pyshared/*\n */pytest\n */AnsiballZ_*.py\n */test/results/*\n", "n_words": 26, "vocab_size": 22, "n_whitespaces": 41, "language": "en" } }, { "id": 98271, "commit_id": "b7dee7f2457a911bea343d20f2119e691bb153ce", "repo": "sentry", "path": "src/sentry/models/organizationmember.py", "file_name": "organizationmember.py", "fun_name": "get_allowed_roles_to_invite", "commit_message": "feat(access): Implement team-based role management and access (#33387)\n\nIntroduce team-based roles in parallel to existing, organization-based\r\nroles. Split the levels into their own objects, accessible through the\r\nparent RoleManager object. Map org roles onto the corresponding minimum\r\nteam roles, which each member with that org role enjoys automatically.\r\n\r\nHave Access.has_team_scope check scopes given by the member's team role,\r\nin addition to those given by their organization role. This differs from\r\nprevious behavior, in that a member might enjoy a scope for a particular\r\nteam that they would not if Access.has_scope were called.\r\n\r\nIntroduce the \"organizations:team-roles\" feature flag. Organizations\r\nwithout this flag don't give any additional scopes for team roles.\r\n\r\nThere is currently no way to assign team roles. 
API support is pending.", "code": "def get_allowed_roles_to_invite(self):\n \n return [\n r\n for r in organization_roles.get_all()\n if r.priority <= organization_roles.get(self.role).priority\n ]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 68, "n_words": 14, "vocab_size": 13, "complexity": 3, "nloc": 6, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def get_allowed_roles_to_invite(self):\n \n return [\n r\n for r in organization_roles.get_all()\n if r.priority <= organization_roles.get(self.role).priority\n ]\n", "d_id": 19555, "documentation": { "docstring": "\n Return a list of roles which that member could invite\n Must check if member member has member:admin first before checking\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 42, "language": "en" } }, { "id": 112159, "commit_id": "14d2966b9e91ae16dcc39de8f41017a75cec8ff9", "repo": "nni", "path": "nni/retiarii/oneshot/pytorch/supermodule/base.py", "file_name": "base.py", "fun_name": "search_space_spec", "commit_message": "Valuechoice oneshot lightning (#4602)", "code": "def search_space_spec(self) -> Dict[str, ParameterSpec]:\n \n raise NotImplementedError()\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 10, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 6, "random_cut": "def search_space_spec(self) -> Dict[str, ParameterSpec]:\n \n raise NotImplementedError()\n", "d_id": 24601, "documentation": { "docstring": "\n Space specification (sample points).\n Mapping from spec name to ParameterSpec. The names in choices should be in the same format of export.\n\n For example: ::\n\n {\"layer1\": ParameterSpec(values=[\"conv\", \"pool\"])}\n ", "n_words": 28, "vocab_size": 27, "n_whitespaces": 68, "language": "en" } }, { "id": 246790, "commit_id": "02d708568b476f2f7716000b35c0adfa4cbd31b3", "repo": "synapse", "path": "tests/rest/admin/test_room.py", "file_name": "test_room.py", "fun_name": "test_context_as_admin", "commit_message": "Replace assertEquals and friends with non-deprecated versions. (#12092)", "code": "def test_context_as_admin(self) -> None:\n \n\n # Create a room. 
We're not part of it.\n user_id = self.register_user(\"test\", \"test\")\n user_tok = self.login(\"test\", \"test\")\n room_id = self.helper.create_room_as(user_id, tok=user_tok)\n\n # Populate the room with events.\n events = []\n for i in range(30):\n events.append(\n self.helper.send_event(\n room_id, \"com.example.test\", content={\"index\": i}, tok=user_tok\n )\n )\n\n # Now let's fetch the context for this room.\n midway = (len(events) - 1) // 2\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/rooms/%s/context/%s\"\n % (room_id, events[midway][\"event_id\"]),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertEqual(\n channel.json_body[\"event\"][\"event_id\"], events[midway][\"event_id\"]\n )\n\n for found_event in channel.json_body[\"events_before\"]:\n for j, posted_event in enumerate(events):\n if found_event[\"event_id\"] == posted_event[\"event_id\"]:\n self.assertTrue(j < midway)\n break\n else:\n self.fail(\"Event %s from events_before not found\" % j)\n\n for found_event in channel.json_body[\"events_after\"]:\n for j, posted_event in enumerate(events):\n if found_event[\"event_id\"] == posted_event[\"event_id\"]:\n self.assertTrue(j > midway)\n break\n else:\n self.fail(\"Event %s from events_after not found\" % j)\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 557, "n_words": 132, "vocab_size": 89, "complexity": 8, "nloc": 39, "token_counts": 261, "n_ast_nodes": 438, "n_identifiers": 34, "random_cut": "def test_context_as_admin(self) -> None:\n \n\n # Create a room. We're not part of it.\n user_id = self.register_user(\"test\", \"test\")\n user_tok = self.login(\"test\", \"test\")\n room_id = self.helper.create_room_as(user_id, tok=user_tok)\n\n # Populate the room with events.\n events = []\n for i in range(30):\n events.append(\n self.helper.send_event(\n room_id, \"com.example.test\", content={\"index\": i}, tok=user_tok\n )\n )\n\n # Now let's fetch the context for this room.\n midway = (len(events) - 1) // 2\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/rooms/%s/context/%s\"\n % (room_id, events[midway][\"event_id\"]),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertEqual(\n channel.json_body[\"event\"][\"event_id\"], events[midway][\"event_id\"]\n )\n\n for found_event in channel.json_body[\"events_before\"]:\n for j, posted_event in enumerate(events):\n if found_event[\"event_id\"] == posted_event[\"event_id\"]:\n self.assertTrue(j < midway)\n break\n else:\n self.fail(\"Event %s from events_before not found\" % j)\n\n for found_event in channel.json_body[\"events_after\"]:\n for j, posted_event in enumerate(events):\n if found_event[\"event_id\"] == posted_event[\"event_id\"]:\n ", "d_id": 71346, "documentation": { "docstring": "\n Test that, as admin, we can find the context of an event without having joined the room.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 32, "language": "en" } }, { "id": 157370, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "file_name": "dpm_solver.py", "fun_name": "marginal_std", "commit_message": "release more models", "code": "def marginal_std(self, t):\n \n return torch.sqrt(1. - torch.exp(2. 
* self.marginal_log_mean_coeff(t)))\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 31, "n_ast_nodes": 48, "n_identifiers": 7, "random_cut": "def marginal_std(self, t):\n \n ", "d_id": 36910, "documentation": { "docstring": "\n Compute sigma_t of a given continuous-time label t in [0, T].\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 78245, "commit_id": "d967eccef28ce47f60d26be1c28f2d83a25f40b0", "repo": "wagtail", "path": "wagtail/contrib/settings/models.py", "file_name": "models.py", "fun_name": "for_request", "commit_message": "Add generic settings to compliment site-specific settings (#8327)", "code": "def for_request(cls, request):\n \n attr_name = cls.get_cache_attr_name()\n if hasattr(request, attr_name):\n return getattr(request, attr_name)\n site = Site.find_for_request(request)\n site_settings = cls.for_site(site)\n # to allow more efficient page url generation\n site_settings._request = request\n setattr(request, attr_name, site_settings)\n return site_settings\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 108, "n_words": 34, "vocab_size": 29, "complexity": 2, "nloc": 9, "token_counts": 61, "n_ast_nodes": 98, "n_identifiers": 14, "random_cut": "def for_request(cls, request):\n \n attr_name = cls.get_cache_attr_name()\n if hasattr(request, attr_name):\n return getattr(request, attr_name)\n site = Site.find_for_request(request)\n site_settings = cls.for_site(site)\n # to allow mo", "d_id": 16745, "documentation": { "docstring": "\n Get or create an instance of this model for the request,\n and cache the result on the request for faster repeat access.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 44, "language": "en" } }, { "id": 186377, "commit_id": "eeca208c8f57304590ac1af80b496e61021aaa45", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/configurator.py", "file_name": "configurator.py", "fun_name": "_enable_ocsp_stapling", "commit_message": "Various clean-ups in certbot-apache. Use f-strings. (#9132)\n\n* Various clean-ups in certbot-apache. 
Use f-strings.\r\n\r\n* Smaller tweaks", "code": "def _enable_ocsp_stapling(self, ssl_vhost, unused_options):\n \n min_apache_ver = (2, 3, 3)\n if self.get_version() < min_apache_ver:\n raise errors.PluginError(\n \"Unable to set OCSP directives.\\n\"\n \"Apache version is below 2.3.3.\")\n\n if \"socache_shmcb_module\" not in self.parser.modules:\n self.enable_mod(\"socache_shmcb\")\n\n # Check if there's an existing SSLUseStapling directive on.\n use_stapling_aug_path = self.parser.find_dir(\"SSLUseStapling\",\n \"on\", start=ssl_vhost.path)\n if not use_stapling_aug_path:\n self.parser.add_dir(ssl_vhost.path, \"SSLUseStapling\", \"on\")\n\n ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))\n\n # Check if there's an existing SSLStaplingCache directive.\n stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',\n None, ssl_vhost_aug_path)\n\n # We'll simply delete the directive, so that we'll have a\n # consistent OCSP cache path.\n if stapling_cache_aug_path:\n self.parser.aug.remove(\n re.sub(r\"/\\w*$\", \"\", stapling_cache_aug_path[0]))\n\n self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,\n \"SSLStaplingCache\",\n [\"shmcb:/var/run/apache2/stapling_cache(128000)\"])\n\n msg = \"OCSP Stapling was enabled on SSL Vhost: %s.\\n\"%(\n ssl_vhost.filep)\n self.save_notes += msg\n self.save()\n logger.info(msg)\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 518, "n_words": 108, "vocab_size": 89, "complexity": 5, "nloc": 26, "token_counts": 182, "n_ast_nodes": 311, "n_identifiers": 31, "random_cut": "def _enable_ocsp_stapling(self, ssl_vhost, unused_options):\n \n min_apache_ver = (2, 3, 3)\n if self.get_version() < min_apache_ver:\n raise errors.PluginError(\n \"Unable to set OCSP directives.\\n\"\n \"Apache version is below 2.3.3.\")\n\n if \"socache_shmcb_module\" not in self.parser.modules:\n self.enable_mod(\"socache_shmcb\")\n\n # Check if there's an existing SSLUseStapling directive on.\n use_stapling_aug_path = self.parser.find_dir(\"SSLUseStapling\",\n \"on\", start=ssl_vhost.path)\n if not use_stapling_aug_path:\n self.parser.add_dir(ssl_vhost.path, \"SSLUseStapling\", \"on\")\n\n ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))\n\n # Check if there's an existing SSLStaplingCache directive.\n stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',\n None, ssl_vhost_aug_path)\n\n # We'll simply delete the directive, so that we'll have a\n # consistent OCSP cache path.\n if stapling_cache_aug_path:\n self.parser.aug.remove(\n re.sub(r\"/\\w*$\", \"\", stapling_cache_aug_path[0]))\n\n self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,\n \"SSLStaplingCache\",\n [\"shmcb:/var/run/apache2/stapling_cache(128000)\"])\n\n msg = \"OCSP Stapling was enabled on SSL Vhost: %s.\\n\"%(\n ", "d_id": 45473, "documentation": { "docstring": "Enables OCSP Stapling\n\n In OCSP, each client (e.g. browser) would have to query the\n OCSP Responder to validate that the site certificate was not revoked.\n\n Enabling OCSP Stapling, would allow the web-server to query the OCSP\n Responder, and staple its response to the offered certificate during\n TLS. i.e. clients would not have to query the OCSP responder.\n\n OCSP Stapling enablement on Apache implicitly depends on\n SSLCertificateChainFile being set by other code.\n\n .. 
note:: This function saves the configuration\n\n :param ssl_vhost: Destination of traffic, an ssl enabled vhost\n :type ssl_vhost: :class:`~certbot_apache._internal.obj.VirtualHost`\n\n :param unused_options: Not currently used\n :type unused_options: Not Available\n\n :returns: Success, general_vhost (HTTP vhost)\n :rtype: (bool, :class:`~certbot_apache._internal.obj.VirtualHost`)\n\n ", "n_words": 107, "vocab_size": 78, "n_whitespaces": 212, "language": "en" } }, { "id": 67319, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/united_arab_emirates/utils.py", "file_name": "utils.py", "fun_name": "make_regional_gl_entries", "commit_message": "style: format code with black", "code": "def make_regional_gl_entries(gl_entries, doc):\n\t\n\tcountry = frappe.get_cached_value(\"Company\", doc.company, \"country\")\n\n\tif country != \"United Arab Emirates\":\n\t\treturn gl_entries\n\n\tif doc.reverse_charge == \"Y\":\n\t\ttax_accounts = get_tax_accounts(doc.company)\n\t\tfor tax in doc.get(\"taxes\"):\n\t\t\tif tax.category not in (\"Total\", \"Valuation and Total\"):\n\t\t\t\tcontinue\n\t\t\tgl_entries = make_gl_entry(tax, gl_entries, doc, tax_accounts)\n\treturn gl_entries\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 33, "n_words": 44, "vocab_size": 35, "complexity": 5, "nloc": 11, "token_counts": 81, "n_ast_nodes": 137, "n_identifiers": 14, "random_cut": "def make_regional_gl_entries(gl_entries, doc):\n\t\n\tcountry = frappe.get_cached_value(\"Company\", doc.company, \"country\")\n\n\t", "d_id": 14494, "documentation": { "docstring": "Hooked to make_regional_gl_entries in Purchase Invoice.It appends the region specific general ledger entries to the list of GL Entries.", "n_words": 19, "vocab_size": 17, "n_whitespaces": 18, "language": "en" } }, { "id": 8266, "commit_id": "d4dcff26dd9f25b3eb43c4e74a56af93879eeab2", "repo": "ludwig", "path": "tests/integration_tests/utils.py", "file_name": "utils.py", "fun_name": "private_param", "commit_message": "Fixed issue when uploading output directory artifacts to remote filesystems (#2598)", "code": "def private_param(param):\n \n return pytest.param(\n *param,\n marks=pytest.mark.skipif(\n not _run_private_tests,\n reason=\"Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run\",\n ),\n )\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 75, "n_words": 23, "vocab_size": 23, "complexity": 1, "nloc": 8, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 8, "random_cut": "def private_param(param):\n \n return pytest.param(\n *param,\n marks=pytest.mark.skipif(\n not _run_private_tests,\n reason=\"Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run\",\n ),\n )\n\n", "d_id": 1387, "documentation": { "docstring": "Wrap param to mark it as private, meaning it requires credentials to run.\n\n Private tests are skipped by default. 
Set the RUN_PRIVATE environment variable to a truth value to run them.\n ", "n_words": 31, "vocab_size": 27, "n_whitespaces": 37, "language": "en" } }, { "id": 177161, "commit_id": "df9a128f4171d95671e5d9f5460970cc4bf8e3b3", "repo": "networkx", "path": "networkx/algorithms/d_separation.py", "file_name": "d_separation.py", "fun_name": "minimal_d_separator", "commit_message": "[ENH] Find and verify a minimal D-separating set in DAG (#5898)\n\n* Ran black\r\n\r\n* Add unit tests\r\n\r\n* Rename and fix citation\r\n\r\n* Black\r\n\r\n* Fix unite tests\r\n\r\n* Isort\r\n\r\n* Add algo description\r\n\r\n* Update networkx/algorithms/tests/test_d_separation.py\r\n\r\n* Update networkx/algorithms/traversal/breadth_first_search.py\r\n\r\n* Address dans comments\r\n\r\n* Fix unit tests\r\n\r\n* Update networkx/algorithms/tests/test_d_separation.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/dag.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/dag.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Fix comments\r\n\r\n* Clean up the docs a bit more\r\n\r\n* Merge\r\n\r\nCo-authored-by: Dan Schult ", "code": "def minimal_d_separator(G, u, v):\n \n if not nx.is_directed_acyclic_graph(G):\n raise nx.NetworkXError(\"graph should be directed acyclic\")\n\n union_uv = {u, v}\n\n if any(n not in G.nodes for n in union_uv):\n raise nx.NodeNotFound(\"one or more specified nodes not found in the graph\")\n\n # first construct the set of ancestors of X and Y\n x_anc = nx.ancestors(G, u)\n y_anc = nx.ancestors(G, v)\n D_anc_xy = x_anc.union(y_anc)\n D_anc_xy.update((u, v))\n\n # second, construct the moralization of the subgraph of Anc(X,Y)\n moral_G = nx.moral_graph(G.subgraph(D_anc_xy))\n\n # find a separating set Z' in moral_G\n Z_prime = set(G.predecessors(u)).union(set(G.predecessors(v)))\n\n # perform BFS on the graph from 'x' to mark\n Z_dprime = _bfs_with_marks(moral_G, u, Z_prime)\n Z = _bfs_with_marks(moral_G, v, Z_dprime)\n return Z\n\n\n@not_implemented_for(\"undirected\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"undirected\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 172, "n_words": 108, "vocab_size": 77, "complexity": 4, "nloc": 15, "token_counts": 152, "n_ast_nodes": 254, "n_identifiers": 28, "random_cut": "def minimal_d_separator(G, u, v):\n \n if not nx.is_directed_acyclic_graph(G):\n raise nx.NetworkXError(\"graph should be direct", "d_id": 42296, "documentation": { "docstring": "Compute a minimal d-separating set between 'u' and 'v'.\n\n A d-separating set in a DAG is a set of nodes that blocks all paths\n between the two nodes, 'u' and 'v'. This function\n constructs a d-separating set that is \"minimal\", meaning it is the smallest\n d-separating set for 'u' and 'v'. This is not necessarily\n unique. For more details, see Notes.\n\n Parameters\n ----------\n G : graph\n A networkx DAG.\n u : node\n A node in the graph, G.\n v : node\n A node in the graph, G.\n\n Raises\n ------\n NetworkXError\n Raises a :exc:`NetworkXError` if the input graph is not a DAG.\n\n NodeNotFound\n If any of the input nodes are not found in the graph,\n a :exc:`NodeNotFound` exception is raised.\n\n References\n ----------\n .. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators.\n\n Notes\n -----\n This function only finds ``a`` minimal d-separator. 
It does not guarantee\n uniqueness, since in a DAG there may be more than one minimal d-separator\n between two nodes. Moreover, this only checks for minimal separators\n between two nodes, not two sets. Finding minimal d-separators between\n two sets of nodes is not supported.\n\n Uses the algorithm presented in [1]_. The complexity of the algorithm\n is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the\n number of edges in the moralized graph of the sub-graph consisting\n of only the ancestors of 'u' and 'v'. For full details, see [1]_.\n\n The algorithm works by constructing the moral graph consisting of just\n the ancestors of `u` and `v`. Then it constructs a candidate for\n a separating set ``Z'`` from the predecessors of `u` and `v`.\n Then BFS is run starting from `u` and marking nodes\n found from ``Z'`` and calling those nodes ``Z''``.\n Then BFS is run again starting from `v` and marking nodes if they are\n present in ``Z''``. Those marked nodes are the returned minimal\n d-separating set.\n\n https://en.wikipedia.org/wiki/Bayesian_network#d-separation\n ", "n_words": 306, "vocab_size": 148, "n_whitespaces": 463, "language": "en" } }, { "id": 189502, "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", "repo": "manim", "path": "manim/mobject/svg/text_mobject.py", "file_name": "text_mobject.py", "fun_name": "_change_alignment_for_a_line", "commit_message": "Hide more private methods from the docs. (#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", "code": "def _change_alignment_for_a_line(self, alignment, line_no):\n \n self.lines[1][line_no] = alignment\n if self.lines[1][line_no] == \"center\":\n self[line_no].move_to(\n np.array([self.get_center()[0], self[line_no].get_center()[1], 0]),\n )\n elif self.lines[1][line_no] == \"right\":\n self[line_no].move_to(\n np.array(\n [\n self.get_right()[0] - self[line_no].width / 2,\n self[line_no].get_center()[1],\n 0,\n ],\n ),\n )\n elif self.lines[1][line_no] == \"left\":\n self[line_no].move_to(\n np.array(\n [\n self.get_left()[0] + self[line_no].width / 2,\n self[line_no].get_center()[1],\n 0,\n ],\n ),\n )\n\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 440, "n_words": 50, "vocab_size": 30, "complexity": 4, "nloc": 26, "token_counts": 196, "n_ast_nodes": 294, "n_identifiers": 12, "random_cut": "def _change_alignment_for_a_line(self, alignment, line_no):\n \n self.lines[1][line_no] = alignment\n if self.lines[1][line_no] == \"center\":\n self[line_no].move_to(\n np.array([self.get_center()[0], self[line_no].get_center()[1], 0]),\n )\n elif self.lines[1][line_no] == \"right\":\n self[line_no].move_to(\n np.array(\n [\n self.get_right()[0] - self[line_no].width / 2,\n self[line_no].get_center()[1],\n 0,\n ],\n ),\n )\n elif self.lines[1][line_no] == \"left\":\n self[line_no].move_to(\n np.array(\n [\n self.get_left()[0] + self[line_no].width / 2", 
"d_id": 46101, "documentation": { "docstring": "Function to change one line's alignment to a specific value.\n\n Parameters\n ----------\n alignment : :class:`str`\n Defines the alignment of paragraph. Possible values are \"left\", \"right\", \"center\".\n line_no : :class:`int`\n Defines the line number for which we want to set given alignment.\n ", "n_words": 41, "vocab_size": 34, "n_whitespaces": 98, "language": "en" } }, { "id": 249425, "commit_id": "682dfcfc0db05d9c99b7615d950997535df4d533", "repo": "synapse", "path": "tests/handlers/test_room_member.py", "file_name": "test_room_member.py", "fun_name": "test_rejoin_forgotten_by_user", "commit_message": "Fix that user cannot `/forget` rooms after the last member has left (#13546)", "code": "def test_rejoin_forgotten_by_user(self) -> None:\n \n self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)\n\n self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)\n self.get_success(self.handler.forget(self.alice_ID, self.room_id))\n self.assertTrue(\n self.get_success(self.store.did_forget(self.alice, self.room_id))\n )\n\n # the server has not forgotten the room\n self.assertFalse(\n self.get_success(self.store.is_locally_forgotten_room(self.room_id))\n )\n\n self.helper.join(self.room_id, user=self.alice, tok=self.alice_token)\n # TODO: A join to a room does not invalidate the forgotten cache\n # see https://github.com/matrix-org/synapse/issues/13262\n self.store.did_forget.invalidate_all()\n self.assertFalse(\n self.get_success(self.store.did_forget(self.alice, self.room_id))\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 189, "n_words": 51, "vocab_size": 35, "complexity": 1, "nloc": 18, "token_counts": 170, "n_ast_nodes": 265, "n_identifiers": 22, "random_cut": "def test_rejoin_forgotten_by_user(self) -> None:\n \n self.helper.join(self.room_id, user=self.", "d_id": 72907, "documentation": { "docstring": "Test that a user that has forgotten a room can do a re-join.\n The room was not forgotten from the local server.\n One local user is still member of the room.", "n_words": 31, "vocab_size": 23, "n_whitespaces": 44, "language": "en" } }, { "id": 203990, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/gdal/libgdal.py", "file_name": "libgdal.py", "fun_name": "std_call", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def std_call(func):\n \n if os.name == \"nt\":\n return lwingdal[func]\n else:\n return lgdal[func]\n\n\n# #### Version-information functions. ####\n\n# Return GDAL library version information with the given key.\n_version_info = std_call(\"GDALVersionInfo\")\n_version_info.argtypes = [c_char_p]\n_version_info.restype = c_char_p\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 53, "n_words": 35, "vocab_size": 30, "complexity": 2, "nloc": 5, "token_counts": 25, "n_ast_nodes": 77, "n_identifiers": 10, "random_cut": "def std_call(func):\n \n if os.name == \"nt\":\n return lwingdal[func]\n else:\n return lgdal[func]\n\n\n# #### Version-information functions. 
####\n\n#", "d_id": 50601, "documentation": { "docstring": "\n Return the correct STDCALL function for certain OSR routines on Win32\n platforms.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 22, "language": "en" } }, { "id": 206594, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/datastructures.py", "file_name": "datastructures.py", "fun_name": "__getitem__", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def __getitem__(self, key):\n \n use_func = key.startswith(self.prefix)\n if use_func:\n key = key[len(self.prefix) :]\n value = super().__getitem__(key)\n if use_func:\n return self.func(value)\n return value\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 85, "n_words": 21, "vocab_size": 15, "complexity": 3, "nloc": 8, "token_counts": 55, "n_ast_nodes": 91, "n_identifiers": 10, "random_cut": "def __getitem__(self, key):\n \n use_func = key.startswith(self.prefix)\n if use_func:\n", "d_id": 51581, "documentation": { "docstring": "\n Retrieve the real value after stripping the prefix string (if\n present). If the prefix is present, pass the value through self.func\n before returning, otherwise return the raw value.\n ", "n_words": 28, "vocab_size": 22, "n_whitespaces": 57, "language": "en" } }, { "id": 55900, "commit_id": "84d0f8a18f6a413fc70b78e4ccbef67372d05075", "repo": "prefect", "path": "tests/test_client.py", "file_name": "test_client.py", "fun_name": "not_enough_open_files", "commit_message": "Skip running the more intense client tests when the ulimit of files is too low (PrefectHQ/orion#1905)\n\nOn some systems the ulimit for open files is set excruciatingly low, especially\r\nthe default settings of 256 for macOS. 
We can skip the threading tests on\r\nsystems with no enough open files.\r\n\r\nCo-authored-by: Michael Adkins ", "code": "def not_enough_open_files() -> bool:\n \n try:\n import resource\n except ImportError:\n # resource limits is not a concept on all systems, notably Windows\n return False\n\n soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)\n return soft_limit < 512 or hard_limit < 512\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 71, "n_words": 35, "vocab_size": 30, "complexity": 3, "nloc": 11, "token_counts": 36, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "def not_enough_open_files() -> bool:\n \n try:\n import resource\n except ImportError:\n # resource limits is not a concept on all systems, notably Windows\n return False\n\n soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)\n return soft", "d_id": 11418, "documentation": { "docstring": "\n The current process does not currently allow enough open files for this test.\n You can increase the number of open files with `ulimit -n 512`.\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 35, "language": "en" } }, { "id": 264432, "commit_id": "23a80770e1e96c0351cb4ead62ebf294f639845a", "repo": "netbox", "path": "netbox/netbox/tables/tables.py", "file_name": "tables.py", "fun_name": "configure", "commit_message": "Move configure_table() logic to NetBoxTable.configure()", "code": "def configure(self, request):\n \n # Save ordering preference\n if request.user.is_authenticated:\n table_name = self.__class__.__name__\n if self.prefixed_order_by_field in request.GET:\n # If an ordering has been specified as a query parameter, save it as the\n # user's preferred ordering for this table.\n ordering = request.GET.getlist(self.prefixed_order_by_field)\n request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)\n elif ordering := request.user.config.get(f'tables.{table_name}.ordering'):\n # If no ordering has been specified, set the preferred ordering (if any).\n self.order_by = ordering\n\n # Paginate the table results\n paginate = {\n 'paginator_class': EnhancedPaginator,\n 'per_page': get_paginate_count(request)\n }\n tables.RequestConfig(request, paginate).configure(self)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 273, "n_words": 79, "vocab_size": 57, "complexity": 4, "nloc": 13, "token_counts": 107, "n_ast_nodes": 185, "n_identifiers": 22, "random_cut": "def configure(self, request):\n \n # Save ordering preference\n if request.user.is_authenticated:\n table_name = self.__class__.__name__\n if self.prefixed_order_by_field in request.GET:\n # If an ordering has been specified as a query parameter, save it as the\n # user's preferred ordering for this table.\n ordering = request.GET.getlist(self.prefixed_order_by_field)\n ", "d_id": 77722, "documentation": { "docstring": "\n Configure the table for a specific request context. 
This performs pagination and records\n the user's preferred ordering logic.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 122960, "commit_id": "1fc9197c79af89ef292dc69d508ed1569f62c4f0", "repo": "jax", "path": "jax/interpreters/pxla.py", "file_name": "pxla.py", "fun_name": "_shard_arg", "commit_message": "Simplify Array's shard_arg_handler by merging pmap and pjit/xmap paths\n\nPiperOrigin-RevId: 497991966", "code": "def _shard_arg(arg, devices, arg_indices):\n \n if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:\n # The shard_arg_handlers allow an extensible set of types to be sharded, but\n # inline handling for ShardedDeviceArray as a special case for performance\n # NOTE: we compare indices instead of sharding_spec because\n # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster.\n return [\n buf if buf.device() == d else buf.copy_to_device(d)\n for d, buf in zip(devices, arg.device_buffers)\n ]\n else:\n arg = xla.canonicalize_dtype(arg)\n return shard_arg_handlers[type(arg)](arg, devices, arg_indices)\n\n\n@profiler.annotate_function", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@profiler.annotate_function", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 115, "n_words": 75, "vocab_size": 64, "complexity": 5, "nloc": 9, "token_counts": 81, "n_ast_nodes": 134, "n_identifiers": 19, "random_cut": "def _shard_arg(arg, devices, arg_indices):\n \n if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:\n # The shard_arg_handlers allow an extensible set of types to be sharded, but\n # inline handling for ShardedDeviceArray as a special case for performance\n # NOTE: we compare indices instead of sharding_spec because\n # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster.\n return [\n buf if buf.device() ", "d_id": 27280, "documentation": { "docstring": "Returns a list of size len(devices) containing per-device buffers.\n\n For the C++ pmap path, we fallback to Python (this function) to shard\n arguments that are not supported by the C++ `ShardArg`.\n\n Arrgs:\n arg: The Python argument.\n devices: The list of devices to shard over.\n arg_indices: A list of `len(devices)` indices to use to shard the argument.\n ", "n_words": 56, "vocab_size": 40, "n_whitespaces": 69, "language": "en" } }, { "id": 60304, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py", "file_name": "test_coord_map.py", "fun_name": "test_rect", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def test_rect(self):\n \n n3x3 = coord_net_spec(ks=3, stride=1, pad=0)\n n5x5 = coord_net_spec(ks=5, stride=2, pad=10)\n n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])\n ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)\n ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)\n ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)\n self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)\n self.assertEquals(a_3x3, a_3x5[0])\n self.assertEquals(b_3x3, b_3x5[0])\n self.assertEquals(a_5x5, a_3x5[1])\n self.assertEquals(b_5x5, b_3x5[1])\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 135, "n_words": 51, "vocab_size": 45, "complexity": 1, "nloc": 12, "token_counts": 168, "n_ast_nodes": 245, 
"n_identifiers": 23, "random_cut": "def test_rect(self):\n \n n3x3 = coord_net_spec(ks=3, stride=1, pad=0)\n n5x5 = coord_net_spec(ks=5, stride=2, pad=10)\n n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])\n ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)\n ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)\n ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.d", "d_id": 12082, "documentation": { "docstring": "\n Anisotropic mapping is equivalent to its isotropic parts.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 73512, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/tests/test_model.py", "file_name": "test_model.py", "fun_name": "test_get_page_url_when_for_settings_fetched_via_for_site", "commit_message": "Reformat with black", "code": "def test_get_page_url_when_for_settings_fetched_via_for_site(self):\n \n self._create_importantpages_object()\n\n settings = ImportantPages.for_site(self.default_site)\n\n # Force site root paths query beforehand\n self.default_site.root_page._get_site_root_paths()\n\n for page_fk_field, expected_result in (\n (\"sign_up_page\", \"http://localhost/\"),\n (\"general_terms_page\", \"http://localhost/\"),\n (\"privacy_policy_page\", \"http://other/\"),\n ):\n with self.subTest(page_fk_field=page_fk_field):\n\n # only the first request for each URL will trigger queries.\n # 2 are triggered instead of 1 here, because tests use the\n # database cache backed, and the cache is queried each time\n # to fetch site root paths (because there's no 'request' to\n # store them on)\n\n with self.assertNumQueries(2):\n\n self.assertEqual(\n settings.get_page_url(page_fk_field), expected_result\n )\n\n # when called directly\n self.assertEqual(\n settings.get_page_url(page_fk_field), expected_result\n )\n\n # when called indirectly via shortcut\n self.assertEqual(\n getattr(settings.page_url, page_fk_field), expected_result\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 506, "n_words": 102, "vocab_size": 74, "complexity": 2, "nloc": 20, "token_counts": 115, "n_ast_nodes": 201, "n_identifiers": 17, "random_cut": "def test_get_page_url_when_for_settings_fetched_via_for_site(self):\n \n self._create_importantpages_object()\n\n settings = ImportantPages.for_site(self.default_site)\n\n # Force site root paths query beforehand\n self.default_site.root_page._get_site_root_paths()\n\n for page_fk_field, expected_result in (\n (\"sign_up_page\", \"http://localhost/\"),\n (\"general_terms_page\", \"http://localhost/\"),\n (\"privacy_policy_page\", \"http://other/\"),\n ):\n with self.subTest(page_fk_field=page_fk_field):\n\n # only the first request for each URL will trigger queries.\n # 2 are triggered instead of 1 here, because tests use the\n # database cache backed, and the cache is queried each time\n # to fetch site root paths (because there's no 'request' to\n # store them on)\n\n with self.assertNumQueries(2):\n\n self.assertEqual(\n settings.get_page_url(page_fk_field), expected_result\n ", "d_id": 16034, "documentation": { "docstring": "ImportantPages.for_site() cannot make the settings object\n request-aware, so things are a little less efficient, and the\n URLs returned will not be site-relative", "n_words": 22, "vocab_size": 21, "n_whitespaces": 35, "language": "en" } }, { "id": 248389, "commit_id": 
"c52abc1cfdd9e5480cdb4a03d626fe61cacc6573", "repo": "synapse", "path": "tests/federation/test_federation_sender.py", "file_name": "test_federation_sender.py", "fun_name": "test_send_receipts_with_backoff", "commit_message": "Additional constants for EDU types. (#12884)\n\nInstead of hard-coding strings in many places.", "code": "def test_send_receipts_with_backoff(self):\n \n mock_send_transaction = (\n self.hs.get_federation_transport_client().send_transaction\n )\n mock_send_transaction.return_value = make_awaitable({})\n\n sender = self.hs.get_federation_sender()\n receipt = ReadReceipt(\n \"room_id\", \"m.read\", \"user_id\", [\"event_id\"], {\"ts\": 1234}\n )\n self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))\n\n self.pump()\n\n # expect a call to send_transaction\n mock_send_transaction.assert_called_once()\n json_cb = mock_send_transaction.call_args[0][1]\n data = json_cb()\n self.assertEqual(\n data[\"edus\"],\n [\n {\n \"edu_type\": EduTypes.RECEIPT,\n \"content\": {\n \"room_id\": {\n \"m.read\": {\n \"user_id\": {\n \"event_ids\": [\"event_id\"],\n \"data\": {\"ts\": 1234},\n }\n }\n }\n },\n }\n ],\n )\n mock_send_transaction.reset_mock()\n\n # send the second RR\n receipt = ReadReceipt(\n \"room_id\", \"m.read\", \"user_id\", [\"other_id\"], {\"ts\": 1234}\n )\n self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))\n self.pump()\n mock_send_transaction.assert_not_called()\n\n self.reactor.advance(19)\n mock_send_transaction.assert_not_called()\n\n self.reactor.advance(10)\n mock_send_transaction.assert_called_once()\n json_cb = mock_send_transaction.call_args[0][1]\n data = json_cb()\n self.assertEqual(\n data[\"edus\"],\n [\n {\n \"edu_type\": EduTypes.RECEIPT,\n \"content\": {\n \"room_id\": {\n \"m.read\": {\n \"user_id\": {\n \"event_ids\": [\"other_id\"],\n \"data\": {\"ts\": 1234},\n }\n }\n }\n },\n }\n ],\n )\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1066, "n_words": 119, "vocab_size": 57, "complexity": 1, "nloc": 63, "token_counts": 296, "n_ast_nodes": 519, "n_identifiers": 28, "random_cut": "def test_send_receipts_with_backoff(self):\n \n mock_send_transaction = (\n self.hs.get_federation_transport_client().send_transaction\n )\n mock_send_transaction.return_value = make_awaitable({})\n\n sender = self.hs.get_federation_sender()\n receipt = ReadReceipt(\n \"room_id\", \"m.read\", \"user_id\", [\"event_id\"], {\"ts\": 1234}\n )\n self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))\n\n self.pump()\n\n # expect a call to send_transaction\n mock_send_transaction.assert_called_once()\n json_cb = mock_send_transaction.call_args[0][1]\n data = json_cb()\n self.assertEqual(\n data[\"edus\"],\n [\n {\n \"edu_type\": EduTypes.RECEIPT,\n \"content\": {\n \"room_id\": {\n \"m.read\": {\n \"user_id\": {\n \"event_ids\": [\"event_id\"],\n \"data\": {\"ts\": 1234},\n }\n }\n }\n },\n }\n ],\n )\n mock_send_transaction.reset_mock()\n\n # send the second RR\n receipt = ReadReceipt(\n \"room_id\", \"m.read\", \"user_id\", [\"other_id\"], {\"ts\": 1234}\n )\n self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))\n self.pump()\n mock_send_transaction.assert_not_called()\n\n self.reactor.advance(19)\n mock_send_transaction.assert_not_called()\n\n self.reactor.advance(10)\n mock_send_transaction.assert_ca", "d_id": 72260, "documentation": { "docstring": "Send two receipts in 
quick succession; the second should be flushed, but\n only after 20ms", "n_words": 15, "vocab_size": 15, "n_whitespaces": 21, "language": "en" } }, { "id": 86265, "commit_id": "ae9c0d8a33d509d9719a5a03e06c9797741877e9", "repo": "sentry", "path": "src/sentry/lang/javascript/processor.py", "file_name": "processor.py", "fun_name": "process_frame", "commit_message": "ref(processor): Use symbolic-sourcemapcache for JavaScript Sourcemap processing (#38551)\n\nThis PR attempts to replace the currently used `rust-sourcemap` crate\r\nand it's symbolic python bindings, with `symbolic-sourcemapcache` crate.\r\n\r\nIt makes the whole processing pipeline easier to maintain, as it pushes\r\nsome work directly to Symbolic, as well as we get better function names\r\ndue to better scope resolution and in some cases better file URLs.\r\n\r\nOther than that, we don't use `SourceView` anymore, as it seemed like an\r\nunnecessary layer of abstraction for something that is used only for\r\n`context_lines` extraction. We cache `utf-8` decoded sources directly\r\nnow, as this way we can encode them only once for `SmCache` instance\r\ninitialization, and use the source directly otherwise for context lines\r\nextraction.\r\n\r\nSome tests had to updated to express current behavior.\r\n\r\nThe notable thing is `useless_fn_names = [\"\",\r\n\"__webpack_require__\", \"__webpack_modules__\"]`, which is mostly for\r\n`production` mode of webpack, that by default trims all the function\r\nnames, and we decided to fallback to the minified names in those cases\r\ninstead (this was already the old behavior).\r\n\r\nIt should be possible to extract something better, but we'd need to\r\nparse all `sourceContents` from sourcemap to do that, as the only thing\r\nwe can get better function name for the case mentioned above, is if we\r\nlook at the right-hand side of default node export, in form of\r\n`module.exports = function foo () {}`. 
This should give us `foo`, yet\r\nthe only thing we can extract is `module.exports`, as minified form of\r\nthis expression in webpack production mode is `module.exports = function\r\n() {}`.", "code": "def process_frame(self, processable_frame, processing_task):\n \n frame = processable_frame.frame\n token = None\n\n cache = self.cache\n sourcemaps = self.sourcemaps\n all_errors = []\n sourcemap_applied = False\n\n # can't demangle if there's no filename or line number present\n if not frame.get(\"abs_path\") or not frame.get(\"lineno\"):\n return\n\n # also can't demangle node's internal modules\n # therefore we only process user-land frames (starting with /)\n # or those created by bundle/webpack internals\n if self.data.get(\"platform\") == \"node\" and not frame.get(\"abs_path\").startswith(\n (\"/\", \"app:\", \"webpack:\")\n ):\n return\n\n errors = cache.get_errors(frame[\"abs_path\"])\n if errors:\n all_errors.extend(errors)\n\n # This might fail but that's okay, we try with a different path a\n # bit later down the road.\n source = self.get_sourceview(frame[\"abs_path\"])\n\n in_app = None\n new_frame = dict(frame)\n raw_frame = dict(frame)\n\n sourcemap_url, sourcemap_view = sourcemaps.get_link(frame[\"abs_path\"])\n self.sourcemaps_touched.add(sourcemap_url)\n if sourcemap_view and frame.get(\"colno\") is None:\n all_errors.append(\n {\"type\": EventError.JS_NO_COLUMN, \"url\": http.expose_url(frame[\"abs_path\"])}\n )\n elif sourcemap_view:\n if is_data_uri(sourcemap_url):\n sourcemap_label = frame[\"abs_path\"]\n else:\n sourcemap_label = sourcemap_url\n\n sourcemap_label = http.expose_url(sourcemap_label)\n\n if frame.get(\"function\"):\n minified_function_name = frame[\"function\"]\n minified_source = self.get_sourceview(frame[\"abs_path\"])\n else:\n minified_function_name = minified_source = None\n\n try:\n # Errors are 1-indexed in the frames, so we need to -1 to get\n # zero-indexed value from tokens.\n assert frame[\"lineno\"] > 0, \"line numbers are 1-indexed\"\n token = sourcemap_view.lookup(\n frame[\"lineno\"] - 1, frame[\"colno\"] - 1, minified_function_name, minified_source\n )\n except Exception:\n token = None\n all_errors.append(\n {\n \"type\": EventError.JS_INVALID_SOURCEMAP_LOCATION,\n \"column\": frame.get(\"colno\"),\n \"row\": frame.get(\"lineno\"),\n \"source\": frame[\"abs_path\"],\n \"sourcemap\": sourcemap_label,\n }\n )\n\n # persist the token so that we can find it later\n processable_frame.data[\"token\"] = token\n\n # Store original data in annotation\n new_frame[\"data\"] = dict(frame.get(\"data\") or {}, sourcemap=sourcemap_label)\n\n sourcemap_applied = True\n\n if token is not None:\n abs_path = non_standard_url_join(sourcemap_url, token.src)\n\n logger.debug(\n \"Mapping compressed source %r to mapping in %r\", frame[\"abs_path\"], abs_path\n )\n source = self.get_sourceview(abs_path)\n\n if source is None:\n errors = cache.get_errors(abs_path)\n if errors:\n all_errors.extend(errors)\n else:\n all_errors.append(\n {\"type\": EventError.JS_MISSING_SOURCE, \"url\": http.expose_url(abs_path)}\n )\n\n # the tokens are zero indexed, so offset correctly\n new_frame[\"lineno\"] = token.src_line + 1\n new_frame[\"colno\"] = token.src_col + 1\n\n # Try to use the function name we got from symbolic\n original_function_name = token.function_name\n\n # In the ideal case we can use the function name from the\n # frame and the location to resolve the original name\n # through the heuristics in our sourcemap library.\n if original_function_name is None:\n 
last_token = None\n\n # Find the previous token for function name handling as a\n # fallback.\n if (\n processable_frame.previous_frame\n and processable_frame.previous_frame.processor is self\n ):\n last_token = processable_frame.previous_frame.data.get(\"token\")\n if last_token:\n original_function_name = last_token.name\n\n if original_function_name is not None:\n new_frame[\"function\"] = original_function_name\n\n filename = token.src\n # special case webpack support\n # abs_path will always be the full path with webpack:/// prefix.\n # filename will be relative to that\n if abs_path.startswith(\"webpack:\"):\n filename = abs_path\n # webpack seems to use ~ to imply \"relative to resolver root\"\n # which is generally seen for third party deps\n # (i.e. node_modules)\n if \"/~/\" in filename:\n filename = \"~/\" + abs_path.split(\"/~/\", 1)[-1]\n elif WEBPACK_NAMESPACE_RE.match(filename):\n filename = re.sub(WEBPACK_NAMESPACE_RE, \"./\", abs_path)\n else:\n filename = filename.split(\"webpack:///\", 1)[-1]\n\n # As noted above:\n # * [js/node] '~/' means they're coming from node_modules, so these are not app dependencies\n # * [node] sames goes for `./node_modules/` and '../node_modules/', which is used when bundling node apps\n # * [node] and webpack, which includes it's own code to bootstrap all modules and its internals\n # eg. webpack:///webpack/bootstrap, webpack:///external\n if (\n filename.startswith(\"~/\")\n or \"/node_modules/\" in filename\n or not filename.startswith(\"./\")\n ):\n in_app = False\n # And conversely, local dependencies start with './'\n elif filename.startswith(\"./\"):\n in_app = True\n # We want to explicitly generate a webpack module name\n new_frame[\"module\"] = generate_module(filename)\n\n # while you could technically use a subpath of 'node_modules' for your libraries,\n # it would be an extremely complicated decision and we've not seen anyone do it\n # so instead we assume if node_modules is in the path its part of the vendored code\n elif \"/node_modules/\" in abs_path:\n in_app = False\n\n if abs_path.startswith(\"app:\"):\n if filename and NODE_MODULES_RE.search(filename):\n in_app = False\n else:\n in_app = True\n\n new_frame[\"abs_path\"] = abs_path\n new_frame[\"filename\"] = filename\n if not frame.get(\"module\") and abs_path.startswith(\n (\"http:\", \"https:\", \"webpack:\", \"app:\")\n ):\n new_frame[\"module\"] = generate_module(abs_path)\n\n elif sourcemap_url:\n new_frame[\"data\"] = dict(\n new_frame.get(\"data\") or {}, sourcemap=http.expose_url(sourcemap_url)\n )\n\n # TODO: theoretically a minified source could point to\n # another mapped, minified source\n changed_frame = self.expand_frame(new_frame, source=source)\n\n # If we did not manage to match but we do have a line or column\n # we want to report an error here.\n if not new_frame.get(\"context_line\") and source and new_frame.get(\"colno\") is not None:\n all_errors.append(\n {\n \"type\": EventError.JS_INVALID_SOURCEMAP_LOCATION,\n \"column\": new_frame[\"colno\"],\n \"row\": new_frame[\"lineno\"],\n \"source\": new_frame[\"abs_path\"],\n }\n )\n\n changed_raw = sourcemap_applied and self.expand_frame(raw_frame)\n\n if sourcemap_applied or all_errors or changed_frame or changed_raw:\n # In case we are done processing, we iterate over all errors that we got\n # and we filter out all `JS_MISSING_SOURCE` errors since we consider if we have\n # a `context_line` we have a symbolicated frame and we don't need to show the error\n has_context_line = 
bool(new_frame.get(\"context_line\"))\n if has_context_line:\n all_errors[:] = [\n x for x in all_errors if x.get(\"type\") is not EventError.JS_MISSING_SOURCE\n ]\n\n if in_app is not None:\n new_frame[\"in_app\"] = in_app\n raw_frame[\"in_app\"] = in_app\n\n # Run new processor only for frames that were actually modified in any way.\n if should_run_smcache(self) and new_frame != raw_frame:\n smcache_rv = self.smcache_processor.process_frame(processable_frame, None)\n set_path(new_frame, \"data\", \"smcache_frame\", value=smcache_rv[0][0])\n\n new_frames = [new_frame]\n raw_frames = [raw_frame] if changed_raw else None\n return new_frames, raw_frames, all_errors\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 3597, "n_words": 857, "vocab_size": 421, "complexity": 51, "nloc": 145, "token_counts": 953, "n_ast_nodes": 1670, "n_identifiers": 76, "random_cut": "def process_frame(self, processable_frame, processing_task):\n \n frame = processable_frame.frame\n token = None\n\n cache = self.cache\n sourcemaps = self.sourcemaps\n all_errors = []\n sourcemap_applied = False\n\n # can't demangle if there's no filename or line number present\n if not frame.get(\"abs_path\") or not frame.get(\"lineno\"):\n return\n\n # also can't demangle node's internal modules\n # therefore we only process user-land frames (starting with /)\n # or those created by bundle/webpack internals\n if self.data.get(\"platform\") == \"node\" and not frame.get(\"abs_path\").startswith(\n (\"/\", \"app:\", \"webpack:\")\n ):\n return\n\n errors = cache.get_errors(frame[\"abs_path\"])\n if errors:\n all_errors.extend(errors)\n\n # This might fail but that's okay, we try with a different path a\n # bit later down the road.\n source = self.get_sourceview(frame[\"abs_path\"])\n\n in_app = None\n new_frame = dict(frame)\n raw_frame = dict(frame)\n\n sourcemap_url, sourcemap_view = sourcemaps.get_link(frame[\"abs_path\"])\n self.sourcemaps_touched.add(sourcemap_url)\n if sourcemap_view and frame.get(\"colno\") is None:\n all_errors.append(\n {\"type\": EventError.JS_NO_COLUMN, \"url\": http.expose_url(frame[\"abs_path\"])}\n )\n elif sourcemap_view:\n if is_data_uri(sourcemap_url):\n sourcemap_label = frame[\"abs_path\"]\n else:\n sourcemap_label = sourcemap_url\n\n sourcemap_label = http.expose_url(sourcemap_label)\n\n if frame.get(\"function\"):\n minified_function_name = frame[\"function\"]\n minified_source = self.get_sourceview(frame[\"abs_path\"])\n else:\n minified_function_name = minified_source = None\n\n try:\n # Errors are 1-indexed in the frames, so we need to -1 to get\n # zero-indexed value from tokens.\n assert frame[\"lineno\"] > 0, \"line numbers are 1-indexed\"\n token = sourcemap_view.lookup(\n frame[\"lineno\"] - 1, frame[\"colno\"] - 1, minified_function_name, minified_source\n )\n except Exception:\n token = None\n all_errors.append(\n {\n \"type\": EventError.JS_INVALID_SOURCEMAP_LOCATION,\n \"column\": frame.get(\"colno\"),\n \"row\": frame.get(\"lineno\"),\n \"source\": frame[\"abs_path\"],\n \"sourcemap\": sourcemap_label,\n }\n )\n\n # persist the token so that we can find it later\n processable_frame.data[\"token\"] = token\n\n # Store original data in annotation\n new_frame[\"data\"] = dict(frame.get(\"data\") or {}, sourcemap=sourcemap_label)\n\n sourcemap_applied = True\n\n if token is not None:\n abs_path = non_standard_url_join(sourcemap_url, token.src)\n\n logger.debug(\n \"Mapping compressed source 
%r to mapping in %r\", frame[\"abs_path\"], abs_path\n )\n source = self.get_sourceview(abs_path)\n\n if source is None:\n errors = cache.get_errors(abs_path)\n if errors:\n all_errors.extend(errors)\n else:\n all_errors.append(\n {\"type\": EventError.JS_", "d_id": 18094, "documentation": { "docstring": "\n Attempt to demangle the given frame.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 265949, "commit_id": "cb815ede60ab298ca13907d523126380f50a8023", "repo": "netbox", "path": "netbox/utilities/forms/utils.py", "file_name": "utils.py", "fun_name": "validate_csv", "commit_message": "7961 CSV bulk update (#10715)\n\n* 7961 add csv bulk update\r\n\r\n* temp checkin - blocked\r\n\r\n* 7961 bugfix and cleanup\r\n\r\n* 7961 change to id, add docs\r\n\r\n* 7961 add tests cases\r\n\r\n* 7961 fix does not exist validation error\r\n\r\n* 7961 fix does not exist validation error\r\n\r\n* 7961 update tests\r\n\r\n* 7961 update tests\r\n\r\n* 7961 update tests\r\n\r\n* 7961 update tests\r\n\r\n* 7961 update tests\r\n\r\n* 7961 update tests\r\n\r\n* 7961 update tests\r\n\r\n* 7961 update tests\r\n\r\n* 7961 update tests\r\n\r\n* 7961 make test cases more explicit\r\n\r\n* 7961 make test cases more explicit\r\n\r\n* 7961 make test cases more explicit\r\n\r\n* 7961 make test cases more explicit\r\n\r\n* 7961 make test cases more explicit\r\n\r\n* 7961 make test cases more explicit\r\n\r\n* 7961 make test cases more explicit\r\n\r\n* 7961 optimize loading csv test data\r\n\r\n* 7961 update tests remove redundant code\r\n\r\n* 7961 avoid MPTT issue in test cases", "code": "def validate_csv(headers, fields, required_fields):\n \n # Validate provided column headers\n is_update = False\n for field, to_field in headers.items():\n if field == \"id\":\n is_update = True\n continue\n if field not in fields:\n raise forms.ValidationError(f'Unexpected column header \"{field}\" found.')\n if to_field and not hasattr(fields[field], 'to_field_name'):\n raise forms.ValidationError(f'Column \"{field}\" is not a related object; cannot use dots')\n if to_field and not hasattr(fields[field].queryset.model, to_field):\n raise forms.ValidationError(f'Invalid related object attribute for column \"{field}\": {to_field}')\n\n # Validate required fields (if not an update)\n if not is_update:\n for f in required_fields:\n if f not in headers:\n raise forms.ValidationError(f'Required column header \"{f}\" not found.')\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 229, "n_words": 95, "vocab_size": 59, "complexity": 11, "nloc": 16, "token_counts": 118, "n_ast_nodes": 212, "n_identifiers": 14, "random_cut": "def validate_csv(headers, fields, required_fields):\n \n # Validate provided column headers\n is_update = False\n for field, to_field in headers.items():\n if field == \"id\":\n is_update = True\n continue\n if field not in fields:\n raise forms.Vali", "d_id": 78250, "documentation": { "docstring": "\n Validate that parsed csv data conforms to the object's available fields. 
Raise validation errors\n if parsed csv data contains invalid headers or does not contain required headers.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 37, "language": "en" } }, { "id": 199621, "commit_id": "e875bdb804b0285e4a9bd8de0158436e792c03cb", "repo": "sympy", "path": "sympy/polys/appellseqs.py", "file_name": "appellseqs.py", "fun_name": "bernoulli_poly", "commit_message": "Initial definition of Appell sequences", "code": "def bernoulli_poly(n, x=None, polys=False):\n \n return appell_poly(n, [[1], [1, QQ(-1,2)]], QQ(1,2),\n lambda p, i: p * QQ(1<<(i-1), 1-(1< lval\n assert drawdown == result\n assert pytest.approx(drawdown_rel) == result_rel\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 149, "n_words": 91, "vocab_size": 69, "complexity": 2, "nloc": 14, "token_counts": 152, "n_ast_nodes": 236, "n_identifiers": 35, "random_cut": "def test_calculate_max_drawdown_abs(values, relative, result, result_rel):\n \n\n dates = [Arrow(2020, 1, 1).shift(days=i) for i in range(len(values))]\n df = DataFrame(zip(values, dates), columns=['profit", "d_id": 34465, "documentation": { "docstring": "\n Test case from issue https://github.com/freqtrade/freqtrade/issues/6655\n [1000, 500, 1000, 11000, 10000] # absolute results\n [1000, 50%, 0%, 0%, ~9%] # Relative drawdowns\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 46, "language": "en" } }, { "id": 132413, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/tests/test_checkpoint_manager.py", "file_name": "test_checkpoint_manager.py", "fun_name": "testOnCheckpointUnavailableAttribute", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def testOnCheckpointUnavailableAttribute(self):\n \n checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1)\n\n no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {})\n with patch.object(logger, \"error\") as log_error_mock:\n checkpoint_manager.on_checkpoint(no_attr_checkpoint)\n log_error_mock.assert_called_once()\n # The newest checkpoint should still be set despite this error.\n self.assertEqual(\n checkpoint_manager.newest_persistent_checkpoint, no_attr_checkpoint\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 130, "n_words": 32, "vocab_size": 30, "complexity": 1, "nloc": 9, "token_counts": 62, "n_ast_nodes": 106, "n_identifiers": 15, "random_cut": "def testOnCheckpointUnavailableAttribute(self):\n \n checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1)\n\n no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {})\n with patch.object(logger, \"error\") as log_error_mock:\n checkpoint_manager.on_checkpoint(no_attr_checkpoint)\n log_error_mock.assert_called_once()\n # The newest checkpoint should still be set despite this error.\n ", "d_id": 29749, "documentation": { "docstring": "\n Tests that an error is logged when the associated result of the\n checkpoint has no checkpoint score attribute.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 27097, "commit_id": "b5e414c98a1535d287721c859994424cf0eea081", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py", "file_name": "fixtures.py", "fun_name": "subscription_app_status_changed_webhook", 
"commit_message": "New events related to apps changes. (#9698)\n\n* New events related to apps changes.\r\n\r\n* Schema update after rebase\r\n\r\n* CHANGELOG.md update\r\n\r\n* New events description fix\r\n\r\n* Missing app event added to CHANGELOG.md", "code": "def subscription_app_status_changed_webhook(subscription_webhook):\n return subscription_webhook(\n APP_STATUS_CHANGED_SUBSCRIPTION_QUERY,\n WebhookEventAsyncType.APP_STATUS_CHANGED,\n )\n\n\nCATEGORY_CREATED_SUBSCRIPTION_QUERY = \n\n\n@pytest.fixture", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 28, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 15, "n_ast_nodes": 37, "n_identifiers": 8, "random_cut": "def subscription_app_status_changed_webhook(subscription_webhook):\n return subscription_webhook(\n APP_STATUS_CHANGED_SUBSCRIPTION_QUERY,\n WebhookEventAsyncType.APP_STATUS_CHANGED,\n )\n\n\nCATEGORY_CREATED_SUBSCRIPTION_QUERY = \n\n\n@pytest.fixture", "d_id": 5090, "documentation": { "docstring": "\n subscription{\n event{\n ...on CategoryCreated{\n category{\n id\n }\n }\n }\n }\n", "n_words": 10, "vocab_size": 7, "n_whitespaces": 69, "language": "en" } }, { "id": 153801, "commit_id": "cca9468648521e9317de1cb69cf8e6b1d5292d21", "repo": "modin", "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "binary_op", "commit_message": "PERF-#4493: Use partition size caches more in Modin dataframe. (#4495)\n\nCo-authored-by: Devin Petersohn \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: mvashishtha ", "code": "def binary_op(self, op, right_frame, join_type=\"outer\"):\n \n left_parts, right_parts, joined_index, row_lengths = self._copartition(\n 0, right_frame, join_type, sort=True\n )\n # unwrap list returned by `copartition`.\n right_parts = right_parts[0]\n new_frame = self._partition_mgr_cls.binary_operation(\n 1, left_parts, lambda l, r: op(l, r), right_parts\n )\n new_columns = self.columns.join(right_frame.columns, how=join_type)\n return self.__constructor__(\n new_frame,\n joined_index,\n new_columns,\n row_lengths,\n column_widths=self._column_widths_cache,\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 196, "n_words": 49, "vocab_size": 40, "complexity": 1, "nloc": 16, "token_counts": 104, "n_ast_nodes": 149, "n_identifiers": 23, "random_cut": "def binary_op(self, op, right_frame, join_type=\"outer\"):\n \n left_parts, right_parts, joined_index, row_lengths = self._copartition(\n 0, right_frame, join_type, sort=True\n )\n # unwrap list returned by `copartition`.\n right_parts = right_parts[0]\n ", "d_id": 35617, "documentation": { "docstring": "\n Perform an operation that requires joining with another Modin DataFrame.\n\n Parameters\n ----------\n op : callable\n Function to apply after the join.\n right_frame : PandasDataframe\n Modin DataFrame to join with.\n join_type : str, default: \"outer\"\n Type of join to apply.\n\n Returns\n -------\n PandasDataframe\n New Modin DataFrame.\n ", "n_words": 45, "vocab_size": 36, "n_whitespaces": 160, "language": "en" } }, { "id": 108226, "commit_id": "7c6c5f6215b40a27cfefb7bf21246299fd9b3a1e", "repo": "matplotlib", "path": "lib/matplotlib/__init__.py", "file_name": "__init__.py", "fun_name": "rc_file", "commit_message": "Fix removed cross-references", "code": "def rc_file(fname, *, 
use_default_template=True):\n \n # Deprecation warnings were already handled in rc_params_from_file, no need\n # to reemit them here.\n with _api.suppress_matplotlib_deprecation_warning():\n from .style.core import STYLE_BLACKLIST\n rc_from_file = rc_params_from_file(\n fname, use_default_template=use_default_template)\n rcParams.update({k: rc_from_file[k] for k in rc_from_file\n if k not in STYLE_BLACKLIST})\n\n\n@contextlib.contextmanager", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@contextlib.contextmanager", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 109, "n_words": 42, "vocab_size": 37, "complexity": 3, "nloc": 7, "token_counts": 58, "n_ast_nodes": 103, "n_identifiers": 15, "random_cut": "def rc_file(fname, *, use_default_template=True):\n \n # Deprecation warnings were already handled in rc_params_from_file, no need\n # to reemit them here.\n with _api.suppress_matplotlib_deprecation_warning():\n from .style.core import STYLE_BLACKLIST\n rc_from_file = rc_params_from_file(\n fname, use_default_template=use_default_template)\n rcParams.update({k: rc_from_file[k] for k in rc_from_file\n ", "d_id": 23107, "documentation": { "docstring": "\n Update `.rcParams` from file.\n\n Style-blacklisted `.rcParams` (defined in\n ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.\n\n Parameters\n ----------\n fname : str or path-like\n A file with Matplotlib rc settings.\n\n use_default_template : bool\n If True, initialize with default parameters before updating with those\n in the given file. If False, the current configuration persists\n and only the parameters specified in the file are updated.\n ", "n_words": 58, "vocab_size": 43, "n_whitespaces": 111, "language": "en" } }, { "id": 140529, "commit_id": "905258dbc19753c81039f993477e7ab027960729", "repo": "ray", "path": "python/ray/util/collective/collective_group/gloo_util.py", "file_name": "gloo_util.py", "fun_name": "create_gloo_context", "commit_message": "Clean up docstyle in python modules and add LINT rule (#25272)", "code": "def create_gloo_context(rank, world_size):\n \n context = pygloo.rendezvous.Context(rank, world_size)\n return context\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "def create_gloo_context(rank, world_size):\n \n context = pygloo.rendezvous.Context(rank, world_size)\n return context\n\n", "d_id": 32000, "documentation": { "docstring": "Create a GLOO context using GLOO APIs.\n\n Args:\n rank: the rank of this process.\n world_size: the number of processes of this collective group.\n\n Returns:\n context (pygloo.Context): a GLOO context.\n ", "n_words": 29, "vocab_size": 21, "n_whitespaces": 59, "language": "en" } }, { "id": 37307, "commit_id": "3104036e7f1a3cd6e07a69d648c3597de32f72fe", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_bitsandbytes", "commit_message": "Add support for bitsandbytes (#15622)\n\n* Add initial BNB integration\r\n\r\n* fixup! 
Add initial BNB integration\r\n\r\n* Add bnb test decorator\r\n\r\n* Update Adamw8bit option name\r\n\r\n* Use the full bnb package name\r\n\r\n* Overide bnb for all embedding layers\r\n\r\n* Fix package name\r\n\r\n* Formatting\r\n\r\n* Remove unnecessary import\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Stas Bekman \r\n\r\n* Rename AdamwBNB optimizer option\r\n\r\n* Add training test checking that bnb memory utilization is lower\r\n\r\n* fix merge\r\n\r\n* fix merge; fix + extend new test\r\n\r\n* cleanup\r\n\r\n* expand bnb\r\n\r\n* move all require_* candidates to testing_utils.py\r\n\r\nCo-authored-by: Stas Bekman \r\nCo-authored-by: Stas Bekman ", "code": "def require_bitsandbytes(test_case):\n \n if not is_bitsandbytes_available():\n return unittest.skip(\"test requires bnb\")(test_case)\n else:\n return test_case\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 5, "random_cut": "def require_bitsandbytes(test_case):\n \n if not is_bitsandbytes", "d_id": 6763, "documentation": { "docstring": "\n Decorator for bits and bytes (bnb) dependency\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 285675, "commit_id": "72b0a9f1ee8b91ad9fd9e76d80d2ccab51ee6d21", "repo": "OpenBBTerminal", "path": "openbb_terminal/api.py", "file_name": "api.py", "fun_name": "copy_func", "commit_message": "Next release : reports on steroids (#2349)\n\n* fix gov tests\r\n\r\n* refactor insider\r\n\r\n* new virtual path extraction\r\n\r\n* removed some symbol default params as they're considered critical\r\n\r\n* little adjustments\r\n\r\n* portfolio refactor\r\n\r\n* merge API factory\r\n\r\n* add helpers, stocks, crypto, forex\r\n\r\n* minor forex changes\r\n\r\n* include forex api paths\r\n\r\n* add 2 missing forex funcs\r\n\r\n* portfolio brokers refactor\r\n\r\n* display help on api func call\r\n\r\n* add econometrics virtual paths to api\r\n\r\n* add api unit test\r\n\r\n* fixed report for the new api\r\n\r\n* minor portfolio refactorings\r\n\r\n* added gdapps\r\n\r\n* anchor_yield path\r\n\r\n* some more crypto path fixes\r\n\r\n* small change\r\n\r\n* fixed wrong param\r\n\r\n* minor fixes\r\n\r\n* wip - inital commit for forex report\r\n\r\n* add bw as a model, we'll get better solution afterwards\r\n\r\n* added ema with dummy model as it adds great functionality to the report\r\n\r\n* minor fixes\r\n\r\n* wip - added functions to forex report\r\n\r\n* add feedparser news path\r\n\r\n* add new virtual paths to api\r\n\r\n* adding commands to equity report\r\n\r\n* revert to old paths, new ones were breaking\r\n\r\n* Add in very basic ETF report\r\n\r\n* Add candle chart to ETF report\r\n\r\n* add etf load\r\n\r\n* allow use of candle without data\r\n\r\n* add raw to candle\r\n\r\n* added forex report\r\n\r\n* ongoing equity report\r\n\r\n* equity report change\r\n\r\n* fix some portfolio bugs and add docstrings\r\n\r\n* include portfolio paths and coin class\r\n\r\n* add crypto paths\r\n\r\n* change event dates to str\r\n\r\n* starting economy report\r\n\r\n* window for limit\r\n\r\n* equity report and refactor newsapi\r\n\r\n* add helper to api\r\n\r\n* update on economy report\r\n\r\n* equity report\r\n\r\n* update economy report\r\n\r\n* refactor some docstrings\r\n\r\n* change maturities helper\r\n\r\n* refactor 
newsapi\r\n\r\n* refactor futures command\r\n\r\n* add some sauce to ycrv plot\r\n\r\n* black\r\n\r\n* update report\r\n\r\n* refactor alphavantage\r\n\r\n* refactor wsj\r\n\r\n* update economy report\r\n\r\n* ycrv tenor\r\n\r\n* map avaiable_indices\r\n\r\n* map economy helpers\r\n\r\n* fix econdb docstring\r\n\r\n* add plots on economy report\r\n\r\n* minor fixes\r\n\r\n* wip - crypto report\r\n\r\n* update economy report\r\n\r\n* added same default args as view\r\n\r\n* added view to explicity use chart=True when suing the api\r\n\r\n* adjustments - removed rich tables to use only df\r\n\r\n* final version economy report\r\n\r\n* change report name\r\n\r\n* equity report for review\r\n\r\n* linting\r\n\r\n* add etf symbols endpoint\r\n\r\n* incorporate feedback economy report\r\n\r\n* fix reports launch by adding tag to economy report\r\n\r\n* fix equity bug\r\n\r\n* remove analyst name\r\n\r\n* fix\r\n\r\n* fix news\r\n\r\n* make links hyperlinks for equity\r\n\r\n* click links\r\n\r\n* fixed arg name\r\n\r\n* improved news\r\n\r\n* small improves\r\n\r\n* Fix light terminal stylesheet that would prevent using it in notebooks (#2473)\r\n\r\n* improved report\r\n\r\n* run reports in installer\r\n\r\n* fix #2209\r\n\r\n* minor ycrv refactoring\r\n\r\n* refactor portfolio/holdv virtual path\r\n\r\n* refactor benchmark trades\r\n\r\n* fix events args\r\n\r\n* adapt economy report to changes\r\n\r\n* fix portfolio controller bug\r\n\r\n* holdv refactor\r\n\r\n* refactor perf command\r\n\r\n* start portfolio report\r\n\r\n* remove perf view\r\n\r\n* refactor holp\r\n\r\n* add textwrap3 to poetry (doesn't solve the error)\r\n\r\n* fix equity after merge\r\n\r\n* add some rolling commands\r\n\r\n* fix equity after save button\r\n\r\n* improved crypto report, plus minor fixes\r\n\r\n* minor fixes on the reports\r\n\r\n* add maxdd and distr\r\n\r\n* refactor qa\r\n\r\n* var command\r\n\r\n* refactor qa expected shortfall\r\n\r\n* add es command\r\n\r\n* add es command\r\n\r\n* fix qa percentile bug\r\n\r\n* fix economy rendering\r\n\r\n* refactor qa omega\r\n\r\n* add om command\r\n\r\n* add summary command\r\n\r\n* add dret command\r\n\r\n* add mret command\r\n\r\n* add yret command\r\n\r\n* add metrics\r\n\r\n* add allocs to report\r\n\r\n* remove bro and po commands, add later\r\n\r\n* fixed some tests\r\n\r\n* adjustments to crypto report\r\n\r\n* Fix docstring for VSCode\r\n\r\nAdded a note about installing Jupyter PowerToys extension for optimal API usage in Jupyter VSCode, in the API_README.md.\r\n\r\n* minor adjustment\r\n\r\n* remove nft calendar model virtual paths\r\n\r\n* Add in Portfolio report\r\n\r\n* fix external axes portfolio view\r\n\r\n* Update portfolio report with rolling plots\r\n\r\n* Details for ETF and Portfolio\r\n\r\n* fix economy report\r\n\r\n* change analyst to openbb\r\n\r\n* floppy\r\n\r\n* fixed unmatched axis in reports\r\n\r\n* Speed up tests\r\n\r\n* fix file and load on po\r\n\r\n* get_news output\r\n\r\n* add some po paths\r\n\r\n* Add integration tests for Reports menu\r\n\r\n* refactor maxsharpe\r\n\r\n* open maxsharpe\r\n\r\n* open minrisk\r\n\r\n* open maxutil\r\n\r\n* open maxret\r\n\r\n* Added fixes\r\n\r\n* black\r\n\r\n* remove useless views\r\n\r\n* Fixed small issue\r\n\r\n* refactor ef\r\n\r\n* open ef api\r\n\r\n* portfolio optimization report\r\n\r\n* Added fixes\r\n\r\n* unblock api loading\r\n\r\n* add more endpoints\r\n\r\n* update po report\r\n\r\n* unblock api loading\r\n\r\n* update po report\r\n\r\n* expose herc\r\n\r\n* expose property 
endpoint\r\n\r\n* Added fixes\r\n\r\n* More api fixes\r\n\r\n* flake8\r\n\r\n* Fixed some mypy\r\n\r\n* news api model\r\n\r\n* flake8\r\n\r\n* mypy fix\r\n\r\n* mypy\r\n\r\n* black\r\n\r\n* pylint\r\n\r\n* fix tests\r\n\r\n* markdown\r\n\r\n* markdown\r\n\r\n* Added fixes\r\n\r\n* fix economy report\r\n\r\n* merge\r\n\r\n* fix economy report\r\n\r\n* remove empty notebook\r\n\r\n* expose nco\r\n\r\n* remove jupyter notebook\r\n\r\n* expose plot endpoint\r\n\r\n* remove po report, just used for tests\r\n\r\n* api v paths plot\r\n\r\n* remove api_old\r\n\r\n* change loading msg\r\n\r\nCo-authored-by: montezdesousa \r\nCo-authored-by: hjoaquim \r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: Om Gupta \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: JerBouma \r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: Om Gupta <85685255+soggyomelette@users.noreply.github.com>\r\nCo-authored-by: Diogo Sousa \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\nCo-authored-by: Minh Hoang ", "code": "def copy_func(f) -> Callable:\n \n g = types.FunctionType(\n f.__code__,\n f.__globals__,\n name=f.__name__,\n argdefs=f.__defaults__,\n closure=f.__closure__,\n )\n g = functools.update_wrapper(g, f)\n g.__kwdefaults__ = f.__kwdefaults__\n return g\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 22, "vocab_size": 18, "complexity": 1, "nloc": 21, "token_counts": 60, "n_ast_nodes": 91, "n_identifiers": 17, "random_cut": "def copy_func(f) -> Callable:\n \n ", "d_id": 85374, "documentation": { "docstring": "Copies the contents and attributes of the entered function. 
Based on https://stackoverflow.com/a/13503277\n Parameters\n ----------\n f: Callable\n Function to be copied\n Returns\n -------\n g: Callable\n New function\n ", "n_words": 26, "vocab_size": 24, "n_whitespaces": 61, "language": "en" } }, { "id": 20032, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/markers.py", "file_name": "markers.py", "fun_name": "evaluate", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def evaluate(self, expr, context):\n \n if isinstance(expr, string_types):\n if expr[0] in '\\'\"':\n result = expr[1:-1]\n else:\n if expr not in context:\n raise SyntaxError('unknown variable: %s' % expr)\n result = context[expr]\n else:\n assert isinstance(expr, dict)\n op = expr['op']\n if op not in self.operations:\n raise NotImplementedError('op not implemented: %s' % op)\n elhs = expr['lhs']\n erhs = expr['rhs']\n if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):\n raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))\n\n lhs = self.evaluate(elhs, context)\n rhs = self.evaluate(erhs, context)\n if ((elhs == 'python_version' or erhs == 'python_version') and\n op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):\n lhs = NV(lhs)\n rhs = NV(rhs)\n elif elhs == 'python_version' and op in ('in', 'not in'):\n lhs = NV(lhs)\n rhs = _get_versions(rhs)\n result = self.operations[op](lhs, rhs)\n return result\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 463, "n_words": 123, "vocab_size": 73, "complexity": 12, "nloc": 28, "token_counts": 233, "n_ast_nodes": 395, "n_identifiers": 19, "random_cut": "def evaluate(self, expr, context):\n \n if isinstance(expr, string_types):\n if expr[0] in '\\'\"':\n result = expr[1:-1]\n else:\n if expr not in context:\n raise SyntaxError('unknown variable: %s' % expr)\n result = context[expr]\n else:\n assert isinstance(expr, dict)\n op = expr['op']\n if op not in self.operations:\n raise NotImplementedError('op not implemented: %s' % op)\n elhs = expr['lhs']\n erhs = expr['rhs']\n if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):\n raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))\n\n lhs = self.evaluate(elhs, context)\n rhs = self.evaluate(erhs, context)\n if ((elhs == 'python_version' or erhs == 'python_version') and\n op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):\n lhs = NV(lhs)\n rhs = NV(rhs)\n elif elhs == 'python_version' and op ", "d_id": 3185, "documentation": { "docstring": "\n Evaluate a marker expression returned by the :func:`parse_requirement`\n function in the specified context.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 35, "language": "en" } }, { "id": 269522, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "track_tf_optimizer", 
"commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def track_tf_optimizer(tf_optimizer):\n \n if tf.executing_eagerly():\n return\n optimizers = _GRAPH_TF_OPTIMIZERS[None]\n optimizers.add(tf_optimizer)\n\n\n@keras_export(\"keras.__internal__.backend.track_variable\", v1=[])", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.__internal__.backend.track_variable\", v1=[])", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 29, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 64, "n_identifiers": 9, "random_cut": "def track_tf_optimizer(tf_optimizer):\n \n if tf.executing_eagerly():\n return\n optimizers = _GRAPH_TF_OPTIMIZERS[None]\n optimiz", "d_id": 80151, "documentation": { "docstring": "Tracks the given TF optimizer for initialization of its variables.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 195687, "commit_id": "d3c0fc825c4a80904a1fb9a2092137c3d9e0c3fe", "repo": "sympy", "path": "sympy/polys/numberfields/galoisgroups.py", "file_name": "galoisgroups.py", "fun_name": "_galois_group_degree_5", "commit_message": "Add a `galois_group()` function", "code": "def _galois_group_degree_5(T, max_tries=30, randomize=False):\n r\n from sympy.combinatorics.permutations import Permutation\n from sympy.combinatorics.named_groups import (\n CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup\n )\n\n # The ideas here are all the same as in the degree-4 method.\n # The specific resolvents we use, and how we interpret the results, are\n # adapted to the degree-5 case.\n\n X = symbols('X0 X1 X2 X3 X4')\n # For the first resolvent, we have G = S5,\n # and stabilizer H = M20 = < (01234), (1234) >.\n F1 = (X[0]**2*(X[1]*X[4] + X[2]*X[3])\n + X[1]**2*(X[2]*X[0] + X[3]*X[4])\n + X[2]**2*(X[3]*X[1] + X[4]*X[0])\n + X[3]**2*(X[4]*X[2] + X[0]*X[1])\n + X[4]**2*(X[0]*X[3] + X[1]*X[2]))\n s1 = [\n Permutation(4),\n Permutation(4)(0, 1),\n Permutation(4)(0, 2),\n Permutation(4)(0, 3),\n Permutation(4)(0, 4),\n Permutation(4)(1, 4)\n ]\n R1 = Resolvent(F1, X, s1)\n\n # For the second resolvent, we'll have G = D5, H = C5.\n F2_pre = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[4]**2 + X[4]*X[0]**2\n s2_pre = [\n Permutation(4),\n Permutation(4)(0, 1)(2, 4)\n ]\n\n history = set()\n for i in range(max_tries):\n if i > 0:\n _, T = tschirnhausen_transformation(T, max_tries=max_tries, history=history, fixed_order=not randomize)\n\n R_dup, _, i0 = R1.eval_for_poly(T, find_integer_root=True)\n if not dup_sqf_p(R_dup, ZZ):\n continue\n\n sq_disc = has_square_disc(T)\n\n if i0 is None:\n return (AlternatingGroup(5), True) if sq_disc else (SymmetricGroup(5), False)\n\n if not sq_disc:\n return (M20(), False)\n\n sigma = s1[i0]\n F2 = F2_pre.subs(zip(X, sigma(X)), simultaneous=True)\n s2 = [sigma*tau*sigma for tau in s2_pre]\n R2 = Resolvent(F2, X, s2)\n R_dup, _, _ = R2.eval_for_poly(T)\n d = dup_discriminant(R_dup, ZZ)\n if d == 0:\n continue\n if is_square(d):\n return (CyclicGroup(5), True)\n else:\n return (DihedralGroup(5), True)\n\n raise MaxTriesException\n\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 582, "n_words": 247, "vocab_size": 159, "complexity": 10, "nloc": 60, "token_counts": 556, "n_ast_nodes": 820, "n_identifiers": 49, "random_cut": "def _galois_group_degree_5(T, max_tries=30, randomize=False):\n 
r\n from sympy.combinatorics.permutations import Permutation\n from sympy.combinatorics.named_groups import (\n CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup\n ", "d_id": 47370, "documentation": { "docstring": "\n Compute the Galois group of a polynomial of degree 5, following Alg 6.3.9\n of Cohen.\n\n References\n ==========\n\n .. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*.\n\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 47, "language": "en" } }, { "id": 220126, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/argparse.py", "file_name": "argparse.py", "fun_name": "error", "commit_message": "add python 3.10.4 for windows", "code": "def error(self, message):\n \n self.print_usage(_sys.stderr)\n args = {'prog': self.prog, 'message': message}\n self.exit(2, _('%(prog)s: error: %(message)s\\n') % args)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 44, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 74, "n_identifiers": 10, "random_cut": "def error(self, message):\n \n se", "d_id": 55922, "documentation": { "docstring": "error(message: string)\n\n Prints a usage message incorporating the message to stderr and\n exits.\n\n If you override this in a subclass, it should not return -- it\n should either exit or raise an exception.\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 68, "language": "en" } }, { "id": 269418, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/applications/regnet.py", "file_name": "regnet.py", "fun_name": "preprocess_input", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def preprocess_input(x, data_format=None): # pylint: disable=unused-argument\n \n return x\n\n\n@keras_export(\"keras.applications.regnet.decode_predictions\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.applications.regnet.decode_predictions\")", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 33, "n_identifiers": 4, "random_cut": "def preprocess_input(x, data_format=None): # pylint: disable=unused-argument\n \n return x\n\n\n@keras_export(\"k", "d_id": 80066, "documentation": { "docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the regnet model\n implementation. Users are no longer required to call this method to normalize\n the input data. This method does nothing and only kept as a placeholder to\n align the API surface between old and new version of model.\n\n Args:\n x: A floating point `numpy.array` or a `tf.Tensor`.\n data_format: Optional data format of the image tensor/array. 
Defaults to\n None, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it, it\n defaults to \"channels_last\").{mode}\n\n Returns:\n Unchanged `numpy.array` or `tf.Tensor`.\n ", "n_words": 95, "vocab_size": 76, "n_whitespaces": 152, "language": "en" } }, { "id": 74807, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_edit_post", "commit_message": "Reformat with black", "code": "def test_edit_post(self):\n \n # Send request\n response = self.client.post(\n reverse(\"wagtaildocs:edit_multiple\", args=(self.doc.id,)),\n {\n \"doc-%d-%s\" % (self.doc.id, field): data\n for field, data in self.edit_post_data.items()\n },\n )\n\n # Check response\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[\"Content-Type\"], \"application/json\")\n\n # Check JSON\n response_json = json.loads(response.content.decode())\n self.assertIn(\"doc_id\", response_json)\n self.assertNotIn(\"form\", response_json)\n self.assertIn(\"success\", response_json)\n self.assertEqual(response_json[\"doc_id\"], self.doc.id)\n self.assertTrue(response_json[\"success\"])\n\n self.check_doc_after_edit()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 214, "n_words": 46, "vocab_size": 38, "complexity": 2, "nloc": 17, "token_counts": 147, "n_ast_nodes": 246, "n_identifiers": 24, "random_cut": "def test_edit_post(self):\n \n # Send request\n response = self.client.post(\n reverse(\"wagtaildocs:edit_multiple\", args=(self.doc.id,)),\n {\n \"doc-%d-%s\" % (self.doc.id, field): data\n for field, data in self.edit_post_data.items()\n },\n )\n\n # Check response\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[\"Content-Type\"], \"application/json\")\n\n # Check JSON\n response_json = json.loads(response.content.decode())\n self.assertIn(\"doc_id\", response_json)\n self.assertNotIn(\"form\", response_json)\n self.assertIn(\"success\", response_json)\n self.assertEqual(response_json[\"doc_id\"], self.doc.id)\n self.assertTrue(response_json[\"success\"])\n\n self.check_doc_after_edit()\n", "d_id": 16323, "documentation": { "docstring": "\n This tests that a POST request to the edit view edits the document\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 320926, "commit_id": "5616a99eff34f7074641d1391ed77d6b4b743529", "repo": "qutebrowser", "path": "tests/unit/mainwindow/test_messageview.py", "file_name": "test_messageview.py", "fun_name": "test_message_hiding", "commit_message": "Add a MessageInfo data class\n\nPreparation for #7246", "code": "def test_message_hiding(qtbot, view):\n \n with qtbot.wait_signal(view._clear_timer.timeout):\n view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))\n assert not view._messages\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 26, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 72, "n_identifiers": 13, "random_cut": "def test_message_hiding(qtbot, view):\n \n with qtbot.wait_signal(view._clear_timer.timeout):\n view.show_message(message.M", "d_id": 117439, "documentation": { "docstring": "Messages should be hidden after the timer times out.", "n_words": 9, 
"vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 246133, "commit_id": "bf60da1a60096fac5fb778b732ff2214862ac808", "repo": "synapse", "path": "tests/rest/client/test_profile.py", "file_name": "test_profile.py", "fun_name": "test_avatar_allowed_mime_type_per_room", "commit_message": "Configurable limits on avatars (#11846)\n\nOnly allow files which file size and content types match configured\r\nlimits to be set as avatar.\r\n\r\nMost of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19", "code": "def test_avatar_allowed_mime_type_per_room(self):\n \n self._setup_local_files(\n {\n \"good\": {\"mimetype\": \"image/png\"},\n \"bad\": {\"mimetype\": \"application/octet-stream\"},\n }\n )\n\n room_id = self.helper.create_room_as(tok=self.owner_tok)\n\n channel = self.make_request(\n \"PUT\",\n f\"/rooms/{room_id}/state/m.room.member/{self.owner}\",\n content={\"membership\": \"join\", \"avatar_url\": \"mxc://test/bad\"},\n access_token=self.owner_tok,\n )\n self.assertEqual(channel.code, 403, channel.result)\n self.assertEqual(\n channel.json_body[\"errcode\"], Codes.FORBIDDEN, channel.json_body\n )\n\n channel = self.make_request(\n \"PUT\",\n f\"/rooms/{room_id}/state/m.room.member/{self.owner}\",\n content={\"membership\": \"join\", \"avatar_url\": \"mxc://test/good\"},\n access_token=self.owner_tok,\n )\n self.assertEqual(channel.code, 200, channel.result)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 283, "n_words": 48, "vocab_size": 32, "complexity": 1, "nloc": 25, "token_counts": 150, "n_ast_nodes": 276, "n_identifiers": 19, "random_cut": "def test_avatar_allowed_mime_type_per_room(self):\n \n self._setup_local_files(\n {\n \"good\": {\"mimetype\": \"image/png\"},\n \"bad\": {\"mimetype\": \"application/octet-stream\"},\n }\n )\n\n room_id = self.helper.create_room_as(tok=self.owner_tok)\n\n channel = self.make_request(\n \"PUT\",\n f\"/rooms/{room_id}/state/m.room.member/{self.owner}\",\n ", "d_id": 71034, "documentation": { "docstring": "Tests that the MIME type whitelist for avatars is enforced when updating a\n per-room profile.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 29, "language": "en" } }, { "id": 268018, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/host_profiles.py", "file_name": "host_profiles.py", "fun_name": "container_name", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def container_name(self) -> t.Optional[str]:\n \n return self.state.get('container_name')\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 39, "n_identifiers": 7, "random_cut": "def container_name(self) -> t.Optional[str]:\n \n return self.state.get('container_na", "d_id": 79292, "documentation": { "docstring": "Return the stored container name, if any, otherwise None.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 259303, "commit_id": "7dc97a378ecbfa056dd9cfa9d1ef4c07d2d0cc1f", "repo": "scikit-learn", "path": "sklearn/metrics/_scorer.py", "file_name": "_scorer.py", "fun_name": "get_scorer", "commit_message": "API get_scorer returns a copy and introduce get_scorer_names (#22866)", "code": "def get_scorer(scoring):\n \n if isinstance(scoring, str):\n try:\n scorer = copy.deepcopy(_SCORERS[scoring])\n except KeyError:\n raise ValueError(\n \"%r is not a valid scoring value. \"\n \"Use sklearn.metrics.get_scorer_names() \"\n \"to get valid options.\" % scoring\n )\n else:\n scorer = scoring\n return scorer\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 148, "n_words": 37, "vocab_size": 30, "complexity": 3, "nloc": 13, "token_counts": 46, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def get_scorer(scoring):\n \n if isinstance(scoring, str):\n try:\n scorer = copy.deepcopy(_SCORERS[scoring])\n ", "d_id": 75702, "documentation": { "docstring": "Get a scorer from string.\n\n Read more in the :ref:`User Guide `.\n :func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names\n of all available scorers.\n\n Parameters\n ----------\n scoring : str or callable\n Scoring method as string. If callable it is returned as is.\n\n Returns\n -------\n scorer : callable\n The scorer.\n\n Notes\n -----\n When passed a string, this function always returns a copy of the scorer\n object. 
Calling `get_scorer` twice for the same scorer results in two\n separate scorer objects.\n ", "n_words": 78, "vocab_size": 62, "n_whitespaces": 137, "language": "en" } }, { "id": 258445, "commit_id": "5c675183d81d71e7e670bb32cf869afb99b513af", "repo": "scikit-learn", "path": "sklearn/discriminant_analysis.py", "file_name": "discriminant_analysis.py", "fun_name": "fit", "commit_message": "ENH Adds get_feature_names_out for discriminant_analysis (#22120)", "code": "def fit(self, X, y):\n \n X, y = self._validate_data(\n X, y, ensure_min_samples=2, dtype=[np.float64, np.float32]\n )\n self.classes_ = unique_labels(y)\n n_samples, _ = X.shape\n n_classes = len(self.classes_)\n\n if n_samples == n_classes:\n raise ValueError(\n \"The number of samples must be more than the number of classes.\"\n )\n\n if self.priors is None: # estimate priors from sample\n _, y_t = np.unique(y, return_inverse=True) # non-negative ints\n self.priors_ = np.bincount(y_t) / float(len(y))\n else:\n self.priors_ = np.asarray(self.priors)\n\n if (self.priors_ < 0).any():\n raise ValueError(\"priors must be non-negative\")\n if not np.isclose(self.priors_.sum(), 1.0):\n warnings.warn(\"The priors do not sum to 1. Renormalizing\", UserWarning)\n self.priors_ = self.priors_ / self.priors_.sum()\n\n # Maximum number of components no matter what n_components is\n # specified:\n max_components = min(len(self.classes_) - 1, X.shape[1])\n\n if self.n_components is None:\n self._max_components = max_components\n else:\n if self.n_components > max_components:\n raise ValueError(\n \"n_components cannot be larger than min(n_features, n_classes - 1).\"\n )\n self._max_components = self.n_components\n\n if self.solver == \"svd\":\n if self.shrinkage is not None:\n raise NotImplementedError(\"shrinkage not supported\")\n if self.covariance_estimator is not None:\n raise ValueError(\n \"covariance estimator \"\n \"is not supported \"\n \"with svd solver. 
Try another solver\"\n )\n self._solve_svd(X, y)\n elif self.solver == \"lsqr\":\n self._solve_lsqr(\n X,\n y,\n shrinkage=self.shrinkage,\n covariance_estimator=self.covariance_estimator,\n )\n elif self.solver == \"eigen\":\n self._solve_eigen(\n X,\n y,\n shrinkage=self.shrinkage,\n covariance_estimator=self.covariance_estimator,\n )\n else:\n raise ValueError(\n \"unknown solver {} (valid solvers are 'svd', \"\n \"'lsqr', and 'eigen').\".format(self.solver)\n )\n if self.classes_.size == 2: # treat binary case as a special case\n self.coef_ = np.array(\n self.coef_[1, :] - self.coef_[0, :], ndmin=2, dtype=X.dtype\n )\n self.intercept_ = np.array(\n self.intercept_[1] - self.intercept_[0], ndmin=1, dtype=X.dtype\n )\n self._n_features_out = self._max_components\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 1034, "n_words": 249, "vocab_size": 151, "complexity": 13, "nloc": 68, "token_counts": 437, "n_ast_nodes": 696, "n_identifiers": 50, "random_cut": "def fit(self, X, y):\n \n X, y = self._validate_data(\n X, y, ensure_min_samples=2, dtype=[np.float64, np.float32]\n )\n self.classes_ = unique_labels(y)\n n_samples, _ = X.shape\n n_classes = len(self.classes_)\n\n if n_samples == n_classes:\n raise ValueError(\n \"The number of samples must be more than the number of classes.\"\n )\n\n if self.priors is None: # estimate priors from sample\n _, y_t = np.unique(y, return_inverse=True) # non-negative ints\n self.priors_ = np.bincount(y_t) / float(len(y))\n else:\n self.prio", "d_id": 75239, "documentation": { "docstring": "Fit the Linear Discriminant Analysis model.\n\n .. versionchanged:: 0.19\n *store_covariance* has been moved to main constructor.\n\n .. 
versionchanged:: 0.19\n *tol* has been moved to main constructor.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ", "n_words": 52, "vocab_size": 38, "n_whitespaces": 187, "language": "en" } }, { "id": 83277, "commit_id": "b0ce4f1bce8031881addecb1e86073483517f392", "repo": "zulip", "path": "zerver/webhooks/bitbucket3/tests.py", "file_name": "tests.py", "fun_name": "test_pr_opened_with_multiple_reviewers", "commit_message": "docs: Fix many spelling mistakes.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_pr_opened_with_multiple_reviewers(self) -> None:\n expected_topic = \"sandbox / PR #6 sample_file: Add sample_file.txt.\"\n expected_message = \n self.check_webhook(\n \"pull_request_opened_with_multiple_reviewers\", expected_topic, expected_message\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 59, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 23, "n_ast_nodes": 46, "n_identifiers": 5, "random_cut": "def test_pr_opened_with_multiple_reviewers(self) -> None:\n expected_topic = \"sandbox / PR #6 sample_file: Add sample_file.txt.\"\n expected_message = \n self.check_webhook(\n \"pull_request_opened_with_multiple_reviewers\", expected_topic, expected_message\n )\n", "d_id": 17646, "documentation": { "docstring": "[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\\n\\n~~~ quote\\nAdd a simple text file for further testing purposes.\\n~~~", "n_words": 25, "vocab_size": 22, "n_whitespaces": 24, "language": "en" } }, { "id": 39911, "commit_id": "d19f04c9529d624a8d8f9d02f047c4e972f9d4db", "repo": "dash", "path": "dash/_grouping.py", "file_name": "_grouping.py", "fun_name": "make_grouping_by_key", "commit_message": "extended ctx.arg_grouping and changed it to AttributeDict", "code": "def make_grouping_by_key(schema, source, default=None):\n \n return map_grouping(lambda s: source.get(s, default), schema)\n\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 29, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def make_grouping_by_key(schema, source, default=None):\n \n", "d_id": 7284, "documentation": { "docstring": "\n Create a grouping from a schema by using the schema's scalar values to look up\n items in the provided source object.\n\n :param schema: A grouping of potential keys in source\n :param source: Dict-like object to use to look up scalar grouping value using\n scalar grouping values as keys\n :param default: Default scalar value to use if grouping scalar key is not present\n in source\n :return: grouping\n ", "n_words": 66, "vocab_size": 39, "n_whitespaces": 102, "language": "en" } }, { "id": 196278, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/point.py", "file_name": "point.py", "fun_name": "taxicab_distance", "commit_message": "Updated import locations", "code": "def 
taxicab_distance(self, p):\n \n s, p = Point._normalize_dimension(self, Point(p))\n return Add(*(abs(a - b) for a, b in zip(s, p)))\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 39, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 3, "token_counts": 47, "n_ast_nodes": 74, "n_identifiers": 11, "random_cut": "def taxicab_distance(self, p):\n \n s, p = Point._normalize_dimension(self, Point(p))\n return Add(*(abs(a - b) for a, b in zip(s, p)))\n", "d_id": 47778, "documentation": { "docstring": "The Taxicab Distance from self to point p.\n\n Returns the sum of the horizontal and vertical distances to point p.\n\n Parameters\n ==========\n\n p : Point\n\n Returns\n =======\n\n taxicab_distance : The sum of the horizontal\n and vertical distances to point p.\n\n See Also\n ========\n\n sympy.geometry.point.Point.distance\n\n Examples\n ========\n\n >>> from sympy import Point\n >>> p1, p2 = Point(1, 1), Point(4, 5)\n >>> p1.taxicab_distance(p2)\n 7\n\n ", "n_words": 62, "vocab_size": 40, "n_whitespaces": 188, "language": "en" } }, { "id": 268884, "commit_id": "b4dca51d0558e788f62a96d1009a07f773a202f4", "repo": "keras", "path": "keras/metrics/metrics.py", "file_name": "metrics.py", "fun_name": "cosine_similarity", "commit_message": "Refactor disparate metrics-related files into a single metrics folder.\n\nFurther work may be needed to split up the long file with individual metric definitions. However having a single file per metric may be too granular. TBD.\n\nPiperOrigin-RevId: 425248502", "code": "def cosine_similarity(y_true, y_pred, axis=-1):\n \n y_true = tf.linalg.l2_normalize(y_true, axis=axis)\n y_pred = tf.linalg.l2_normalize(y_pred, axis=axis)\n return tf.reduce_sum(y_true * y_pred, axis=axis)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 54, "n_ast_nodes": 83, "n_identifiers": 8, "random_cut": "def cosine_similarity(y_true, y_pred, axis=-1):\n \n y_true = tf.linalg.l2_normalize(y_true, axis=axis)\n y_pred = tf.linalg.l2_normalize(y_pred,", "d_id": 79750, "documentation": { "docstring": "Computes the cosine similarity between labels and predictions.\n\n Args:\n y_true: The ground truth values.\n y_pred: The prediction values.\n axis: (Optional) Defaults to -1. 
The dimension along which the cosine\n similarity is computed.\n\n Returns:\n Cosine similarity value.\n ", "n_words": 36, "vocab_size": 29, "n_whitespaces": 56, "language": "en" } }, { "id": 106953, "commit_id": "ff120cdc5aef1d609913678b1ac8c26e6f30691e", "repo": "matplotlib", "path": "lib/matplotlib/transforms.py", "file_name": "transforms.py", "fun_name": "rotate", "commit_message": "Micro-optimize rotation transform.\n\nThe following test script shows a ~3x speedup.\n\n```python\nimport math, numpy as np\n\nmtx = np.array([[.1, .2, .3], [.4, .5, .6], [0, 0, 1]])\ntheta = np.pi / 4\n\ndef rotate(mtx, theta):\n a = math.cos(theta)\n b = math.sin(theta)\n rotate_mtx = np.array([[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],\n float)\n return np.dot(rotate_mtx, mtx)\n\ndef rfast(mtx, theta):\n a = math.cos(theta)\n b = math.sin(theta)\n (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()\n # mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx\n mtx[0, 0] = a * xx - b * yx\n mtx[0, 1] = a * xy - b * yy\n mtx[0, 2] = a * x0 - b * y0\n mtx[1, 0] = b * xx + a * yx\n mtx[1, 1] = b * xy + a * yy\n mtx[1, 2] = b * x0 + a * y0\n return mtx\n\n%timeit rotate(mtx, theta)\n%timeit rfast(mtx, theta)\n```", "code": "def rotate(self, theta):\n \n a = math.cos(theta)\n b = math.sin(theta)\n mtx = self._mtx\n # Operating and assigning one scalar at a time is much faster.\n (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()\n # mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx\n mtx[0, 0] = a * xx - b * yx\n mtx[0, 1] = a * xy - b * yy\n mtx[0, 2] = a * x0 - b * y0\n mtx[1, 0] = b * xx + a * yx\n mtx[1, 1] = b * xy + a * yy\n mtx[1, 2] = b * x0 + a * y0\n self.invalidate()\n return self\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 215, "n_words": 110, "vocab_size": 53, "complexity": 1, "nloc": 13, "token_counts": 143, "n_ast_nodes": 214, "n_identifiers": 19, "random_cut": "def rotate(self, theta):\n \n a = math.cos(theta)\n b = math.sin(theta)\n ", "d_id": 22523, "documentation": { "docstring": "\n Add a rotation (in radians) to this transform in place.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 64, "language": "en" } }, { "id": 286499, "commit_id": "46141766d7250671b7bc75872e2034afe4938374", "repo": "OpenBBTerminal", "path": "openbb_terminal/parent_classes.py", "file_name": "parent_classes.py", "fun_name": "call_load", "commit_message": "Sdk dates (#3354)\n\n* example changes in slopes\r\n\r\n* change lettering size and side bar capitalization\r\n\r\n* revert back to Fira\r\n\r\n* start automatic website generation\r\n\r\n* this was autogen\r\n\r\n* add examples to slopes model\r\n\r\n* generate slopes doc\r\n\r\n* change to _index.md\r\n\r\n* allow italic formatting\r\n\r\n* fix regex\r\n\r\n* option to regenerate paths\r\n\r\n* update alt docs\r\n\r\n* fix generate\r\n\r\n* update alt\r\n\r\n* fix generate\r\n\r\n* update common\r\n\r\n* target italic only for types\r\n\r\n* format alt\r\n\r\n* format italic common\r\n\r\n* add sig indentation\r\n\r\n* update sig indent alt\r\n\r\n* update common ident\r\n\r\n* add todo\r\n\r\n* generate docstrings for all menus\r\n\r\n* fix maxdd\r\n\r\n* fix returns font size\r\n\r\n* fix keys docs\r\n\r\n* fix more docstrings\r\n\r\n* escape literal symbols\r\n\r\n* escape literal symbols\r\n\r\n* reformat keys\r\n\r\n* format 
opt\r\n\r\n* remove literal escape\r\n\r\n* remove another literal escape\r\n\r\n* remove another literal escape\r\n\r\n* unindent returns\r\n\r\n* update docs return unindent\r\n\r\n* add comma in last arg\r\n\r\n* fix funcs without params\r\n\r\n* fix signature\r\n\r\n* compact some code\r\n\r\n* refactor some more code\r\n\r\n* refactor some code\r\n\r\n* some final cleanup\r\n\r\n* write docstrings\r\n\r\n* change main\r\n\r\n* move futures paths\r\n\r\n* generate futures docs\r\n\r\n* add external axes references\r\n\r\n* fix typo\r\n\r\n* revert to double docstring\r\n\r\n* fix small bug\r\n\r\n* remove docs folder\r\n\r\n* generate.py in website folder\r\n\r\n* add forecast to docs\r\n\r\n* clear some warnings\r\n\r\n* fix underscore\r\n\r\n* remove cite\r\n\r\n* refresh website docs\r\n\r\n* fix forecast docstrings\r\n\r\n* fix po\r\n\r\n* fix po docs and remove italic\r\n\r\n* fix more docstrings\r\n\r\n* remove last warning\r\n\r\n* codespell\r\n\r\n* flake8\r\n\r\n* exclude website contente from flake\r\n\r\n* noqa on optimizer\r\n\r\n* update website\r\n\r\n* fix mypy\r\n\r\n* remove setup from mypy\r\n\r\n* mypy to openbbterminal\r\n\r\n* update precommit\r\n\r\n* pylint\r\n\r\n* try to remove sdk loading issue\r\n\r\n* fix dates active command\r\n\r\n* fix crypto.change formats\r\n\r\n* fix eb formats\r\n\r\n* nonzero fix\r\n\r\n* format dates crypto.load\r\n\r\n* format supply transac\r\n\r\n* format hr altindex\r\n\r\n* format load crypto\r\n\r\n* regenerate docs\r\n\r\n* format ba trend dates\r\n\r\n* regenerate docs\r\n\r\n* format ba trend\r\n\r\n* candle defaults\r\n\r\n* fix sentiment test\r\n\r\n* remove unused import\r\n\r\n* shopt\r\n\r\n* shopt again\r\n\r\n* revert crypto helpers\r\n\r\n* test shopt\r\n\r\n* fix some tests\r\n\r\n* skip trending test\r\n\r\n* fix alcoin test\r\n\r\n* helpers\r\n\r\n* write docs\r\n\r\n* rewrite helper\r\n\r\nCo-authored-by: Jeroen Bouma ", "code": "def call_load(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"load\",\n description=,\n )\n parser.add_argument(\n \"-c\",\n \"--coin\",\n help=\"Coin to get. Must be coin symbol (e.g., btc, eth)\",\n dest=\"coin\",\n type=str,\n required=\"-h\" not in other_args,\n )\n\n parser.add_argument(\n \"-s\",\n \"--start\",\n type=valid_date,\n default=(datetime.now() - timedelta(days=1100)).strftime(\"%Y-%m-%d\"),\n dest=\"start\",\n help=\"The starting date (format YYYY-MM-DD) of the crypto\",\n )\n\n parser.add_argument(\n \"--exchange\",\n help=\"Exchange to search\",\n dest=\"exchange\",\n type=str,\n default=\"binance\",\n choices=self.exchanges,\n )\n\n parser.add_argument(\n \"-e\",\n \"--end\",\n type=valid_date,\n default=datetime.now().strftime(\"%Y-%m-%d\"),\n dest=\"end\",\n help=\"The ending date (format YYYY-MM-DD) of the crypto\",\n )\n parser.add_argument(\n \"-i\",\n \"--interval\",\n action=\"store\",\n dest=\"interval\",\n type=str,\n default=\"1440\",\n choices=[\"1\", \"5\", \"15\", \"30\", \"60\", \"240\", \"1440\", \"10080\", \"43200\"],\n help=\"The interval of the crypto\",\n )\n\n parser.add_argument(\n \"--vs\",\n help=\"Quote currency (what to view coin vs). e.g., usdc, usdt, ... if source is ccxt, usd, eur, ... 
otherwise\", # noqa\n dest=\"vs\",\n default=\"usdt\",\n type=str,\n )\n\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-c\")\n\n ns_parser = self.parse_known_args_and_warn(\n parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n\n if ns_parser:\n if ns_parser.source in (\"YahooFinance\", \"CoinGecko\"):\n if ns_parser.vs == \"usdt\":\n ns_parser.vs = \"usd\"\n (self.current_df) = cryptocurrency_helpers.load(\n symbol=ns_parser.coin.lower(),\n vs_currency=ns_parser.vs,\n end_date=ns_parser.end.strftime(\"%Y-%m-%d\"),\n start_date=ns_parser.start.strftime(\"%Y-%m-%d\"),\n interval=ns_parser.interval,\n source=ns_parser.source,\n exchange=ns_parser.exchange,\n )\n if not self.current_df.empty:\n self.vs = ns_parser.vs\n self.exchange = ns_parser.exchange\n self.source = ns_parser.source\n self.current_interval = ns_parser.interval\n self.current_currency = ns_parser.vs\n self.symbol = ns_parser.coin.lower()\n cryptocurrency_helpers.show_quick_performance(\n self.current_df,\n self.symbol,\n self.current_currency,\n ns_parser.source,\n ns_parser.exchange,\n self.current_interval,\n )\n export_data(\n ns_parser.export,\n os.path.dirname(os.path.abspath(__file__)),\n \"load\",\n self.current_df.copy(),\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1328, "n_words": 198, "vocab_size": 141, "complexity": 7, "nloc": 99, "token_counts": 486, "n_ast_nodes": 791, "n_identifiers": 59, "random_cut": "def call_load(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n ", "d_id": 85834, "documentation": { "docstring": "Process load command.Load crypto currency to perform analysis on.\n Yahoo Finance is used as default source.\n Other sources can be used such as 'ccxt' or 'cg' with --source.\n If you select 'ccxt', you can then select any exchange with --exchange.\n You can also select a specific interval with --interval.", "n_words": 49, "vocab_size": 40, "n_whitespaces": 92, "language": "en" } }, { "id": 264448, "commit_id": "7c105019d8ae9205051c302e7499b33a455f9176", "repo": "netbox", "path": "netbox/utilities/templatetags/builtins/filters.py", "file_name": "filters.py", "fun_name": "render_markdown", "commit_message": "Closes #8600: Document built-in template tags & filters", "code": "def render_markdown(value):\n \n schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES)\n\n # Strip HTML tags\n value = strip_tags(value)\n\n # Sanitize Markdown links\n pattern = fr'\\[([^\\]]+)\\]\\((?!({schemes})).*:(.+)\\)'\n value = re.sub(pattern, '[\\\\1](\\\\3)', value, flags=re.IGNORECASE)\n\n # Sanitize Markdown reference links\n pattern = fr'\\[(.+)\\]:\\s*(?!({schemes}))\\w*:(.+)'\n value = re.sub(pattern, '[\\\\1]: \\\\3', value, flags=re.IGNORECASE)\n\n # Render Markdown\n html = markdown(value, extensions=['fenced_code', 'tables', StrikethroughExtension()])\n\n # If the string is not empty wrap it in rendered-markdown to style tables\n if html:\n html = f'
    {html}
    '\n\n return mark_safe(html)\n\n\n@register.filter('json')", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@register.filter('json')", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 123, "n_words": 72, "vocab_size": 50, "complexity": 2, "nloc": 11, "token_counts": 98, "n_ast_nodes": 198, "n_identifiers": 19, "random_cut": "def render_markdown(value):\n \n schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES)\n\n # Strip HTML tags\n value = strip_tags(value)\n\n # Sanitize Markdown links\n pattern = fr'\\[([^\\]]+)\\]\\((?!({schemes})).*:(.+)\\)'\n value = re.sub(pattern, '[\\\\1](\\\\3)', value, flags=re.IGNORECASE)\n\n # Sanitize Markdown reference links\n pattern = fr'\\[(.+)\\]:\\s*(?!({schemes}))\\w*:(.+)'\n value = re.sub(pattern, '[\\\\1]: \\\\3', value, flags=re.IGNORECASE)\n\n # Render Markdown\n html = markdown(value, extensions=", "d_id": 77734, "documentation": { "docstring": "\n Render a string as Markdown. This filter is invoked as \"markdown\":\n\n {{ md_source_text|markdown }}\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 177588, "commit_id": "074af782e6f351c711f18d8ad6a05aa4f632339c", "repo": "label-studio", "path": "label_studio/projects/functions/next_task.py", "file_name": "next_task.py", "fun_name": "_try_breadth_first", "commit_message": "feat: DEV-469: Skip queue (#1693)\n\n* DEV-469 Skip queue project setting\r\n\r\n* DEV-469 review fixes\r\n\r\n* Merge migrations (DEV-469)\r\n\r\n* Update requirements-test.txt\r\n\r\n* Update requirements-test.txt\r\n\r\n* Update test_exception.py\r\n\r\n* Revert \"Update test_exception.py\"\r\n\r\nThis reverts commit b9c686c9bacaf298bafe3a207352cc5260fef737.\r\n\r\n* Revert \"Update requirements-test.txt\"\r\n\r\nThis reverts commit 3704d29978761089bcd008506f9e1c30a162bb3a.\r\n\r\n* Revert \"Update requirements-test.txt\"\r\n\r\nThis reverts commit 50273847ae2872b31bccc376d04a3afff0efcf21.\r\n\r\n* Recalc is_labeled after skip_queue change (DEV-469)\r\n\r\n* Fix migrations (DEV-469)\r\n\r\nCo-authored-by: Max Tkachenko \r\nCo-authored-by: niklub \r\nCo-authored-by: nik ", "code": "def _try_breadth_first(tasks, user):\n \n\n tasks = tasks.annotate(annotations_count=Count('annotations'))\n max_annotations_count = tasks.aggregate(Max('annotations_count'))['annotations_count__max']\n if max_annotations_count == 0:\n # there is no any labeled tasks found\n return\n\n # find any task with maximal amount of created annotations\n not_solved_tasks_labeling_started = tasks.annotate(\n reach_max_annotations_count=Case(\n When(annotations_count=max_annotations_count, then=Value(True)),\n default=Value(False),\n output_field=BooleanField(),\n )\n )\n not_solved_tasks_labeling_with_max_annotations = not_solved_tasks_labeling_started.filter(\n reach_max_annotations_count=True\n )\n if not_solved_tasks_labeling_with_max_annotations.exists():\n # try to complete tasks that are already in progress\n return _get_random_unlocked(not_solved_tasks_labeling_with_max_annotations, user)\n\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 174, "n_words": 62, "vocab_size": 49, "complexity": 3, "nloc": 17, "token_counts": 104, "n_ast_nodes": 174, "n_identifiers": 22, "random_cut": "def _try_breadth_first(tasks, user):\n \n\n tasks = tasks.annotate(annotations_count=Count(", "d_id": 42454, "documentation": { "docstring": "Try to find tasks with maximum 
amount of annotations, since we are trying to label tasks as fast as possible\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 23, "language": "en" } }, { "id": 143838, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/policy/tests/test_rnn_sequencing.py", "file_name": "test_rnn_sequencing.py", "fun_name": "test_pad_batch_dynamic_max", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_pad_batch_dynamic_max(self):\n \n view_requirements = {\n \"state_in_0\": ViewRequirement(\n \"state_out_0\",\n shift=[-1],\n used_for_training=False,\n used_for_compute_actions=True,\n batch_repeat_value=1,\n )\n }\n max_seq_len = 20\n num_seqs = np.random.randint(1, 20)\n seq_lens = np.random.randint(1, max_seq_len, size=(num_seqs))\n max_len = np.max(seq_lens)\n sum_seq_lens = np.sum(seq_lens)\n\n s1 = SampleBatch(\n {\n \"a\": np.arange(sum_seq_lens),\n \"b\": np.arange(sum_seq_lens),\n \"seq_lens\": seq_lens,\n \"state_in_0\": [[0]] * num_seqs,\n },\n _max_seq_len=max_seq_len,\n )\n\n pad_batch_to_sequences_of_same_size(\n s1,\n max_seq_len=max_seq_len,\n feature_keys=[\"a\", \"b\"],\n view_requirements=view_requirements,\n )\n check(s1.max_seq_len, max_len)\n check(s1[\"a\"].shape[0], max_len * num_seqs)\n check(s1[\"b\"].shape[0], max_len * num_seqs)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 405, "n_words": 66, "vocab_size": 49, "complexity": 1, "nloc": 33, "token_counts": 190, "n_ast_nodes": 299, "n_identifiers": 27, "random_cut": "def test_pad_batch_dynamic_max(self):\n \n view_requirements = {\n \"state_in_0\": ViewRequirement(\n \"state_out_0\",\n shift=[-1],\n used_for_training=False,\n used_for_compute_actions=True,\n batch_repeat_value=1,\n )\n }\n max_seq_len = 20\n num_seqs = np.random.randint(1, 20)\n seq_lens = np.random.randint(1, max_seq_len, size=(num_seqs))\n max_len = np.max(seq_lens)\n sum_seq_lens = np.sum(seq_lens)\n\n s1 = SampleBatch(\n {\n \"a\": np.arange(sum_seq_lens),\n \"b\": np.arange(sum_seq_lens),\n \"seq_lens\": seq_lens,\n \"state_in_0\": [[0]] * num_seqs,\n },\n _max_seq_len=max_seq_len,\n )\n\n pad_batch_to_sequences_of_same_size(\n s1,\n max_seq_len=max_seq_len,\n feature_keys=[\"a\", \"b\"],\n view_requirements=view_requirements,\n )\n check(s1.max_seq_len, max_len)\n check(s1[\"a\"].shape[0], max_len * num_seqs)\n check(s1[\"b\"].shape[0], max_len * num_seqs)\n", "d_id": 33074, "documentation": { "docstring": "Test pad_batch_to_sequences_of_same_size when dynamic_max = True", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 188975, "commit_id": "46cb6c212a870b36bd0af17c48dd29f53468734b", "repo": "psutil", "path": "psutil/_pslinux.py", "file_name": "_pslinux.py", "fun_name": "sensors_fans", "commit_message": "[Linux] cat/bcat utils refactoring (#2053)", "code": "def sensors_fans():\n \n ret = collections.defaultdict(list)\n basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')\n if not basenames:\n # CentOS has an intermediate /device directory:\n # https://github.com/giampaolo/psutil/issues/971\n basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')\n\n basenames = sorted(set([x.split('_')[0] for x in basenames]))\n for base in basenames:\n try:\n current = int(bcat(base + '_input'))\n except (IOError, OSError) as err:\n debug(err)\n continue\n unit_name = 
cat(os.path.join(os.path.dirname(base), 'name'))\n label = cat(base + '_label', fallback='')\n ret[unit_name].append(_common.sfan(label, current))\n\n return dict(ret)\n\n", "url": "https://github.com/giampaolo/psutil.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 171, "n_words": 61, "vocab_size": 48, "complexity": 5, "nloc": 16, "token_counts": 143, "n_ast_nodes": 245, "n_identifiers": 31, "random_cut": "def sensors_fans():\n \n ret = collections.defaultdict(list)\n basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')\n if not basenames:\n # CentOS has an intermediate /device directory:\n # https://github.com/giampaolo/psutil/issues/971\n basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')\n\n basenames = sorted(set([x.split('_')[0] for x in basenames]))\n for base in basenames:\n try:\n current = int(bcat(base + '_input'))\n except (IOError, OSError) as err:\n debug(err)\n continue\n unit_name = cat(os.path.join(os.path.dirname(base), 'name'))\n label = cat(base + '_label', fallback='')\n ret[", "d_id": 45950, "documentation": { "docstring": "Return hardware fans info (for CPU and other peripherals) as a\n dict including hardware label and current speed.\n\n Implementation notes:\n - /sys/class/hwmon looks like the most recent interface to\n retrieve this info, and this implementation relies on it\n only (old distros will probably use something else)\n - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon\n ", "n_words": 54, "vocab_size": 45, "n_whitespaces": 79, "language": "en" } }, { "id": 218360, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/util.py", "file_name": "util.py", "fun_name": "factory", "commit_message": "add python 3.10.4 for windows", "code": "def factory(cls, loader):\n \n cls.__check_eager_loader(loader)\n return lambda *args, **kwargs: cls(loader(*args, **kwargs))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 55, "n_identifiers": 6, "random_cut": "def factory(cls, loader):\n \n cls.__check_eager_loader(loader)\n return lambda *args, **", "d_id": 55257, "documentation": { "docstring": "Construct a callable which returns the eager loader made lazy.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 73586, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/table_block/tests.py", "file_name": "tests.py", "fun_name": "test_table_block_caption_render", "commit_message": "Reformat with black", "code": "def test_table_block_caption_render(self):\n \n value = {\n \"table_caption\": \"caption\",\n \"first_row_is_table_header\": False,\n \"first_col_is_header\": False,\n \"data\": [\n [\"Test 1\", \"Test 2\", \"Test 3\"],\n [None, None, None],\n [None, None, None],\n ],\n }\n block = TableBlock()\n result = block.render(value)\n expected = \n self.assertHTMLEqual(result, expected)\n self.assertIn(\"Test 2\", result)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 197, "n_words": 40, "vocab_size": 31, "complexity": 1, "nloc": 25, "token_counts": 83, "n_ast_nodes": 140, "n_identifiers": 10, "random_cut": "def test_table_block_caption_render(self):\n \n value = {\n 
\"table_caption\": \"caption\",\n \"first_row_is_table_header\": False,\n \"first_col_is_header\": Fals", "d_id": 16053, "documentation": { "docstring": "\n Test a generic render with caption.\n \n \n \n \n \n \n \n \n
    caption
    Test 1Test 2Test 3
    \n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 164, "language": "en" } }, { "id": 3409, "commit_id": "0a3713a5a52995dc0dc205d8edfd097bf625899f", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-salesforce/unit_tests/unit_test.py", "file_name": "unit_test.py", "fun_name": "stream_config_without_start_date", "commit_message": "Source Salesforce: Deprecate API Type parameter (#9302)\n\n* use BULK for the first sync, REST for incremental sync\r\n\r\n* if stream contains compound data or/and base64 use always REST\r\n\r\n* fix get stream state from connector state\r\n\r\n* fix integration test\r\n\r\n* refactor catalog name\r\n\r\n* format code\r\n\r\n* refactor unit tests\r\n\r\n* refactor unit tests 2\r\n\r\n* format code 2\r\n\r\n* Set additionalProperties to true not to break test temporarily\r\n\r\n* fix unit test and remove unnecessary filtering fields\r\n\r\n* bump version\r\n\r\n* updated spec and def yaml\r\n\r\nCo-authored-by: auganbay ", "code": "def stream_config_without_start_date():\n \n return {\n \"client_id\": \"fake_client_id\",\n \"client_secret\": \"fake_client_secret\",\n \"refresh_token\": \"fake_refresh_token\",\n \"is_sandbox\": False,\n \"wait_timeout\": 15,\n }\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 59, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 8, "token_counts": 28, "n_ast_nodes": 58, "n_identifiers": 1, "random_cut": "def stream_config_without_start_date():", "d_id": 472, "documentation": { "docstring": "Generates streams settings for REST logic without start_date", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 151476, "commit_id": "3e8d8fd1b08e28f8ec231de9ee3be57a539b266e", "repo": "freqtrade", "path": "freqtrade/rpc/api_server/ws/channel.py", "file_name": "channel.py", "fun_name": "relay", "commit_message": "refactor broadcasting to a queue per client", "code": "async def relay(self):\n \n while True:\n message = await self.queue.get()\n try:\n await self.send(message)\n self.queue.task_done()\n except RuntimeError:\n # The connection was closed, just exit the task\n return\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 132, "n_words": 25, "vocab_size": 24, "complexity": 3, "nloc": 8, "token_counts": 39, "n_ast_nodes": 72, "n_identifiers": 8, "random_cut": "async def relay(self):\n \n while True:\n message = await self.queue.get()\n try:\n await self.send(message)\n self.queue.task_done()\n except RuntimeError:\n # The connection was closed, just exit t", "d_id": 35023, "documentation": { "docstring": "\n Relay messages from the channel's queue and send them out. 
This is started\n as a task.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 49786, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/gaussian_diffusion.py", "file_name": "gaussian_diffusion.py", "fun_name": "q_sample", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def q_sample(self, x_start, t, noise=None):\n \n if noise is None:\n # noise = th.randn_like(x_start)\n noise = paddle.randn(x_start.shape, x_start.dtype)\n assert noise.shape == x_start.shape\n return (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +\n _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 98, "n_words": 33, "vocab_size": 26, "complexity": 2, "nloc": 6, "token_counts": 73, "n_ast_nodes": 109, "n_identifiers": 12, "random_cut": "def q_sample(self, x_start, t, noise=None):\n \n if noise is None:\n # noise = th.randn_like(x_start)\n noise = paddle.randn(x_start.shape, x_start.dtype)\n assert noise.shape == x_start.shape\n return (_extract_into_tensor(self.sqrt_alphas_cumpr", "d_id": 9909, "documentation": { "docstring": "\n Diffuse the data for a given number of diffusion steps.\n\n In other words, sample from q(x_t | x_0).\n\n :param x_start: the initial data batch.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :param noise: if specified, the split-out normal noise.\n :return: A noisy version of x_start.\n ", "n_words": 52, "vocab_size": 42, "n_whitespaces": 102, "language": "en" } }, { "id": 114587, "commit_id": "7e3da9157508a5eb38dbfabbd7f08ba8fa6c5a88", "repo": "mindsdb", "path": "mindsdb/integrations/postgres_handler/postgres_handler.py", "file_name": "postgres_handler.py", "fun_name": "get_views", "commit_message": "Get tables, views, describe", "code": "def get_views(self):\n \n query = f\"SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')\"\n result = self.run_native_query(query)\n return result\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 20, "n_ast_nodes": 36, "n_identifiers": 5, "random_cut": "def get_views(self):\n \n query = f\"SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')\"\n result = self.run_native_query(q", "d_id": 25222, "documentation": { "docstring": "\n List all views in PostgreSQL without the system views information_schema and pg_catalog\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 204139, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/utils/layermapping.py", "file_name": "layermapping.py", "fun_name": "verify_ogr_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def verify_ogr_field(self, ogr_field, model_field):\n \n if isinstance(ogr_field, OFTString) and isinstance(\n model_field, (models.CharField, models.TextField)\n ):\n if self.encoding and ogr_field.value is not None:\n # The encoding for OGR data sources may be specified here\n # (e.g., 'cp437' for 
Census Bureau boundary files).\n val = force_str(ogr_field.value, self.encoding)\n else:\n val = ogr_field.value\n if (\n model_field.max_length\n and val is not None\n and len(val) > model_field.max_length\n ):\n raise InvalidString(\n \"%s model field maximum string length is %s, given %s characters.\"\n % (model_field.name, model_field.max_length, len(val))\n )\n elif isinstance(ogr_field, OFTReal) and isinstance(\n model_field, models.DecimalField\n ):\n try:\n # Creating an instance of the Decimal value to use.\n d = Decimal(str(ogr_field.value))\n except DecimalInvalidOperation:\n raise InvalidDecimal(\n \"Could not construct decimal from: %s\" % ogr_field.value\n )\n\n # Getting the decimal value as a tuple.\n dtup = d.as_tuple()\n digits = dtup[1]\n d_idx = dtup[2] # index where the decimal is\n\n # Maximum amount of precision, or digits to the left of the decimal.\n max_prec = model_field.max_digits - model_field.decimal_places\n\n # Getting the digits to the left of the decimal place for the\n # given decimal.\n if d_idx < 0:\n n_prec = len(digits[:d_idx])\n else:\n n_prec = len(digits) + d_idx\n\n # If we have more than the maximum digits allowed, then throw an\n # InvalidDecimal exception.\n if n_prec > max_prec:\n raise InvalidDecimal(\n \"A DecimalField with max_digits %d, decimal_places %d must \"\n \"round to an absolute value less than 10^%d.\"\n % (model_field.max_digits, model_field.decimal_places, max_prec)\n )\n val = d\n elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(\n model_field, models.IntegerField\n ):\n # Attempt to convert any OFTReal and OFTString value to an OFTInteger.\n try:\n val = int(ogr_field.value)\n except ValueError:\n raise InvalidInteger(\n \"Could not construct integer from: %s\" % ogr_field.value\n )\n else:\n val = ogr_field.value\n return val\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1068, "n_words": 274, "vocab_size": 155, "complexity": 16, "nloc": 53, "token_counts": 278, "n_ast_nodes": 451, "n_identifiers": 36, "random_cut": "def verify_ogr_field(self, ogr_field, model_field):\n \n if isinstance(ogr_field, OFTString) and isinstance(\n model_field, (models.CharField, models.TextField)\n ):\n if self.encoding and ogr_field.value is not None:\n # The encoding for OGR data sources may be specified here\n # (e.g., 'cp437' for Census Bureau boundary files).\n val = force_str(ogr_field.value, self.encoding)\n else:\n val = ogr_field.value\n if (\n model_field.max_length\n and val is not None\n and len(val) > model_field.max_length\n ):\n raise InvalidString(\n \"%s model field maximum string length is %s, given %s characters.\"\n % (model_field.name, model_field.max_length, len(val))\n )\n elif isinstance(ogr_field, OFTReal) and isinstance(\n model_field, models.DecimalField\n ):\n try:\n # Creating an instance of the Decimal value to use.\n d = Decimal(str(ogr_field.value))\n except DecimalInvalidOperation:\n raise InvalidDecimal(\n \"Could not construct decimal from: %s\" % ogr_field.value\n )\n\n # Getting the decimal value as a tuple.\n dtup = d.as_tuple()\n digits = dtup[1]\n d_idx = dtup[2] # index where the decimal is\n\n # Maximum amount of precision, or digits to the left of the decimal.\n max_prec = model_field.max_digits - model_field.decimal_places\n\n # Getting the digits to the left of the decimal place for the\n # given decimal.\n if d_idx < 0:\n n_prec = len(digits[:d_idx])\n else:\n n_prec = len(digits) + 
d_idx\n\n # If we have more than the maximum digits allowed, then throw an\n # InvalidDecimal exception.\n if n_prec > max_prec:\n raise InvalidDecimal(\n \"A DecimalField with max_digits %d, decimal_places %d must \"\n \"round to an absolute value less than 10^%d.\"\n % (model_field.max_digits, model_field.decimal_places, max_prec)\n )\n val = d\n elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(\n model_field, models.IntegerField\n ):\n # Attempt to convert any OFTReal and OFTString value to an OFTInteger.\n try:\n val = in", "d_id": 50640, "documentation": { "docstring": "\n Verify if the OGR Field contents are acceptable to the model field. If\n they are, return the verified value, otherwise raise an exception.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 45, "language": "en" } }, { "id": 13717, "commit_id": "b36e6bdb1f5d02a4c5af3131f3a07d7b4ccddced", "repo": "jina", "path": "jina/serve/streamer.py", "file_name": "streamer.py", "fun_name": "get_streamer", "commit_message": "feat: add get_streamer helper and inject streamer info (#5472)", "code": "def get_streamer():\n \n if 'JINA_STREAMER_ARGS' in os.environ:\n args_dict = json.loads(os.environ['JINA_STREAMER_ARGS'])\n return GatewayStreamer(**args_dict)\n else:\n raise OSError('JINA_STREAMER_ARGS environment variable is not set')\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 73, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 6, "token_counts": 38, "n_ast_nodes": 71, "n_identifiers": 8, "random_cut": "def get_streamer():\n \n if 'JINA_STREAMER_ARGS' in os.environ:\n args_dict = json.loads(os.environ['JINA_STREAMER_ARGS'])\n return GatewayStreamer(**args_dict)\n ", "d_id": 2741, "documentation": { "docstring": "\n Return a streamer object based on the current environment context.\n The streamer object is contructed using runtime arguments stored in the `JINA_STREAMER_ARGS` environment variable.\n If this method is used outside a Jina context (process not controlled/orchestrated by jina), this method will\n raise an error.\n The streamer object does not have tracing/instrumentation capabilities.\n\n :return: Returns an instance of `GatewayStreamer`\n ", "n_words": 58, "vocab_size": 45, "n_whitespaces": 108, "language": "en" } }, { "id": 149618, "commit_id": "737bdfe844e575bdbbc9cd9d2a84291fe2e58300", "repo": "freqtrade", "path": "freqtrade/freqtradebot.py", "file_name": "freqtradebot.py", "fun_name": "enter_positions", "commit_message": "Use \"side\" parameter when calling Pairlocks", "code": "def enter_positions(self) -> int:\n \n trades_created = 0\n\n whitelist = copy.deepcopy(self.active_pair_whitelist)\n if not whitelist:\n logger.info(\"Active pair whitelist is empty.\")\n return trades_created\n # Remove pairs for currently opened trades from the whitelist\n for trade in Trade.get_open_trades():\n if trade.pair in whitelist:\n whitelist.remove(trade.pair)\n logger.debug('Ignoring %s in pair whitelist', trade.pair)\n\n if not whitelist:\n logger.info(\"No currency pair in active pair whitelist, \"\n \"but checking to exit open trades.\")\n return trades_created\n if PairLocks.is_global_lock(side='*'):\n # This only checks for total locks (both sides).\n # per-side locks will be evaluated by `is_pair_locked` within create_trade,\n # once the direction for the trade is clear.\n lock = PairLocks.get_pair_longest_lock('*')\n if lock:\n self.log_once(f\"Global pairlock active until \"\n 
f\"{lock.lock_end_time.strftime(constants.DATETIME_PRINT_FORMAT)}. \"\n f\"Not creating new trades, reason: {lock.reason}.\", logger.info)\n else:\n self.log_once(\"Global pairlock active. Not creating new trades.\", logger.info)\n return trades_created\n # Create entity and execute trade for each pair from whitelist\n for pair in whitelist:\n try:\n trades_created += self.create_trade(pair)\n except DependencyException as exception:\n logger.warning('Unable to create trade for %s: %s', pair, exception)\n\n if not trades_created:\n logger.debug(\"Found no enter signals for whitelisted currencies. Trying again...\")\n\n return trades_created\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 590, "n_words": 170, "vocab_size": 111, "complexity": 10, "nloc": 34, "token_counts": 172, "n_ast_nodes": 327, "n_identifiers": 31, "random_cut": "def enter_positions(self) -> int:\n \n trades_created = 0\n\n whitelist = copy.deepcopy(self.active_pair_whitelist)\n if not whitelist:\n logger.info(\"Active pair whitelist is empty.\")\n return trades_created\n # Remove pairs for currently opened trades from the whitelist\n for trade in Trade.get_open_trades():\n if", "d_id": 34462, "documentation": { "docstring": "\n Tries to execute entry orders for new trades (positions)\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 40171, "commit_id": "c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c", "repo": "dash", "path": "dash/_validate.py", "file_name": "_validate.py", "fun_name": "validate_js_path", "commit_message": "f-strings everywhere! fffff", "code": "def validate_js_path(registered_paths, package_name, path_in_package_dist):\n if package_name not in registered_paths:\n raise exceptions.DependencyException(\n f\n )\n\n if path_in_package_dist not in registered_paths[package_name]:\n raise exceptions.DependencyException(\n f\n )\n\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 77, "n_words": 22, "vocab_size": 15, "complexity": 3, "nloc": 17, "token_counts": 40, "n_ast_nodes": 93, "n_identifiers": 8, "random_cut": "def validate_js_path(registered_paths, package_name, path_in_package_dist):\n if package_name not in registered_paths:\n raise exceptions.DependencyException(\n f\n )\n\n if path_in_package_dist not in registered_paths[package_name]:\n raise exceptions.DependencyException(\n f\n )", "d_id": 7335, "documentation": { "docstring": "\n Error loading dependency. 
\"{package_name}\" is not a registered library.\n Registered libraries are:\n {list(registered_paths.keys())}\n \n \"{package_name}\" is registered but the path requested is not valid.\n The path requested: \"{path_in_package_dist}\"\n List of registered paths: {registered_paths}\n ", "n_words": 32, "vocab_size": 25, "n_whitespaces": 122, "language": "en" } }, { "id": 124874, "commit_id": "0ecc7dad74d77a24705e44da2ba80892177377bc", "repo": "ray", "path": "python/ray/serve/utils.py", "file_name": "utils.py", "fun_name": "get_all_node_ids", "commit_message": "Revert \"Revert \"[serve] Use soft constraint for pinning controller on head node (#25091)\" (#25857)\" (#25858)", "code": "def get_all_node_ids() -> List[Tuple[str, str]]:\n \n node_ids = []\n # Sort on NodeID to ensure the ordering is deterministic across the cluster.\n for node in sorted(ray.nodes(), key=lambda entry: entry[\"NodeID\"]):\n # print(node)\n if node[\"Alive\"]:\n node_ids.append((node[\"NodeID\"], node[\"NodeName\"]))\n\n return node_ids\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 76, "n_words": 36, "vocab_size": 33, "complexity": 3, "nloc": 11, "token_counts": 65, "n_ast_nodes": 110, "n_identifiers": 12, "random_cut": "def get_all_node_ids() -> List[Tuple[str, str]]:\n \n node_ids = []\n # Sort on NodeID to ensure the ordering is deterministic across the cluster.\n for node in sorted(ray.nodes(), key=lambda entry: entry[\"NodeID\"]):\n # print(node)\n if node[\"Alive\"]:\n node_ids.append((node[\"NodeID\"], node[\"NodeName\"]))\n\n return node_ids\n\n", "d_id": 27706, "documentation": { "docstring": "Get IDs for all live nodes in the cluster.\n\n Returns a list of (node_id: str, ip_address: str). 
The node_id can be\n passed into the Ray SchedulingPolicy API.\n ", "n_words": 27, "vocab_size": 26, "n_whitespaces": 36, "language": "en" } }, { "id": 242540, "commit_id": "ea7e108ca3c6fcd00014de370075ed0361a08138", "repo": "Pillow", "path": "src/PIL/PpmImagePlugin.py", "file_name": "PpmImagePlugin.py", "fun_name": "_ignore_comments", "commit_message": "Implement bitonal decoder", "code": "def _ignore_comments(self, block):\n \n\n comment_spans = False\n while True:\n comment_start = block.find(b\"#\") # look for next comment\n if comment_start == -1: # no comment found\n break\n comment_end = self._find_comment_end(block, comment_start)\n if comment_end != -1: # comment ends in this block\n block = (\n block[:comment_start] + block[comment_end + 1 :]\n ) # delete comment\n else: # last comment continues to next block(s)\n block = block[:comment_start]\n comment_spans = True\n break\n return block, comment_spans\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 267, "n_words": 70, "vocab_size": 45, "complexity": 4, "nloc": 16, "token_counts": 80, "n_ast_nodes": 136, "n_identifiers": 8, "random_cut": "def _ignore_comments(self, block):\n \n\n comment_spans = False\n while True:\n comment_start = block.find(b\"#\") # look for next comment\n if comment_start == -1: # no comment found\n break\n comment_end = self._find_comment_end(block, comment_start)\n if comment_end != -1: # comment ends in this block\n block = (\n block[:comment_start] + block[comment_end + 1 :]\n ) # delete comment\n else: # last comment continues to next block(s)\n block = block[:com", "d_id": 69876, "documentation": { "docstring": "\n Deletes comments from block. If comment does not end in this\n block, raises a flag.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 128859, "commit_id": "d99eff919bf785f911e4eebc87ddc4960344a139", "repo": "ray", "path": "python/ray/train/tests/test_gpu.py", "file_name": "test_gpu.py", "fun_name": "test_torch_auto_gpu_to_cpu", "commit_message": "[AIR] Hard deprecate old Trainer, old callbacks (#29015)\n\nHard deprecations for ray.train.Trainer, ray.train.callbacks and ray.train.checkpoint.CheckpointStrategy. Restart-on-failure logic from BackendExecutor has also been removed as it is superseded by Tune.\r\n\r\nSome tests have been refactored to use the new API. 
Tests that are no longer applicable have been removed.\r\n\r\nSigned-off-by: Antoni Baum \r\nSigned-off-by: Amog Kamsetty \r\nCo-authored-by: Amog Kamsetty ", "code": "def test_torch_auto_gpu_to_cpu(ray_start_4_cpus_2_gpus):\n \n num_workers = 2\n assert os.environ[\"CUDA_VISIBLE_DEVICES\"] == \"\"\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 3, "nloc": 23, "token_counts": 163, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def test_torch_auto_gpu_to_cpu(ray_start_4_cpus_2_gpus):\n \n num_workers = 2\n assert os.environ[\"CUDA_VISIBLE_DEVICES\"] == \"\"\n", "d_id": 28826, "documentation": { "docstring": "Tests if GPU tensors are auto converted to CPU on driver.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 221262, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/calendar.py", "file_name": "calendar.py", "fun_name": "formatyear", "commit_message": "add python 3.10.4 for windows", "code": "def formatyear(self, theyear, width=3):\n \n v = []\n a = v.append\n width = max(width, 1)\n a('' %\n self.cssclass_year)\n a('\\n')\n a('' % (\n width, self.cssclass_year_head, theyear))\n for i in range(January, January+12, width):\n # months in this row\n months = range(i, min(i+width, 13))\n a('')\n for m in months:\n a('')\n a('')\n a('
    %s
    ')\n a(self.formatmonth(theyear, m, withyear=False))\n a('
    ')\n return ''.join(v)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 250, "n_words": 60, "vocab_size": 52, "complexity": 3, "nloc": 19, "token_counts": 131, "n_ast_nodes": 223, "n_identifiers": 19, "random_cut": "def formatyear(self, theyear, width=3):\n \n v = []\n a = v.append\n width = max(width, 1)\n a('' %\n self.cssclass_year)\n a('\\n')\n a('' % (\n width, self.cssclass_year_head, theyear))\n for i in range(January, January+12, width):\n # months in this row\n months = range(i, min(i+width, 13))\n a('')\n for m in months:\n a('')\n\n # App index page\n response = self.client.get(reverse(\"admin:app_list\", args=(\"admin_views\",)))\n self.assertContains(response, '
    ')\n self.assertContains(response, '
    ')\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 114, "n_words": 37, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 87, "n_ast_nodes": 156, "n_identifiers": 8, "random_cut": "def test_index_css_classes(self):\n \n ", "d_id": 52092, "documentation": { "docstring": "\n CSS class names are used for each app and model on the admin index\n pages (#17050).\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 207495, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/test_actions.py", "file_name": "test_actions.py", "fun_name": "test_action_column_class", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_action_column_class(self):\n \n response = self.client.get(reverse(\"admin:admin_views_subscriber_changelist\"))\n self.assertIsNotNone(response.context[\"action_form\"])\n self.assertContains(response, \"action-checkbox-column\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 36, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 38, "n_ast_nodes": 69, "n_identifiers": 9, "random_cut": "def test_action_column_class(self):\n \n response = self.client.get(reverse(\"admin:admin_views_subscriber_changelist\"))\n self.assertIsNotNone(response.context[\"act", "d_id": 51985, "documentation": { "docstring": "The checkbox column class is present in the response.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 81176, "commit_id": "21972c91dd2b52cd206bf71ea038ab0e1f478b32", "repo": "awx", "path": "awx/conf/settings.py", "file_name": "settings.py", "fun_name": "hashkey", "commit_message": "add lock to cachetools usage\n\n* We observed daphne giving tracebacks when accessing logging settings.\n Originally, configure tower in tower settings was no a suspect because\n daphne is not multi-process. We've had issues with configure tower in\n tower settings and multi-process before. We later learned that Daphne\n is multi-threaded. Configure tower in tower was back to being a\n suspect. We constructed a minimal reproducer to show that multiple\n threads accessing settings can cause the same traceback that we saw in\n daphne. See\n https://gist.github.com/chrismeyersfsu/7aa4bdcf76e435efd617cb078c64d413\n for that recreator. 
These fixes stop the recreation.", "code": "def hashkey(cls, *args, **kwargs):\n \n return cachetools.keys.hashkey(f\"<{cls.__name__}>\", *args, **kwargs)\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 28, "n_ast_nodes": 52, "n_identifiers": 7, "random_cut": "def hashkey(cls, *args, **kwargs):\n \n return cachetools.keys.hashkey(f\"<{cls.__name__}>\", *args, **kwargs)\n\n", "d_id": 17168, "documentation": { "docstring": "\n Usage of @cachetools.cached has changed to @cachetools.cachedmethod\n The previous cachetools decorator called the hash function and passed in (self, key).\n The new cachtools decorator calls the hash function with just (key).\n Ideally, we would continue to pass self, however, the cachetools decorator interface\n does not allow us to.\n\n This hashkey function is to maintain that the key generated looks like\n ('', key). The thought is that maybe it is important to namespace\n our cache to the SettingsWrapper scope in case some other usage of this cache exists.\n I can not think of how any other system could and would use our private cache, but\n for safety sake we are ensuring the key schema does not change.\n ", "n_words": 116, "vocab_size": 82, "n_whitespaces": 194, "language": "en" } }, { "id": 111625, "commit_id": "3f6a8274a97bf003b5eadc05faa324162b7f4123", "repo": "nni", "path": "nni/experiment/config/base.py", "file_name": "base.py", "fun_name": "load", "commit_message": "Some string changes around experiment module (#4442)", "code": "def load(cls, path):\n \n with open(path) as yaml_file:\n data = yaml.safe_load(yaml_file)\n if not isinstance(data, dict):\n raise TypeError(f'Conent of config file {path} is not a dict/object')\n utils.set_base_path(Path(path).parent)\n config = cls(**data)\n utils.unset_base_path()\n return config\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 102, "n_words": 31, "vocab_size": 27, "complexity": 2, "nloc": 9, "token_counts": 64, "n_ast_nodes": 114, "n_identifiers": 17, "random_cut": "def load(cls, path):\n \n with open(path) as yaml_file:\n data = yaml.safe_load(yaml_file)\n if not isinstance(data, dict):\n raise TypeError(f'Conent of config file {path} is not a dict/object')\n utils.set_base_path(Path(path).parent)\n config = cls(**data)\n utils.uns", "d_id": 24457, "documentation": { "docstring": "\n Load a YAML config file from file system.\n\n Since YAML is a superset of JSON, it can also load JSON files.\n\n This method raises exception if:\n\n - The file is not available\n - The file content is not valid YAML\n - Top level value of the YAML is not object\n - The YAML contains not supported fields\n\n It does not raise exception when the YAML misses fields or contains bad fields.\n\n Parameters\n ----------\n path : PathLike\n Path of the config file.\n\n Returns\n -------\n cls\n An object of ConfigBase subclass.\n ", "n_words": 89, "vocab_size": 58, "n_whitespaces": 217, "language": "en" } }, { "id": 66151, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/leave_allocation/leave_allocation.py", "file_name": "leave_allocation.py", "fun_name": "get_leave_allocation_for_period", "commit_message": "style: format code with black", "code": "def get_leave_allocation_for_period(employee, leave_type, 
from_date, to_date):\n\tleave_allocated = 0\n\tleave_allocations = frappe.db.sql(\n\t\t,\n\t\t{\"from_date\": from_date, \"to_date\": to_date, \"employee\": employee, \"leave_type\": leave_type},\n\t\tas_dict=1,\n\t)\n\n\tif leave_allocations:\n\t\tfor leave_alloc in leave_allocations:\n\t\t\tleave_allocated += leave_alloc.total_leaves_allocated\n\n\treturn leave_allocated\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 22, "n_words": 34, "vocab_size": 29, "complexity": 3, "nloc": 19, "token_counts": 62, "n_ast_nodes": 109, "n_identifiers": 14, "random_cut": "def get_leave_allocation_for_period(employee, leave_type, from_date, to_date):\n\tleave_allocated = 0\n\tleave_allocations = frappe.db.sql(\n\t\t,\n\t\t{\"from_date\": from_date, \"to_date\": to_date, \"employee\": employee, \"leave_type\": leave_type},\n\t\tas_dict=1,\n\t)\n\n\tif leave_allocations:\n\t\tfor leave_alloc in leave_allocations:\n\t\t\tleave_allocated += leave_alloc.total_leaves_allocated\n\n\treturn leave_allocated\n\n\n@frappe.whitelist()", "d_id": 14111, "documentation": { "docstring": "\n\t\tselect employee, leave_type, from_date, to_date, total_leaves_allocated\n\t\tfrom `tabLeave Allocation`\n\t\twhere employee=%(employee)s and leave_type=%(leave_type)s\n\t\t\tand docstatus=1\n\t\t\tand (from_date between %(from_date)s and %(to_date)s\n\t\t\t\tor to_date between %(from_date)s and %(to_date)s\n\t\t\t\tor (from_date < %(from_date)s and to_date > %(to_date)s))\n\t", "n_words": 35, "vocab_size": 23, "n_whitespaces": 28, "language": "en" } }, { "id": 207414, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_utils/test_logentry.py", "file_name": "test_logentry.py", "fun_name": "test_proxy_model_content_type_is_used_for_log_entries", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_proxy_model_content_type_is_used_for_log_entries(self):\n \n proxy_content_type = ContentType.objects.get_for_model(\n ArticleProxy, for_concrete_model=False\n )\n post_data = {\n \"site\": self.site.pk,\n \"title\": \"Foo\",\n \"hist\": \"Bar\",\n \"created_0\": \"2015-12-25\",\n \"created_1\": \"00:00\",\n }\n changelist_url = reverse(\"admin:admin_utils_articleproxy_changelist\")\n\n # add\n proxy_add_url = reverse(\"admin:admin_utils_articleproxy_add\")\n response = self.client.post(proxy_add_url, post_data)\n self.assertRedirects(response, changelist_url)\n proxy_addition_log = LogEntry.objects.latest(\"id\")\n self.assertEqual(proxy_addition_log.action_flag, ADDITION)\n self.assertEqual(proxy_addition_log.content_type, proxy_content_type)\n\n # change\n article_id = proxy_addition_log.object_id\n proxy_change_url = reverse(\n \"admin:admin_utils_articleproxy_change\", args=(article_id,)\n )\n post_data[\"title\"] = \"New\"\n response = self.client.post(proxy_change_url, post_data)\n self.assertRedirects(response, changelist_url)\n proxy_change_log = LogEntry.objects.latest(\"id\")\n self.assertEqual(proxy_change_log.action_flag, CHANGE)\n self.assertEqual(proxy_change_log.content_type, proxy_content_type)\n\n # delete\n proxy_delete_url = reverse(\n \"admin:admin_utils_articleproxy_delete\", args=(article_id,)\n )\n response = self.client.post(proxy_delete_url, {\"post\": \"yes\"})\n self.assertRedirects(response, changelist_url)\n proxy_delete_log = LogEntry.objects.latest(\"id\")\n 
self.assertEqual(proxy_delete_log.action_flag, DELETION)\n self.assertEqual(proxy_delete_log.content_type, proxy_content_type)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 397, "n_words": 92, "vocab_size": 62, "complexity": 1, "nloc": 36, "token_counts": 251, "n_ast_nodes": 424, "n_identifiers": 34, "random_cut": "def test_proxy_model_content_type_is_used_for_log_entries(self):\n \n proxy_content_type = ContentType.objects.get_for_model(\n ArticleProxy, for_concrete_model=False\n )\n post_data = {\n \"site\": self.site.pk,\n \"title\": \"Foo\",\n \"hist\": \"Bar\",\n \"created_0\": \"2015-12-25\",\n \"created_1\": \"00:00\",\n }\n changelist_url = reverse(\"admin:admin_utils_articleproxy_changelist\")\n\n # add\n proxy_add_url = reverse(\"admin:admin_utils_articleproxy_add\")\n response = self.client.post(proxy_add_url, post_data)\n self.assertRedirects(response, changelist_url)\n proxy_addition_log = LogEntry.objects.latest(\"id\")\n self.assertEqual(proxy_addition_log.action_flag, ADDITION)\n self.assertEqual(proxy_addition_log.content_type, proxy_content_type)\n\n # change\n article_id = proxy_addition_log.object_id\n proxy_change_url = reverse(\n \"admin:admin_utils_articleproxy_change\", args=(article_id,)\n )\n post_data[\"title\"] = \"New\"\n response = self.client.post(proxy_change_url, post_data)\n self.assertRedirects(response, changelist_url)\n proxy_change_log = LogEntry.objects.latest(\"id\")\n self.assertEqual(proxy_change_log.action_flag, CHANGE)\n self.assertEqual(proxy_change_log.content_type, proxy_content_type)\n\n # delete\n proxy_delete_url = reverse(\n \"admin:admin_utils_articleproxy_delete\", args=(arti", "d_id": 51957, "documentation": { "docstring": "\n Log entries for proxy models should have the proxy model's contenttype\n (#21084).\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 34, "language": "en" } }, { "id": 156222, "commit_id": "261bf174931580230717abca93fe172e166cc1e8", "repo": "dask", "path": "dask/utils.py", "file_name": "utils.py", "fun_name": "typename", "commit_message": "Add mild typing to common utils functions (#8848)", "code": "def typename(typ, short=False) -> str:\n \n if not isinstance(typ, type):\n return typename(type(typ))\n try:\n if not typ.__module__ or typ.__module__ == \"builtins\":\n return typ.__name__\n else:\n if short:\n module, *_ = typ.__module__.split(\".\")\n else:\n module = typ.__module__\n return module + \".\" + typ.__name__\n except AttributeError:\n return str(typ)\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 156, "n_words": 42, "vocab_size": 29, "complexity": 6, "nloc": 28, "token_counts": 88, "n_ast_nodes": 150, "n_identifiers": 12, "random_cut": "def typename(typ, short=False) -> str:\n \n if not isinstance(typ, type):\n return typename(type(typ))\n try:\n if not typ.__module__ or typ.__module__ == \"builtins\":\n return typ.", "d_id": 36605, "documentation": { "docstring": "\n Return the name of a type\n\n Examples\n --------\n >>> typename(int)\n 'int'\n\n >>> from dask.core import literal\n >>> typename(literal)\n 'dask.core.literal'\n >>> typename(literal, short=True)\n 'dask.literal'\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 57, "language": "en" } }, { "id": 204587, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/__init__.py", "file_name": 
"__init__.py", "fun_name": "fetch_command", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def fetch_command(self, subcommand):\n \n # Get commands outside of try block to prevent swallowing exceptions\n commands = get_commands()\n try:\n app_name = commands[subcommand]\n except KeyError:\n if os.environ.get(\"DJANGO_SETTINGS_MODULE\"):\n # If `subcommand` is missing due to misconfigured settings, the\n # following line will retrigger an ImproperlyConfigured exception\n # (get_commands() swallows the original one) so the user is\n # informed about it.\n settings.INSTALLED_APPS\n elif not settings.configured:\n sys.stderr.write(\"No Django settings specified.\\n\")\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(\"Unknown command: %r\" % subcommand)\n if possible_matches:\n sys.stderr.write(\". Did you mean %s?\" % possible_matches[0])\n sys.stderr.write(\"\\nType '%s help' for usage.\\n\" % self.prog_name)\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 396, "n_words": 114, "vocab_size": 89, "complexity": 6, "nloc": 20, "token_counts": 126, "n_ast_nodes": 223, "n_identifiers": 24, "random_cut": "def fetch_command(self, subcommand):\n \n # Get commands outside of try block to prevent swallowing exceptions\n commands = get_commands()\n try:\n app_name = commands[subcommand]\n except KeyError:\n if os.environ.get(\"DJANGO_SETTINGS_MODULE\"):\n # If `subcommand` is missing due to misconfigured settings, the\n # following line will retrigger an ImproperlyConfigured exception\n # (get_commands() swallows the original one) so the user is\n # informed about it.\n settings.INSTALLED_APPS\n elif not settings.configured:\n sys.stderr.write(\"No Django settings specified.\\n\")\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(\"Unknown command: %r\" % subcommand)\n if possible_matches:\n sys.stderr.write(\". 
Did you mean %s?\" % possible_matches[0])\n sys.stderr.write(\"\\nType '%s help' for usage.\\n\" % self", "d_id": 50801, "documentation": { "docstring": "\n Try to fetch the given subcommand, printing a message with the\n appropriate command called from the command line (usually\n \"django-admin\" or \"manage.py\") if it can't be found.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 56, "language": "en" } }, { "id": 178270, "commit_id": "92314e4a9c431c407533e4a064481acf3c5983ab", "repo": "label-studio", "path": "label_studio/core/storage.py", "file_name": "storage.py", "fun_name": "url", "commit_message": "fix: DEV-3911: Move persistent storages to OS (#3377)\n\n* fix: DEV-3911: Move persistent storages to OS\r\n\r\n* Fix\r\n\r\n* Add deps\r\n\r\n* Back header\r\n\r\n* Move DownloadStorageData handler\r\n\r\n* Update all urls json\r\n\r\n* Fix import\r\n\r\n* add nginx config\r\n\r\n* Fix GSC storage\r\n\r\nCo-authored-by: Sergei Ivashchenko \r\nCo-authored-by: Sergey Zhuk ", "code": "def url(self, name):\n \n name = self._normalize_name(clean_name(name))\n blob = self.bucket.blob(name)\n blob_params = self.get_object_parameters(name)\n no_signed_url = (\n blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth)\n\n if not self.custom_endpoint and no_signed_url:\n return blob.public_url\n elif no_signed_url:\n out = '{storage_base_url}/{quoted_name}'.format(\n storage_base_url=self.custom_endpoint,\n quoted_name=_quote(name, safe=b\"/~\"),\n )\n return out\n elif not self.custom_endpoint:\n out2 = blob.generate_signed_url(\n expiration=self.expiration,\n version=\"v4\",\n **self._get_signing_kwargs()\n )\n return out2\n else:\n out3 = blob.generate_signed_url(\n bucket_bound_hostname=self.custom_endpoint,\n expiration=self.expiration,\n version=\"v4\",\n **self._get_signing_kwargs()\n )\n return out3\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 382, "n_words": 63, "vocab_size": 41, "complexity": 6, "nloc": 29, "token_counts": 164, "n_ast_nodes": 266, "n_identifiers": 28, "random_cut": "def url(self, name):\n \n name = self._normalize_name(clean_name(name))\n blob = self.bucket.blob(name)\n blob_params = self.get_object_parameters(name)\n no_signed_url = (\n blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth)\n\n if not self.custom_endpoint and no_signed_url:\n return blob.public_url\n elif no_signed_url:\n out = '{storage_base_url}/{quoted_name}'.format(\n storage_base_url=self.custom_endpoint,\n quoted_name=_quote(name, safe=b\"/~\"),\n )\n return out\n elif not self.custom_endpoint:\n out2 = blob.generate_signed_url(\n expiration=self.expiration,\n version=\"v4\",\n **self._get_signing_kwargs()\n )\n return out2\n else:\n out3 = blob.generate_signed_url(\n bucket_bound_hostname=self.custom_endpoint,\n expir", "d_id": 42643, "documentation": { "docstring": "\n Return public url or a signed url for the Blob.\n This DOES NOT check for existance of Blob - that makes codes too slow\n for many use cases.\n Overridden to force the use of the IAM signBlob API.\n See https://github.com/googleapis/python-storage/blob/519074112775c19742522158f612b467cf590219/google/cloud/storage/_signing.py#L628 # NOQA\n ", "n_words": 42, "vocab_size": 35, "n_whitespaces": 86, "language": "en" } }, { "id": 260960, "commit_id": "b850a9417d4777931e2894fd8155b73dc87973b9", "repo": "scikit-learn", "path": "sklearn/utils/validation.py", 
"file_name": "validation.py", "fun_name": "check_is_fitted", "commit_message": "DOC Ensures that check_is_fitted passes numpydoc validation (#24454)", "code": "def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):\n \n if isclass(estimator):\n raise TypeError(\"{} is a class, not an instance.\".format(estimator))\n if msg is None:\n msg = (\n \"This %(name)s instance is not fitted yet. Call 'fit' with \"\n \"appropriate arguments before using this estimator.\"\n )\n\n if not hasattr(estimator, \"fit\"):\n raise TypeError(\"%s is not an estimator instance.\" % (estimator))\n\n if attributes is not None:\n if not isinstance(attributes, (list, tuple)):\n attributes = [attributes]\n fitted = all_or_any([hasattr(estimator, attr) for attr in attributes])\n elif hasattr(estimator, \"__sklearn_is_fitted__\"):\n fitted = estimator.__sklearn_is_fitted__()\n else:\n fitted = [\n v for v in vars(estimator) if v.endswith(\"_\") and not v.startswith(\"__\")\n ]\n\n if not fitted:\n raise NotFittedError(msg % {\"name\": type(estimator).__name__})\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 242, "n_words": 104, "vocab_size": 69, "complexity": 12, "nloc": 22, "token_counts": 170, "n_ast_nodes": 284, "n_identifiers": 23, "random_cut": "def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):\n \n if isclass(estimator):\n raise TypeError(\"{} is a class, not an instance.\".format(estimator))\n if msg is None:\n msg = (\n \"This %(name)s instance is not fitted yet. Call 'fit' with \"\n \"appropriate arguments before using this estimator.\"\n )\n\n if not hasattr(estimator, \"fit\"):\n raise TypeError(\"%s is not an estimator instance.\" % (estimator))\n\n if attributes is not None:\n if not isinstance(attributes, (list, tuple)):\n attributes = [attributes]\n fitted = all_or_any([hasattr(estimator, attr) for attr in attributes])\n elif hasattr(estimator, \"__sklearn_is_fitted__\"):\n fitted = estimator.__sklearn_is_fitted__()\n else:\n fitted = [\n v for v in vars(estimator) if v.endswith(\"_\") and not v.startswith(\"__\")\n ]\n\n if not fitted:\n raise NotFittedError(msg % {\"name\": type(estimator).__name__})\n\n", "d_id": 76591, "documentation": { "docstring": "Perform is_fitted validation for estimator.\n\n Checks if the estimator is fitted by verifying the presence of\n fitted attributes (ending with a trailing underscore) and otherwise\n raises a NotFittedError with the given message.\n\n If an estimator does not set any attributes with a trailing underscore, it\n can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the\n estimator is fitted or not.\n\n Parameters\n ----------\n estimator : estimator instance\n Estimator instance for which the check is performed.\n\n attributes : str, list or tuple of str, default=None\n Attribute name(s) given as string or a list/tuple of strings\n Eg.: ``[\"coef_\", \"estimator_\", ...], \"coef_\"``\n\n If `None`, `estimator` is considered fitted if there exist an\n attribute that ends with a underscore and does not start with double\n underscore.\n\n msg : str, default=None\n The default error message is, \"This %(name)s instance is not fitted\n yet. Call 'fit' with appropriate arguments before using this\n estimator.\"\n\n For custom messages if \"%(name)s\" is present in the message string,\n it is substituted for the estimator name.\n\n Eg. 
: \"Estimator, %(name)s, must be fitted before sparsifying\".\n\n all_or_any : callable, {all, any}, default=all\n Specify whether all or any of the given attributes must exist.\n\n Raises\n ------\n TypeError\n If the estimator is a class or not an estimator instance\n\n NotFittedError\n If the attributes are not found.\n ", "n_words": 213, "vocab_size": 127, "n_whitespaces": 369, "language": "en" } }, { "id": 334468, "commit_id": "1e21f061601dda0aa9740e88bfce68bf4aac4acd", "repo": "diffusers", "path": "models/vision/glide/modeling_glide.py", "file_name": "modeling_glide.py", "fun_name": "p_mean_variance", "commit_message": "Classifier-free guidance scheduler + GLIDe pipeline", "code": "def p_mean_variance(self, model, x, t, transformer_out, clip_denoised=True, model_kwargs=None):\n \n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n model_output = model(x, t, transformer_out)\n\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = torch.split(model_output, C, dim=1)\n min_log = _extract_into_tensor(self.noise_scheduler.posterior_log_variance_clipped, t, x.shape)\n max_log = _extract_into_tensor(np.log(self.noise_scheduler.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = torch.exp(model_log_variance)\n\n pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n if clip_denoised:\n pred_xstart = pred_xstart.clamp(-1, 1)\n model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)\n\n assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n return model_mean, model_variance, model_log_variance, pred_xstart\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 261, "n_words": 113, "vocab_size": 77, "complexity": 3, "nloc": 19, "token_counts": 243, "n_ast_nodes": 356, "n_identifiers": 37, "random_cut": "def p_mean_variance(self, model, x, t, transformer_out, clip_denoised=True, model_kwargs=None):\n \n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n model_output = model(x, t, transformer_out)\n\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = torch.split(model_output, C, dim=1)\n min_log = _extract_into_tensor(self.noise_scheduler.posterior_log_variance_clipped, t, x.shape)\n max_log = _extract_into_tensor(np.log(self.noise_scheduler.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = torch.exp(model_log_variance)\n\n pred_xstart = self._predict_xstart_from_eps(x", "d_id": 120634, "documentation": { "docstring": "\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. 
This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n ", "n_words": 116, "vocab_size": 76, "n_whitespaces": 276, "language": "en" } }, { "id": 260606, "commit_id": "84c6421a9067de7d1b54b7a6d8e21ce38e1f0eca", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_estimator_html_repr.py", "file_name": "test_estimator_html_repr.py", "fun_name": "test_invalid_parameters_in_stacking", "commit_message": "FIX Show a HTML repr for meta-estimatosr with invalid parameters (#24015)\n\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def test_invalid_parameters_in_stacking():\n \n stacker = StackingClassifier(estimators=[])\n\n html_output = estimator_html_repr(stacker)\n assert html.escape(str(stacker)) in html_output\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 9, "random_cut": "def test_invalid_parameters_in_stacking():\n \n stacker = StackingClassifier(estimators", "d_id": 76372, "documentation": { "docstring": "Invalidate stacking configuration uses default repr.\n\n Non-regression test for #24009.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 67302, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/south_africa/setup.py", "file_name": "setup.py", "fun_name": "add_permissions", "commit_message": "style: format code with black", "code": "def add_permissions():\n\t\n\tfor doctype in (\"South Africa VAT Settings\", \"South Africa VAT Account\"):\n\t\tadd_permission(doctype, \"All\", 0)\n\t\tfor role in (\"Accounts Manager\", \"Accounts User\", \"System Manager\"):\n\t\t\tadd_permission(doctype, role, 0)\n\t\t\tupdate_permission_property(doctype, role, 0, \"write\", 1)\n\t\t\tupdate_permission_property(doctype, role, 0, \"create\", 1)\n\n\tif not frappe.db.get_value(\"Custom Role\", dict(report=\"VAT Audit Report\")):\n\t\tfrappe.get_doc(\n\t\t\tdict(\n\t\t\t\tdoctype=\"Custom Role\",\n\t\t\t\treport=\"VAT Audit Report\",\n\t\t\t\troles=[dict(role=\"Accounts User\"), dict(role=\"Accounts Manager\"), dict(role=\"Auditor\")],\n\t\t\t)\n\t\t).insert()\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 44, "n_words": 59, "vocab_size": 46, "complexity": 4, "nloc": 15, "token_counts": 128, "n_ast_nodes": 215, "n_identifiers": 13, "random_cut": "def add_permissions():\n\t\n\tfor doctype in (\"South Africa VAT Settings\", \"South Africa VAT Account\"):\n\t\tadd_permission(doctype, \"All\", 0)\n\t\tfor role in (\"Accounts Manager\", \"Accounts User\", \"System Manager\"):\n\t\t\tadd_permission(doctype, role, 0)\n\t\t\tupdate_permission_property(doctype, role, 0, \"write\", 1)\n\t\t\tupdate_permission_property(doctype, role, 0, \"create\", 1)\n\n\tif not frappe.db.get_value(\"Custom Role\", ", "d_id": 14485, "documentation": { "docstring": "Add Permissions for South Africa VAT Settings and South Africa VAT Account\n\tand VAT Audit Report", "n_words": 16, "vocab_size": 11, "n_whitespaces": 14, "language": "en" } }, { "id": 203426, "commit_id": 
"9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/options.py", "file_name": "options.py", "fun_name": "has_delete_permission", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def has_delete_permission(self, request, obj=None):\n \n opts = self.opts\n codename = get_permission_codename(\"delete\", opts)\n return request.user.has_perm(\"%s.%s\" % (opts.app_label, codename))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 69, "n_identifiers": 10, "random_cut": "def has_delete_permission(self, request, obj=None):\n \n opts = self.opts\n codename = get_permission_codename(\"delete\", opts)\n retu", "d_id": 50370, "documentation": { "docstring": "\n Return True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overridden by the user in subclasses. In such case it should\n return True if the given request has permission to delete the `obj`\n model instance. If `obj` is None, this should return True if the given\n request has permission to delete *any* object of the given type.\n ", "n_words": 72, "vocab_size": 42, "n_whitespaces": 129, "language": "en" } }, { "id": 109922, "commit_id": "df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/art3d.py", "file_name": "art3d.py", "fun_name": "set_3d_properties", "commit_message": "Improve mpl_toolkit documentation", "code": "def set_3d_properties(self, zs=0, zdir='z'):\n \n xs = self.get_xdata()\n ys = self.get_ydata()\n zs = cbook._to_unmasked_float_array(zs).ravel()\n zs = np.broadcast_to(zs, len(xs))\n self._verts3d = juggle_axes(xs, ys, zs, zdir)\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 26, "vocab_size": 20, "complexity": 1, "nloc": 7, "token_counts": 72, "n_ast_nodes": 116, "n_identifiers": 17, "random_cut": "def set_3d_properties(self, zs=0, zdir='z'):\n \n xs = self.get_xdata()\n ys = self.get_ydata()\n zs = cbook._to_unmasked_float_array(zs).ravel()\n zs = np.broadcast_to(zs, len(xs))\n self._ve", "d_id": 23829, "documentation": { "docstring": "\n Set the *z* position and direction of the line.\n\n Parameters\n ----------\n zs : float or array of floats\n The location along the *zdir* axis in 3D space to position the\n line.\n zdir : {'x', 'y', 'z'}\n Plane to plot line orthogonal to. 
Default: 'z'.\n See `.get_dir_vector` for a description of the values.\n ", "n_words": 52, "vocab_size": 42, "n_whitespaces": 139, "language": "en" } }, { "id": 101926, "commit_id": "dab823a3eb7a5257cb1e0818ee10ed234d3de97f", "repo": "faceswap", "path": "lib/gui/project.py", "file_name": "project.py", "fun_name": "clear_tasks", "commit_message": "Typing - lib.gui.display_command", "code": "def clear_tasks(self):\n \n logger.debug(\"Clearing stored tasks\")\n self._tasks = {}\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def clear_tasks(self):\n \n logger.debug(\"Clearing stored tasks\")\n self._tasks = ", "d_id": 21305, "documentation": { "docstring": " Clears all of the stored tasks.\n\n This is required when loading a task stored in a legacy project file, and is only to be\n called by :class:`Project` when a project has been loaded which is in fact a task.\n ", "n_words": 39, "vocab_size": 30, "n_whitespaces": 61, "language": "en" } }, { "id": 66160, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/leave_application/leave_application.py", "file_name": "leave_application.py", "fun_name": "add_department_leaves", "commit_message": "style: format code with black", "code": "def add_department_leaves(events, start, end, employee, company):\n\tdepartment = frappe.db.get_value(\"Employee\", employee, \"department\")\n\n\tif not department:\n\t\treturn\n\n\t# department leaves\n\tdepartment_employees = frappe.db.sql_list(\n\t\t,\n\t\t(department, company),\n\t)\n\n\tfilter_conditions = ' and employee in (\"%s\")' % '\", \"'.join(department_employees)\n\tadd_leaves(events, start, end, filter_conditions=filter_conditions)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 39, "vocab_size": 33, "complexity": 2, "nloc": 11, "token_counts": 71, "n_ast_nodes": 113, "n_identifiers": 15, "random_cut": "def add_department_leaves(events, start, end, employee, company):\n\tdepartment = frappe.db.get_value(\"Emplo", "d_id": 14117, "documentation": { "docstring": "select name from tabEmployee where department=%s\n\t\tand company=%s", "n_words": 8, "vocab_size": 8, "n_whitespaces": 6, "language": "en" } }, { "id": 130291, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/util.py", "file_name": "util.py", "fun_name": "is_file", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def is_file(self, follow_links=None):\n \n if follow_links is None:\n follow_links = True\n\n node_stat = self._stat if follow_links else self._lstat\n return stat.S_ISREG(node_stat.st_mode)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 58, "n_words": 19, "vocab_size": 15, "complexity": 3, "nloc": 5, "token_counts": 38, "n_ast_nodes": 61, "n_identifiers": 9, "random_cut": "def is_file(self, follow_links=None):\n \n if follow_links is None:\n follow_links = True\n\n node_stat = self._stat if follow_links else self._lstat", "d_id": 29216, "documentation": { "docstring": "\n Get 
whether the entry is a regular file.\n\n *follow_links* (:class:`bool` or :data:`None`) is whether to follow\n symbolic links. If this is :data:`True`, a symlink to a regular file\n will result in :data:`True`. Default is :data:`None` for :data:`True`.\n\n Returns whether the entry is a regular file (:class:`bool`).\n ", "n_words": 46, "vocab_size": 30, "n_whitespaces": 89, "language": "en" } }, { "id": 207170, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_inlines/tests.py", "file_name": "tests.py", "fun_name": "test_tabular_model_form_meta_readonly_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_tabular_model_form_meta_readonly_field(self):\n \n response = self.client.get(reverse(\"admin:admin_inlines_someparentmodel_add\"))\n self.assertContains(\n response,\n '',\n )\n self.assertContains(response, \"Label from ModelForm.Meta\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 119, "n_words": 29, "vocab_size": 24, "complexity": 1, "nloc": 10, "token_counts": 39, "n_ast_nodes": 75, "n_identifiers": 7, "random_cut": "def test_tabular_model_form_meta_readonly_field(self):\n \n response = self.client.get(reverse(\"admin:admin_inlines_someparentmodel_add\"))\n self.assertCont", "d_id": 51888, "documentation": { "docstring": "\n Tabular inlines use ModelForm.Meta.help_texts and labels for read-only\n fields.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 31, "language": "en" } }, { "id": 80220, "commit_id": "10dbbddaf35607e4257f50dd960520a1268dd225", "repo": "wagtail", "path": "wagtail/snippets/tests/test_locking.py", "file_name": "test_locking.py", "fun_name": "test_edit_get_unlocked_no_lock_permission", "commit_message": "Add tests for locking snippets", "code": "def test_edit_get_unlocked_no_lock_permission(self):\n \n # Use edit permission only\n self.set_permissions([\"change\"])\n\n # Get the edit page\n response = self.client.get(self.get_url(\"edit\"))\n html = response.content.decode()\n lock_url = self.get_url(\"lock\")\n\n # Should not show lock message\n self.assertNotContains(\n response,\n \"'I'm a lockable snippet!' 
was locked\",\n )\n\n # Should show unlocked information in the side panel\n self.assertContains(\n response,\n f\"Anyone can edit this {self.model_name}.\",\n )\n\n # Should not show info to lock the object in the side panel\n self.assertNotContains(\n response,\n \"Lock it to prevent others from editing.\",\n )\n\n # Should show Save action menu item\n self.assertContains(\n response,\n f\"{self.save_button_label}\",\n html=True,\n )\n\n # Should not show Locked action menu item\n self.assertTagInHTML(\n '',\n html,\n count=0,\n allow_extra_attrs=True,\n )\n\n # Should not show the lock button\n self.assertTagInHTML(\n f'',\n html,\n count=0,\n allow_extra_attrs=True,\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 483, "n_words": 121, "vocab_size": 70, "complexity": 1, "nloc": 34, "token_counts": 123, "n_ast_nodes": 225, "n_identifiers": 18, "random_cut": "def test_edit_get_unlocked_no_lock_permission(self):\n \n # Use edit permission only\n self.set_permissions([\"change\"])\n\n # Get the edit page\n response = self.client.get(self.get_url(\"edit\"))\n html = response.content.decode()\n lock_url = self.get_url(\"lock\")\n\n # Should not show lock message\n self.assertNotContains(\n response,\n \"'I'm a lockable snippet!' was locked\",\n )\n\n # Should show unlocked information in the side panel\n self.assertContains(\n response,\n f\"Anyone can edit this {self.model_name}.\",\n )\n\n # Should not show info to lock the object in the side panel\n self.assertNotContains(\n response,\n \"Lock it to prevent others from editing.\",\n )\n\n # Should show Save action menu item\n self.assertContains(\n response,\n f\"{self.save_button_label}\",\n html=True,\n )\n\n # Should not show Locked action menu item\n self.assertTagInHTML(\n '',\n", "d_id": 17033, "documentation": { "docstring": "A user cannot lock an object without the lock permission.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 94186, "commit_id": "7f0e298ca45cd41f0e6df3968a6c0c2923a7b831", "repo": "sentry", "path": "src/sentry/models/counter.py", "file_name": "counter.py", "fun_name": "increment_project_counter", "commit_message": "fix(counter): Fix minor linting violation (#37392)", "code": "def increment_project_counter(project, delta=1, using=\"default\"):\n \n if delta <= 0:\n raise ValueError(\"There is only one way, and that's up.\")\n\n sample_rate = options.get(\"store.projectcounter-modern-upsert-sample-rate\")\n\n modern_upsert = sample_rate and random.random() <= sample_rate\n\n # To prevent the statement_timeout leaking into the session we need to use\n # set local which can be used only within a transaction\n with transaction.atomic(using=using):\n cur = connections[using].cursor()\n try:\n statement_timeout = None\n if settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT:\n # WARNING: This is not a proper fix and should be removed once\n # we have better way of generating next_short_id.\n cur.execute(\"show statement_timeout\")\n statement_timeout = cur.fetchone()[0]\n cur.execute(\n \"set local statement_timeout = %s\",\n [settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT],\n )\n\n if modern_upsert:\n # Our postgres wrapper thing does not allow for named arguments\n cur.execute(\n \"insert into sentry_projectcounter (project_id, value) \"\n \"values (%s, %s) \"\n \"on conflict (project_id) do update \"\n \"set value = sentry_projectcounter.value + %s \"\n \"returning value\",\n 
[project.id, delta, delta],\n )\n else:\n cur.execute(\n \"select sentry_increment_project_counter(%s, %s)\",\n [project.id, delta],\n )\n\n project_counter = cur.fetchone()[0]\n\n if statement_timeout is not None:\n cur.execute(\n \"set local statement_timeout = %s\",\n [statement_timeout],\n )\n\n return project_counter\n\n finally:\n cur.close()\n\n\n# this must be idempotent because it seems to execute twice\n# (at least during test runs)", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 751, "n_words": 184, "vocab_size": 127, "complexity": 7, "nloc": 39, "token_counts": 179, "n_ast_nodes": 312, "n_identifiers": 23, "random_cut": "def increment_project_counter(project, delta=1, using=\"default\"):\n \n if delta <= 0:\n raise ValueError(\"There is only one way, and that's up.\")\n\n sample_rate = options.get(\"store.projectcounter-modern-upsert-sample-rate\")\n\n modern_upsert = sample_rate and random.random() <= sample_rate\n\n # To prevent the statement_timeout leaking into the session we need to use\n # set local which can be used only within a transaction\n with transaction.atomic(using=using):\n cur = connections[using].cursor()\n try:\n statement_timeout = None\n if settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT:\n # WARNING: This is not a proper fix and should be removed once\n # we have better way of generating next_short_id.\n cur.execute(\"show statement_timeout\")\n statement_timeout = cur.fetchone()[0]\n cur.execute(\n \"set local statement_timeout = %s\",\n [settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT],\n )\n\n if modern_upsert:\n # Our postgres wrapper thing does not allow for named arguments\n cur.execute(\n \"insert into sentry_projectcounter (project_id, value) \"\n \"values (%s, %s) \"\n \"on conflict (project_id) do update \"\n \"set value = sentry_projectcounter.value + %s \"\n ", "d_id": 19047, "documentation": { "docstring": "This method primarily exists so that south code can use it.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 249830, "commit_id": "115f0eb2334b13665e5c112bd87f95ea393c9047", "repo": "synapse", "path": "tests/storage/test_id_generators.py", "file_name": "test_id_generators.py", "fun_name": "test_multiple_gen_nexts_closed_in_different_order", "commit_message": "Reintroduce #14376, with bugfix for monoliths (#14468)\n\n* Add tests for StreamIdGenerator\n\n* Drive-by: annotate all defs\n\n* Revert \"Revert \"Remove slaved id tracker (#14376)\" (#14463)\"\n\nThis reverts commit d63814fd736fed5d3d45ff3af5e6d3bfae50c439, which in\nturn reverted 36097e88c4da51fce6556a58c49bd675f4cf20ab. 
This restores\nthe latter.\n\n* Fix StreamIdGenerator not handling unpersisted IDs\n\nSpotted by @erikjohnston.\n\nCloses #14456.\n\n* Changelog\n\nCo-authored-by: Nick Mills-Barrett \nCo-authored-by: Erik Johnston ", "code": "def test_multiple_gen_nexts_closed_in_different_order(self) -> None:\n \n id_gen = self._create_id_generator()\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 6, "token_counts": 26, "n_ast_nodes": 28, "n_identifiers": 4, "random_cut": "def test_multiple_gen_nexts_closed_in_different_order(self) -> None:\n ", "d_id": 73160, "documentation": { "docstring": "Check that we handle overlapping calls to gen_next, even when their IDs\n created and persisted in different orders.", "n_words": 18, "vocab_size": 18, "n_whitespaces": 24, "language": "en" } }, { "id": 57811, "commit_id": "36d9870433a22fff3944fa07f8e2feeb1b622bd9", "repo": "prefect", "path": "src/prefect/cli/deployment.py", "file_name": "deployment.py", "fun_name": "str_presenter", "commit_message": "Working YAML generation with lots of bells and whistles", "code": "def str_presenter(dumper, data):\n \n if len(data.splitlines()) > 1: # check for multiline string\n return dumper.represent_scalar(\"tag:yaml.org,2002:str\", data, style=\"|\")\n return dumper.represent_scalar(\"tag:yaml.org,2002:str\", data)\n\n\nyaml.add_representer(str, str_presenter)\nyaml.representer.SafeRepresenter.add_representer(str, str_presenter)\n\ndeployment_app = PrefectTyper(\n name=\"deployment\", help=\"Commands for working with deployments.\"\n)\napp.add_typer(deployment_app)\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 34, "vocab_size": 30, "complexity": 2, "nloc": 4, "token_counts": 42, "n_ast_nodes": 135, "n_identifiers": 18, "random_cut": "def str_presenter(dumper, data):\n \n if len(data.splitlines()) > 1: # check for multiline string\n return dumper.represent_scalar(\"tag:yaml.org,2002:str\", data, style=\"|\")\n return dumper.represent_scalar(\"tag:yaml.org,2002:str\", data)\n\n\nyaml.add_representer(str, str_presen", "d_id": 11712, "documentation": { "docstring": "\n configures yaml for dumping multiline strings\n Ref: https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 18, "language": "en" } }, { "id": 29120, "commit_id": "92a0c6c9f4324aa8f65a9b3e3a319604660a92a8", "repo": "saleor", "path": "saleor/core/auth_backend.py", "file_name": "auth_backend.py", "fun_name": "_get_permissions", "commit_message": "Replace Interpolation With Fstring (#11016)\n\n* Replace Interpolation With Fstring\r\n\r\n* Fix out of bound lines.\r\n\r\n* Revert to lazy formatting for log messages. 
Also fix failing flake8.\r\n\r\n* Fix minor code smells and typo.\r\n\r\n* Make street_address to one line.\r\n\r\n* Fix test cases.\r\n\r\n* Fix lints.", "code": "def _get_permissions(self, user_obj, obj, from_name):\n \n if not user_obj.is_active or user_obj.is_anonymous or obj is not None:\n return set()\n\n perm_cache_name = \"_effective_permissions_cache\"\n if not getattr(user_obj, perm_cache_name, None):\n perms = getattr(self, f\"_get_{from_name}_permissions\")(user_obj)\n perms = perms.values_list(\"content_type__app_label\", \"codename\").order_by()\n setattr(user_obj, perm_cache_name, {f\"{ct}.{name}\" for ct, name in perms})\n return getattr(user_obj, perm_cache_name)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 123, "n_words": 44, "vocab_size": 34, "complexity": 6, "nloc": 9, "token_counts": 95, "n_ast_nodes": 163, "n_identifiers": 16, "random_cut": "def _get_permissions(self, user_obj, obj, from_name):\n \n if not user_obj.is_active or user_obj.is_anonymous or obj is not None:\n return set()\n\n perm_cache_name = \"_effective_permissions_cache\"\n if not getattr(user_obj, perm_cache_name, None):\n perms = getattr(self, f\"_get_{from_name}_permissions\")(user_obj)\n perms = perms.values_list(\"content_type__app_label\", \"codename\").order_by()\n setattr(user_obj, perm_cache_name, {f\"{c", "d_id": 5201, "documentation": { "docstring": "Return the permissions of `user_obj` from `from_name`.\n\n `from_name` can be either \"group\" or \"user\" to return permissions from\n `_get_group_permissions` or `_get_user_permissions` respectively.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 43, "language": "en" } }, { "id": 276145, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saved_model/saved_model_test.py", "file_name": "saved_model_test.py", "fun_name": "test_trainable_layers", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_trainable_layers(self):\n \n model = model = self._get_model()\n # Set the last layer to *not* be trainable.\n model.layers[-1].trainable = False\n self._train_model(model, use_dataset=True)\n loaded = self._save_and_load(model)\n\n self._test_evaluation(model, loaded)\n self.assertFalse(model.layers[-1].trainable)\n self.assertFalse(loaded.layers[-1].trainable)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 91, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 80, "n_ast_nodes": 130, "n_identifiers": 12, "random_cut": "def test_trainable_layers(self):\n \n mo", "d_id": 81576, "documentation": { "docstring": "Tests that trainable status of individual layers is preserved.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 153052, "commit_id": "58bbcc37477866d19c8b092a0e1974a4f0baa586", "repo": "modin", "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_reorder_labels", "commit_message": "REFACTOR-#2656: Update modin to fit algebra (code only) (#3717)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: Rehan Durrani ", "code": "def _reorder_labels(self, row_positions=None, col_positions=None):\n \n if row_positions is not None:\n 
ordered_rows = self._partition_mgr_cls.map_axis_partitions(\n 0, self._partitions, lambda df: df.iloc[row_positions]\n )\n row_idx = self.index[row_positions]\n else:\n ordered_rows = self._partitions\n row_idx = self.index\n if col_positions is not None:\n ordered_cols = self._partition_mgr_cls.map_axis_partitions(\n 1, ordered_rows, lambda df: df.iloc[:, col_positions]\n )\n col_idx = self.columns[col_positions]\n else:\n ordered_cols = ordered_rows\n col_idx = self.columns\n return self.__constructor__(ordered_cols, row_idx, col_idx)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 239, "n_words": 57, "vocab_size": 36, "complexity": 3, "nloc": 18, "token_counts": 123, "n_ast_nodes": 187, "n_identifiers": 16, "random_cut": "def _reorder_labels(self, row_positions=None, col_positions=None):\n \n if row_positions is not None:\n ordered_rows = self._partition_mgr_cls.map_axis_partitions(\n 0, self._partitions, lambda df: df.iloc[row_positions]\n )\n row_idx = self.index[row_positions]\n else:\n ordered_rows = self._partitions\n row_idx = self.index\n if col_positions is not None:\n ordered_cols = self._partition_mgr_cls.map_a", "d_id": 35236, "documentation": { "docstring": "\n Reorder the column and or rows in this DataFrame.\n\n Parameters\n ----------\n row_positions : list of int, optional\n The ordered list of new row orders such that each position within the list\n indicates the new position.\n col_positions : list of int, optional\n The ordered list of new column orders such that each position within the\n list indicates the new position.\n\n Returns\n -------\n PandasDataframe\n A new PandasDataframe with reordered columns and/or rows.\n ", "n_words": 70, "vocab_size": 39, "n_whitespaces": 189, "language": "en" } }, { "id": 69225, "commit_id": "58d430fe3ee62e93ad8d16a08bb42156a25b7d41", "repo": "erpnext", "path": "erpnext/assets/doctype/asset_capitalization/test_asset_capitalization.py", "file_name": "test_asset_capitalization.py", "fun_name": "get_actual_sle_dict", "commit_message": "feat: Asset Capitalization\n- manual selection of entry type\n- GLE cleanup with smaller functions\n- GLE considering periodical inventory\n- test cases", "code": "def get_actual_sle_dict(name):\n\tsles = frappe.db.sql(\n\t\t,\n\t\tname,\n\t\tas_dict=1,\n\t)\n\n\tsle_dict = {}\n\tfor d in sles:\n\t\tsle_dict[(d.item_code, d.warehouse)] = {\n\t\t\t\"actual_qty\": d.actual_qty,\n\t\t\t\"stock_value_difference\": d.stock_value_difference,\n\t\t}\n\n\treturn sle_dict\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 14, "n_words": 27, "vocab_size": 24, "complexity": 2, "nloc": 22, "token_counts": 60, "n_ast_nodes": 94, "n_identifiers": 13, "random_cut": "def get_actual_sle_dict(name):\n\tsles = frappe.db.sql(\n\t\t,\n\t\tname,\n\t\tas_dict=1,\n\t)\n\n\tsle_dict = {}\n\tfor d in sles:\n\t\tsle_dict[(d.item_code, d.warehouse)] = {\n\t\t\t\"actual_qty\": d.actual_qty,", "d_id": 14998, "documentation": { "docstring": "\n\t\tselect\n\t\t\titem_code, warehouse,\n\t\t\tsum(actual_qty) as actual_qty,\n\t\t\tsum(stock_value_difference) as stock_value_difference\n\t\tfrom `tabStock Ledger Entry`\n\t\twhere voucher_type = 'Asset Capitalization' and voucher_no = %s\n\t\tgroup by item_code, warehouse\n\t\thaving actual_qty != 0\n\t", "n_words": 30, "vocab_size": 27, "n_whitespaces": 22, "language": "en" } }, { "id": 314885, 
"commit_id": "00810235c92b492a966c6021021d49360ffb3cdd", "repo": "core", "path": "homeassistant/config_entries.py", "file_name": "config_entries.py", "fun_name": "_async_process_on_unload", "commit_message": "Track tasks adding entities (#73828)\n\n* Track tasks adding entities\r\n\r\n* Update homeassistant/config_entries.py\r\n\r\n* fix cast tests\r\n\r\nCo-authored-by: J. Nick Koston ", "code": "async def _async_process_on_unload(self) -> None:\n \n if self._on_unload is not None:\n while self._on_unload:\n self._on_unload.pop()()\n\n while self._pending_tasks:\n pending = [task for task in self._pending_tasks if not task.done()]\n self._pending_tasks.clear()\n if pending:\n await asyncio.gather(*pending)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 125, "n_words": 30, "vocab_size": 25, "complexity": 7, "nloc": 10, "token_counts": 71, "n_ast_nodes": 120, "n_identifiers": 11, "random_cut": "async def _async_process_on_unload(self) -> None:\n \n ", "d_id": 113486, "documentation": { "docstring": "Process the on_unload callbacks and wait for pending tasks.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 154490, "commit_id": "d6d503ac7c3028d871c34d9e99e925ddb0746df6", "repo": "modin", "path": "modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py", "file_name": "partition.py", "fun_name": "apply_func", "commit_message": "FIX-#4597: Refactor Partition handling of func, args, kwargs (#4715)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Jonathan Shi ", "code": "def apply_func(partition, func, *args, **kwargs):\n \n result = func(partition, *args, **kwargs)\n return result, get_ip()\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 51, "n_identifiers": 7, "random_cut": "def apply_func(partition, func, *args, **kwargs):\n \n ", "d_id": 36013, "documentation": { "docstring": "\n Execute a function on the partition in a worker process.\n\n Parameters\n ----------\n partition : pandas.DataFrame\n A pandas DataFrame the function needs to be executed on.\n func : callable\n The function to perform.\n *args : list\n Positional arguments to pass to ``func``.\n **kwargs : dict\n Keyword arguments to pass to ``func``.\n\n Returns\n -------\n pandas.DataFrame\n The resulting pandas DataFrame.\n str\n The node IP address of the worker process.\n\n Notes\n -----\n Directly passing a call queue entry (i.e. 
a list of [func, args, kwargs]) instead of\n destructuring it causes a performance penalty.\n ", "n_words": 89, "vocab_size": 60, "n_whitespaces": 180, "language": "en" } }, { "id": 216481, "commit_id": "c78f1ee4f49df35ab04e921a45de0878716d8bf5", "repo": "salt", "path": "salt/client/mixins.py", "file_name": "mixins.py", "fun_name": "_proc_function_remote", "commit_message": "Implement ``__getstate__`` and ``__setstate__`` instead of using ``classmethod``\n\nSigned-off-by: Pedro Algarvio ", "code": "def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True):\n \n if daemonize and not salt.utils.platform.is_windows():\n # Shutdown the multiprocessing before daemonizing\n salt.log.setup.shutdown_multiprocessing_logging()\n\n salt.utils.process.daemonize()\n\n # Reconfigure multiprocessing logging after daemonizing\n salt.log.setup.setup_multiprocessing_logging()\n\n # pack a few things into low\n low[\"__jid__\"] = jid\n low[\"__user__\"] = user\n low[\"__tag__\"] = tag\n\n try:\n return self.cmd_sync(low)\n except salt.exceptions.EauthAuthenticationError as exc:\n log.error(exc)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 186, "n_words": 53, "vocab_size": 47, "complexity": 4, "nloc": 12, "token_counts": 105, "n_ast_nodes": 175, "n_identifiers": 22, "random_cut": "def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True):\n \n if daemonize and not salt.utils.platform.is_windows():\n # Shutdown the multiprocessing before daemonizing\n salt.log.setup.shutdown_multiprocessing_logging()\n\n salt.utils.process.daemonize()", "d_id": 54603, "documentation": { "docstring": "\n Run this method in a multiprocess target to execute the function on the\n master and fire the return data on the event bus\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 45, "language": "en" } }, { "id": 156073, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/array/utils.py", "file_name": "utils.py", "fun_name": "array_safe", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def array_safe(a, like, **kwargs):\n \n from dask.array.routines import array\n\n return _array_like_safe(np.array, array, a, like, **kwargs)\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 35, "n_ast_nodes": 51, "n_identifiers": 9, "random_cut": "def array_safe(a, like, **kwargs):\n ", "d_id": 36536, "documentation": { "docstring": "\n If `a` is `dask.array`, return `dask.array.asarray(a, **kwargs)`,\n otherwise return `np.asarray(a, like=like, **kwargs)`, dispatching\n the call to the library that implements the like array. 
Note that\n when `a` is a `dask.Array` backed by `cupy.ndarray` but `like`\n isn't, this function will call `a.compute(scheduler=\"sync\")`\n before `np.array`, as downstream libraries are unlikely to know how\n to convert a `dask.Array` and CuPy doesn't implement `__array__` to\n prevent implicit copies to host.\n ", "n_words": 66, "vocab_size": 52, "n_whitespaces": 94, "language": "en" } }, { "id": 116102, "commit_id": "c8accc16e3c56d0e7d2a0b63c63a956849da57da", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/elasticsearch_handler/elasticsearch_handler.py", "file_name": "elasticsearch_handler.py", "fun_name": "get_tables", "commit_message": "implemented the get_tables() and get_columns() methods", "code": "def get_tables(self) -> StatusResponse:\n \n\n query = \n result = self.native_query(query)\n df = result.data_frame\n df = df.drop(['type', 'type'], axis=1)\n result.data_frame = df.rename(columns={'name': 'table_name'})\n\n return result\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 73, "n_words": 23, "vocab_size": 16, "complexity": 1, "nloc": 14, "token_counts": 58, "n_ast_nodes": 103, "n_identifiers": 12, "random_cut": "def get_tables(self) -> StatusResponse:\n \n\n query = \n result = self.native_query(query)\n df = result.data_frame\n df = df.drop(['type', 'type'], axis=1)\n result.data_frame = df.rename(columns={'name': 'table_name'})\n\n return result\n", "d_id": 25668, "documentation": { "docstring": "\n Return list of entities that will be accessible as tables.\n Returns:\n HandlerResponse\n \n SHOW TABLES;\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 66, "language": "en" } }, { "id": 254376, "commit_id": "f348aecdade3cdec4f93b72da548c7394ecb42ce", "repo": "d2l-en", "path": "d2l/jax.py", "file_name": "jax.py", "fun_name": "accuracy", "commit_message": "JAX: Add section classification.md (#2293)", "code": "def accuracy(self, params, X, Y, averaged=True):\n \n Y_hat = self.apply(params, X)\n Y_hat = d2l.reshape(Y_hat, (-1, Y_hat.shape[-1]))\n preds = d2l.astype(d2l.argmax(Y_hat, axis=1), Y.dtype)\n compare = d2l.astype(preds == d2l.reshape(Y, -1), d2l.float32)\n return d2l.reduce_mean(compare) if averaged else compare\n", "url": "https://github.com/d2l-ai/d2l-en.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 75, "n_words": 33, "vocab_size": 28, "complexity": 2, "nloc": 6, "token_counts": 101, "n_ast_nodes": 150, "n_identifiers": 19, "random_cut": "def accuracy(self, params, X, Y, averaged=True):\n \n Y_hat = self.apply(params, X)\n Y_hat = d2l.reshap", "d_id": 74596, "documentation": { "docstring": "Compute the number of correct predictions.\n \n Defined in :numref:`sec_classification`", "n_words": 9, "vocab_size": 9, "n_whitespaces": 19, "language": "en" } }, { "id": 173483, "commit_id": "26be5ee2372b08c2f906661283a12e84d6c181f8", "repo": "calibre-web", "path": "cps/tasks/metadata_backup.py", "file_name": "metadata_backup.py", "fun_name": "open_metadata", "commit_message": "Backup metadata 3rd step", "code": "def open_metadata(self, book, custom_columns):\n if config.config_use_google_drive:\n if not gdriveutils.is_gdrive_ready():\n raise Exception('Google Drive is configured but not ready')\n\n web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path)\n if not web_content_link:\n raise Exception('Google Drive cover url not found')\n\n stream = None\n try:\n stream = 
urlopen(web_content_link)\n except Exception as ex:\n # Bubble exception to calling function\n self.log.debug('Error reading metadata.opf: ' + str(ex)) # ToDo Check whats going on\n raise ex\n finally:\n if stream is not None:\n stream.close()\n else:\n # ToDo: Handle book folder not found or not readable\n book_metadata_filepath = os.path.join(config.config_calibre_dir, book.path, 'metadata.opf')\n #if not os.path.isfile(book_metadata_filepath):\n self.create_new_metadata_backup(book, custom_columns, book_metadata_filepath)\n # else:\n \n", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 380, "n_words": 92, "vocab_size": 68, "complexity": 7, "nloc": 37, "token_counts": 121, "n_ast_nodes": 209, "n_identifiers": 24, "random_cut": "def open_metadata(self, book, custom_columns):\n if config.config_use_google_drive:\n if not gdriveutils.is_gdrive_ready():\n raise Exception('Google Drive is configured but not ready')\n\n web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path)\n if not web_content_link:\n raise Exception('Google Drive cover url not found')\n\n stream = None\n try:\n stream = urlopen(web_content_link)\n except Exception as ex:\n # Bubble exception to calling function\n self.log.debug('Error reading metadata", "d_id": 40848, "documentation": { "docstring": "namespaces = {'dc': PURL_NAMESPACE, 'opf': OPF_NAMESPACE}\n test = etree.parse(book_metadata_filepath)\n root = test.getroot()\n for i in root.iter():\n self.log.info(i)\n title = root.find(\"dc:metadata\", namespaces)\n pass\n with open(book_metadata_filepath, \"rb\") as f:\n xml = f.read()\n\n root = objectify.fromstring(xml)\n # root.metadata['{http://purl.org/dc/elements/1.1/}title']\n # root.metadata[PURL + 'title']\n # getattr(root.metadata, PURL +'title')\n # test = objectify.parse()\n pass\n # backup not found has to be created\n #raise Exception('Book cover file not found')", "n_words": 62, "vocab_size": 48, "n_whitespaces": 245, "language": "en" } }, { "id": 262765, "commit_id": "93ad16d5c970f70f843a5eda8b177f681743005b", "repo": "pyinstaller", "path": "tests/functional/test_libraries.py", "file_name": "test_libraries.py", "fun_name": "test_gevent_monkey", "commit_message": "tests: gevent tests: remove no-op excludes\n\nThe `gevent` tests seem to be attempting to exclude several packages.\nAs per comment in 416e1a0e83bf5a4924cc50d2befa2bb622b55107, this\nwas introduced in an attempt to break the following Windows-specific\nimport chain: setuptools.msvc -> numpy -> numpy.testing -> pytest ->\npygments -> PIL -> PIL.ImageQt -> PySide2.\n\nHowever, nowadays we already break that chain in two places: our\nsetuptools.msvc hook excludes numpy, and our numpy hook excludes\npytest.\n\nMore importantly, `excludes` is not a valid keyword argument for\nthe `pyi_builder.test_source` (anymore?), and is quietly swallowed\nby the `**kwargs`. So those exclude lists achieve nothing, except\nconfusing people who look at existing code to find a way to exclude\npackages in a test. 
(As a side note, the tests that do use `excludes`\nkeyword argument are passing it to the modulegraph's functions,\nnot the `pyi_builder` fixture ones.)", "code": "def test_gevent_monkey(pyi_builder):\n pyi_builder.test_source()\n\n\n# The tkinter module may be available for import, but not actually importable due to missing shared libraries.\n# Therefore, we need to use `can_import_module`-based skip decorator instead of `@importorskip`.\n@pytest.mark.skipif(not can_import_module(\"tkinter\"), reason=\"tkinter cannot be imported.\")", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(not can_import_module(\"tkinter\"), reason=\"tkinter cannot be imported.\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 38, "n_words": 39, "vocab_size": 36, "complexity": 1, "nloc": 5, "token_counts": 11, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def test_gevent_monkey(pyi_builder):\n pyi_builder.test_source()\n\n\n# The tkinter", "d_id": 77352, "documentation": { "docstring": "\n from gevent.monkey import patch_all\n patch_all()\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 27, "language": "en" } }, { "id": 61221, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "split_auth_netloc_from_url", "commit_message": "upd; format", "code": "def split_auth_netloc_from_url(url):\n # type: (str) -> Tuple[str, str, Tuple[str, str]]\n \n url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc)\n return url_without_auth, netloc, auth\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 42, "n_identifiers": 7, "random_cut": "def split_auth_netloc_from_url(url):\n # type: (str) -> Tuple[str, str, Tuple[str, ", "d_id": 12446, "documentation": { "docstring": "\n Parse a url into separate netloc, auth, and url with no auth.\n\n Returns: (url_without_auth, netloc, (username, password))\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 27, "language": "en" } }, { "id": 189647, "commit_id": "2275ec5916de0ad3bedbc276da09fc3bfbae4d5e", "repo": "manim", "path": "tests/test_text_mobject.py", "file_name": "test_text_mobject.py", "fun_name": "test_non_str_color", "commit_message": ":class:`~.MathTex`, :class:`~.Tex`, :class:`~.Text` and :class:`~.MarkupText` inherit color from their parent mobjects. 
(#2467)\n\n* comment out color-related things from tex_mob\r\n\r\n* add change to svg_mobject\r\n\r\n* MarkupText handles colour internally\r\n\r\n* MarkupText handles colour internally\r\n\r\n* make coordinate_system.py colour agnostic\r\n\r\n* get_line_from_axis_to_point\r\n\r\n* add typings for SingleStringMathTex\r\n\r\n* add typings for MathTex\r\n\r\n* make internal methods internal\r\n\r\n* black + isort\r\n\r\n* fix typo\r\n\r\n* black + isort\r\n\r\n* fix typo\r\n\r\n* revert internalizing change\r\n\r\n* Revert \"Merge branch 'mathtexx' of https://github.com/hydrobeam/manim into mathtexx\"\r\n\r\nThis reverts commit 6be3c3981440fd5cfee54e5d9f24b30e1ba991e9, reversing\r\nchanges made to 2b30b446ae4004efb06adbb646f54e9ef269bc61.\r\n\r\n* remove accidental import\r\n\r\n* do it in a less bad way\r\n\r\n* WIP: Text2setting causing problems\r\n\r\n* allow tex_mobject.py to inherit colour\r\n\r\n* allow tex_mobject.py to inherit colour\r\n\r\n* add tests\r\n\r\n* remove undeedde imports + formatting\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix warnings from pre-commit hooks\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix some tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* remove other color_inheritance test\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix typo\r\n\r\n* accomodate the color->attribute PR\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix tests and doc build\r\n\r\nadd a check for None when inheriting colour in\r\n`coordinate_systems.py`, and turn written tests\r\ninto graphical tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Comment out `Text` color inheritance test.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Set font for text_color_inheritance test\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Small change to retrigger docs build\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def test_non_str_color():\n \n\n text = Text(\"test_color_inheritance\", color=Color(\"blue\"))\n markup_text = MarkupText(\"test_color_inheritance\", color=Color(\"blue\"))\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 19, "n_words": 10, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 60, "n_identifiers": 7, "random_cut": "def test_non_str_color():\n \n\n text = Text(\"test_color_inheritance\", color=Color(\"blue\"))\n markup_text = MarkupText(\"test_color_inheritance\", color=Color(\"blue\"))\n", "d_id": 46153, "documentation": { "docstring": "Test that the Text and MarkupText can accept non_str color values\n i.e. 
colour.Color(red).", "n_words": 13, "vocab_size": 13, "n_whitespaces": 15, "language": "en" } }, { "id": 305107, "commit_id": "f78b39bdbfbe151e8bab72610b6fe03afc8c0747", "repo": "core", "path": "tests/components/zha/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "test_strategy_no_network_settings", "commit_message": "ZHA backup/restore config flow (#77044)", "code": "async def test_strategy_no_network_settings(pick_radio, mock_app, hass):\n \n mock_app.load_network_info = MagicMock(side_effect=NetworkNotFormed())\n\n result, port = await pick_radio(RadioType.ezsp)\n assert (\n config_flow.FORMATION_REUSE_SETTINGS\n not in result[\"data_schema\"].schema[\"next_step_id\"].container\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 49, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 52, "n_ast_nodes": 87, "n_identifiers": 16, "random_cut": "async def test_strategy_no_network_settings(pick_radio, mock_app, hass):\n \n mock_app.load_network_info = MagicMock(side_effect=NetworkNotFormed())\n\n result, port = await pick_radio(RadioType.ezsp)\n assert (\n config_flow.FORMATION_REUSE_SETTINGS\n not in result[\"data_schema\"].schema[\"next_step_id\"].container\n )\n\n", "d_id": 103899, "documentation": { "docstring": "Test formation strategy when no network settings are present.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 108099, "commit_id": "17b3c44f67f779e7d103381878f08c548c2c8495", "repo": "matplotlib", "path": "lib/matplotlib/mlab.py", "file_name": "mlab.py", "fun_name": "detrend", "commit_message": "Improve mlab documentation (and example)", "code": "def detrend(x, key=None, axis=None):\n \n if key is None or key in ['constant', 'mean', 'default']:\n return detrend(x, key=detrend_mean, axis=axis)\n elif key == 'linear':\n return detrend(x, key=detrend_linear, axis=axis)\n elif key == 'none':\n return detrend(x, key=detrend_none, axis=axis)\n elif callable(key):\n x = np.asarray(x)\n if axis is not None and axis + 1 > x.ndim:\n raise ValueError(f'axis(={axis}) out of bounds')\n if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):\n return key(x)\n # try to use the 'axis' argument if the function supports it,\n # otherwise use apply_along_axis to do it\n try:\n return key(x, axis=axis)\n except TypeError:\n return np.apply_along_axis(key, axis=axis, arr=x)\n else:\n raise ValueError(\n f\"Unknown value for key: {key!r}, must be one of: 'default', \"\n f\"'constant', 'mean', 'linear', or a function\")\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 282, "n_words": 121, "vocab_size": 82, "complexity": 13, "nloc": 21, "token_counts": 180, "n_ast_nodes": 295, "n_identifiers": 15, "random_cut": "def detrend(x, key=None, axis=None):\n \n if key is None or key in ['constant', 'mean', 'default']:\n return detrend(x, key=detrend_mean, axis=axis)\n elif key == 'linear':\n return detrend(x, key=detrend_linear, axis=axis)\n elif key == 'none':\n return detrend(x, key=detrend_none, axis=axis)\n elif callable(key):\n", "d_id": 23059, "documentation": { "docstring": "\n Return *x* with its trend removed.\n\n Parameters\n ----------\n x : array or sequence\n Array or sequence containing the data.\n\n key : {'default', 'constant', 'mean', 'linear', 'none'} or function\n The detrending algorithm to 
use. 'default', 'mean', and 'constant' are\n the same as `detrend_mean`. 'linear' is the same as `detrend_linear`.\n 'none' is the same as `detrend_none`. The default is 'mean'. See the\n corresponding functions for more details regarding the algorithms. Can\n also be a function that carries out the detrend operation.\n\n axis : int\n The axis along which to do the detrending.\n\n See Also\n --------\n detrend_mean : Implementation of the 'mean' algorithm.\n detrend_linear : Implementation of the 'linear' algorithm.\n detrend_none : Implementation of the 'none' algorithm.\n ", "n_words": 114, "vocab_size": 75, "n_whitespaces": 200, "language": "en" } }, { "id": 71270, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/templatetags/wagtailadmin_tags.py", "file_name": "wagtailadmin_tags.py", "fun_name": "has_unrendered_errors", "commit_message": "Reformat with black", "code": "def has_unrendered_errors(bound_field):\n \n return bound_field.errors and not hasattr(\n bound_field.field.widget, \"render_with_errors\"\n )\n\n\n@register.filter(is_safe=True)\n@stringfilter", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "@register.filter(is_safe=True)\n@stringfilter", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 26, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 22, "n_ast_nodes": 57, "n_identifiers": 10, "random_cut": "def has_unrendered_errors(bound_field):\n \n return bound_field.errors and not hasattr(\n bound_field.field.widget, \"render_with_errors\"\n )\n\n", "d_id": 15653, "documentation": { "docstring": "\n Return true if this field has errors that were not accounted for by render_with_errors, because\n the widget does not support the render_with_errors method\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 33, "language": "en" } }, { "id": 82589, "commit_id": "7ca1b613d8573dff70e45dd54229b0032c3e8ca7", "repo": "django-cms", "path": "cms/tests/test_admin.py", "file_name": "test_admin.py", "fun_name": "test_raw_id_threshold_page_permission_inline_admin", "commit_message": "perf: Don't count users when CMS_RAW_ID_USERS=True (#7414)\n\n* perf: Don't count users when CMS_RAW_ID_USERS=True\n\nWhen using CMS_RAW_ID_USERS=True on a Postgres database with many users,\ncounting the users is slow and will always yield the same result.\n\nOnly count users when using an integer value as a threshold and reuse\nthe same logic for both PagePermissionInlineAdmin and\nGlobalPagePermissionAdmin.\n\n* Ensure that only integer settings of CMS_RAW_ID_USERS are compared to the number of users\n\n* Add documentation for the CMS_RAW_ID_USER=True setting\n\n* fix isort for added tests\n\n* Fix: in python this is always True: isinstance(False, int)\n\nCo-authored-by: Pankrat ", "code": "def test_raw_id_threshold_page_permission_inline_admin(self):\n \n with self.settings(CMS_RAW_ID_USERS=1):\n with self.assertNumQueries(1):\n self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, [])\n\n # Create users to check if threshold is honored\n self._get_guys()\n\n with self.settings(CMS_RAW_ID_USERS=False):\n with self.assertNumQueries(0):\n self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, [])\n\n with self.settings(CMS_RAW_ID_USERS=True):\n with self.assertNumQueries(0):\n self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, ['user'])\n\n with self.settings(CMS_RAW_ID_USERS=1):\n with self.assertNumQueries(1):\n self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, 
['user'])\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 189, "n_words": 36, "vocab_size": 21, "complexity": 1, "nloc": 14, "token_counts": 129, "n_ast_nodes": 229, "n_identifiers": 9, "random_cut": "def test_raw_id_threshold_page_permission_inline_admin(self):\n \n with self.settings(CMS_RAW_ID_USERS=1):\n with self.assertNumQueries(1):\n self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, [])\n\n # Create users to check if threshold is honored\n self._get_guys()\n\n with self.settings(CMS_RAW_ID_USERS=False):\n with self.assertNumQueries(0):\n self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, [])\n\n with self.settings(CMS_RAW_ID_USERS=True):\n with sel", "d_id": 17448, "documentation": { "docstring": "\n Only count users when using an integer value as threshold for\n CMS_RAW_ID_USERS.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 221145, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "get_breaks", "commit_message": "add python 3.10.4 for windows", "code": "def get_breaks(self, filename, lineno):\n \n filename = self.canonic(filename)\n return filename in self.breaks and \\\n lineno in self.breaks[filename] and \\\n Breakpoint.bplist[filename, lineno] or []\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 22, "vocab_size": 18, "complexity": 4, "nloc": 5, "token_counts": 47, "n_ast_nodes": 69, "n_identifiers": 8, "random_cut": "def get_breaks(self, filename, lineno):\n \n filename = self", "d_id": 56240, "documentation": { "docstring": "Return all breakpoints for filename:lineno.\n\n If no breakpoints are set, return an empty list.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 249580, "commit_id": "be76cd8200b18f3c68b895f85ac7ef5b0ddc2466", "repo": "synapse", "path": "tests/storage/test_registration.py", "file_name": "test_registration.py", "fun_name": "test_override", "commit_message": "Allow admins to require a manual approval process before new accounts can be used (using MSC3866) (#13556)", "code": "def test_override(self) -> None:\n \n self.get_success(\n self.store.register_user(\n self.user_id,\n self.pwhash,\n approved=True,\n )\n )\n\n user = self.get_success(self.store.get_user_by_id(self.user_id))\n self.assertIsNotNone(user)\n assert user is not None\n self.assertEqual(user[\"approved\"], 1)\n\n approved = self.get_success(self.store.is_user_approved(self.user_id))\n self.assertTrue(approved)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 156, "n_words": 26, "vocab_size": 23, "complexity": 1, "nloc": 17, "token_counts": 94, "n_ast_nodes": 150, "n_identifiers": 14, "random_cut": "def test_override(self) -> None:\n \n self.get_success(\n self.store.register_user(\n self.user_id,\n self.pwhash,\n approved=True,\n )\n )\n\n user = self.get_success(self.store.get_user_by_id(self.user_id))\n self.assertIsNotNone(user)\n assert user is not None\n self.assertEqual(user[\"approved\"], 1)\n\n approved = self.get_success(s", "d_id": 73002, "documentation": { "docstring": "Tests that if we require approval for new accounts, but we explicitly say the\n new user should be considered 
approved, they're marked as approved.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 38, "language": "en" } }, { "id": 158197, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "tokenize", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. 
(#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. (#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update 
chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. 
Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def tokenize(lines, token='word'):\n \n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('ERROR: unknown token type: ' + token)\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 64, "n_words": 31, "vocab_size": 23, "complexity": 5, "nloc": 7, "token_counts": 51, "n_ast_nodes": 90, "n_identifiers": 7, "random_cut": "def tokenize(lines, token='word'):\n \n if token == 'word':\n return [line.spl", "d_id": 37370, "documentation": { "docstring": "Split text lines into word or character tokens.\n\n Defined in :numref:`sec_text_preprocessing`", "n_words": 11, "vocab_size": 11, "n_whitespaces": 13, "language": "en" } }, { "id": 67369, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/selling/doctype/sales_order/sales_order.py", "file_name": "sales_order.py", "fun_name": "make_purchase_order_for_default_supplier", "commit_message": "style: format code with black", "code": "def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None):\n\t\n\tif not selected_items:\n\t\treturn\n\n\tif isinstance(selected_items, str):\n\t\tselected_items = json.loads(selected_items)\n\n\tdef set_missing_values(source, target):\n\t\ttarget.supplier = supplier\n\t\ttarget.apply_discount_on = \"\"\n\t\ttarget.additional_discount_percentage = 0.0\n\t\ttarget.discount_amount = 0.0\n\t\ttarget.inter_company_order_reference = \"\"\n\n\t\tdefault_price_list = frappe.get_value(\"Supplier\", supplier, \"default_price_list\")\n\t\tif default_price_list:\n\t\t\ttarget.buying_price_list = default_price_list\n\n\t\tif any(item.delivered_by_supplier == 1 for item in source.items):\n\t\t\tif source.shipping_address_name:\n\t\t\t\ttarget.shipping_address = source.shipping_address_name\n\t\t\t\ttarget.shipping_address_display = source.shipping_address\n\t\t\telse:\n\t\t\t\ttarget.shipping_address = source.customer_address\n\t\t\t\ttarget.shipping_address_display = source.address_display\n\n\t\t\ttarget.customer_contact_person = source.contact_person\n\t\t\ttarget.customer_contact_display = source.contact_display\n\t\t\ttarget.customer_contact_mobile = source.contact_mobile\n\t\t\ttarget.customer_contact_email = source.contact_email\n\n\t\telse:\n\t\t\ttarget.customer = \"\"\n\t\t\ttarget.customer_name = \"\"\n\n\t\ttarget.run_method(\"set_missing_values\")\n\t\ttarget.run_method(\"calculate_taxes_and_totals\")\n\n\tdef update_item(source, target, source_parent):\n\t\ttarget.schedule_date = source.delivery_date\n\t\ttarget.qty = flt(source.qty) - 
(flt(source.ordered_qty) / flt(source.conversion_factor))\n\t\ttarget.stock_qty = flt(source.stock_qty) - flt(source.ordered_qty)\n\t\ttarget.project = source_parent.project\n\n\tsuppliers = [item.get(\"supplier\") for item in selected_items if item.get(\"supplier\")]\n\tsuppliers = list(dict.fromkeys(suppliers)) # remove duplicates while preserving order\n\n\titems_to_map = [item.get(\"item_code\") for item in selected_items if item.get(\"item_code\")]\n\titems_to_map = list(set(items_to_map))\n\n\tif not suppliers:\n\t\tfrappe.throw(\n\t\t\t_(\"Please set a Supplier against the Items to be considered in the Purchase Order.\")\n\t\t)\n\n\tpurchase_orders = []\n\tfor supplier in suppliers:\n\t\tdoc = get_mapped_doc(\n\t\t\t\"Sales Order\",\n\t\t\tsource_name,\n\t\t\t{\n\t\t\t\t\"Sales Order\": {\n\t\t\t\t\t\"doctype\": \"Purchase Order\",\n\t\t\t\t\t\"field_no_map\": [\n\t\t\t\t\t\t\"address_display\",\n\t\t\t\t\t\t\"contact_display\",\n\t\t\t\t\t\t\"contact_mobile\",\n\t\t\t\t\t\t\"contact_email\",\n\t\t\t\t\t\t\"contact_person\",\n\t\t\t\t\t\t\"taxes_and_charges\",\n\t\t\t\t\t\t\"shipping_address\",\n\t\t\t\t\t\t\"terms\",\n\t\t\t\t\t],\n\t\t\t\t\t\"validation\": {\"docstatus\": [\"=\", 1]},\n\t\t\t\t},\n\t\t\t\t\"Sales Order Item\": {\n\t\t\t\t\t\"doctype\": \"Purchase Order Item\",\n\t\t\t\t\t\"field_map\": [\n\t\t\t\t\t\t[\"name\", \"sales_order_item\"],\n\t\t\t\t\t\t[\"parent\", \"sales_order\"],\n\t\t\t\t\t\t[\"stock_uom\", \"stock_uom\"],\n\t\t\t\t\t\t[\"uom\", \"uom\"],\n\t\t\t\t\t\t[\"conversion_factor\", \"conversion_factor\"],\n\t\t\t\t\t\t[\"delivery_date\", \"schedule_date\"],\n\t\t\t\t\t],\n\t\t\t\t\t\"field_no_map\": [\n\t\t\t\t\t\t\"rate\",\n\t\t\t\t\t\t\"price_list_rate\",\n\t\t\t\t\t\t\"item_tax_template\",\n\t\t\t\t\t\t\"discount_percentage\",\n\t\t\t\t\t\t\"discount_amount\",\n\t\t\t\t\t\t\"pricing_rules\",\n\t\t\t\t\t],\n\t\t\t\t\t\"postprocess\": update_item,\n\t\t\t\t\t\"condition\": lambda doc: doc.ordered_qty < doc.stock_qty\n\t\t\t\t\tand doc.supplier == supplier\n\t\t\t\t\tand doc.item_code in items_to_map,\n\t\t\t\t},\n\t\t\t},\n\t\t\ttarget_doc,\n\t\t\tset_missing_values,\n\t\t)\n\n\t\tdoc.insert()\n\t\tfrappe.db.commit()\n\t\tpurchase_orders.append(doc)\n\n\treturn purchase_orders\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 19, "n_whitespaces": 158, "n_words": 252, "vocab_size": 168, "complexity": 11, "nloc": 66, "token_counts": 297, "n_ast_nodes": 886, "n_identifiers": 68, "random_cut": "def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None):\n\t\n\tif not selected_items:\n\t\treturn\n\n\tif isinstance(selected_items, str):\n\t\tselected_items = json.loads(selected_items)\n\n\tdef set_missing_values(source, target):\n\t\ttarget.supplier = supplier\n\t\ttarget.apply_discount_on = \"\"\n\t\ttarget.additional_discount_percentage = 0.0\n\t\ttarget.discount_amount = 0.0\n\t\ttarget.inter_company_order_reference = \"\"\n\n\t\tdefault_price_list = frappe.get_value(\"Supplier\", supplier, \"default_price_list\")\n\t\tif default_price_list:\n\t\t\ttarget.buying_price_list = default_price_list\n\n\t\tif any(item.delivered_by_supplier == 1 for item in source.items):\n\t\t\tif source.shipping_address_name:\n\t\t\t\ttarget.shipping_address = source.shipping_address_name\n\t\t\t\ttarget.shipping_address_display = source.shipping_address\n\t\t\telse:\n\t\t\t\ttarget.shipping_address = 
source.customer_address\n\t\t\t\ttarget.shipping_address_display = source.address_display\n\n\t\t\ttarget.customer_contact_person = source.contact_person\n\t\t\ttarget.customer_contact_display = source.contact_display\n\t\t\ttarget.customer_contact_mobile = source.contact_mobile\n\t\t\ttarget.customer_contact_email = source.contact_email\n\n\t\telse:\n\t\t\ttarget.customer = \"\"\n\t\t\ttarget.customer_name = \"\"\n\n\t\ttarget.run_method(\"set_missing_values\")\n\t\ttarget.run_method(\"calculate_taxes_and_totals\")\n\n\tdef update_item(source, target, source_parent):\n\t\ttarget.schedule_date = source.delivery_date\n\t\ttarget.qty = flt(source.qty) - (flt(source.ordered_qty) / flt(source.conversion_factor))\n\t\ttarget.stock_qty = flt(source.stock_qty) - flt(source.ordered_qty)\n\t\ttarget.project = source_parent.project\n\n\tsuppliers = [item.get(\"supplier\") for item in selected_items if item.get(\"supplier\")]\n\tsuppliers = list(dict.fromkeys(suppliers)) # remove duplicates while preserving order\n\n\titems_to_map = [item.get(\"item_code\") for item in selected_items if item.get(\"item_code\")]\n\titems_to_map = list(set(items_to_map))\n\n\tif not suppliers:\n\t\tfrappe.throw(\n\t\t\t_(\"Please set a Supplier against the Items to be considered in the Purchase Order.\")\n\t\t)\n\n\tpurchase_orders = []\n\tfor supplier in suppliers:\n\t\tdoc = get_mapped_doc(\n\t\t\t\"Sales Order\",\n\t\t\tsource_name,\n\t\t\t{\n\t\t\t\t\"Sales Order\": {\n\t\t\t\t\t\"doctype\": \"Purchase Order\",\n\t\t\t\t\t\"field_no_map\": [\n\t\t\t\t\t\t\"address_display\",\n\t\t\t\t\t\t\"contact_display\",\n\t\t\t\t\t\t\"contact_mobile\",\n\t\t\t\t\t\t\"contact_email\",\n\t\t\t\t\t\t\"contact_person\",\n\t\t\t\t\t\t\"taxes_and_charges\",\n\t\t\t\t\t\t\"shipping_address\",\n\t\t\t\t\t\t\"terms\",\n\t\t\t\t\t],\n\t\t\t\t\t\"validation\": {\"docstatus\": [\"=\", 1]},\n\t\t\t\t},\n\t\t\t\t\"Sales Order Item\": {\n\t\t\t\t\t\"doctype\": \"Purchase Order Item\",\n\t\t\t\t\t\"field_map\": [\n\t\t\t\t\t\t[\"name\", \"sales_order_item\"", "d_id": 14507, "documentation": { "docstring": "Creates Purchase Order for each Supplier. Returns a list of doc objects.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 203228, "commit_id": "c5cd8783825b5f6384417dac5f3889b4210b7d08", "repo": "django", "path": "django/db/migrations/utils.py", "file_name": "utils.py", "fun_name": "resolve_relation", "commit_message": "Refs #33476 -- Refactored problematic code before reformatting by Black.\n\nIn these cases Black produces unexpected results, e.g.\r\n\r\ndef make_random_password(\r\n self,\r\n length=10,\r\n allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',\r\n):\r\n\r\nor\r\n\r\ncursor.execute(\"\"\"\r\nSELECT ...\r\n\"\"\",\r\n [table name],\r\n)", "code": "def resolve_relation(model, app_label=None, model_name=None):\n \n if isinstance(model, str):\n if model == RECURSIVE_RELATIONSHIP_CONSTANT:\n if app_label is None or model_name is None:\n raise TypeError(\n 'app_label and model_name must be provided to resolve '\n 'recursive relationships.'\n )\n return app_label, model_name\n if '.' 
in model:\n app_label, model_name = model.split('.', 1)\n return app_label, model_name.lower()\n if app_label is None:\n raise TypeError(\n 'app_label must be provided to resolve unscoped model relationships.'\n )\n return app_label, model.lower()\n return model._meta.app_label, model._meta.model_name\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 256, "n_words": 70, "vocab_size": 42, "complexity": 7, "nloc": 18, "token_counts": 101, "n_ast_nodes": 169, "n_identifiers": 11, "random_cut": "def resolve_relation(model, app_label=None, model_name=None):\n \n if isinstance(model, str):\n if model == RECURSIVE_RELATIONSHIP_CONSTANT:\n ", "d_id": 50262, "documentation": { "docstring": "\n Turn a model class or model reference string and return a model tuple.\n\n app_label and model_name are used to resolve the scope of recursive and\n unscoped model relationship.\n ", "n_words": 28, "vocab_size": 22, "n_whitespaces": 41, "language": "en" } }, { "id": 31459, "commit_id": "7cced021fa8ddc59f0f77384300760d34545394e", "repo": "transformers", "path": "src/transformers/modeling_tf_utils.py", "file_name": "modeling_tf_utils.py", "fun_name": "load_tf_weights", "commit_message": "TF Sharded (#17713)\n\n* initial commit\r\n\r\n* update modeeling tf utils\r\n\r\n* quality\r\n\r\n* clean and update args\r\n\r\n* update\r\n\r\n* remove potential bug\r\n\r\n* code quality\r\n\r\n* update\r\n\r\n* update max shard\r\n\r\n* update tests for sharding from pretrained\r\n\r\n* fix remaining test\r\n\r\n* make style\r\n\r\n* h5py if tf available\r\n\r\n* update and fix test\r\n\r\n* fix test\r\n\r\n* style\r\n\r\n* modified push to hub to support shard for TF\r\n\r\n* quick fix\r\n\r\n* update code\r\n\r\n* merge branch main and style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Patrick von Platen \r\n\r\n* update based on reviews\r\n\r\n* update doc\r\n\r\n* update and style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update based on reviews\r\n\r\n* fix typo\r\n\r\n* style\r\n\r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):\n \n missing_layers = []\n unexpected_layers = []\n mismatched_layers = []\n\n # Read the H5 file\n with h5py.File(resolved_archive_file, \"r\") as sharded_checkpoint_file:\n # Retrieve the name of each layer from the H5 file\n saved_h5_model_layers_name = set(\n hdf5_format.load_attributes_from_hdf5_group(sharded_checkpoint_file, \"layer_names\")\n )\n\n # Find the missing layers from the high level list of layers\n missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)\n\n # Find the unexpected layers from the high level list of layers\n unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))\n saved_weight_names_set = set()\n symbolic_weights_names = set()\n weight_value_tuples = []\n\n # Compute missing and unexpected sub layers\n # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]\n for layer in model.layers:\n # if layer_name from the H5 file belongs to the layers from the instantiated model\n if layer.name in 
saved_h5_model_layers_name:\n # Get the H5 layer object from its name\n h5_layer_object = sharded_checkpoint_file[layer.name]\n # Get all the weights as a list from the layer object\n symbolic_weights = layer.trainable_weights + layer.non_trainable_weights\n saved_weights = {}\n\n # Create a dict from the H5 saved model that looks like {\"weight_name\": weight_value}\n # And a set with only the names\n for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, \"weight_names\"):\n # TF names always start with the model name so we ignore it\n name = \"/\".join(weight_name.split(\"/\")[1:])\n\n if _prefix is not None:\n name = _prefix + \"/\" + name\n\n saved_weights[name] = np.asarray(h5_layer_object[weight_name])\n\n # Add the updated name to the final list for computing missing/unexpected values\n saved_weight_names_set.add(name)\n\n # Loop over each weights from the instantiated model and compare with the weights from the H5 file\n for symbolic_weight in symbolic_weights:\n # TF names always start with the model name so we ignore it\n if _prefix is not None:\n delimeter = len(_prefix.split(\"/\"))\n symbolic_weight_name = \"/\".join(\n symbolic_weight.name.split(\"/\")[:delimeter]\n + symbolic_weight.name.split(\"/\")[delimeter + 1 :]\n )\n else:\n symbolic_weight_name = \"/\".join(symbolic_weight.name.split(\"/\")[1:])\n\n # here we check if the current weight is among the weights from the H5 file\n # If yes, get the weight_value of the corresponding weight from the H5 file\n # If not, make the value to None\n saved_weight_value = saved_weights.get(symbolic_weight_name, None)\n\n # Add the updated name to the final list for computing missing/unexpected values\n symbolic_weights_names.add(symbolic_weight_name)\n\n # If the current weight is found\n if saved_weight_value is not None:\n # Check if the shape of the current weight and the one from the H5 file are different\n if K.int_shape(symbolic_weight) != saved_weight_value.shape:\n # If yes we reshape the weight from the H5 file accordingly to the current weight\n # If the two shapes are not compatible we raise an issue\n try:\n array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))\n except ValueError as e:\n if ignore_mismatched_sizes:\n mismatched_layers.append(\n (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))\n )\n continue\n else:\n raise e\n else:\n array = saved_weight_value\n\n # We create the tuple that will be loaded and add it to the final list\n weight_value_tuples.append((symbolic_weight, array))\n\n # Load all the weights\n K.batch_set_value(weight_value_tuples)\n\n # Compute the missing and unexpected layers\n missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))\n unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))\n\n return missing_layers, unexpected_layers, mismatched_layers\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 27, "n_whitespaces": 1803, "n_words": 479, "vocab_size": 200, "complexity": 13, "nloc": 54, "token_counts": 415, "n_ast_nodes": 705, "n_identifiers": 49, "random_cut": "def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):\n \n missing_layers = []\n unexpected_layers = []\n mismatched_layers = []\n\n # Read the H5 file\n with h5py.File(resolved_archive_file, \"r\") as sharded_checkpoint_file:\n # Retrieve the name of each layer from the H5 file\n 
saved_h5_model_layers_name = set(\n hdf5_format.load_attributes_from_hdf5_group(sharded_checkpoint_file, \"layer_names\")\n )\n\n # Find the missing layers from the high level list of layers\n missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)\n\n # Find the unexpected layers from the high level list of layers\n unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))\n saved_weight_names_set = set()\n symbolic_weights_names = set()\n weight_value_tuples = []\n\n # Compute missing and unexpected sub layers\n # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]\n for layer in model.layers:\n # if layer_name from the H5 file belongs to the layers from the instantiated model\n if layer.name in saved_h5_model_layers_name:\n # Get the H5 layer object from its name\n h5_layer_object = sharded_checkpoint_file[layer.name]\n # Get all the weights as a list from the layer object\n symbolic_weights = layer.trainable_weights + layer.non_trainable_weights\n saved_weights = {}\n\n # Create a dict from the H5 saved model that looks like {\"weight_name\": weight_value}\n # And a set with only the names\n for weight_name in ", "d_id": 5750, "documentation": { "docstring": "\n Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and\n shapes.\n\n Args:\n model (`tf.keras.models.Model`):\n The model to load the weights into.\n resolved_archive_file (`str`):\n The location of the H5 file.\n ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):\n Whether or not to ignore weights with shapes that don't match between the checkpoint of the model.\n\n Returns:\n Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the\n mismatched layers.\n ", "n_words": 83, "vocab_size": 56, "n_whitespaces": 167, "language": "en" } }, { "id": 205929, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/forms/boundfield.py", "file_name": "boundfield.py", "fun_name": "css_classes", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def css_classes(self, extra_classes=None):\n \n if hasattr(extra_classes, \"split\"):\n extra_classes = extra_classes.split()\n extra_classes = set(extra_classes or [])\n if self.errors and hasattr(self.form, \"error_css_class\"):\n extra_classes.add(self.form.error_css_class)\n if self.field.required and hasattr(self.form, \"required_css_class\"):\n extra_classes.add(self.form.required_css_class)\n return \" \".join(extra_classes)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 104, "n_words": 29, "vocab_size": 23, "complexity": 7, "nloc": 9, "token_counts": 91, "n_ast_nodes": 153, "n_identifiers": 14, "random_cut": "def css_classes(self, extra_classes=None):\n \n if hasattr(extra_classes, \"split\"):\n extra_classes = extra_classes.split()\n extra_classes = set(extra_classes or [])\n if self.errors and h", "d_id": 51287, "documentation": { "docstring": "\n Return a string of space-separated CSS classes for this field.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 100575, "commit_id": "bdbbad4d310fb606b6f412aa81e9f57ccd994e97", "repo": "faceswap", "path": "lib/gpu_stats/nvidia.py", "file_name": "nvidia.py", "fun_name": "_get_free_vram", "commit_message": "Refactor 
lib.gpu_stats (#1218)\n\n* inital gpu_stats refactor\r\n\r\n* Add dummy CPU Backend\r\n\r\n* Update Sphinx documentation", "code": "def _get_free_vram(self) -> List[float]:\n \n vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024)\n for handle in self._handles]\n self._log(\"debug\", f\"GPU VRAM free: {vram}\")\n return vram\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 14, "token_counts": 46, "n_ast_nodes": 79, "n_identifiers": 11, "random_cut": "def _get_free_vram(self) -> List[float]:\n \n vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024)\n for handle in self._handles]\n self._log(\"debug\", f\"GPU VRAM free: {vram}\")\n return vram\n", "d_id": 20039, "documentation": { "docstring": " Obtain the amount of VRAM that is available, in Megabytes, for each connected Nvidia\n GPU.\n\n Returns\n -------\n list\n List of `float`s containing the amount of VRAM available, in Megabytes, for each\n connected GPU as corresponding to the values in :attr:`_handles\n ", "n_words": 40, "vocab_size": 27, "n_whitespaces": 100, "language": "en" } }, { "id": 66996, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/projects/doctype/task/task.py", "file_name": "task.py", "fun_name": "get_project", "commit_message": "style: format code with black", "code": "def get_project(doctype, txt, searchfield, start, page_len, filters):\n\tfrom erpnext.controllers.queries import get_match_cond\n\n\tmeta = frappe.get_meta(doctype)\n\tsearchfields = meta.get_search_fields()\n\tsearch_columns = \", \" + \", \".join(searchfields) if searchfields else \"\"\n\tsearch_cond = \" or \" + \" or \".join(field + \" like %(txt)s\" for field in searchfields)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tsearch_columns=search_columns, search_condition=search_cond\n\t\t),\n\t\t{\n\t\t\t\"key\": searchfield,\n\t\t\t\"txt\": \"%\" + txt + \"%\",\n\t\t\t\"mcond\": get_match_cond(doctype),\n\t\t\t\"start\": start,\n\t\t\t\"page_len\": page_len,\n\t\t},\n\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 50, "n_words": 69, "vocab_size": 52, "complexity": 3, "nloc": 23, "token_counts": 119, "n_ast_nodes": 210, "n_identifiers": 25, "random_cut": "def get_project(doctype, txt, searchfield, start, page_len, filters):\n\tfrom erpnext.controllers.queries imp", "d_id": 14404, "documentation": { "docstring": " select name {search_columns} from `tabProject`\n\t\twhere %(key)s like %(txt)s\n\t\t\t%(mcond)s\n\t\t\t{search_condition}\n\t\torder by name\n\t\tlimit %(start)s, %(page_len)s", "n_words": 17, "vocab_size": 16, "n_whitespaces": 12, "language": "en" } }, { "id": 275258, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/nadam.py", "file_name": "nadam.py", "fun_name": "build", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def build(self, var_list):\n \n super().build(var_list)\n if getattr(self, \"_built\", False):\n return\n self._built = True\n self._momentums = []\n self._velocities = []\n self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype)\n # Keep a counter on how many times of _u_product has been computed to\n # avoid duplicated 
computations.\n self._u_product_counter = 1\n\n for var in var_list:\n self._momentums.append(\n self.add_variable_from_reference(\n model_variable=var, variable_name=\"m\"\n )\n )\n self._velocities.append(\n self.add_variable_from_reference(\n model_variable=var, variable_name=\"v\"\n )\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 289, "n_words": 59, "vocab_size": 48, "complexity": 3, "nloc": 20, "token_counts": 113, "n_ast_nodes": 182, "n_identifiers": 18, "random_cut": "def build(self, var_list):\n \n super().build(var_list)\n if getattr(self, \"_built\", False):\n return\n self._built = True\n self._momentums = []\n self._velocities = []\n self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype)\n # Keep a counter on how many times of _u_product has been", "d_id": 81352, "documentation": { "docstring": "Initialize optimizer variables.\n\n Nadam optimizer has 2 types of variables: momentums and velocities.\n\n Args:\n var_list: list of model variables to build Nadam variables on.\n ", "n_words": 24, "vocab_size": 20, "n_whitespaces": 54, "language": "en" } }, { "id": 61938, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "list_distinfo_files", "commit_message": "upd; format", "code": "def list_distinfo_files(self, absolute=False):\n \n record_path = os.path.join(self.path, 'installed-files.txt')\n if os.path.exists(record_path):\n skip = True\n with codecs.open(record_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if line == './':\n skip = False\n continue\n if not skip:\n p = os.path.normpath(os.path.join(self.path, line))\n if p.startswith(self.path):\n if absolute:\n yield p\n else:\n yield line\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 372, "n_words": 49, "vocab_size": 34, "complexity": 7, "nloc": 17, "token_counts": 118, "n_ast_nodes": 200, "n_identifiers": 18, "random_cut": "def list_distinfo_files(self, absolute=False):\n \n record_path = os.path.join(self.path, 'installed-files.txt')\n if os.path.exists(record_path):\n skip = True\n with codecs.open(record_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if line == './':\n skip = False\n continue\n if not skip:\n p = os.path.normpath(os.path.join(self.path, line))\n if p.startswith(self.path):\n if absolute:\n yield p\n ", "d_id": 12762, "documentation": { "docstring": "\n Iterates over the ``installed-files.txt`` entries and returns paths for\n each line if the path is pointing to a file located in the\n ``.egg-info`` directory or one of its subdirectories.\n\n :parameter absolute: If *absolute* is ``True``, each returned path is\n transformed into a local absolute path. 
Otherwise the\n raw value from ``installed-files.txt`` is returned.\n :type absolute: boolean\n :returns: iterator of paths\n ", "n_words": 60, "vocab_size": 47, "n_whitespaces": 160, "language": "en" } }, { "id": 13348, "commit_id": "bd8003508da0b35713361484f5801ebc818bd0c3", "repo": "jina", "path": "jina/parsers/orchestrate/base.py", "file_name": "base.py", "fun_name": "mixin_scalable_deployment_parser", "commit_message": "refactor: remove unnecessary parser args (#5328)\n\n* refactor: refactor deployment mixin and remove polling and shards for gateway\r\n\r\n* chore: rename executor to pod and move native and array type to worker args\r\n\r\n* refactor: make exit-on-exceptions just a worker arg\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestion\r\n\r\n* chore: move native parameter to deployment group\r\n\r\n* fix: fix pod init\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: fix shards and replicas in deployment\r\n\r\n* chore: disable gpu and volumes for gateway\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: volume and gpus are optional for container pods\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def mixin_scalable_deployment_parser(parser):\n \n gp = mixin_base_deployment_parser(parser, title='Scalable Deployment')\n\n gp.add_argument(\n '--polling',\n type=str,\n default=PollingType.ANY.name,\n help=,\n )\n\n gp.add_argument(\n '--shards',\n type=int,\n default=1,\n help='The number of shards in the deployment running at the same time. For more details check '\n 'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',\n )\n\n gp.add_argument(\n '--replicas',\n type=int,\n default=1,\n help='The number of replicas in the deployment',\n )\n\n gp.add_argument(\n '--native',\n action='store_true',\n default=False,\n help='If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.',\n )\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 217, "n_words": 68, "vocab_size": 52, "complexity": 1, "nloc": 37, "token_counts": 97, "n_ast_nodes": 162, "n_identifiers": 15, "random_cut": "def mixin_scalable_deployment_parser(parser):\n \n gp = mixin_base_deployment_parser(parser, title='Scalable Deployment')\n\n gp.add_argument(\n '--polling',\n type=str,\n default=PollingType.ANY.name,\n help=,\n )\n\n gp.add_argument(\n '--shards',\n type=int,\n default=1,\n help='The number of shards in the deployment running at the same time. 
For more details check '\n 'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',\n )\n\n gp.add_argument(\n '--replicas',\n type=int,\n default=1,\n help='The number of ", "d_id": 2610, "documentation": { "docstring": "Mixing in arguments required by a scalable deployment into the given parser.\n The deployment is scalable and can have shards, replicas and polling\n :param parser: the parser instance to which we add arguments\n \n The polling strategy of the Deployment and its endpoints (when `shards>1`).\n Can be defined for all endpoints of a Deployment or by endpoint.\n Define per Deployment:\n - ANY: only one (whoever is idle) Pod polls the message\n - ALL: all Pods poll the message (like a broadcast)\n Define per Endpoint:\n JSON dict, {endpoint: PollingType}\n {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}\n \n ", "n_words": 93, "vocab_size": 70, "n_whitespaces": 134, "language": "en" } }, { "id": 72114, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_privacy.py", "file_name": "test_privacy.py", "fun_name": "test_explorer_list_private", "commit_message": "Reformat with black", "code": "def test_explorer_list_private(self):\n \n response = self.client.get(\n reverse(\"wagtailadmin_explore\", args=(self.private_page.id,))\n )\n\n # Check the response\n self.assertEqual(response.status_code, 200)\n\n # Must have one privacy icon (next to the private child page)\n self.assertContains(\n response,\n ' 0: return scale * x`\n - `if x < 0: return scale * alpha * (exp(x) - 1)`\n\n where `alpha` and `scale` are pre-defined constants\n (`alpha=1.67326324` and `scale=1.05070098`).\n\n Basically, the SELU activation function multiplies `scale` (> 1) with the\n output of the `tf.keras.activations.elu` function to ensure a slope larger\n than one for positive inputs.\n\n The values of `alpha` and `scale` are\n chosen so that the mean and variance of the inputs are preserved\n between two consecutive layers as long as the weights are initialized\n correctly (see `tf.keras.initializers.LecunNormal` initializer)\n and the number of input units is \"large enough\"\n (see reference paper for more information).\n\n Example Usage:\n\n >>> num_classes = 10 # 10-class problem\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal',\n ... activation='selu'))\n >>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal',\n ... activation='selu'))\n >>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal',\n ... 
activation='selu'))\n >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))\n\n Args:\n x: A tensor or variable to compute the activation function for.\n\n Returns:\n The scaled exponential unit activation: `scale * elu(x, alpha)`.\n\n Notes:\n - To be used together with the\n `tf.keras.initializers.LecunNormal` initializer.\n - To be used together with the dropout variant\n `tf.keras.layers.AlphaDropout` (not regular dropout).\n\n References:\n - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)\n ", "n_words": 205, "vocab_size": 135, "n_whitespaces": 442, "language": "en" } }, { "id": 20369, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/formatters/latex.py", "file_name": "latex.py", "fun_name": "_find_safe_escape_tokens", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _find_safe_escape_tokens(self, text):\n \n for i, t, v in self._filter_to(\n self.lang.get_tokens_unprocessed(text),\n lambda t: t in Token.Comment or t in Token.String\n ):\n if t is None:\n for i2, t2, v2 in self._find_escape_tokens(v):\n yield i + i2, t2, v2\n else:\n yield i, None, v\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 155, "n_words": 41, "vocab_size": 29, "complexity": 5, "nloc": 10, "token_counts": 79, "n_ast_nodes": 121, "n_identifiers": 16, "random_cut": "def _find_safe_escape_tokens(self, text):\n \n for i, t, v in self._filter_to(\n self.lang.get_tokens_unprocessed(text),\n lambda t: t in Token.Comment or t in Token.String\n ):\n if t is None:\n for i2, t2, v", "d_id": 3348, "documentation": { "docstring": " find escape tokens that are not in strings or comments ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 320669, "commit_id": "36563450763868f12a2481ca636efccb2c7a43cc", "repo": "qutebrowser", "path": "tests/end2end/features/test_downloads_bdd.py", "file_name": "test_downloads_bdd.py", "fun_name": "set_up_fileselector", "commit_message": "test(downloads) wip test for external fileselect", "code": "def set_up_fileselector(quteproc, py_proc, kind, files, output_type):\n \n cmd, args = py_proc(r)\n args += files.split(' ')\n if output_type == \"a temporary file\":\n args += ['--file={}']\n fileselect_cmd = json.dumps([cmd, *args])\n quteproc.set_setting('fileselect.handler', 'external')\n quteproc.set_setting(f'fileselect.{kind}.command', fileselect_cmd)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 31, "vocab_size": 27, "complexity": 2, "nloc": 25, "token_counts": 71, "n_ast_nodes": 125, "n_identifiers": 13, "random_cut": "def set_up_fileselector(quteproc, py_proc, kind, files, output_type):\n \n cmd, args = py_proc(r)\n args += files.split(' ')\n if output_type == \"a 
temporary file\":\n args += ['--file={}']\n fileselect_cmd = json.dumps([cmd, *args])\n quteproc.set_setting('fileselect.handler', 'external')\n quteproc.set_setting(f'fileselect.{kind}.command', fileselect_cmd)\n", "d_id": 117264, "documentation": { "docstring": "Set up fileselect.xxx.command to select the file(s).\n import os\n import sys\n tmp_file = None\n for i, arg in enumerate(sys.argv):\n if arg.startswith('--file='):\n tmp_file = arg[len('--file='):]\n sys.argv.pop(i)\n break\n selected_files = sys.argv[1:]\n if tmp_file is None:\n for selected_file in selected_files:\n print(os.path.abspath(selected_file))\n else:\n with open(tmp_file, 'w') as f:\n for selected_file in selected_files:\n f.write(os.path.abspath(selected_file) + '\\n')\n ", "n_words": 51, "vocab_size": 39, "n_whitespaces": 230, "language": "en" } }, { "id": 53896, "commit_id": "dc0f9feb764c72620a68ca139eb56e43f6e5f068", "repo": "prefect", "path": "tests/test_task_runners.py", "file_name": "test_task_runners.py", "fun_name": "task_runner", "commit_message": "Add service marks to task runner tests", "code": "def task_runner(request):\n \n\n if not hasattr(request.param, \"_pytestfixturefunction\"):\n raise TypeError(\"Received invalid `task_runner` parameter. Expected fixture.\")\n\n yield request.getfixturevalue(request.param.__name__)\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 15, "vocab_size": 15, "complexity": 2, "nloc": 4, "token_counts": 33, "n_ast_nodes": 60, "n_identifiers": 7, "random_cut": "def task_runner(request):\n \n\n if not hasattr(r", "d_id": 10948, "documentation": { "docstring": "\n An indirect fixture that expects to receive a pytest fixture that yields a task\n runner.\n ", "n_words": 15, "vocab_size": 12, "n_whitespaces": 25, "language": "en" } }, { "id": 83054, "commit_id": "9e70a47f93ad422cadc9d26c656cc8c02e08805e", "repo": "zulip", "path": "zerver/tests/test_push_notifications.py", "file_name": "test_push_notifications.py", "fun_name": "test_get_apns_context", "commit_message": "test_push_notifications: Close event loops.\n\nFixes “ResourceWarning: unclosed event loop <_UnixSelectorEventLoop\nrunning=False closed=False debug=False>”.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_get_apns_context(self) -> None:\n \n import zerver.lib.push_notifications\n\n zerver.lib.push_notifications.get_apns_context.cache_clear()\n try:\n with self.settings(APNS_CERT_FILE=\"/foo.pem\"), mock.patch(\"aioapns.APNs\") as mock_apns:\n apns_context = get_apns_context()\n assert apns_context is not None\n try:\n self.assertEqual(mock_apns.return_value, apns_context.apns)\n finally:\n apns_context.loop.close()\n finally:\n # Reset the cache for `get_apns_context` so that we don't\n # leak changes to the rest of the world.\n zerver.lib.push_notifications.get_apns_context.cache_clear()\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 224, "n_words": 47, "vocab_size": 40, "complexity": 3, "nloc": 17, "token_counts": 92, "n_ast_nodes": 161, "n_identifiers": 18, "random_cut": "def test_get_apns_context(self) -> None:\n \n import zerver.lib.push_notifications\n\n zerver.lib.push_notifications.get_apns_context.cache_clear()\n try:\n with self.settings(APNS_CERT_FILE=\"/foo.pem\"), mock.patch(\"aioapns.APNs\") as mock_apns:\n apns_context = get_apns_context()\n assert apns_context is not None\n try:\n 
self.assertEqual(mock_apns.return_value, apns_context.apns)\n finally:\n apns_context.loop.close()\n finally:\n ", "d_id": 17588, "documentation": { "docstring": "This test is pretty hacky, and needs to carefully reset the state\n it modifies in order to avoid leaking state that can lead to\n nondeterministic results for other tests.\n ", "n_words": 29, "vocab_size": 26, "n_whitespaces": 50, "language": "en" } }, { "id": 101376, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "scripts/convert.py", "file_name": "convert.py", "fun_name": "_validate", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def _validate(self) -> None:\n \n if (self._args.writer == \"ffmpeg\" and\n not self._images.is_video and\n self._args.reference_video is None):\n raise FaceswapError(\"Output as video selected, but using frames as input. You must \"\n \"provide a reference video ('-ref', '--reference-video').\")\n\n if (self._args.on_the_fly and\n self._args.mask_type not in (\"none\", \"extended\", \"components\")):\n logger.warning(\"You have selected an incompatible mask type ('%s') for On-The-Fly \"\n \"conversion. Switching to 'extended'\", self._args.mask_type)\n self._args.mask_type = \"extended\"\n\n if (not self._args.on_the_fly and\n self._args.mask_type not in (\"none\", \"predicted\") and\n not self._alignments.mask_is_valid(self._args.mask_type)):\n msg = (f\"You have selected the Mask Type `{self._args.mask_type}` but at least one \"\n \"face does not have this mask stored in the Alignments File.\\nYou should \"\n \"generate the required masks with the Mask Tool or set the Mask Type option to \"\n \"an existing Mask Type.\\nA summary of existing masks is as follows:\\nTotal \"\n f\"faces: {self._alignments.faces_count}, \"\n f\"Masks: {self._alignments.mask_summary}\")\n raise FaceswapError(msg)\n\n if self._args.mask_type == \"predicted\" and not self._predictor.has_predicted_mask:\n available_masks = [k for k, v in self._alignments.mask_summary.items()\n if k != \"none\" and v == self._alignments.faces_count]\n if not available_masks:\n msg = (\"Predicted Mask selected, but the model was not trained with a mask and no \"\n \"masks are stored in the Alignments File.\\nYou should generate the \"\n \"required masks with the Mask Tool or set the Mask Type to `none`.\")\n raise FaceswapError(msg)\n mask_type = available_masks[0]\n logger.warning(\"Predicted Mask selected, but the model was not trained with a \"\n \"mask. Selecting first available mask: '%s'\", mask_type)\n self._args.mask_type = mask_type\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 727, "n_words": 230, "vocab_size": 125, "complexity": 15, "nloc": 49, "token_counts": 224, "n_ast_nodes": 423, "n_identifiers": 23, "random_cut": "def _validate(self) -> None:\n \n if (self._args.writer == \"ffmpeg\" and\n not self._images.is_video and\n self._args.reference_video is None):\n raise FaceswapError(\"Output as video selected, but using frames as input. 
You must \"\n \"provide a reference video ('-ref', '--reference-video').\")\n\n if (self._args.on_the_fly and\n self._args.mask_type not in (\"none\", \"extended\", \"components\")):\n logger.warning(\"You have selected an incompatible mask type ('%s') for On-The-Fly \"\n \"conversion. Switching to 'extended'\", self._args.mask_type)\n self._args.mask_type = \"extended\"\n\n if (not self._args.on_the_fly and\n self._args.mask_type not in (\"none\", \"predicted\") and\n not self._alignments.mask_is_valid(self._args.mask_type)):\n msg = (f\"You have selected the Mask Type `{self._args.mask_type}` but at least one \"\n \"face does not have this mask stored in the A", "d_id": 20791, "documentation": { "docstring": " Validate the Command Line Options.\n\n Ensure that certain cli selections are valid and won't result in an error. Checks:\n * If frames have been passed in with video output, ensure user supplies reference\n video.\n * If \"on-the-fly\" and a Neural Network mask is selected, warn and switch to 'extended'\n * If a mask-type is selected, ensure it exists in the alignments file.\n * If a predicted mask-type is selected, ensure model has been trained with a mask\n otherwise attempt to select first available masks, otherwise raise error.\n\n Raises\n ------\n FaceswapError\n If an invalid selection has been found.\n\n ", "n_words": 97, "vocab_size": 66, "n_whitespaces": 210, "language": "en" } }, { "id": 90218, "commit_id": "17644550024d6a2eb01356ee48ec0d3ef95c043d", "repo": "sentry", "path": "src/sentry/api/base.py", "file_name": "base.py", "fun_name": "get_authenticators", "commit_message": "ref(hybrid-cloud): Additional test annotations: auth_index (#42425)\n\nExtends the hybrid cloud auth service to be usable in many more places (\r\nTY @corps)\r\nAnnotate 30+ more api endpoint tests\r\nCo-authored-by: Mike Ihbe \r\nCo-authored-by: Zachary Collins \r\nCo-authored-by: Zach Collins ", "code": "def get_authenticators(self) -> List[BaseAuthentication]:\n \n\n # TODO: Increase test coverage and get this working for monolith mode.\n if SiloMode.get_current_mode() == SiloMode.MONOLITH:\n return super().get_authenticators()\n\n last_api_authenticator = ApiAuthentication([])\n result: List[BaseAuthentication] = []\n for authenticator_cls in self.authentication_classes:\n auth_type = ApiAuthenticatorType.from_authenticator(authenticator_cls)\n if auth_type:\n last_api_authenticator.types.append(auth_type)\n else:\n if last_api_authenticator.types:\n result.append(last_api_authenticator)\n last_api_authenticator = ApiAuthentication([])\n result.append(authenticator_cls())\n\n if last_api_authenticator.types:\n result.append(last_api_authenticator)\n return result\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 246, "n_words": 52, "vocab_size": 40, "complexity": 6, "nloc": 21, "token_counts": 113, "n_ast_nodes": 189, "n_identifiers": 18, "random_cut": "def get_authenticators(self) -> List[BaseAuthentication]:\n ", "d_id": 18646, "documentation": { "docstring": "\n Instantiates and returns the list of authenticators that this view can use.\n Aggregates together authenticators that can be supported using HybridCloud.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 43, "language": "en" } }, { "id": 177017, "commit_id": "b2f91c34a23058dd70b41784af0d87890216026a", "repo": "networkx", "path": "networkx/algorithms/tests/test_lowest_common_ancestors.py", "file_name": 
"test_lowest_common_ancestors.py", "fun_name": "test_naive_lowest_common_ancestor2", "commit_message": "Naive lowest common ancestor implementation (#5736)\n\n* Add naive lca methods\r\n\r\n* Naive algorithm implementation for LCA\r\n\r\n* Modify naive lca functions\r\n\r\n* Correct parameters of nx.ancestors\r\n\r\n* Update lowest_common_ancestors.py\r\n\r\n* Parametrize tests\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Yield instead of append\r\n\r\n* Tests for naive lca\r\n\r\n* Correct test cases for naive lca algorithms\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mridul Seth \r\n\r\n* Fix function name -when calling\r\n\r\n* Make requested changes\r\n\r\n* Inlining _get_a_lowest_common_ancestor\r\n\r\nCo-authored-by: dtuncturk \r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Mridul Seth ", "code": "def test_naive_lowest_common_ancestor2(self):\n \n G = nx.DiGraph()\n G.add_edge(0, 1)\n G.add_edge(2, 0)\n G.add_edge(2, 3)\n G.add_edge(4, 0)\n G.add_edge(5, 2)\n\n assert naive_lca(G, 1, 3) == 2\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 77, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 64, "n_ast_nodes": 100, "n_identifiers": 7, "random_cut": "def test_naive_lowest_common_ancestor2(self):\n \n G = nx.DiGraph()\n G.add_edge(0, 1)\n G.add_edge(2, 0)\n G.add_edge(2, 3)\n G.add_edge(4, 0)\n G.add_edge(5, 2)\n\n assert naive_lca(G, 1, 3) == 2\n", "d_id": 42229, "documentation": { "docstring": "Test that the one-pair function works for issue #4942.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 205388, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/utils.py", "file_name": "utils.py", "fun_name": "get_references", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_references(state, model_tuple, field_tuple=()):\n \n for state_model_tuple, model_state in state.models.items():\n for name, field in model_state.fields.items():\n reference = field_references(\n state_model_tuple, field, model_tuple, *field_tuple\n )\n if reference:\n yield model_state, name, field, reference\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 105, "n_words": 29, "vocab_size": 22, "complexity": 4, "nloc": 15, "token_counts": 63, "n_ast_nodes": 96, "n_identifiers": 13, "random_cut": "def get_references(state, model_tuple, field_tuple=()):\n \n for state_model_tuple, model_state in state.models.items():\n for name, field in model_state.fields.items():\n reference = field_references(\n state_model_tuple, field, model_tuple, *field_tuple\n )\n if reference:\n yie", "d_id": 51111, "documentation": { "docstring": "\n Generator of (model_state, name, field, reference) referencing\n provided context.\n\n If field_tuple is provided only references to this particular field of\n model_tuple will be generated.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 40, "language": "en" } }, { "id": 294202, "commit_id": "dc8e87a6f70439f9830d93d03c53d6ff098a4861", "repo": "core", "path": "tests/components/alexa/test_smart_home.py", "file_name": "test_smart_home.py", "fun_name": "test_media_player_eq_bands_not_supported", "commit_message": "Exclude hidden entities from alexa (#68555)", "code": "async def 
test_media_player_eq_bands_not_supported(hass):\n \n device = (\n \"media_player.test_bands\",\n \"on\",\n {\n \"friendly_name\": \"Test media player\",\n \"supported_features\": SUPPORT_SELECT_SOUND_MODE,\n \"sound_mode\": \"tv\",\n \"sound_mode_list\": [\"movie\", \"music\", \"night\", \"sport\", \"tv\", \"rocknroll\"],\n },\n )\n await discovery_test(device, hass)\n\n context = Context()\n\n # Test for SetBands Error\n request = get_new_request(\n \"Alexa.EqualizerController\", \"SetBands\", \"media_player#test_bands\"\n )\n request[\"directive\"][\"payload\"] = {\"bands\": [{\"name\": \"BASS\", \"value\": -2}]}\n msg = await smart_home.async_handle_message(\n hass, get_default_config(hass), request, context\n )\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n assert msg[\"header\"][\"name\"] == \"ErrorResponse\"\n assert msg[\"header\"][\"namespace\"] == \"Alexa\"\n assert msg[\"payload\"][\"type\"] == \"INVALID_DIRECTIVE\"\n\n # Test for AdjustBands Error\n request = get_new_request(\n \"Alexa.EqualizerController\", \"AdjustBands\", \"media_player#test_bands\"\n )\n request[\"directive\"][\"payload\"] = {\n \"bands\": [{\"name\": \"BASS\", \"levelDelta\": 3, \"levelDirection\": \"UP\"}]\n }\n msg = await smart_home.async_handle_message(\n hass, get_default_config(hass), request, context\n )\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n assert msg[\"header\"][\"name\"] == \"ErrorResponse\"\n assert msg[\"header\"][\"namespace\"] == \"Alexa\"\n assert msg[\"payload\"][\"type\"] == \"INVALID_DIRECTIVE\"\n\n # Test for ResetBands Error\n request = get_new_request(\n \"Alexa.EqualizerController\", \"ResetBands\", \"media_player#test_bands\"\n )\n request[\"directive\"][\"payload\"] = {\n \"bands\": [{\"name\": \"BASS\", \"levelDelta\": 3, \"levelDirection\": \"UP\"}]\n }\n msg = await smart_home.async_handle_message(\n hass, get_default_config(hass), request, context\n )\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n assert msg[\"header\"][\"name\"] == \"ErrorResponse\"\n assert msg[\"header\"][\"namespace\"] == \"Alexa\"\n assert msg[\"payload\"][\"type\"] == \"INVALID_DIRECTIVE\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 429, "n_words": 181, "vocab_size": 72, "complexity": 1, "nloc": 53, "token_counts": 339, "n_ast_nodes": 643, "n_identifiers": 13, "random_cut": "async def test_media_player_eq_bands_not_supported(hass):\n \n device = (\n \"media_player.test_bands\",\n \"on\",\n {\n \"friendly_name\": \"Test media player\",\n \"supported_features\": SUPPORT_SELECT_SOUND_MODE,\n \"sound_mode\": \"tv\",\n \"sound_mode_list\": [\"movie\", \"music\", \"night\", \"sport\", \"tv\", \"rocknroll\"],\n },\n )\n await discovery_test(device, hass)\n\n context = Context()\n\n # Test for SetBands Error\n request = get_new_request(\n \"Alexa.EqualizerController\", \"SetBands\", \"media_player#test_bands\"\n )\n request[\"directive\"][\"payload\"] = {\"bands\": [{\"name\": \"BASS\", \"value\": -2}]}\n msg = await smart_home.async_handle_message(\n hass, get_default_config(hass), request, context\n )\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n assert msg[\"header\"][\"name\"] == \"ErrorResponse\"\n assert msg[\"header\"][\"namespace\"] == \"Alexa\"\n assert msg[\"payload\"][\"type\"] == \"INVALID_DIRECTIVE\"\n\n # Test for AdjustBands Error\n request = get_new_request(\n \"Alexa.EqualizerController\", \"AdjustBands\", \"media_player#test_bands\"\n )\n 
request[\"directive\"][\"payload\"] = {\n \"bands\": [{\"name\": \"BASS\", \"levelDelta\": 3, \"levelDirection\": \"UP\"}]\n }\n msg = await smart_home.async_handle_message(\n hass, get_default_config(hass), request, context\n )\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n assert msg[\"header\"][\"name\"] == \"ErrorResponse\"\n assert msg[\"header\"][\"namespace\"] == \"Alexa\"\n assert msg[\"payload\"][\"type\"] == \"INVALID_DIRECTIVE\"\n\n # Test for ResetBands Error\n request = get_new_request(\n \"Alexa.EqualizerController\", \"ResetBands\", \"media_player#test_bands\"\n )\n request[\"directive\"][\"payload\"] = {\n \"bands\": [{\"name\": \"BASS\", \"levelDelta\": 3, \"levelDirection\": \"UP\"}]\n }\n msg = await smart_home.async_handle_message(\n hass, get_default_config(hass), request, context\n )\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n assert msg[\"header\"][\"name\"] == \"ErrorResponse\"\n assert msg[\"header\"][\"namespace\"] == \"Alexa\"\n assert msg[\"payload\"][\"type\"] == \"INVALID_DIRECTIVE\"\n\n", "d_id": 93241, "documentation": { "docstring": "Test EqualizerController bands directive not supported.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 155724, "commit_id": "510bbc380531cbf56a409f1ae68e6fd84a9599e6", "repo": "dask", "path": "dask/array/linalg.py", "file_name": "linalg.py", "fun_name": "lstsq", "commit_message": "Update `pre-commit` version (#8691)", "code": "def lstsq(a, b):\n \n q, r = qr(a)\n x = solve_triangular(r, q.T.conj().dot(b))\n residuals = b - a.dot(x)\n residuals = abs(residuals**2).sum(axis=0, keepdims=b.ndim == 1)\n\n token = tokenize(a, b)\n\n # r must be a triangular with single block\n\n # rank\n rname = \"lstsq-rank-\" + token\n rdsk = {(rname,): (np.linalg.matrix_rank, (r.name, 0, 0))}\n graph = HighLevelGraph.from_collections(rname, rdsk, dependencies=[r])\n # rank must be an integer\n rank = Array(graph, rname, shape=(), chunks=(), dtype=int)\n\n # singular\n sname = \"lstsq-singular-\" + token\n rt = r.T.conj()\n sdsk = {\n (sname, 0): (\n _reverse,\n (np.sqrt, (np.linalg.eigvalsh, (np.dot, (rt.name, 0, 0), (r.name, 0, 0)))),\n )\n }\n graph = HighLevelGraph.from_collections(sname, sdsk, dependencies=[rt, r])\n meta = meta_from_array(residuals, 1)\n s = Array(graph, sname, shape=(r.shape[0],), chunks=r.shape[0], meta=meta)\n\n return x, residuals, rank, s\n\n\n@derived_from(np.linalg)", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@derived_from(np.linalg)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 219, "n_words": 118, "vocab_size": 85, "complexity": 1, "nloc": 22, "token_counts": 280, "n_ast_nodes": 425, "n_identifiers": 45, "random_cut": "def lstsq(a, b):\n \n q, r = qr(a)\n x = solve_triangular(r, q.T.conj().dot(b))\n residuals = b - a.dot(x)\n residuals = abs(residuals**2).sum(axis=0, keepdims=b.ndim == 1)\n\n token = tokenize(a, b)\n\n # r must be a triangular with single block\n\n # rank\n rname = \"lstsq-rank-\" + token\n rdsk = {(rname,): (np.linalg.matrix_rank, (r.name, 0, 0))}\n graph = HighLevelGraph.from_collections(rname, rdsk, dependencies=[r])\n # rank must be an integer\n rank = Array(graph, rname, shape=(), chunks=(), dtype=int)\n\n # singular\n sname = \"lstsq-singular-\" + token\n rt = r.T.conj()\n sdsk = {\n (sname, 0): (\n _reverse,\n ", "d_id": 36455, "documentation": { "docstring": "\n Return the least-squares solution to a linear matrix equation using\n QR decomposition.\n\n Solves the equation `a x = b` by computing 
a vector `x` that\n minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may\n be under-, well-, or over- determined (i.e., the number of\n linearly independent rows of `a` can be less than, equal to, or\n greater than its number of linearly independent columns). If `a`\n is square and of full rank, then `x` (but for round-off error) is\n the \"exact\" solution of the equation.\n\n Parameters\n ----------\n a : (M, N) array_like\n \"Coefficient\" matrix.\n b : {(M,), (M, K)} array_like\n Ordinate or \"dependent variable\" values. If `b` is two-dimensional,\n the least-squares solution is calculated for each of the `K` columns\n of `b`.\n\n Returns\n -------\n x : {(N,), (N, K)} Array\n Least-squares solution. If `b` is two-dimensional,\n the solutions are in the `K` columns of `x`.\n residuals : {(1,), (K,)} Array\n Sums of residuals; squared Euclidean 2-norm for each column in\n ``b - a*x``.\n If `b` is 1-dimensional, this is a (1,) shape array.\n Otherwise the shape is (K,).\n rank : Array\n Rank of matrix `a`.\n s : (min(M, N),) Array\n Singular values of `a`.\n ", "n_words": 198, "vocab_size": 122, "n_whitespaces": 345, "language": "en" } }, { "id": 266908, "commit_id": "7cb581ed2cb1d4591d094df37a40c9155ea446da", "repo": "ansible", "path": "test/lib/ansible_test/_internal/docker_util.py", "file_name": "docker_util.py", "fun_name": "docker_environment", "commit_message": "Support podman-remote in ansible-test (#75753)", "code": "def docker_environment(): # type: () -> t.Dict[str, str]\n \n env = common_environment()\n env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_') or key.startswith('CONTAINER_')))\n return env\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 36, "n_words": 23, "vocab_size": 22, "complexity": 4, "nloc": 4, "token_counts": 50, "n_ast_nodes": 86, "n_identifiers": 9, "random_cut": "def docker_environment(): # type: () -> t.Dict[str, str]\n \n env = common_environment()\n env.update(dict((key, os.environ[key]", "d_id": 78652, "documentation": { "docstring": "Return a dictionary of docker related environment variables found in the current environment.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 109177, "commit_id": "5d3124dbc826a019bb55b4229312a033912331ff", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_colorbar.py", "file_name": "test_colorbar.py", "fun_name": "test_remove_from_figure", "commit_message": "warning when scatter plot color settings discarded (#23516)\n\n* Warning when scatter plot color settings discarded\r\n\r\n* Update lib/matplotlib/axes/_axes.py\r\n\r\nCo-authored-by: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com>\r\n\r\n* Wrapped 23516-MS.rst lines at 80 characters\r\n\r\n* Fixed tests to look for proper warning message\r\n\r\n* Update doc/api/next_api_changes/behavior/23516-MS.rst\r\n\r\nCo-authored-by: Elliott Sales de Andrade \r\n\r\nCo-authored-by: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com>\r\nCo-authored-by: Elliott Sales de Andrade ", "code": "def test_remove_from_figure(use_gridspec):\n \n fig, ax = plt.subplots()\n sc = ax.scatter([1, 2], [3, 4])\n sc.set_array(np.array([5, 6]))\n pre_position = ax.get_position()\n cb = fig.colorbar(sc, use_gridspec=use_gridspec)\n fig.subplots_adjust()\n cb.remove()\n fig.subplots_adjust()\n post_position = ax.get_position()\n assert (pre_position.get_points() == 
post_position.get_points()).all()\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 31, "vocab_size": 25, "complexity": 1, "nloc": 11, "token_counts": 107, "n_ast_nodes": 175, "n_identifiers": 20, "random_cut": "def test_remove_from_figure(use_gridspec):\n \n fig, ax = plt.subplots()\n sc = ax.scatter([1, 2], [3, 4])\n", "d_id": 23463, "documentation": { "docstring": "\n Test `remove` with the specified ``use_gridspec`` setting\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 198499, "commit_id": "9d58006fc0a23afcba38f641c9472917c436428a", "repo": "sympy", "path": "sympy/multipledispatch/utils.py", "file_name": "utils.py", "fun_name": "groupby", "commit_message": "Code cleanup", "code": "def groupby(func, seq):\n \n\n d = {}\n for item in seq:\n key = func(item)\n if key not in d:\n d[key] = []\n d[key].append(item)\n return d\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 24, "vocab_size": 19, "complexity": 3, "nloc": 8, "token_counts": 47, "n_ast_nodes": 76, "n_identifiers": 7, "random_cut": "def groupby(func, seq):\n \n\n d = {}\n for item in seq:\n key = func(item)\n if key not in d:\n d[key] = []\n ", "d_id": 48962, "documentation": { "docstring": " Group a collection by a key function\n\n >>> from sympy.multipledispatch.utils import groupby\n >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']\n >>> groupby(len, names) # doctest: +SKIP\n {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}\n\n >>> iseven = lambda x: x % 2 == 0\n >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP\n {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}\n\n See Also:\n ``countby``\n ", "n_words": 72, "vocab_size": 56, "n_whitespaces": 109, "language": "en" } }, { "id": 144863, "commit_id": "35a157948efa7ba1adf1d1507c2af1d6d84a7db7", "repo": "ray", "path": "python/ray/data/dataset.py", "file_name": "dataset.py", "fun_name": "input_files", "commit_message": "Lay the groundwork for lazy dataset optimization (no behavior changes) (#22233)\n\nThis PR refactors Dataset execution to enable lazy mode in the future, which can reduce memory usage in large-scale ingest pipelines. There should be no behavior changes in this PR. 
Many of the optimizations are also punted for future work.", "code": "def input_files(self) -> List[str]:\n \n metadata = self._plan.execute().get_metadata()\n files = set()\n for m in metadata:\n for f in m.input_files:\n files.add(f)\n return list(files)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 82, "n_words": 21, "vocab_size": 18, "complexity": 3, "nloc": 15, "token_counts": 52, "n_ast_nodes": 86, "n_identifiers": 14, "random_cut": "def input_files(self) -> List[str]:\n \n metadata = self._plan.execute().get_metadata()\n files = set()\n for m in metadata:\n for f in m.input_files:\n file", "d_id": 33324, "documentation": { "docstring": "Return the list of input files for the dataset.\n\n Time complexity: O(num input files)\n\n Returns:\n The list of input files used to create the dataset, or an empty\n list if the input files is not known.\n ", "n_words": 36, "vocab_size": 25, "n_whitespaces": 79, "language": "en" } }, { "id": 133019, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/collective/collective_group/nccl_util.py", "file_name": "nccl_util.py", "fun_name": "get_nccl_reduce_op", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_nccl_reduce_op(reduce_op):\n \n if reduce_op not in NCCL_REDUCE_OP_MAP:\n raise RuntimeError(\"NCCL does not support reduce op: '{}'.\".format(reduce_op))\n return NCCL_REDUCE_OP_MAP[reduce_op]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 33, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 47, "n_identifiers": 5, "random_cut": "def get_nccl_reduce_op(reduce_op):\n \n if reduce_op not in NCCL_REDUCE_OP_MAP:\n ", "d_id": 29926, "documentation": { "docstring": "Map the reduce op to NCCL reduce op type.\n\n Args:\n reduce_op (ReduceOp): ReduceOp Enum (SUM/PRODUCT/MIN/MAX).\n Returns:\n (nccl.ncclRedOp_t): the mapped NCCL reduce op.\n ", "n_words": 22, "vocab_size": 17, "n_whitespaces": 45, "language": "en" } }, { "id": 101224, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "lib/align/detected_face.py", "file_name": "detected_face.py", "fun_name": "aligned", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def aligned(self) -> AlignedFace:\n \n assert self._aligned is not None\n return self._aligned\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 32, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 32, "n_identifiers": 4, "random_cut": "def aligned(self) -> AlignedFace:\n \n assert self._aligned is not None\n return self._aligned\n", "d_id": 20644, "documentation": { "docstring": " The aligned face connected to this detected face. 
", "n_words": 8, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 45465, "commit_id": "69f6f9e01b6df76c3c8fa266d460324163957887", "repo": "airflow", "path": "airflow/migrations/versions/64a7d6477aae_fix_description_field_in_connection_to_.py", "file_name": "64a7d6477aae_fix_description_field_in_connection_to_.py", "fun_name": "upgrade", "commit_message": "Autogenerate migration reference doc (#21601)\n\n* document airflow version in each alembic migration module and use this to autogen the doc\r\n* update each migration module to have the same description used in migration ref (so it can be used in autogen)", "code": "def upgrade():\n \n conn = op.get_bind()\n if conn.dialect.name == \"sqlite\":\n # in sqlite TEXT and STRING column types are the same\n return\n if conn.dialect.name == \"mysql\":\n op.alter_column(\n 'connection',\n 'description',\n existing_type=sa.String(length=5000),\n type_=sa.Text(length=5000),\n existing_nullable=True,\n )\n else:\n # postgres does not allow size modifier for text type\n op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text())\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 159, "n_words": 47, "vocab_size": 41, "complexity": 3, "nloc": 14, "token_counts": 95, "n_ast_nodes": 162, "n_identifiers": 14, "random_cut": "def upgrade():\n \n conn = op.get_bind()\n if conn.dialect.name == \"sqlite\":\n # in sqlite TEXT and STRING column types are the same\n", "d_id": 8592, "documentation": { "docstring": "Apply Fix description field in ``connection`` to be ``text``", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 60322, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/test/test_net.py", "file_name": "test_net.py", "fun_name": "test_memory", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def test_memory(self):\n \n\n params = sum(map(list, six.itervalues(self.net.params)), [])\n blobs = self.net.blobs.values()\n del self.net\n\n # now sum everything (forcing all memory to be read)\n total = 0\n for p in params:\n total += p.data.sum() + p.diff.sum()\n for bl in blobs:\n total += bl.data.sum() + bl.diff.sum()\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 121, "n_words": 43, "vocab_size": 35, "complexity": 3, "nloc": 9, "token_counts": 91, "n_ast_nodes": 148, "n_identifiers": 16, "random_cut": "def test_memory(self):\n \n\n params = sum(map(list, six.itervalues(self.net.params)), [])\n blobs = self.net.blobs.values()\n del self.net\n\n # now sum every", "d_id": 12085, "documentation": { "docstring": "Check that holding onto blob data beyond the life of a Net is OK", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 56927, "commit_id": "336eca7839fccbcbdb77179f352f926da8b1fa15", "repo": "prefect", "path": "tests/test_flows.py", "file_name": "test_flows.py", "fun_name": "test_timeout_stops_execution_in_sync_subflows", "commit_message": "Ensure flows are called in an interruptible thread (PrefectHQ/orion#2174)\n\n* Ensure flows are called in an interruptible thread\r\n\r\n* Set higher runtime limit in `test_timeout_stops_execution_in_sync_subflows`", "code": "async def 
test_timeout_stops_execution_in_sync_subflows(self, tmp_path):\n \n canary_file = tmp_path / \"canary\"\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 14, "token_counts": 72, "n_ast_nodes": 26, "n_identifiers": 4, "random_cut": "async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path):\n \n canary_file = tmp_path / \"canary\"\n", "d_id": 11587, "documentation": { "docstring": "\n Sync flow runs can be cancelled after a timeout once a task is called\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 65649, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "get_income_account", "commit_message": "style: format code with black", "code": "def get_income_account(doctype, txt, searchfield, start, page_len, filters):\n\tfrom erpnext.controllers.queries import get_match_cond\n\n\t# income account can be any Credit account,\n\t# but can also be a Asset account with account_type='Income Account' in special circumstances.\n\t# Hence the first condition is an \"OR\"\n\tif not filters:\n\t\tfilters = {}\n\n\tcondition = \"\"\n\tif filters.get(\"company\"):\n\t\tcondition += \"and tabAccount.company = %(company)s\"\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tcondition=condition, match_condition=get_match_cond(doctype), key=searchfield\n\t\t),\n\t\t{\"txt\": \"%\" + txt + \"%\", \"company\": filters.get(\"company\", \"\")},\n\t)\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 59, "n_words": 77, "vocab_size": 66, "complexity": 3, "nloc": 19, "token_counts": 94, "n_ast_nodes": 177, "n_identifiers": 21, "random_cut": "def get_income_account(doctype, txt, searchfield, start, page_len, filters):\n\tfrom erpnext.controllers.queries import get_match_cond\n\n\t# income account can be any Credit account,\n\t# but can also be a Asset account with account_type='Income Account' in special circumstances.\n\t# Hence the first condition is an \"OR\"\n\tif not filter", "d_id": 13970, "documentation": { "docstring": "select tabAccount.name from `tabAccount`\n\t\t\twhere (tabAccount.report_type = \"Profit and Loss\"\n\t\t\t\t\tor tabAccount.account_type in (\"Income Account\", \"Temporary\"))\n\t\t\t\tand tabAccount.is_group=0\n\t\t\t\tand tabAccount.`{key}` LIKE %(txt)s\n\t\t\t\t{condition} {match_condition}\n\t\t\torder by idx desc, name", "n_words": 29, "vocab_size": 27, "n_whitespaces": 22, "language": "en" } }, { "id": 209810, "commit_id": "a2b7a28faff1db058dd22ce097a268e0ad5d1d33", "repo": "scapy", "path": "scapy/arch/windows/__init__.py", "file_name": "__init__.py", "fun_name": "win_find_exe", "commit_message": "[Hinty] Core typing: windows (#3684)\n\n* Core typing: windows\r\n\r\nCo-authored-by: Pierre ", "code": "def win_find_exe(filename, installsubdir=None, env=\"ProgramFiles\"):\n # type: (str, Optional[Any], str) -> str\n \n fns = [filename] if filename.endswith(\".exe\") else [filename + \".exe\", filename] # noqa: E501\n for fn in fns:\n try:\n if installsubdir is None:\n path = _where(fn)\n else:\n path = _where(fn, dirs=[os.path.join(os.environ[env], 
installsubdir)]) # noqa: E501\n except IOError:\n path = None\n else:\n break\n return path or \"\"\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 168, "n_words": 56, "vocab_size": 44, "complexity": 6, "nloc": 13, "token_counts": 93, "n_ast_nodes": 156, "n_identifiers": 14, "random_cut": "def win_find_exe(filename, installsubdir=None, env=\"ProgramFiles\"):\n # type: (str, Optional[Any], ", "d_id": 52796, "documentation": { "docstring": "Find executable in current dir, system path or in the\n given ProgramFiles subdir, and retuen its absolute path.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 24, "language": "en" } }, { "id": 74294, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_page_model.py", "file_name": "test_page_model.py", "fun_name": "test_custom_page_queryset", "commit_message": "Reformat with black", "code": "def test_custom_page_queryset(self):\n \n self.assertIs(type(CustomManagerPage.objects.all()), CustomPageQuerySet)\n self.assertIs(type(CustomManagerPage.objects.about_spam()), CustomPageQuerySet)\n self.assertIs(\n type(CustomManagerPage.objects.all().about_spam()), CustomPageQuerySet\n )\n self.assertIs(\n type(CustomManagerPage.objects.about_spam().all()), CustomPageQuerySet\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 85, "n_words": 14, "vocab_size": 10, "complexity": 1, "nloc": 9, "token_counts": 82, "n_ast_nodes": 135, "n_identifiers": 9, "random_cut": "def test_custom_page_queryset(self):\n \n self.assertIs(type(CustomManagerPage.objects.all()), CustomPageQuerySet)\n ", "d_id": 16245, "documentation": { "docstring": "\n Managers that are constructed from a custom PageQuerySet\n (via PageManager.from_queryset(CustomPageQuerySet)) should return\n querysets of that type\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 45, "language": "en" } }, { "id": 20048, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distro.py", "file_name": "distro.py", "fun_name": "distro_release_info", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def distro_release_info(self):\n # type: () -> Dict[str, str]\n \n return self._distro_release_info\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 20, "n_identifiers": 3, "random_cut": "def distro_release_info(self):\n # type: () -> Dict[s", "d_id": 3197, "documentation": { "docstring": "\n Return a dictionary containing key-value pairs for the information\n items from the distro release file data source of the OS\n distribution.\n\n For details, see 
:func:`distro.distro_release_info`.\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 61, "language": "en" } }, { "id": 20247, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/windows.py", "file_name": "windows.py", "fun_name": "user_documents_dir", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def user_documents_dir(self) -> str:\n \n return os.path.normpath(get_win_folder(\"CSIDL_PERSONAL\"))\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 5, "token_counts": 20, "n_ast_nodes": 38, "n_identifiers": 7, "random_cut": "def user_documents_dir(self) -> str:\n \n return os.path.normpath(get", "d_id": 3297, "documentation": { "docstring": "\n :return: documents directory tied to the user e.g. ``%USERPROFILE%\\\\Documents``\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 142266, "commit_id": "93aae48b80db80f7e9f922eaabedead0d15ee01c", "repo": "ray", "path": "python/ray/data/_internal/push_based_shuffle.py", "file_name": "push_based_shuffle.py", "fun_name": "round_robin_reduce_idx_iterator", "commit_message": "[dataset] Pipeline task submission during reduce stage in push-based shuffle (#25795)\n\nReduce stage in push-based shuffle fails to complete at 100k output partitions or more. This is likely because of driver or raylet load from having too many tasks in flight at once.\r\n\r\nWe can fix this from ray core too, but for now, this PR adds pipelining for the reduce stage, to limit the total number of reduce tasks in flight at the same time. This is currently set to 2 * available parallelism in the cluster. We have to pick which reduce tasks to submit carefully since these are pinned to specific nodes. 
The PR does this by assigning tasks round-robin according to the corresponding merge task (which get spread throughout the cluster).\r\n\r\nIn addition, this PR refactors the map, merge, and reduce stages to use a common pipelined iterator pattern, since they all have a similar pattern of submitting a round of tasks at a time, then waiting for a previous round to finish before submitting more.\r\nRelated issue number\r\n\r\nCloses #25412.", "code": "def round_robin_reduce_idx_iterator(self):\n \n idx = 0\n round_idx = 0\n while idx < self.output_num_blocks:\n for merge_idx in range(self.num_merge_tasks_per_round):\n if merge_idx < self._partitions_with_extra_task:\n reduce_idx = merge_idx * (self.merge_partition_size + 1)\n partition_size = self.merge_partition_size + 1\n else:\n reduce_idx = self._partitions_with_extra_task * (\n self.merge_partition_size + 1\n )\n merge_idx -= self._partitions_with_extra_task\n reduce_idx += merge_idx * self.merge_partition_size\n partition_size = self.merge_partition_size\n\n if round_idx >= partition_size:\n continue\n\n reduce_idx += round_idx\n yield reduce_idx\n idx += 1\n round_idx += 1\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 384, "n_words": 69, "vocab_size": 33, "complexity": 5, "nloc": 21, "token_counts": 103, "n_ast_nodes": 168, "n_identifiers": 12, "random_cut": "def round_robin_reduce_idx_iterator(self):\n \n idx = 0\n round_idx = 0\n while idx < self.output_num_blocks:\n for merge_idx in range(self.num_merge_tasks_per_round):\n if merge_idx < self._partitions_with_extra_task:\n reduce_idx = merge_idx * (self.merge_partition_size + 1)\n partition_size = self.merge_partition_size + 1\n else:\n reduce_idx = self", "d_id": 32637, "documentation": { "docstring": "\n When there are multiple nodes, merge tasks are spread throughout the\n cluster to improve load-balancing. Each merge task produces outputs for\n a contiguous partition of reduce tasks. 
This method creates an iterator\n that returns reduce task indices round-robin across the merge tasks.\n This can be used to submit reduce tasks in a way that spreads the load\n evenly across the cluster.\n ", "n_words": 61, "vocab_size": 45, "n_whitespaces": 111, "language": "en" } }, { "id": 153106, "commit_id": "1e65a4afd191cf61ba05b80545d23f9b88962f41", "repo": "modin", "path": "modin/pandas/groupby.py", "file_name": "groupby.py", "fun_name": "_check_index_name", "commit_message": "FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373)\n\nSigned-off-by: Dmitry Chigarev ", "code": "def _check_index_name(self, result):\n \n if self._by is not None:\n # pandas does not name the index for this case\n result._query_compiler.set_index_name(None)\n return result\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 64, "n_words": 21, "vocab_size": 20, "complexity": 2, "nloc": 4, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 6, "random_cut": "def _check_index_name(self, result):\n \n if self._by is not None:\n # pandas does not n", "d_id": 35261, "documentation": { "docstring": "\n Check the result of groupby aggregation on the need of resetting index name.\n\n Parameters\n ----------\n result : DataFrame\n Group by aggregation result.\n\n Returns\n -------\n DataFrame\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 93, "language": "en" } }, { "id": 211652, "commit_id": "c6c10032924aaf4eb1646a4fd593c17a7e2ecb3b", "repo": "PaddleDetection", "path": "ppdet/modeling/rbox_utils.py", "file_name": "rbox_utils.py", "fun_name": "check_points_in_rotated_boxes", "commit_message": "add ppyoloe_r (#7105)\n\n* add ppyoloe_r\r\n\r\n* modify code of ops.py\r\n\r\n* add ppyoloe_r docs and modify rotate docs\r\n\r\n* modify docs and refine connfigs\r\n\r\n* fix some problems\r\n\r\n* refine docs, add nms_rotated ext_op and fix some problems\r\n\r\n* add image and inference_benchmark.py\r\n\r\n* modify docs\r\n\r\n* fix some problems\r\n\r\n* modify code accroding to review\r\n\r\nCo-authored-by: wangxinxin08 <>", "code": "def check_points_in_rotated_boxes(points, boxes):\n \n # [B, N, 5] -> [B, N, 4, 2]\n corners = box2corners(boxes)\n # [1, L, 2] -> [1, 1, L, 2]\n points = points.unsqueeze(0)\n # [B, N, 4, 2] -> [B, N, 1, 2]\n a, b, c, d = corners.split(4, axis=2)\n ab = b - a\n ad = d - a\n # [B, N, L, 2]\n ap = points - a\n # [B, N, L]\n norm_ab = paddle.sum(ab * ab, axis=-1)\n # [B, N, L]\n norm_ad = paddle.sum(ad * ad, axis=-1)\n # [B, N, L] dot product\n ap_dot_ab = paddle.sum(ap * ab, axis=-1)\n # [B, N, L] dot product\n ap_dot_ad = paddle.sum(ap * ad, axis=-1)\n # [B, N, L] = |A|*|B|*cos(theta) \n is_in_box = (ap_dot_ab >= 0) & (ap_dot_ab <= norm_ab) & (ap_dot_ad >= 0) & (\n ap_dot_ad <= norm_ad)\n return is_in_box\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 210, "n_words": 136, "vocab_size": 58, "complexity": 1, "nloc": 14, "token_counts": 142, "n_ast_nodes": 229, "n_identifiers": 22, "random_cut": "def check_points_in_rotated_boxes(points, boxes):\n \n # [B, N, 5] -> [B, N, 4, 2]\n corners = box2corners(boxes)\n # [1, L, 2] -> [1, 1, L, 2]\n points = points.unsqueeze(0)\n # [B, N, 4, 2] -> [B, N, 1, 2]\n a, b, c, d = corners.split(4, axis=2)\n ab = b - a\n ad = d - a\n # [B, N, L, 2]\n ap = points - a\n # [B, N, L]\n norm_ab = paddle.sum(ab * ab, axis=-1)\n # [B, 
N, L]\n norm_ad = paddle.sum(ad * ad, axis=-1)\n # [B, N, L] dot product\n ap_dot_ab = paddle.sum(", "d_id": 53127, "documentation": { "docstring": "Check whether point is in rotated boxes\n\n Args:\n points (tensor): (1, L, 2) anchor points\n boxes (tensor): [B, N, 5] gt_bboxes\n eps (float): default 1e-9\n \n Returns:\n is_in_box (tensor): (B, N, L)\n\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 72, "language": "en" } }, { "id": 266557, "commit_id": "b493c590bcee9b64e8ae02c17d4fde2331e0598b", "repo": "ansible", "path": "lib/ansible/modules/git.py", "file_name": "git.py", "fun_name": "write_ssh_wrapper", "commit_message": "Bypass fragile git ssh wrapper (#73404)\n\ngit module now uses env vars exclusively\r\n\r\n - updated docs to clarify usage\r\n - now env vars append instead of overwrite to allow existing custom setups to keep working\r\n fixes #38104, #64673, #64674\r\n - added note for hostkeychecking more securely\r\n fixes #69846\r\n - keep script cause old versions still choke on env\r\n - env var cannot hold more than 'command' for older versions\r\n - all ssh_opts in one place", "code": "def write_ssh_wrapper(module):\n \n try:\n # make sure we have full permission to the module_dir, which\n # may not be the case if we're sudo'ing to a non-root user\n if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK):\n fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/')\n else:\n raise OSError\n except (IOError, OSError):\n fd, wrapper_path = tempfile.mkstemp()\n\n # use existing git_ssh/ssh_command, fallback to 'ssh'\n template = b( % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh')))\n\n # write it\n with os.fdopen(fd, 'w+b') as fh:\n fh.write(template)\n\n # set execute\n st = os.stat(wrapper_path)\n os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)\n\n module.debug('Wrote temp git ssh wrapper (%s): %s' % (wrapper_path, template))\n\n # ensure we cleanup after ourselves\n module.add_cleanup_file(path=wrapper_path)\n\n return wrapper_path\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 208, "n_words": 102, "vocab_size": 83, "complexity": 3, "nloc": 18, "token_counts": 154, "n_ast_nodes": 265, "n_identifiers": 30, "random_cut": "def write_ssh_wrapper(module):\n \n try:\n # make sure we", "d_id": 78474, "documentation": { "docstring": "\n This writes an shell wrapper for ssh options to be used with git\n this is only relevant for older versions of gitthat cannot\n handle the options themselves. 
Returns path to the script\n #!/bin/sh\n%s $GIT_SSH_OPTS\n", "n_words": 35, "vocab_size": 31, "n_whitespaces": 58, "language": "en" } }, { "id": 243723, "commit_id": "2ae55ccbdad9c842929fb238ea1eb81d1f999024", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "alpha_composite", "commit_message": "Improve exception traceback readability", "code": "def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):\n \n\n if not isinstance(source, (list, tuple)):\n msg = \"Source must be a tuple\"\n raise ValueError(msg)\n if not isinstance(dest, (list, tuple)):\n msg = \"Destination must be a tuple\"\n raise ValueError(msg)\n if not len(source) in (2, 4):\n msg = \"Source must be a 2 or 4-tuple\"\n raise ValueError(msg)\n if not len(dest) == 2:\n msg = \"Destination must be a 2-tuple\"\n raise ValueError(msg)\n if min(source) < 0:\n msg = \"Source must be non-negative\"\n raise ValueError(msg)\n\n if len(source) == 2:\n source = source + im.size\n\n # over image, crop if it's not the whole thing.\n if source == (0, 0) + im.size:\n overlay = im\n else:\n overlay = im.crop(source)\n\n # target for the paste\n box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)\n\n # destination image. don't copy if we're using the whole image.\n if box == (0, 0) + self.size:\n background = self\n else:\n background = self.crop(box)\n\n result = alpha_composite(background, overlay)\n self.paste(result, box)\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 441, "n_words": 157, "vocab_size": 80, "complexity": 9, "nloc": 41, "token_counts": 226, "n_ast_nodes": 362, "n_identifiers": 21, "random_cut": "def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):\n \n\n if not isinstance(source, (list, tuple)):\n msg = \"Source must be a tuple\"\n raise ValueError(msg)\n if not isinstance(dest, (list, tuple)):\n msg = \"Destination must be a tuple\"\n raise ValueError(msg)\n if not len(source) in (2, 4):\n msg = \"Source must be a 2 or 4-tuple\"\n raise ValueError(msg)\n if not len(dest) == 2:\n msg = \"Destination must be a 2-tuple\"\n raise ValueError(msg)\n if min(source) < 0:\n msg = \"Source must be non-negative\"\n raise ValueError(msg)\n\n if len(source) == 2:\n source = source + im.size\n\n # over image,", "d_id": 70092, "documentation": { "docstring": "'In-place' analog of Image.alpha_composite. 
Composites an image\n onto this image.\n\n :param im: image to composite over this one\n :param dest: Optional 2 tuple (left, top) specifying the upper\n left corner in this (destination) image.\n :param source: Optional 2 (left, top) tuple for the upper left\n corner in the overlay source image, or 4 tuple (left, top, right,\n bottom) for the bounds of the source rectangle\n\n Performance Note: Not currently implemented in-place in the core layer.\n ", "n_words": 75, "vocab_size": 49, "n_whitespaces": 144, "language": "en" } }, { "id": 77647, "commit_id": "fd5218220e4ccc7697ee18f57356810560e5e718", "repo": "wagtail", "path": "wagtail/contrib/forms/tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_form_field_clean_name_override", "commit_message": "form builder - allow clean_name generation to be overridden\n\n- adds a new class method to AbstractFormField `get_field_clean_name`\n- resolves #6903", "code": "def test_form_field_clean_name_override(self):\n \n\n field = ExtendedFormField.objects.create(\n page=self.form_page,\n sort_order=1,\n label=\"quanti ge·là·to?\",\n field_type=\"number\", # only number fields will add the ID as a prefix to the clean_name\n required=True,\n )\n\n self.assertEqual(field.clean_name, \"number_field--quanti_gelato\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 112, "n_words": 28, "vocab_size": 27, "complexity": 1, "nloc": 9, "token_counts": 47, "n_ast_nodes": 78, "n_identifiers": 14, "random_cut": "def test_form_field_clean_name_override(self):\n \n\n field = ExtendedFormField.objects.create(\n page=self.form_page,\n sort_order=1,", "d_id": 16685, "documentation": { "docstring": "\n Creating a new field should use the overridden method\n See ExtendedFormField get_field_clean_name method\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 35, "language": "en" } }, { "id": 119830, "commit_id": "603bb3c5ca288674579211e64fa47c6b2b0fb7a6", "repo": "jax", "path": "jax/_src/numpy/polynomial.py", "file_name": "polynomial.py", "fun_name": "polyder", "commit_message": "lax_numpy: move poly functions into numpy.polynomial", "code": "def polyder(p, m=1):\n _check_arraylike(\"polyder\", p)\n m = core.concrete_or_error(operator.index, m, \"'m' argument of jnp.polyder\")\n p, = _promote_dtypes_inexact(p)\n if m < 0:\n raise ValueError(\"Order of derivative must be positive\")\n if m == 0:\n return p\n coeff = (arange(len(p), m, -1)[np.newaxis, :] - 1 - arange(m)[:, np.newaxis]).prod(0)\n return p[:-m] * coeff\n\n\n_LEADING_ZEROS_DOC = \n\n@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 63, "n_words": 52, "vocab_size": 40, "complexity": 3, "nloc": 10, "token_counts": 104, "n_ast_nodes": 190, "n_identifiers": 20, "random_cut": "def polyder(p, m=1):\n _check_arraylike(\"polyder\", p)\n m = core.concrete_or_error(operator.index, m, \"'m' argument of jnp.polyder\")\n p, = _promote_dtypes_inexact(p)\n if m < 0:\n raise ValueError(\"Order of derivative must be positive\")\n if m == 0:\n return p\n coeff = (arange(le", "d_id": 26695, "documentation": { "docstring": "\\\nSetting trim_leading_zeros=True makes the output match that of numpy.\nBut prevents the function from being able to be used in compiled code.\n", "n_words": 23, "vocab_size": 22, 
"n_whitespaces": 20, "language": "en" } }, { "id": 211506, "commit_id": "92078713cced4f0d9450a6fc80a449fa75fd8c10", "repo": "PaddleDetection", "path": "ppdet/modeling/losses/probiou_loss.py", "file_name": "probiou_loss.py", "fun_name": "probiou_loss", "commit_message": "add fcosr model (#6765)\n\n* add fcosr\r\n\r\n* fix some problem\r\n\r\n* add docs for fcosr\r\n\r\n* modify code\r\n\r\n* modify focsr reader\r\n\r\n* finish tensorrt deployment with dynamic shape\r\n\r\n* modify according to review comment\r\n\r\nCo-authored-by: wangxinxin08 <>", "code": "def probiou_loss(pred, target, eps=1e-3, mode='l1'):\n \n\n gbboxes1 = gbb_form(pred)\n gbboxes2 = gbb_form(target)\n\n x1, y1, a1_, b1_, c1_ = gbboxes1[:,\n 0], gbboxes1[:,\n 1], gbboxes1[:,\n 2], gbboxes1[:,\n 3], gbboxes1[:,\n 4]\n x2, y2, a2_, b2_, c2_ = gbboxes2[:,\n 0], gbboxes2[:,\n 1], gbboxes2[:,\n 2], gbboxes2[:,\n 3], gbboxes2[:,\n 4]\n\n a1, b1, c1 = rotated_form(a1_, b1_, c1_)\n a2, b2, c2 = rotated_form(a2_, b2_, c2_)\n\n t1 = 0.25 * ((a1 + a2) * (paddle.pow(y1 - y2, 2)) + (b1 + b2) * (paddle.pow(x1 - x2, 2))) + \\\n 0.5 * ((c1+c2)*(x2-x1)*(y1-y2))\n t2 = (a1 + a2) * (b1 + b2) - paddle.pow(c1 + c2, 2)\n t3_ = (a1 * b1 - c1 * c1) * (a2 * b2 - c2 * c2)\n t3 = 0.5 * paddle.log(t2 / (4 * paddle.sqrt(F.relu(t3_)) + eps))\n\n B_d = (t1 / t2) + t3\n # B_d = t1 + t2 + t3\n\n B_d = paddle.clip(B_d, min=eps, max=100.0)\n l1 = paddle.sqrt(1.0 - paddle.exp(-B_d) + eps)\n l_i = paddle.pow(l1, 2.0)\n l2 = -paddle.log(1.0 - l_i + eps)\n\n if mode == 'l1':\n probiou = l1\n if mode == 'l2':\n probiou = l2\n\n return probiou\n\n\n@serializable\n@register", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "@serializable\n@register", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 881, "n_words": 181, "vocab_size": 95, "complexity": 3, "nloc": 32, "token_counts": 383, "n_ast_nodes": 553, "n_identifiers": 46, "random_cut": "def probiou_loss(pred, target, eps=1e-3, mode='l1'):\n \n\n gbboxes1 = gbb_form(pred)\n gbboxes2 = gbb_form(target)\n\n x1, y1, a1_, b1_, c1_ = gbboxes1[:,\n ", "d_id": 53110, "documentation": { "docstring": "\n pred -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours predicted box ;in case of HBB angle == 0\n target -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours target box ;in case of HBB angle == 0\n eps -> threshold to avoid infinite values\n mode -> ('l1' in [0,1] or 'l2' in [0,inf]) metrics according our paper\n\n ", "n_words": 58, "vocab_size": 36, "n_whitespaces": 104, "language": "en" } }, { "id": 178818, "commit_id": "613c31d98f20bdd9a4e5884c99826a06a3328438", "repo": "Nuitka", "path": "nuitka/Options.py", "file_name": "Options.py", "fun_name": "mayDisableConsoleWindow", "commit_message": "Standalone: Added support for requiring modes\n\n* For wx on macOS, console must be disabled, avoid the trap.\n\n* For the PySide2, on macOS the --onefile must be used when the\n application bundle is built or else signing has issues.\n\n* Recommend to use new option --disable-console for PySide2, PySide6\n and wx on non-macOS", "code": "def mayDisableConsoleWindow():\n \n\n # TODO: What about MSYS2?\n return isWin32Windows() or isMacOS()\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 2, "token_counts": 13, "n_ast_nodes": 27, "n_identifiers": 3, "random_cut": "def mayDisableConsoleWindow():\n 
\n\n # TODO: What about ", "d_id": 42834, "documentation": { "docstring": ":returns: bool derived from platform support of disabling the console,", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 244197, "commit_id": "dc14675f79681b88ce2c5a3ca3c69901b415ffe4", "repo": "mmdetection", "path": "mmdet/utils/compat_config.py", "file_name": "compat_config.py", "fun_name": "compat_cfg", "commit_message": "[Feature] Support set dataloader args in config and and add function to handle config compatibility (#7668)\n\n* add cfg_compatibility and support loader args\r\n\r\n* resolve comments\r\n\r\n* add unitest\r\n\r\n* resolve comments\r\n\r\n* delete all warning", "code": "def compat_cfg(cfg):\n \n cfg = copy.deepcopy(cfg)\n cfg = compat_imgs_per_gpu(cfg)\n cfg = compat_loader_args(cfg)\n cfg = compat_runner_args(cfg)\n return cfg\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 34, "n_words": 16, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 34, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "def compat_cfg(cfg):\n \n cfg = copy.deepcopy(cfg)\n cfg = compat_imgs_per_gpu(cfg)\n cfg = compat_loader_args(cfg)\n cfg = compat_runner_args(cfg)\n return cf", "d_id": 70277, "documentation": { "docstring": "This function would modify some filed to keep the compatibility of\n config.\n\n For example, it will move some args which will be deprecated to the correct\n fields.\n ", "n_words": 27, "vocab_size": 23, "n_whitespaces": 39, "language": "en" } }, { "id": 218412, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getsourcelines", "commit_message": "add python 3.10.4 for windows", "code": "def getsourcelines(object):\n \n object = unwrap(object)\n lines, lnum = findsource(object)\n\n if istraceback(object):\n object = object.tb_frame\n\n # for module or frame that corresponds to module, return all source lines\n if (ismodule(object) or\n (isframe(object) and object.f_code.co_name == \"\")):\n return lines, 0\n else:\n return getblock(lines[lnum:]), lnum + 1\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 93, "n_words": 44, "vocab_size": 35, "complexity": 5, "nloc": 10, "token_counts": 73, "n_ast_nodes": 123, "n_identifiers": 13, "random_cut": "def getsourcelines(object):\n \n object = unwrap(object)\n lines, lnum = findsource(object)\n\n if istraceback(object):\n object = object.tb_frame\n\n # for module or frame that corresponds to module, return all source lines\n if (ismodule(object) or\n ", "d_id": 55293, "documentation": { "docstring": "Return a list of source lines and starting line number for an object.\n\n The argument may be a module, class, method, function, traceback, frame,\n or code object. The source code is returned as a list of the lines\n corresponding to the object and the line number indicates where in the\n original source file the first line of code was found. 
An OSError is\n raised if the source code cannot be retrieved.", "n_words": 71, "vocab_size": 46, "n_whitespaces": 87, "language": "en" } }, { "id": 205453, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/deletion.py", "file_name": "deletion.py", "fun_name": "get_del_batches", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_del_batches(self, objs, fields):\n \n field_names = [field.name for field in fields]\n conn_batch_size = max(\n connections[self.using].ops.bulk_batch_size(field_names, objs), 1\n )\n if len(objs) > conn_batch_size:\n return [\n objs[i : i + conn_batch_size]\n for i in range(0, len(objs), conn_batch_size)\n ]\n else:\n return [objs]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 155, "n_words": 39, "vocab_size": 34, "complexity": 4, "nloc": 12, "token_counts": 82, "n_ast_nodes": 123, "n_identifiers": 16, "random_cut": "def get_del_batches(self, objs, fields):\n ", "d_id": 51131, "documentation": { "docstring": "\n Return the objs in suitably sized batches for the used connection.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 6670, "commit_id": "44356d2d07370b7044640a068ace95842d5ce98c", "repo": "ludwig", "path": "ludwig/utils/checkpoint_utils.py", "file_name": "checkpoint_utils.py", "fun_name": "save", "commit_message": "Add file lock on training checkpoints to prevent race condition (#1938)", "code": "def save(self, global_step):\n \n save_path = osp.join(self.directory, f\"{global_step:09d}.ckpt\")\n self.checkpoint.save(save_path)\n self.latest_checkpoint = save_path\n self.queue.put(True)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 47, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 75, "n_identifiers": 11, "random_cut": "def save(self, global_step):\n \n save_path = osp.join(s", "d_id": 1048, "documentation": { "docstring": "Create a new checkpoint.\n\n Args:\n global_step (int): The iteration number which will be used\n to name the checkpoint.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 52, "language": "en" } }, { "id": 195586, "commit_id": "f0194812568c83585ff09488fe7f67df300938cc", "repo": "rembg", "path": "versioneer.py", "file_name": "versioneer.py", "fun_name": "versions_from_file", "commit_message": "add auto tag", "code": "def versions_from_file(filename):\n \n try:\n with open(filename) as f:\n contents = f.read()\n except OSError:\n raise NotThisMethod(\"unable to read _version.py\")\n mo = re.search(r\"version_json = # END VERSION_JSON\",\n contents, re.M | re.S)\n if not mo:\n mo = re.search(r\"version_json = # END VERSION_JSON\",\n contents, re.M | re.S)\n if not mo:\n raise NotThisMethod(\"no version_json in _version.py\")\n return json.loads(mo.group(1))\n\n", "url": "https://github.com/danielgatis/rembg.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 156, "n_words": 52, "vocab_size": 34, "complexity": 4, "nloc": 14, "token_counts": 94, "n_ast_nodes": 159, "n_identifiers": 16, "random_cut": "def versions_from_file(filename):\n \n try:\n with open(filename) as f:\n contents = f.read()\n except OSError:\n raise NotThisMethod(\"unable to read _version.py\")\n mo = re.search(r\"version_json = # END", "d_id": 47301, 
"documentation": { "docstring": "Try to determine the version from _version.py if present.\\n(.*)\\r\\n(.*)", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 64798, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/bank_transaction/bank_transaction.py", "file_name": "bank_transaction.py", "fun_name": "get_total_allocated_amount", "commit_message": "style: format code with black", "code": "def get_total_allocated_amount(payment_entry):\n\treturn frappe.db.sql(\n\t\t,\n\t\t(payment_entry.payment_document, payment_entry.payment_entry),\n\t\tas_dict=True,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 3, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 19, "token_counts": 29, "n_ast_nodes": 44, "n_identifiers": 7, "random_cut": "def get_total_allocated_amount(payment_entry):\n\treturn frappe.db.sql(\n\t\t,\n\t\t(payment_entry.p", "d_id": 13724, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\tSUM(btp.allocated_amount) as allocated_amount,\n\t\t\tbt.name\n\t\tFROM\n\t\t\t`tabBank Transaction Payments` as btp\n\t\tLEFT JOIN\n\t\t\t`tabBank Transaction` bt ON bt.name=btp.parent\n\t\tWHERE\n\t\t\tbtp.payment_document = %s\n\t\tAND\n\t\t\tbtp.payment_entry = %s\n\t\tAND\n\t\t\tbt.docstatus = 1", "n_words": 30, "vocab_size": 24, "n_whitespaces": 17, "language": "en" } }, { "id": 176369, "commit_id": "28b3014d68d2b4e40d3e02219770296a827bd55c", "repo": "networkx", "path": "networkx/algorithms/matching.py", "file_name": "matching.py", "fun_name": "min_weight_matching", "commit_message": "Update matching functions for error validation and speed (#4897)\n\n* First steps to update matching functions for #4644\r\n\r\nExpand tests\r\nChange API to raise NetworkXError when matching involves nodes not in G\r\nUpdate is_*_matching to 100+ times faster.\r\n\r\n* improve matching_dict_to_set and docs for min_weight_matching\r\n\r\n* fix sphinx error", "code": "def min_weight_matching(G, maxcardinality=False, weight=\"weight\"):\n \n if len(G.edges) == 0:\n return max_weight_matching(G, maxcardinality, weight)\n G_edges = G.edges(data=weight, default=1)\n min_weight = min(w for _, _, w in G_edges)\n InvG = nx.Graph()\n edges = ((u, v, 1 / (1 + w - min_weight)) for u, v, w in G_edges)\n InvG.add_weighted_edges_from(edges, weight=weight)\n return max_weight_matching(InvG, maxcardinality, weight)\n\n\n@not_implemented_for(\"multigraph\")\n@not_implemented_for(\"directed\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"multigraph\")\n@not_implemented_for(\"directed\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 82, "n_words": 53, "vocab_size": 40, "complexity": 4, "nloc": 9, "token_counts": 114, "n_ast_nodes": 191, "n_identifiers": 21, "random_cut": "def min_weight_matching(G, maxcardinality=False, weight=\"weight\"):\n \n if len(G.edges) == 0:\n return max_weight_matching(G, maxcardinality, weight)\n G_edges = G.edges(data=weight, default=1)\n min_weight = min(w for _, _, w in G_edges)\n InvG = nx.Graph()\n edges = ((u, v, 1 / (1 + w - min_weight)) for u, v, w in G_edges)\n InvG.add_weighted_edges_from(e", "d_id": 41855, "documentation": { "docstring": "Computing a minimum-weight maximal matching of G.\n\n Use reciprocal edge weights with the maximum-weight algorithm.\n\n A matching is a subset of edges in which no node occurs more 
than once.\n The weight of a matching is the sum of the weights of its edges.\n A maximal matching cannot add more edges and still be a matching.\n The cardinality of a matching is the number of matched edges.\n\n This method replaces the weights with their reciprocal and\n then runs :func:`max_weight_matching`.\n Read the documentation of max_weight_matching for more information.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n maxcardinality: bool, optional (default=False)\n If maxcardinality is True, compute the maximum-cardinality matching\n with minimum weight among all maximum-cardinality matchings.\n\n weight: string, optional (default='weight')\n Edge data key corresponding to the edge weight.\n If key not found, uses 1 as weight.\n\n Returns\n -------\n matching : set\n A minimal weight matching of the graph.\n ", "n_words": 146, "vocab_size": 92, "n_whitespaces": 233, "language": "en" } }, { "id": 20218, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/macos.py", "file_name": "macos.py", "fun_name": "site_config_dir", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def site_config_dir(self) -> str:\n \n return self._append_app_name_and_version(\"/Library/Preferences\")\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 15, "n_ast_nodes": 29, "n_identifiers": 4, "random_cut": "def site_config_dir(self) -> str:\n \n return self._append_app_name_and_version(\"/Libr", "d_id": 3270, "documentation": { "docstring": ":return: config directory shared by the users, e.g. 
``/Library/Preferences/$appname``", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 154585, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/expr.py", "file_name": "expr.py", "fun_name": "_cmp_op", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def _cmp_op(self, other, op_name):\n \n lhs_dtype_class = self._get_dtype_cmp_class(self._dtype)\n rhs_dtype_class = self._get_dtype_cmp_class(other._dtype)\n res_dtype = get_dtype(bool)\n # In HDK comparison with NULL always results in NULL,\n # but in pandas it is True for 'ne' comparison and False\n # for others.\n # Also pandas allows 'eq' and 'ne' comparison for values\n # of incompatible types which doesn't work in HDK.\n if lhs_dtype_class != rhs_dtype_class:\n if op_name == \"eq\" or op_name == \"ne\":\n return LiteralExpr(op_name == \"ne\")\n else:\n raise TypeError(\n f\"Invalid comparison between {self._dtype} and {other._dtype}\"\n )\n else:\n cmp = OpExpr(self.binary_operations[op_name], [self, other], res_dtype)\n return build_if_then_else(\n self.is_null(), LiteralExpr(op_name == \"ne\"), cmp, res_dtype\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 310, "n_words": 99, "vocab_size": 70, "complexity": 4, "nloc": 16, "token_counts": 106, "n_ast_nodes": 192, "n_identifiers": 18, "random_cut": "def _cmp_op(self, other, op_name):\n \n lhs_dtype_class = self._get_dtype_cmp_class(self._dtype)\n rhs_dtype_class = self._get_dtype_cmp_class(other._dtype)\n res_dtype = get_dtype(bool)\n # In HDK comparison with NULL always results in NULL,\n # but in pandas it is True for 'ne' comparison and False\n # for others.\n # Also pandas allows 'eq' and 'ne' comparison for value", "d_id": 36095, "documentation": { "docstring": "\n Build a comparison expression.\n\n Parameters\n ----------\n other : BaseExpr\n A value to compare with.\n op_name : str\n The comparison operation name.\n\n Returns\n -------\n BaseExpr\n The resulting comparison expression.\n ", "n_words": 28, "vocab_size": 22, "n_whitespaces": 125, "language": "en" } }, { "id": 110191, "commit_id": "723cd86d7d7bdc14a4d3fc0e08c3a01e72d310b6", "repo": "matplotlib", "path": "lib/matplotlib/widgets.py", "file_name": "widgets.py", "fun_name": "set_active", "commit_message": "Use scatter for check boxes instead of Rectangle\n\nWith the current implementation, the boxes get stretched into rectangles\nif the aspect ratio is not maintained. 
To overcome this, the boxes are\nnow created using scatter instead to maintain their shapes.", "code": "def set_active(self, index):\n \n if index not in range(len(self.labels)):\n raise ValueError(f'Invalid CheckButton index: {index}')\n\n if colors.same_color(\n self._crosses.get_facecolor()[index], colors.to_rgba(\"none\")\n ):\n self._crosses.get_facecolor()[index] = colors.to_rgba(\"k\")\n else:\n self._crosses.get_facecolor()[index] = colors.to_rgba(\"none\")\n\n if hasattr(self, \"_rectangles\"):\n for i, p in enumerate(self._rectangles):\n p.set_facecolor(\"k\" if colors.same_color(\n p.get_facecolor(), colors.to_rgba(\"none\"))\n else \"none\")\n\n if self.drawon:\n self.ax.figure.canvas.draw()\n\n if self.eventson:\n self._observers.process('clicked', self.labels[index].get_text())\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 249, "n_words": 47, "vocab_size": 37, "complexity": 8, "nloc": 18, "token_counts": 174, "n_ast_nodes": 295, "n_identifiers": 27, "random_cut": "def set_active(self, index):\n \n if index not in range(len(self.labels)):\n raise ValueError(f'Invalid CheckButton index: {index}')\n\n if colors.same_color(\n self._", "d_id": 23965, "documentation": { "docstring": "\n Toggle (activate or deactivate) a check button by index.\n\n Callbacks will be triggered if :attr:`eventson` is True.\n\n Parameters\n ----------\n index : int\n Index of the check button to toggle.\n\n Raises\n ------\n ValueError\n If *index* is invalid.\n ", "n_words": 36, "vocab_size": 33, "n_whitespaces": 122, "language": "en" } }, { "id": 304408, "commit_id": "3a3f41f3df932368791d3ee3f5fbae5fb3b38bfe", "repo": "core", "path": "homeassistant/components/ebox/sensor.py", "file_name": "sensor.py", "fun_name": "async_update", "commit_message": "Improve entity type hints [e] (#77041)", "code": "async def async_update(self) -> None:\n \n await self.ebox_data.async_update()\n if self.entity_description.key in self.ebox_data.data:\n self._attr_native_value = round(\n self.ebox_data.data[self.entity_description.key], 2\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 75, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 7, "token_counts": 50, "n_ast_nodes": 82, "n_identifiers": 8, "random_cut": "async def async_update(self) -> None:\n \n await s", "d_id": 103215, "documentation": { "docstring": "Get the latest data from EBox and update the state.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 244297, "commit_id": "f3a451abab8fc89810b317ca0a88ee9fd12cb0c2", "repo": "mmdetection", "path": "tools/analysis_tools/analyze_results.py", "file_name": "analyze_results.py", "fun_name": "panoptic_evaluate", "commit_message": "[Feature] Support panoptic segmentation result analysis (#7922)\n\n* support analyze panoptic segmentation result\r\n\r\n* fix lint\r\n\r\n* update docstring\r\n\r\n* update docstring\r\n\r\n* set print_log=False by default\r\n\r\n* update\r\n\r\n* fix bug 8035", "code": "def panoptic_evaluate(self, dataset, results, topk=20):\n \n # image to annotations\n gt_json = dataset.coco.img_ann_map\n\n result_files, tmp_dir = dataset.format_results(results)\n pred_json = mmcv.load(result_files['panoptic'])['annotations']\n pred_folder = osp.join(tmp_dir.name, 'panoptic')\n gt_folder = dataset.seg_prefix\n\n pqs = {}\n prog_bar = 
mmcv.ProgressBar(len(results))\n for i in range(len(results)):\n data_info = dataset.prepare_train_img(i)\n image_id = data_info['img_info']['id']\n gt_ann = {\n 'image_id': image_id,\n 'segments_info': gt_json[image_id],\n 'file_name': data_info['img_info']['segm_file']\n }\n pred_ann = pred_json[i]\n pq_stat = pq_compute_single_core(\n i, [(gt_ann, pred_ann)],\n gt_folder,\n pred_folder,\n dataset.categories,\n dataset.file_client,\n print_log=False)\n pq_results, classwise_results = pq_stat.pq_average(\n dataset.categories, isthing=None)\n pqs[i] = pq_results['pq']\n prog_bar.update()\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n\n # descending select topk image\n pqs = list(sorted(pqs.items(), key=lambda kv: kv[1]))\n good_pqs = pqs[-topk:]\n bad_pqs = pqs[:topk]\n\n return good_pqs, bad_pqs\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 474, "n_words": 102, "vocab_size": 80, "complexity": 3, "nloc": 34, "token_counts": 248, "n_ast_nodes": 399, "n_identifiers": 49, "random_cut": "def panoptic_evaluate(self, dataset, results, topk=20):\n \n # image to annotations\n gt_json = dataset.coco.img_ann_map\n\n result_files, tmp_dir = dataset.format_results(results)\n pred_json = mmcv.load(result_files['panoptic'])['annotations']\n pred_folder = osp.join(tmp_dir.name, 'panoptic')\n gt_folder = dataset.seg_prefix\n\n pqs = {}\n prog_bar = mmcv.ProgressBar(len(results))\n for i in range(len(results)):\n data_info = dataset.prepare_train_img(i)\n image_id = data_info['img_info']['id']\n gt_ann = {\n 'image_id': image_id,\n 'segments_info': gt_json[image_id],\n 'file_name': data_info['img_info']['segm_file']\n }\n pred_ann = pred_json[i]\n pq_stat = pq_compute_single_core(\n i, [(gt_ann, pred_ann)],\n gt_folder,", "d_id": 70312, "documentation": { "docstring": "Evaluation for panoptic segmentation.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n results (list): Panoptic segmentation results from test\n results pkl file.\n topk (int): Number of the highest topk and\n lowest topk after evaluation index sorting. 
Default: 20.\n\n Returns:\n tuple: A tuple contains good samples and bad samples.\n good_pqs (dict[int, float]): A dict contains good\n samples's indices in dataset and model's\n performance on them.\n bad_pqs (dict[int, float]): A dict contains bad\n samples's indices in dataset and model's\n performance on them.\n ", "n_words": 78, "vocab_size": 52, "n_whitespaces": 279, "language": "en" } }, { "id": 261234, "commit_id": "c22be1defcf3e59ebd79ed3e479ada8ea558f601", "repo": "scikit-learn", "path": "sklearn/feature_selection/_mutual_info.py", "file_name": "_mutual_info.py", "fun_name": "_compute_mi_cd", "commit_message": "CLN Remove unnecessary operation in mutual_info (#24569)", "code": "def _compute_mi_cd(c, d, n_neighbors):\n \n n_samples = c.shape[0]\n c = c.reshape((-1, 1))\n\n radius = np.empty(n_samples)\n label_counts = np.empty(n_samples)\n k_all = np.empty(n_samples)\n nn = NearestNeighbors()\n for label in np.unique(d):\n mask = d == label\n count = np.sum(mask)\n if count > 1:\n k = min(n_neighbors, count - 1)\n nn.set_params(n_neighbors=k)\n nn.fit(c[mask])\n r = nn.kneighbors()[0]\n radius[mask] = np.nextafter(r[:, -1], 0)\n k_all[mask] = k\n label_counts[mask] = count\n\n # Ignore points with unique labels.\n mask = label_counts > 1\n n_samples = np.sum(mask)\n label_counts = label_counts[mask]\n k_all = k_all[mask]\n c = c[mask]\n radius = radius[mask]\n\n kd = KDTree(c)\n m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)\n m_all = np.array(m_all)\n\n mi = (\n digamma(n_samples)\n + np.mean(digamma(k_all))\n - np.mean(digamma(label_counts))\n - np.mean(digamma(m_all))\n )\n\n return max(0, mi)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 298, "n_words": 113, "vocab_size": 69, "complexity": 3, "nloc": 34, "token_counts": 270, "n_ast_nodes": 422, "n_identifiers": 37, "random_cut": "def _compute_mi_cd(c, d, n_neighbors):\n \n n_samples = c.shape[0]\n c = c.reshape((-1, 1))\n\n radius = np.empty(n_samples)\n label_counts = np.empty(n_samples)\n k_all = np.empty(n_samples)\n nn = NearestNeighbors()\n for label in np.unique(d):\n mask = d == label\n count = np.sum(mask)\n if count > 1:\n k = min(n_neighbors, count - 1)\n nn.set_params(n_neighbors=k)\n nn.fit(c[mask])\n r = nn.kneighbors()[0]\n radius[mask] = np.nextafter(r[:, -1], 0)\n k_all[mask] = k\n label_counts[mask] = count\n\n # Ignore points with un", "d_id": 76702, "documentation": { "docstring": "Compute mutual information between continuous and discrete variables.\n\n Parameters\n ----------\n c : ndarray, shape (n_samples,)\n Samples of a continuous random variable.\n\n d : ndarray, shape (n_samples,)\n Samples of a discrete random variable.\n\n n_neighbors : int\n Number of nearest neighbors to search for each point, see [1]_.\n\n Returns\n -------\n mi : float\n Estimated mutual information. If it turned out to be negative it is\n replace by 0.\n\n Notes\n -----\n True mutual information can't be negative. If its estimate by a numerical\n method is negative, it means (providing the method is adequate) that the\n mutual information is close to 0 and replacing it by 0 is a reasonable\n strategy.\n\n References\n ----------\n .. [1] B. C. Ross \"Mutual Information between Discrete and Continuous\n Data Sets\". 
PLoS ONE 9(2), 2014.\n ", "n_words": 126, "vocab_size": 85, "n_whitespaces": 221, "language": "en" } }, { "id": 203216, "commit_id": "c5cd8783825b5f6384417dac5f3889b4210b7d08", "repo": "django", "path": "django/core/management/base.py", "file_name": "base.py", "fun_name": "handle_app_config", "commit_message": "Refs #33476 -- Refactored problematic code before reformatting by Black.\n\nIn these cases Black produces unexpected results, e.g.\r\n\r\ndef make_random_password(\r\n self,\r\n length=10,\r\n allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',\r\n):\r\n\r\nor\r\n\r\ncursor.execute(\"\"\"\r\nSELECT ...\r\n\"\"\",\r\n [table name],\r\n)", "code": "def handle_app_config(self, app_config, **options):\n \n raise NotImplementedError(\n \"Subclasses of AppCommand must provide a handle_app_config() method.\"\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 16, "n_ast_nodes": 29, "n_identifiers": 5, "random_cut": "def handle_app_config(self, app_config, **options):\n \n raise Not", "d_id": 50251, "documentation": { "docstring": "\n Perform the command's actions for app_config, an AppConfig instance\n corresponding to an application label given on the command line.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 41, "language": "en" } }, { "id": 259339, "commit_id": "db24a30bd3b90a9d55e82e450631de96305744f7", "repo": "scikit-learn", "path": "sklearn/feature_selection/tests/test_from_model.py", "file_name": "test_from_model.py", "fun_name": "test_inferred_max_features_integer", "commit_message": "ENH Allow `SelectFromModel`'s `max_features` to accept callables (#22356)\n\n* Initial implementation\r\n\r\n* Improved error handling and stability\r\n\r\n* Added unit tests\r\n\r\n* Updated test to use `max_features_` instead of `max_features`\r\n\r\n* Added documentation for new private attribute `max_features_`\r\n\r\n* Improved error handling for callables\r\n\r\n* Updated whats_new\r\n\r\n* Removed incorrect term reference to `max_features`\r\n\r\n* Removed float case and improved testing\r\n\r\n* Updated test names to more clearly reflect intention\r\n\r\n* Added a sample callable in `max_features` description\r\n\r\n* Improved documentation and streamlined error handling\r\n\r\n* Updated example to include demonstrate using a callable for max_features\r\n\r\n* Separated out callable demo into separate example\r\n\r\n* Removed demo from `max_features` docs (now in example)\r\n\r\n* Updated changelog\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Thomas J. Fan \r\n\r\n* Trimmed unneeded comments\r\n\r\n* Updated tests to reflect new error handling\r\n\r\n* Removed new line at end of docstring\r\n\r\n* Updated docstring\r\n\r\n* Fixed example syntax error\r\n\r\n* Fixed example syntax\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Thomas J. Fan \r\n\r\n* Reverted irrelevant changes\r\n\r\n* Update sklearn/feature_selection/_from_model.py\r\n\r\nCo-authored-by: Thomas J. 
Fan \r\n\r\n* Fixed error message\r\n\r\n* Improved test coverage\r\n\r\n* Minor doc improvement -- added a list for `max_features` type\r\n\r\n* Update sklearn/feature_selection/_from_model.py\r\n\r\nCo-authored-by: Adrin Jalali \r\n\r\n* Improved input validation and added test for array-like\r\n\r\n* Updated doc to use no longer use lambda function\r\n\r\n* Fixed docstring list\r\n\r\n* Added missing whitespace for list format in docstring\r\n\r\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Adrin Jalali ", "code": "def test_inferred_max_features_integer(max_features):\n \n clf = RandomForestClassifier(n_estimators=5, random_state=0)\n transformer = SelectFromModel(\n estimator=clf, max_features=max_features, threshold=-np.inf\n )\n X_trans = transformer.fit_transform(data, y)\n assert transformer.max_features_ == max_features\n assert X_trans.shape[1] == transformer.max_features_\n\n\n@pytest.mark.parametrize(\n \"max_features\",\n [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)],\n)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"max_features\",\n [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 70, "n_words": 38, "vocab_size": 29, "complexity": 1, "nloc": 8, "token_counts": 64, "n_ast_nodes": 162, "n_identifiers": 23, "random_cut": "def test_inferred_max_features_integer(max_features):\n \n clf = RandomForestClassifier(n_estimators=5, random_state=0)\n transformer = SelectFromModel(\n estimator=clf, max_features=max_features, threshold=-np.inf\n )\n X_trans = transformer.fit_transform(data, y)\n assert transformer.max_features_ == max_features\n assert X_trans.shape[1] == transformer.max_features_\n\n\n@pytest.mark.paramet", "d_id": 75720, "documentation": { "docstring": "Check max_features_ and output shape for integer max_features.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 177016, "commit_id": "b2f91c34a23058dd70b41784af0d87890216026a", "repo": "networkx", "path": "networkx/algorithms/tests/test_lowest_common_ancestors.py", "file_name": "test_lowest_common_ancestors.py", "fun_name": "test_naive_all_pairs_lowest_common_ancestor3", "commit_message": "Naive lowest common ancestor implementation (#5736)\n\n* Add naive lca methods\r\n\r\n* Naive algorithm implementation for LCA\r\n\r\n* Modify naive lca functions\r\n\r\n* Correct parameters of nx.ancestors\r\n\r\n* Update lowest_common_ancestors.py\r\n\r\n* Parametrize tests\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Yield instead of append\r\n\r\n* Tests for naive lca\r\n\r\n* Correct test cases for naive lca algorithms\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mridul Seth \r\n\r\n* Fix function name -when calling\r\n\r\n* Make requested changes\r\n\r\n* Inlining _get_a_lowest_common_ancestor\r\n\r\nCo-authored-by: dtuncturk \r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Mridul Seth ", "code": "def test_naive_all_pairs_lowest_common_ancestor3(self):\n \n all_pairs = product(self.DG.nodes(), self.DG.nodes())\n ans = naive_all_pairs_lca(self.DG, pairs=all_pairs)\n self.assert_lca_dicts_same(dict(ans), self.gold)\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 51, 
"n_ast_nodes": 83, "n_identifiers": 12, "random_cut": "def test_naive_all_pairs_lowest_common_ancestor3(self):\n \n all_pairs = product(self.DG.nodes(), self.DG.nodes())\n ans = naive_all_pairs_lca(self.DG, pairs=all_pairs)\n self.assert_lca_dicts_same(dict(ans), self.gold)\n", "d_id": 42228, "documentation": { "docstring": "Produces the correct results when all pairs given as a generator.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 205281, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/autodetector.py", "file_name": "autodetector.py", "fun_name": "_resolve_dependency", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _resolve_dependency(dependency):\n \n if dependency[0] != \"__setting__\":\n return dependency, False\n resolved_app_label, resolved_object_name = getattr(\n settings, dependency[1]\n ).split(\".\")\n return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 79, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 7, "token_counts": 54, "n_ast_nodes": 89, "n_identifiers": 8, "random_cut": "def _resolve_dependency(dependency):\n \n if dependency[0] != \"__setting__\":\n return dependen", "d_id": 51063, "documentation": { "docstring": "\n Return the resolved dependency and a boolean denoting whether or not\n it was swappable.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 176975, "commit_id": "abaa68779ccb4cce8d1a5ecade622ab96d01edeb", "repo": "networkx", "path": "networkx/algorithms/lowest_common_ancestors.py", "file_name": "lowest_common_ancestors.py", "fun_name": "lowest_common_ancestor", "commit_message": "Add examples to lowest common ancestors algorithms (#5531)\n\n* Add examples to lowest common ancestors documentation\r\n\r\n* Fix output style of examples\r\n\r\n* Fix output style of example\r\n\r\n* Update pre-commit\r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Indentation fix & pprint dictionary\r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Move \"import pprint\" to the example\r\n\r\nCo-authored-by: dtuncturk \r\nCo-authored-by: Ross Barnowski ", "code": "def lowest_common_ancestor(G, node1, node2, default=None):\n \n ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))\n if ans:\n assert len(ans) == 1\n return ans[0][1]\n else:\n return default\n\n\n@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 54, "n_words": 23, "vocab_size": 22, "complexity": 2, "nloc": 7, "token_counts": 55, "n_ast_nodes": 105, "n_identifiers": 11, "random_cut": "def lowest_common_ancestor(G, node1, node2, default=None):\n \n ans = 
list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))\n if ans:\n assert len(ans) == 1\n return ans[0][1]\n else:\n return default\n\n\n@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", "d_id": 42203, "documentation": { "docstring": "Compute the lowest common ancestor of the given pair of nodes.\n\n Parameters\n ----------\n G : NetworkX directed graph\n\n node1, node2 : nodes in the graph.\n\n default : object\n Returned if no common ancestor between `node1` and `node2`\n\n Returns\n -------\n The lowest common ancestor of node1 and node2,\n or default if they have no common ancestors.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4), (1, 6), (4, 5)])\n >>> nx.lowest_common_ancestor(G, 3, 5)\n 2\n\n We can also set `default` argument as below. The value of default is returned\n if there are no common ancestors of given two nodes.\n\n >>> G = nx.DiGraph([(4, 5), (12, 13)])\n >>> nx.lowest_common_ancestor(G, 12, 5, default=\"No common ancestors!\")\n 'No common ancestors!'\n\n Notes\n -----\n Only defined on non-null directed acyclic graphs.\n Takes n log(n) time in the size of the graph.\n See `all_pairs_lowest_common_ancestor` when you have\n more than one pair of nodes of interest.\n\n See Also\n --------\n tree_all_pairs_lowest_common_ancestor\n all_pairs_lowest_common_ancestor\n ", "n_words": 155, "vocab_size": 107, "n_whitespaces": 252, "language": "en" } }, { "id": 65933, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/education/report/program_wise_fee_collection/program_wise_fee_collection.py", "file_name": "program_wise_fee_collection.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data(filters=None):\n\tdata = []\n\n\tconditions = get_filter_conditions(filters)\n\n\tfee_details = frappe.db.sql(\n\t\t\n\t\t% (conditions),\n\t\tas_dict=1,\n\t)\n\n\tfor entry in fee_details:\n\t\tdata.append(\n\t\t\t{\n\t\t\t\t\"program\": entry.program,\n\t\t\t\t\"fees_collected\": entry.paid_amount,\n\t\t\t\t\"outstanding_amount\": entry.outstanding_amount,\n\t\t\t\t\"grand_total\": entry.grand_total,\n\t\t\t}\n\t\t)\n\n\treturn data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 16, "n_words": 33, "vocab_size": 29, "complexity": 2, "nloc": 37, "token_counts": 74, "n_ast_nodes": 121, "n_identifiers": 16, "random_cut": "def get_data(filters=None):\n\tdata = []\n\n\tconditions = get_filter_conditions(filters)\n\n\tfee_details = frappe.db.sql(\n\t\t\n\t\t% (conditions),\n\t\tas_dict=1,\n\t)\n\n\tfor entry in fee_details:\n\t\tdata.append(\n\t\t\t{\n\t\t\t\t\"program\": entry.program,\n\t\t\t\t\"fees_collected\": entry.paid_amount,\n\t\t\t\t\"outstanding_amount\": entry.outstanding_amount,\n\t\t\t\t\"grand_total\": entry.grand_total,\n\t\t\t}\n\t\t)\n\n\treturn data\n\n", "d_id": 14058, "documentation": { "docstring": "\n\t\t\tSELECT\n\t\t\t\tFeesCollected.program,\n\t\t\t\tFeesCollected.paid_amount,\n\t\t\t\tFeesCollected.outstanding_amount,\n\t\t\t\tFeesCollected.grand_total\n\t\t\tFROM (\n\t\t\t\tSELECT\n\t\t\t\t\tsum(grand_total) - sum(outstanding_amount) AS paid_amount, program,\n\t\t\t\t\tsum(outstanding_amount) AS outstanding_amount,\n\t\t\t\t\tsum(grand_total) AS grand_total\n\t\t\t\tFROM `tabFees`\n\t\t\t\tWHERE\n\t\t\t\t\tdocstatus = 1 and\n\t\t\t\t\tprogram IS NOT NULL\n\t\t\t\t\t%s\n\t\t\t\tGROUP BY program\n\t\t\t) AS 
FeesCollected\n\t\t\tORDER BY FeesCollected.paid_amount DESC\n\t\t", "n_words": 42, "vocab_size": 33, "n_whitespaces": 24, "language": "en" } }, { "id": 22227, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/vendor/requirementslib/models/dependencies.py", "file_name": "dependencies.py", "fun_name": "get_dependencies_from_json", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def get_dependencies_from_json(ireq):\n \n\n if ireq.editable or not is_pinned_requirement(ireq):\n return\n\n # It is technically possible to parse extras out of the JSON API's\n # requirement format, but it is such a chore let's just use the simple API.\n if ireq.extras:\n return\n\n session = requests.session()\n atexit.register(session.close)\n version = str(ireq.req.specifier).lstrip(\"=\")\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 84, "n_words": 46, "vocab_size": 40, "complexity": 6, "nloc": 18, "token_counts": 101, "n_ast_nodes": 96, "n_identifiers": 15, "random_cut": "def get_dependencies_from_json(ireq):\n \n\n if ireq.editable or not is_pinned_requirement(ireq):\n return\n\n # It is technically possible to parse extras out of the JSON API's\n ", "d_id": 4271, "documentation": { "docstring": "Retrieves dependencies for the given install requirement from the json\n api.\n\n :param ireq: A single InstallRequirement\n :type ireq: :class:`~pipenv.patched.pip._internal.req.req_install.InstallRequirement`\n :return: A set of dependency lines for generating new InstallRequirements.\n :rtype: set(str) or None\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 51, "language": "en" } }, { "id": 171745, "commit_id": "36dcf519c67a8098572447f7d5a896740fc9c464", "repo": "pandas", "path": "pandas/core/frame.py", "file_name": "frame.py", "fun_name": "assign", "commit_message": "ENH/TST: expand copy-on-write to assign() method (#50010)", "code": "def assign(self, **kwargs) -> DataFrame:\n r\n data = self.copy(deep=None)\n\n for k, v in kwargs.items():\n data[k] = com.apply_if_callable(v, data)\n return data\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 58, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 66, "token_counts": 48, "n_ast_nodes": 75, "n_identifiers": 12, "random_cut": "def assign(self, **kwargs) -> DataFrame:\n r\n data = self.copy(deep=None)\n\n for k, v in kwargs.items():\n ", "d_id": 40716, "documentation": { "docstring": "\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas doesn't check it).\n If the values are not callable, (e.g. 
a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible.\n Later items in '\\*\\*kwargs' may refer to newly created or modified\n columns in 'df'; items are computed and assigned into 'df' in order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence:\n\n >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n You can create multiple columns within the same assign where one\n of the columns depends on another one defined within the same assign:\n\n >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,\n ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)\n temp_c temp_f temp_k\n Portland 17.0 62.6 290.15\n Berkeley 25.0 77.0 298.15\n ", "n_words": 268, "vocab_size": 146, "n_whitespaces": 761, "language": "en" } }, { "id": 244278, "commit_id": "d18cdb140ef3cb9ed5fdef6f1a815f5836f1b1ab", "repo": "mmdetection", "path": "mmdet/models/dense_heads/solo_head.py", "file_name": "solo_head.py", "fun_name": "resize_feats", "commit_message": "[Feature] Support SOLOv2 (#7441)\n\n* solov2 init\r\n\r\n* solov2 r18 lightweight\r\n\r\n* add model docstrings and reformat the code\r\n\r\n* add docstrings to model method\r\n\r\n* add solov2 big model config and correct some errors in the docstring\r\n\r\n* fix linting issues\r\n\r\n* refactor code and configs\r\n\r\n* rename variables according to the convention\r\n\r\n* add and enhance solov2 logic\r\n\r\n* add doc strings\r\n\r\n* update solov2 config files\r\n\r\n* fix norm_cfg in mask head\r\n\r\n* minor fix\r\n\r\n* update configs\r\n\r\nCo-authored-by: BIGWangYuDong ", "code": "def resize_feats(self, feats):\n \n out = []\n for i in range(len(feats)):\n if i == 0:\n out.append(\n F.interpolate(\n feats[0],\n size=feats[i + 1].shape[-2:],\n mode='bilinear',\n align_corners=False))\n elif i == len(feats) - 1:\n out.append(\n F.interpolate(\n feats[i],\n size=feats[i - 1].shape[-2:],\n mode='bilinear',\n align_corners=False))\n else:\n out.append(feats[i])\n return out\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 368, "n_words": 40, "vocab_size": 29, "complexity": 4, "nloc": 20, "token_counts": 127, "n_ast_nodes": 198, "n_identifiers": 14, "random_cut": "def resize_feats(self, feats):\n \n out = []\n for i in range(len(feats)):\n if i == 0:\n out.append(\n F.interpolate(\n feats[0],\n size=feats[i + 1].shape[-2:],\n mode='bilinear',", "d_id": 70305, "documentation": { "docstring": "Downsample the first feat and upsample last feat in feats.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 67940, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/report/stock_projected_qty/stock_projected_qty.py", "file_name": "stock_projected_qty.py", "fun_name": "get_bin_list", "commit_message": "style: format code with black", "code": "def 
get_bin_list(filters):\n\tconditions = []\n\n\tif filters.item_code:\n\t\tconditions.append(\"item_code = '%s' \" % filters.item_code)\n\n\tif filters.warehouse:\n\t\twarehouse_details = frappe.db.get_value(\n\t\t\t\"Warehouse\", filters.warehouse, [\"lft\", \"rgt\"], as_dict=1\n\t\t)\n\n\t\tif warehouse_details:\n\t\t\tconditions.append(\n\t\t\t\t\" exists (select name from `tabWarehouse` wh \\\n\t\t\t\twhere wh.lft >= %s and wh.rgt <= %s and bin.warehouse = wh.name)\"\n\t\t\t\t% (warehouse_details.lft, warehouse_details.rgt)\n\t\t\t)\n\n\tbin_list = frappe.db.sql(\n\t\t.format(\n\t\t\tconditions=\" where \" + \" and \".join(conditions) if conditions else \"\"\n\t\t),\n\t\tas_dict=1,\n\t)\n\n\treturn bin_list\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 50, "n_words": 71, "vocab_size": 52, "complexity": 5, "nloc": 24, "token_counts": 107, "n_ast_nodes": 181, "n_identifiers": 17, "random_cut": "def get_bin_list(filters):\n\tconditions = []\n\n\tif filters.item_code:\n\t\tcond", "d_id": 14672, "documentation": { "docstring": "select item_code, warehouse, actual_qty, planned_qty, indented_qty,\n\t\tordered_qty, reserved_qty, reserved_qty_for_production, reserved_qty_for_sub_contract, projected_qty\n\t\tfrom tabBin bin {conditions} order by item_code, warehouse\n\t\t", "n_words": 19, "vocab_size": 18, "n_whitespaces": 16, "language": "en" } }, { "id": 271613, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "run_eagerly", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def run_eagerly(self):\n \n if (\n self.dynamic and self._run_eagerly is False\n ): # pylint:disable=g-bool-id-comparison\n # TODO(fchollet): consider using py_func to enable this.\n raise ValueError(\n \"Your model contains layers that can only be \"\n \"successfully run in eager execution (layers \"\n \"constructed with `dynamic=True`). \"\n \"You cannot set `run_eagerly=False`.\"\n )\n\n if self._cluster_coordinator and self._run_eagerly:\n raise ValueError(\n \"When using `Model` with `ParameterServerStrategy`, \"\n \"`run_eagerly` is not supported.\"\n )\n\n # Run eagerly logic, by priority:\n # (1) Dynamic models must be run eagerly.\n # (2) Explicitly setting run_eagerly causes a Model to be run eagerly.\n # (3) Not explicitly setting run_eagerly defaults to TF's global setting.\n return (\n self.dynamic\n or self._run_eagerly\n or (tf.config.functions_run_eagerly() and self._run_eagerly is None)\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 373, "n_words": 113, "vocab_size": 80, "complexity": 8, "nloc": 20, "token_counts": 68, "n_ast_nodes": 127, "n_identifiers": 9, "random_cut": "def run_eagerly(self):\n \n if (\n self.dynamic and self._run_eagerly is False\n ): # pylint:disable=g-bool-id-comparison\n # TODO(fchollet): consider using py_func to enable this.\n raise ValueError(\n \"Your model contains layers that can only be \"\n \"successfully run in eager execution (layers \"\n \"constructed with `dynamic=True`). 
\"\n \"You cannot set `run_eagerly=False`.\"\n )\n\n if self._cluster_coordinator and self._run_eagerly:\n raise ValueError(\n \"When using `Model` with `ParameterServerStrategy`, \"\n \"`run_eagerly` is not ", "d_id": 80828, "documentation": { "docstring": "Settable attribute indicating whether the model should run eagerly.\n\n Running eagerly means that your model will be run step by step,\n like Python code. Your model might run slower, but it should become easier\n for you to debug it by stepping into individual layer calls.\n\n By default, we will attempt to compile your model to a static graph to\n deliver the best execution performance.\n\n Returns:\n Boolean, whether the model should run eagerly.\n ", "n_words": 72, "vocab_size": 52, "n_whitespaces": 130, "language": "en" } }, { "id": 79164, "commit_id": "e864b9c4d12ad0edd38283c17c2935e950e73520", "repo": "wagtail", "path": "wagtail/models/__init__.py", "file_name": "__init__.py", "fun_name": "get_preview_context", "commit_message": "Add docs for PreviewableMixin", "code": "def get_preview_context(self, request, *args, **kwargs):\n \n return {\"object\": self, \"request\": request}\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 5, "random_cut": "def get_preview_context(self, request, *args, **kwargs):\n \n return {\"object\": self, \"request\": request}\n", "d_id": 16882, "documentation": { "docstring": "\n Returns a context dictionary for use in templates for previewing this object.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 300279, "commit_id": "539ce7ff0e9d9bc59cd8f028f245c09f802c89cb", "repo": "core", "path": "tests/components/mobile_app/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "test_default_disabling_entity", "commit_message": "Allow mobile app to disable entities by default (#71562)", "code": "async def test_default_disabling_entity(hass, create_registrations, webhook_client):\n \n webhook_id = create_registrations[1][\"webhook_id\"]\n webhook_url = f\"/api/webhook/{webhook_id}\"\n\n reg_resp = await webhook_client.post(\n webhook_url,\n json={\n \"type\": \"register_sensor\",\n \"data\": {\n \"name\": \"Battery State\",\n \"type\": \"sensor\",\n \"unique_id\": \"battery_state\",\n \"default_disabled\": True,\n },\n },\n )\n\n assert reg_resp.status == HTTPStatus.CREATED\n\n json = await reg_resp.json()\n assert json == {\"success\": True}\n await hass.async_block_till_done()\n\n entity = hass.states.get(\"sensor.test_1_battery_state\")\n assert entity is None\n\n assert (\n er.async_get(hass).async_get(\"sensor.test_1_battery_state\").disabled_by\n == er.RegistryEntryDisabler.INTEGRATION\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 228, "n_words": 61, "vocab_size": 45, "complexity": 1, "nloc": 25, "token_counts": 129, "n_ast_nodes": 230, "n_identifiers": 21, "random_cut": "async def test_default_disabling_entity(hass, create_registrations, webhook_client):\n \n webhook_id = create_registrations[1][\"webhook_id\"]\n webhook_url = f\"/api/webhook/{webhook_id}\"\n\n reg_resp = await webhook_client.post(\n w", "d_id": 99145, "documentation": { "docstring": "Test that sensors can be disabled by default upon registration.", "n_words": 10, 
"vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 155918, "commit_id": "73acebb3a2066792dea39c78245a6e1a01b2b173", "repo": "dask", "path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "fun_name": "_create_dd_meta", "commit_message": "Fix \"physical\" column bug in pyarrow-based read_parquet (#8775)\n\nStarting with pyarrow-5.0, the `pyarrow.dataset` API can now be used to write parquet datasets. Using `pyarrow.dataset.write_dataset` to write partitioned data results in different \"pandas metadata\" than we get from a Dask-written dataset, because Dask will not include the partitioned column names in this metadata (since they are not \"physical\" columns), but pyarrow will. This exposed a bug in Dask, where we were conflating \"pandas metadata\" column names with \"physical\" column names. This PR adds a small fix to ensure that Dask will only bail on reading partitioned columns if/when the partitioned columns are really \"physical\" columns.", "code": "def _create_dd_meta(cls, dataset_info):\n \n\n # Collect necessary information from dataset_info\n schema = dataset_info[\"schema\"]\n index = dataset_info[\"index\"]\n categories = dataset_info[\"categories\"]\n partition_obj = dataset_info[\"partitions\"]\n partitions = dataset_info[\"partition_names\"]\n physical_column_names = dataset_info.get(\"physical_schema\", schema).names\n columns = None\n\n # Set index and column names using\n # pandas metadata (when available)\n pandas_metadata = _get_pandas_metadata(schema)\n if pandas_metadata:\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(pandas_metadata)\n if categories is None:\n categories = []\n for col in pandas_metadata[\"columns\"]:\n if (col[\"pandas_type\"] == \"categorical\") and (\n col[\"name\"] not in categories\n ):\n categories.append(col[\"name\"])\n else:\n # No pandas metadata implies no index, unless selected by the user\n index_names = []\n column_names = physical_column_names\n storage_name_mapping = {k: k for k in column_names}\n column_index_names = [None]\n if index is None and index_names:\n # Pandas metadata has provided the index name for us\n index = index_names\n\n # Ensure that there is no overlap between partition columns\n # and explicit column storage\n if partitions:\n _partitions = [p for p in partitions if p not in physical_column_names]\n if not _partitions:\n partitions = []\n dataset_info[\"partitions\"] = None\n dataset_info[\"partition_keys\"] = {}\n dataset_info[\"partition_names\"] = partitions\n elif len(_partitions) != len(partitions):\n raise ValueError(\n \"No partition-columns should be written in the \\n\"\n \"file unless they are ALL written in the file.\\n\"\n \"physical columns: {} | partitions: {}\".format(\n physical_column_names, partitions\n )\n )\n\n column_names, index_names = _normalize_index_columns(\n columns, column_names + partitions, index, index_names\n )\n\n all_columns = index_names + column_names\n\n # Check that categories are included in columns\n if categories and not set(categories).intersection(all_columns):\n raise ValueError(\n \"categories not in available columns.\\n\"\n \"categories: {} | columns: {}\".format(categories, list(all_columns))\n )\n\n dtypes = _get_pyarrow_dtypes(schema, categories)\n dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}\n\n index_cols = index or ()\n meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)\n if categories:\n # Make sure all 
categories are set to \"unknown\".\n # Cannot include index names in the `cols` argument.\n meta = clear_known_categories(\n meta, cols=[c for c in categories if c not in meta.index.names]\n )\n\n if partition_obj:\n\n for partition in partition_obj:\n if isinstance(index, list) and partition.name == index[0]:\n # Index from directory structure\n meta.index = pd.CategoricalIndex(\n [], categories=partition.keys, name=index[0]\n )\n elif partition.name == meta.index.name:\n # Index created from a categorical column\n meta.index = pd.CategoricalIndex(\n [], categories=partition.keys, name=meta.index.name\n )\n elif partition.name in meta.columns:\n meta[partition.name] = pd.Series(\n pd.Categorical(categories=partition.keys, values=[]),\n index=meta.index,\n )\n\n # Update `dataset_info` and return `meta`\n dataset_info[\"index\"] = index\n dataset_info[\"index_cols\"] = index_cols\n dataset_info[\"categories\"] = categories\n\n return meta\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1545, "n_words": 379, "vocab_size": 196, "complexity": 27, "nloc": 81, "token_counts": 504, "n_ast_nodes": 823, "n_identifiers": 51, "random_cut": "def _create_dd_meta(cls, dataset_info):\n \n\n # Collect necessary information from dataset_info\n schema = dataset_info[\"schema\"]\n index = dataset_info[\"index\"]\n categories = dataset_info[\"categories\"]\n partition_obj = dataset_info[\"partitions\"]\n partitions = dataset_info[\"partition_names\"]\n physical_column_names = dataset_info.get(\"physical_schema\", schema).names\n columns = None\n\n # Set index and column names using\n # pandas metadata (when available)\n pandas_metadata = _get_pandas_metadata(schema)\n if pandas_metadata:\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(pandas_metadata)\n if categories is None:\n categories = []\n for col in pandas_metadata[\"columns\"]:\n if (col[\"pandas_type\"] == \"categorical\") and (\n col[\"name\"] not in categories\n ):\n categories.append(col[\"name\"])\n else:\n # No pandas metadata implies no index, unless selected by the user\n index_names = []\n column_names = physical_column_names\n storage_name_mapping = {k: k for k in column_names}\n column_index_names = [None]\n if index is None and index_names:\n # Pandas metadata has provided the index name for us\n index = index_names\n\n # Ensure that there is no overlap between partition columns\n # and explicit column storage\n if partitions:\n _partitions = [p for p in partitions if p not in physical_column_names]\n if not _partitions:\n partitions = []\n dataset_info[\"partitions\"] = None\n dataset_info[\"partition_keys\"] = {}\n datas", "d_id": 36490, "documentation": { "docstring": "Use parquet schema and hive-partition information\n (stored in dataset_info) to construct DataFrame metadata.\n\n This method is used by both arrow engines.\n ", "n_words": 21, "vocab_size": 21, "n_whitespaces": 42, "language": "en" } }, { "id": 48185, "commit_id": "766726f2e3a282fcd2662f5dc6e9926dc38a6540", "repo": "airflow", "path": "airflow/providers/google/cloud/transfers/postgres_to_gcs.py", "file_name": "postgres_to_gcs.py", "fun_name": "convert_type", "commit_message": "Fix `PostgresToGCSOperator` does not allow nested JSON (#23063)\n\n* Avoid double json.dumps for json data export in PostgresToGCSOperator.\r\n\r\n* Fix CI", "code": "def convert_type(self, value, schema_type, stringify_dict=True):\n \n if 
isinstance(value, datetime.datetime):\n iso_format_value = value.isoformat()\n if value.tzinfo is None:\n return iso_format_value\n return pendulum.parse(iso_format_value).float_timestamp\n if isinstance(value, datetime.date):\n return value.isoformat()\n if isinstance(value, datetime.time):\n formatted_time = time.strptime(str(value), \"%H:%M:%S\")\n time_delta = datetime.timedelta(\n hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec\n )\n return str(time_delta)\n if stringify_dict and isinstance(value, dict):\n return json.dumps(value)\n if isinstance(value, Decimal):\n return float(value)\n return value\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 243, "n_words": 54, "vocab_size": 35, "complexity": 8, "nloc": 19, "token_counts": 149, "n_ast_nodes": 231, "n_identifiers": 31, "random_cut": "def convert_type(self, value, schema_type, stringify_dict=True):\n \n if isinstance(value, datetime.datetime):\n iso_format_value = value.isoformat()\n if value.tzinfo is None:\n return iso_format_value\n return pendulum.parse(iso_format_value).float_timestamp\n if isinstance(val", "d_id": 9392, "documentation": { "docstring": "\n Takes a value from Postgres, and converts it to a value that's safe for\n JSON/Google Cloud Storage/BigQuery.\n Timezone aware Datetime are converted to UTC seconds.\n Unaware Datetime, Date and Time are converted to ISO formatted strings.\n Decimals are converted to floats.\n\n :param value: Postgres column value.\n :param schema_type: BigQuery data type.\n :param stringify_dict: Specify whether to convert dict to string.\n ", "n_words": 60, "vocab_size": 46, "n_whitespaces": 124, "language": "en" } }, { "id": 276990, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/metrics_utils.py", "file_name": "metrics_utils.py", "fun_name": "_filter_top_k", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _filter_top_k(x, k):\n \n _, top_k_idx = tf.math.top_k(x, k, sorted=False)\n top_k_mask = tf.reduce_sum(\n tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2\n )\n return x * top_k_mask + NEG_INF * (1 - top_k_mask)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 49, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 6, "token_counts": 72, "n_ast_nodes": 110, "n_identifiers": 15, "random_cut": "def _filter_top_k(x, k):\n \n _, top_k_idx = tf.math.top_k(x, k, sorted=False)\n top_k_mask = tf.reduce_sum(\n tf.one_", "d_id": 81820, "documentation": { "docstring": "Filters top-k values in the last dim of x and set the rest to NEG_INF.\n\n Used for computing top-k prediction values in dense labels (which has the same\n shape as predictions) for recall and precision top-k metrics.\n\n Args:\n x: tensor with any dimensions.\n k: the number of values to keep.\n\n Returns:\n tensor with same shape and dtype as x.\n ", "n_words": 59, "vocab_size": 41, "n_whitespaces": 89, "language": "en" } }, { "id": 181690, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/tpot_tests.py", "file_name": "tpot_tests.py", "fun_name": "test_pick_two_individuals_eligible_for_crossover_bad", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", 
"code": "def test_pick_two_individuals_eligible_for_crossover_bad():\n \n\n ind1 = creator.Individual.from_string(\n 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',\n tpot_obj._pset\n )\n ind2 = creator.Individual.from_string(\n 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',\n tpot_obj._pset\n )\n ind3 = creator.Individual.from_string(\n 'GaussianNB(input_matrix)',\n tpot_obj._pset\n )\n\n # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive\n pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3])\n assert pick1 is None and pick2 is None\n\n # You can not do crossover with a population of only 1.\n pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1])\n assert pick1 is None and pick2 is None\n\n # You can not do crossover with a population of 0.\n pick1, pick2 = pick_two_individuals_eligible_for_crossover([])\n assert pick1 is None and pick2 is None\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 192, "n_words": 102, "vocab_size": 48, "complexity": 4, "nloc": 19, "token_counts": 104, "n_ast_nodes": 171, "n_identifiers": 12, "random_cut": "def test_pick_two_individuals_eligible_for_crossover_bad():\n \n\n ind1 = creator.Individual.from_string(\n 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',\n tpot_obj._pset\n )\n ind2 = creator.Individual.from_string(\n 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',\n tpot_obj._pset\n )\n ind3 = creator.Individual.from_string(\n 'GaussianNB(input_matrix)',\n tpot_obj._pset\n )\n\n # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive\n pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3])\n assert pick1 is None and pick2 is None\n\n # You can", "d_id": 43477, "documentation": { "docstring": "Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 209826, "commit_id": "a2b7a28faff1db058dd22ce097a268e0ad5d1d33", "repo": "scapy", "path": "scapy/arch/windows/__init__.py", "file_name": "__init__.py", "fun_name": "get_ips", "commit_message": "[Hinty] Core typing: windows (#3684)\n\n* Core typing: windows\r\n\r\nCo-authored-by: Pierre ", "code": "def get_ips(v6=False):\n # type: (bool) -> Dict[NetworkInterface, List[str]]\n \n res = {}\n for iface in six.itervalues(conf.ifaces):\n if v6:\n res[iface] = iface.ips[6]\n else:\n res[iface] = iface.ips[4]\n return res\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 77, "n_words": 26, "vocab_size": 22, "complexity": 3, "nloc": 8, "token_counts": 53, "n_ast_nodes": 86, "n_identifiers": 9, "random_cut": "def get_ips(v6=False):\n # type: (bool) -> Dict[NetworkInterface, List[", "d_id": 52808, "documentation": { "docstring": "Returns all available IPs matching to interfaces, using the windows system.\n Should only be used as a WinPcapy fallback.\n\n :param v6: IPv6 addresses\n ", "n_words": 23, "vocab_size": 23, "n_whitespaces": 32, "language": "en" } }, { "id": 38013, "commit_id": "b971c769e80fe85fb7dd35c7cf65f3ac97ea6421", "repo": "transformers", "path": "src/transformers/models/opt/modeling_opt.py", "file_name": "modeling_opt.py", 
"fun_name": "_set_gradient_checkpointing", "commit_message": "Add OPT (#17088)\n\n* First version - OPT model\r\n\r\n* Final changes\r\n\r\n- putting use cache to False\r\n\r\n* few changes\r\n\r\n- remove commented block\r\n\r\n* few changes\r\n\r\n- remove unecessary files\r\n\r\n* fix style issues\r\n\r\n* few changes\r\n\r\n- remove a test file\r\n- added the logits test\r\n\r\n* Update src/transformers/models/auto/tokenization_auto.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* add gen tests\r\n\r\n* few changes\r\n\r\n- rm mask filling example on docstring\r\n\r\n* few changes\r\n\r\n- remove useless args\r\n\r\n* some changes\r\n\r\n- more tests should pass now\r\n- needs to clean more\r\n- documentation still needs to be done\r\n\r\n* fix code quality\r\n\r\n* major changes\r\n\r\n- change attention architecture to BART-like\r\n- modify some tests\r\n- style fix\r\n\r\n* rm useless classes\r\n\r\n- remove opt for:\r\n- QA\r\n- cond generation\r\n- seq classif\r\n\r\n* Removed autodoc calls to non-existant classes\r\n\r\nTOkenizers are not implemented\r\n\r\n* Update src/transformers/__init__.py\r\n\r\nCo-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com>\r\n\r\n* Update src/transformers/__init__.py\r\n\r\nCo-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/auto/modeling_tf_auto.py\r\n\r\nCo-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com>\r\n\r\n* Replaced OPTTokeniser with GPT2 tokenizer\r\n\r\n* added GPT2Tokenizer.from_pretrained(\"patrickvonplaten/opt_gpt2_tokenizer\")\r\n\r\n* Removed OPTTokenizer\r\n\r\n* make style\r\n\r\n* Make style replaces\r\n\r\n``` ...).unsqueeze(```\r\nby\r\n``` >>>).unsqueeze(```\r\n\r\n* make repo consistency\r\n\r\n* Removed PretrainedOPTModel\r\n\r\n* fix opt.mdx removed other heads\r\n\r\n* fix init, removed 3 heads\r\n\r\n* removed heads\r\n\r\n* finished cleaning head\r\n\r\n* removed seauence classif and question answering\r\n\r\n* removed unused imports\r\n\r\n* removed useless dummy object for QA, SC and CG\r\n\r\n* removed tests for removed useless dummy object for QA, SC and CG\r\n\r\n* Removed head_mask using encoder layers which don't exist\r\n\r\n* fixed test\r\n\r\n* fix line\r\n\r\n* added OPT to toctree\r\n\r\n* Updated model path with pushed weigths\r\n\r\n* fix model path\r\n\r\n* fixed code quality\r\n\r\n* fixed embeddings and generation tests\r\n\r\n* update paths\r\n\r\n* clean comments\r\n\r\n* removed OPTClassificationHead for sentence classification\r\n\r\n* renamed hidden layer\r\n\r\n* renamed num layers to standard num_hidden_layers\r\n\r\n* num_attention_heads fix\r\n\r\n* changes for 125m\r\n\r\n* add first version for 125m\r\n\r\n* add first version - flax\r\n\r\n* add new version\r\n\r\n* causal LM output\r\n\r\n* replace output type with BaseModelOutputWithPastAndCrossAttentions\r\n\r\n* revert working config from 150m to 350m\r\n\r\n* clean\r\n\r\n* removed decoder input ids\r\n\r\n* fixed embed dim\r\n\r\n* more embed_dim issues\r\n\r\n* make style + removed enc_dec test\r\n\r\n* update falx model\r\n\r\n* removed troublesome copy\r\n\r\n* added is_encoder_decoder=False to config\r\n\r\n* added set_input emb fuinction to model class\r\n\r\n* requires torch on embed test\r\n\r\n* use head mask instead of decoder head mask input param solves a test\r\n\r\n* 8 test remaining, update\r\n\r\n* Updated create_and_check_decoder_model_past_large_inputs\r\n\r\n* Make style\r\n\r\n* update op tokenizer with 
condition\r\n\r\n* make style\r\n\r\n* See if I can push\r\n\r\n* some clean up\r\n\r\n* remove linear head hack\r\n\r\n* save intermediate\r\n\r\n* save correct attention\r\n\r\n* add copied from from bart\r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* fix part of the reviewss\r\nCo-authored-by: Patrick von Platen \r\n\r\n* same changes in naming / conversion\r\n\r\n* correct mask\r\n\r\n* more fixes\r\n\r\n* delete FlaxOPT and TfOPT\r\n\r\n* clean traces of Flax and Tf\r\n\r\n* fix mask\r\n\r\n* fixed positionnal embedding length when past key value is provoded\r\n\r\n* get 125m, 6.7b to work\r\n\r\n* Added do_layer_norm\r\n\r\n* solved mismatch in load dictionnary\r\n\r\n* clean up preapre opt input dict\r\n\r\n* fixed past key value as bool\r\n\r\n* fix previus\r\n\r\n* fixed return dict False tuple issue\r\n\r\n* All tests are passing\r\n\r\n* Make style\r\n\r\n* Ignore OPTDecoder non tested\r\n\r\n* make fix-copies\r\n\r\n* make repo consistency\r\n\r\n* small fix\r\n\r\n* removed uselss @torch.no_grad decorator\r\n\r\n* make styl;e\r\n\r\n* fix previous opt test\r\n\r\n* style\r\n\r\n* make style\r\n\r\n* added opt documentation\r\n\r\n* update OPT_PRETRAINED_MODEL_ARCHIVE_LIST\r\n\r\n* up\r\n\r\n* more fixes\r\n\r\n* model & config work\r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* added comment on padding hack (+2)\r\n\r\n* cleaup\r\n\r\n* review update\r\n\r\n* docstring for missing arg\r\n\r\n* Update docs/source/en/model_doc/opt.mdx\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update docs/source/en/model_doc/opt.mdx\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update docs/source/en/model_doc/opt.mdx\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/opt/__init__.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* update pretrained map\r\n\r\n* update path and tests\r\n\r\n* make style\r\n\r\n* styling\r\n\r\n* make consistency\r\n\r\n* add gpt2 tok new\r\n\r\n* more tok fixes\r\n\r\n* Update src/transformers/models/auto/tokenization_auto.py\r\n\r\n* Update docs/source/en/model_doc/opt.mdx\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update docs/source/en/model_doc/opt.mdx\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update docs/source/en/model_doc/opt.mdx\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update tests/models/opt/test_modeling_opt.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: 
Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/opt/modeling_opt.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update based on reviews\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Lysandre Debut \r\n\r\n* make style\r\n\r\n* make tokenizer auto tests pass\r\n\r\n* apply Lysandre suggestion\r\n\r\n* finish tests\r\n\r\n* add some good tokenizer tests\r\n\r\n* improve docs slighly\r\n\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com>\r\nCo-authored-by: ArthurZucker \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", "code": "def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (OPTDecoder)):\n module.gradient_checkpointing = value\n\n\nOPT_GENERATION_EXAMPLE = r\n\nOPT_INPUTS_DOCSTRING = r\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 16, "vocab_size": 13, "complexity": 2, "nloc": 3, "token_counts": 26, "n_ast_nodes": 53, "n_identifiers": 9, "random_cut": "def _set_gradient_checkpointing(self, module, value=False):\n ", "d_id": 6898, "documentation": { "docstring": "\n Generation example:\n\n ```python\n >>> from transformers import AutoTokenizer, AutoModelForCausalLM\n\n >>> model = OPTForCausalLM.from_pretrained(\"ArthurZ/opt-350m\")\n >>> tokenizer = GPT2Tokenizer.from_pretrained(\"patrickvonplaten/opt_gpt2_tokenizer\")\n\n >>> TEXTS_TO_GENERATE = \"Hey, are you consciours? Can you talk to me?\" \"Hi there, my name is Barack\"\n >>> inputs = tokenizer([TEXTS_TO_GENERATE], max_length=1024, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs[\"input_ids\"], num_beams=2, min_length=0, max_length=20)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n 'I'm not conscious.<\\s>'\n ```\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_inputs`] and modify\n to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the\n default strategy.\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n", "n_words": 470, "vocab_size": 244, "n_whitespaces": 979, "language": "en" } }, { "id": 337105, "commit_id": "008b608f1551dbcf521284ed0e7a6722cd02ef07", "repo": "diffusers", "path": "examples/text_to_image/train_text_to_image.py", "file_name": "train_text_to_image.py", "fun_name": "to", "commit_message": "[train_text2image] Fix EMA and make it compatible with deepspeed. 
(#813)\n\n* fix ema\r\n\r\n* style\r\n\r\n* add comment about copy\r\n\r\n* style\r\n\r\n* quality", "code": "def to(self, device=None, dtype=None) -> None:\n r\n # .to() on the tensors handles None correctly\n self.shadow_params = [\n p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)\n for p in self.shadow_params\n ]\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 29, "vocab_size": 28, "complexity": 3, "nloc": 10, "token_counts": 56, "n_ast_nodes": 85, "n_identifiers": 7, "random_cut": "def to(self, device=None, dtype=None) -> None:\n r\n # .to() on the ten", "d_id": 120959, "documentation": { "docstring": "Move internal buffers of the ExponentialMovingAverage to `device`.\n\n Args:\n device: like `device` argument to `torch.Tensor.to`\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 40, "language": "en" } }, { "id": 285200, "commit_id": "9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b", "repo": "OpenBBTerminal", "path": "openbb_terminal/econometrics/econometrics_model.py", "file_name": "econometrics_model.py", "fun_name": "get_granger_causality", "commit_message": "Here we merge all API Refactor related branches (#2236)\n\n* Update api.py\r\n\r\n* Updated forex menu\r\n\r\n* refactor ycrv command\r\n\r\n* refactor ycrv command black\r\n\r\n* refactor ecocal command\r\n\r\n* Minh changes\r\n\r\n* Adding space to test pushing\r\n\r\n* title fix ecocal df\r\n\r\n* get economic calendar annotation\r\n\r\n* fix investingcom tests\r\n\r\n* refactor index command\r\n\r\n* refactor overview command\r\n\r\n* give defaults to wsj view function args\r\n\r\n* rename date args investincom\r\n\r\n* refacto bigmac command\r\n\r\n* fix ecocal typo\r\n\r\n* refactor rtps command\r\n\r\n* alphavantage gdp\r\n\r\n* alphavantage gdp per capita\r\n\r\n* alphavantage cpi\r\n\r\n* alphavantage tyld\r\n\r\n* alphavantage inf\r\n\r\n* refactor macro command\r\n\r\n* refactor macro command w helpers\r\n\r\n* refactor treasury command\r\n\r\n* fix macro on terminal\r\n\r\n* treasury labels\r\n\r\n* refactor maturities\r\n\r\n* update treasury maturities doc strings\r\n\r\n* refactor get economic calendar finhub\r\n\r\n* refactor map command api\r\n\r\n* display map filter choices\r\n\r\n* route economy api to performance map\r\n\r\n* route economy api to performance map\r\n\r\n* display group choices on valuation command\r\n\r\n* refactor performance and valuation commands\r\n\r\n* refactor spectrum model and view\r\n\r\n* add choices to spectrum controller\r\n\r\n* delete image after view\r\n\r\n* fix model tests finviz\r\n\r\n* fix finciz view tests\r\n\r\n* refactor futures\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix controller test\r\n\r\n* refactor fred series notes\r\n\r\n* update fred notes docstring\r\n\r\n* refacto fred series ids\r\n\r\n* fix pred and qa when empty datasets\r\n\r\n* refactor fred\r\n\r\n* uncomment stuff\r\n\r\n* refacto get series data\r\n\r\n* fix some tests\r\n\r\n* set defaults on args\r\n\r\n* refactor fred yield curve\r\n\r\n* black\r\n\r\n* fix spell and remove ecocal names\r\n\r\n* fix linting\r\n\r\n* linting\r\n\r\n* pylint fix\r\n\r\n* change dangerous defaults\r\n\r\n* Working through crypto fixes (#2256)\r\n\r\n* Working through crypto fixes\r\n\r\n* Continued adding crypto stuff\r\n\r\n* Added crypto overview\r\n\r\n* Added test fixes\r\n\r\n* Added fixtures\r\n\r\n* Fixed tests\r\n\r\n* Fixed charting 
issue\r\n\r\n* Removed broken APIs\r\n\r\n* Final adjustments\r\n\r\n* Added test fixes\r\n\r\n* map get groups and get ycrv countries into old api\r\n\r\n* exposed econdb helper funcs\r\n\r\n* remove helpers\r\n\r\n* refactor search indices\r\n\r\n* linting\r\n\r\n* refactor arg currency\r\n\r\n* pylint from currency\r\n\r\n* Started switching crpyto ascending to ascend\r\n\r\n* Merging\r\n\r\n* Portfolio model arguements, params, and docstring\r\n\r\n* Refactored for etf commands (#2292)\r\n\r\n* Refactored for etf commands\r\n\r\n* Fixed tests\r\n\r\n* Added load command\r\n\r\n* Fixed menu\r\n\r\n* Portfolio logic fixes\r\n\r\n* Added econometrics (#2260)\r\n\r\n* Added econometrics\r\n\r\n* Fixed tests\r\n\r\n* Simplified API\r\n\r\n* Added test fixes\r\n\r\n* Added test csv\r\n\r\n* Allowed examples to be loaded\r\n\r\n* Fund refactor (#2291)\r\n\r\n* Fund refactor\r\n\r\n* Changed fund_name and fund to name\r\n\r\n* Changed ascending to ascend\r\n\r\n* Stock menu refactoring for easier API usage (#2194)\r\n\r\n* Stocks refactoring for easier API usage\r\n\r\n* Linting\r\n\r\n* Refactor newly added features\r\n\r\n* Linting\r\n\r\n* Fixing tests\r\n\r\n* Refactor common files used by stocks menu\r\n\r\n* Fixing flake8\r\n\r\n* Fix linting and tests\r\n\r\n* Linting\r\n\r\n* Fix flake8\r\n\r\n* refactor insider_data\r\n\r\n* refactor mentions\r\n\r\n* refactor watchlist\r\n\r\n* refactor sentiment\r\n\r\n* refactor sentiment\r\n\r\n* fix yahoofinance tests\r\n\r\n* refactor load and candle\r\n\r\n* refactor get_news and display_news\r\n\r\n* refactor stocks.ins.act\r\n\r\n* candle default matplotlib\r\n\r\n* fix yahoofinance_view tests\r\n\r\n* fix ark model tests\r\n\r\n* fix ark view tests\r\n\r\n* fix business insider model\r\n\r\n* fix business insider view\r\n\r\n* refactor csimarket model\r\n\r\n* fix tests csi market model\r\n\r\n* update dd controller\r\n\r\n* fix get suppliers tests\r\n\r\n* fix dd controller tests\r\n\r\n* fix finhub tests\r\n\r\n* fix finviz tests\r\n\r\n* fix fmp tests\r\n\r\n* fix marketwatch tests\r\n\r\n* corrected argument keywords in test_bt_model\r\n\r\n* corrected argument keywords in test_bt_view\r\n\r\n* refactor fa controller\r\n\r\n* refactor marketwatch view\r\n\r\n* refactor gov controller\r\n\r\n* fix tests fa av\r\n\r\n* fix tests elect\r\n\r\n* fix dcf tests\r\n\r\n* fix polygon tests\r\n\r\n* fix fmp tests\r\n\r\n* fix quiverquant tests\r\n\r\n* fix yahoofinance fa tests\r\n\r\n* fix more fa tests\r\n\r\n* fix insider tests\r\n\r\n* fix more tests\r\n\r\n* fix more tests\r\n\r\n* fix options tests\r\n\r\n* fix stock gov tests\r\n\r\n* fix tests test_ba_controller\r\n\r\n* fix tests for test_finviz_compare_model.py\r\n\r\n* fixed 2 tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fix final tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* Fix tests\r\n\r\n* black\r\n\r\n* forgot to black tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* flakefix\r\n\r\n* Tests + code : Stocks / Discovery\r\n\r\n* fix tests\r\n\r\n* added recorder\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* black\r\n\r\n* black\r\n\r\n* remove unused imports\r\n\r\n* refactor display raw\r\n\r\n* sia dicts fix\r\n\r\n* pylint\r\n\r\n* linting\r\n\r\n* remove dangerous default\r\n\r\n* fix tests\r\n\r\n* fix beta model test\r\n\r\n* 
black\r\n\r\n* skip screener qa test\r\n\r\n* change sector path to sectors\r\n\r\n* update tests readme\r\n\r\n* fix metric defaults\r\n\r\n* black\r\n\r\n* substitute lost ticker\r\n\r\n* defaults cpic\r\n\r\n* another round on sia\r\n\r\n* refactor cramer\r\n\r\n* reduce default tweets on sentiment\r\n\r\n* refactor yf hist, corr, volume\r\n\r\n* arkorders default\r\n\r\n* refactor income, balance, cashflow\r\n\r\n* refacto scorr, screener, getfinnhub\r\n\r\n* refactor stockgrid\r\n\r\n* ibkr refactor\r\n\r\n* another round on stockgrid\r\n\r\n* add dividens end point\r\n\r\n* refactor discovery endpoints\r\n\r\n* update docstrings with similar input\r\n\r\n* refactor messages\r\n\r\n* refactor ba\r\n\r\n* refactor regioons\r\n\r\n* refactor twitter sentiment\r\n\r\n* refactor hist\r\n\r\n* refactor regions\r\n\r\n* give default to timeframe\r\n\r\n* refactor bunch of defaults and arg names\r\n\r\n* remove leftover imports\r\n\r\n* refactor vwap\r\n\r\n* let tests run\r\n\r\n* fix tests\r\n\r\n* fix stock tests\r\n\r\n* fix stockanalysis tests\r\n\r\n* flake\r\n\r\n* MYPY\r\n\r\n* Made important changes\r\n\r\n* added fixes\r\n\r\n* Fixed big issue\r\n\r\n* Added fixes to tests\r\n\r\n* fix qa tests\r\n\r\n* fix tests\r\n\r\n* fix 1 more test\r\n\r\n* last stocks failing\r\n\r\n* fix crypto test\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: montezdesousa \r\nCo-authored-by: hjoaquim \r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\n\r\n* fix portfolio tests\r\n\r\n* change period to window\r\n\r\n* update ca docstrings\r\n\r\n* refactor get_similar_companies func\r\n\r\n* Fixed\r\n\r\n* Update CI\r\n\r\n* Update CI 2\r\n\r\n* Update CI 3\r\n\r\n* Update dependencies\r\n\r\nCo-authored-by: colin99d \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: montezdesousa \r\nCo-authored-by: James Simmons \r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com>\r\nCo-authored-by: hjoaquim ", "code": "def get_granger_causality(dependent_series, independent_series, lags):\n \n granger_set = pd.concat([dependent_series, independent_series], axis=1)\n\n granger = grangercausalitytests(granger_set, [lags], verbose=False)\n\n return granger\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 28, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 63, "n_identifiers": 11, "random_cut": "def get_granger_causality(dependent_series, independent_series, lags):\n \n granger_set = pd.concat([dependent_series, independent_series], axis=1)\n\n granger = grangercausalitytests(granger_set, [lags], verbose=False)\n\n return granger\n\n", "d_id": 85240, "documentation": { "docstring": "Calculate granger tests\n\n Parameters\n ----------\n dependent_series: Series\n The series you want to test Granger Causality for.\n independent_series: Series\n The series that you want to test whether it Granger-causes time_series_y\n lags : int\n The amount of lags for the Granger test. 
By default, this is set to 3.\n ", "n_words": 47, "vocab_size": 36, "n_whitespaces": 86, "language": "en" } }, { "id": 206624, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/decorators.py", "file_name": "decorators.py", "fun_name": "_multi_decorate", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _multi_decorate(decorators, method):\n \n if hasattr(decorators, \"__iter__\"):\n # Apply a list/tuple of decorators if 'decorators' is one. Decorator\n # functions are applied so that the call order is the same as the\n # order in which they appear in the iterable.\n decorators = decorators[::-1]\n else:\n decorators = [decorators]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 91, "n_words": 47, "vocab_size": 35, "complexity": 3, "nloc": 10, "token_counts": 52, "n_ast_nodes": 58, "n_identifiers": 4, "random_cut": "def _multi_decorate(decorators, method):\n \n if hasattr(decorators, \"__iter__\")", "d_id": 51592, "documentation": { "docstring": "\n Decorate `method` with one or more function decorators. `decorators` can be\n a single decorator or an iterable of decorators.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 29, "language": "en" } }, { "id": 23192, "commit_id": "9f62b610dea6161627200ed85d92e19b1923279a", "repo": "PaddleOCR", "path": "ppocr/data/imaug/fce_targets.py", "file_name": "fce_targets.py", "fun_name": "generate_level_targets", "commit_message": "add fcenet", "code": "def generate_level_targets(self, img_size, text_polys, ignore_polys):\n \n h, w = img_size\n lv_size_divs = self.level_size_divisors\n lv_proportion_range = self.level_proportion_range\n lv_text_polys = [[] for i in range(len(lv_size_divs))]\n lv_ignore_polys = [[] for i in range(len(lv_size_divs))]\n level_maps = []\n for poly in text_polys:\n # assert len(poly) == 1\n # text_instance = [[poly[i], poly[i + 1]]\n # for i in range(0, len(poly), 2)]\n polygon = np.array(poly, dtype=np.int).reshape((1, -1, 2))\n _, _, box_w, box_h = cv2.boundingRect(polygon)\n proportion = max(box_h, box_w) / (h + 1e-8)\n\n for ind, proportion_range in enumerate(lv_proportion_range):\n if proportion_range[0] < proportion < proportion_range[1]:\n lv_text_polys[ind].append(poly / lv_size_divs[ind])\n\n for ignore_poly in ignore_polys:\n # assert len(ignore_poly) == 1\n # text_instance = [[ignore_poly[i], ignore_poly[i + 1]]\n # for i in range(0, len(ignore_poly), 2)]\n polygon = np.array(ignore_poly, dtype=np.int).reshape((1, -1, 2))\n _, _, box_w, box_h = cv2.boundingRect(polygon)\n proportion = max(box_h, box_w) / (h + 1e-8)\n\n for ind, proportion_range in enumerate(lv_proportion_range):\n if proportion_range[0] < proportion < proportion_range[1]:\n lv_ignore_polys[ind].append(ignore_poly / lv_size_divs[ind])\n\n for ind, size_divisor in enumerate(lv_size_divs):\n current_level_maps = []\n level_img_size = (h // size_divisor, w // size_divisor)\n\n text_region = self.generate_text_region_mask(\n level_img_size, lv_text_polys[ind])[None]\n current_level_maps.append(text_region)\n\n center_region = self.generate_center_region_mask(\n level_img_size, lv_text_polys[ind])[None]\n current_level_maps.append(center_region)\n\n effective_mask = self.generate_effective_mask(\n level_img_size, lv_ignore_polys[ind])[None]\n current_level_maps.append(effective_mask)\n\n fourier_real_map, fourier_image_maps = self.generate_fourier_maps(\n level_img_size, 
lv_text_polys[ind])\n current_level_maps.append(fourier_real_map)\n current_level_maps.append(fourier_image_maps)\n\n level_maps.append(np.concatenate(current_level_maps))\n\n return level_maps\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 716, "n_words": 191, "vocab_size": 96, "complexity": 10, "nloc": 39, "token_counts": 384, "n_ast_nodes": 586, "n_identifiers": 49, "random_cut": "def generate_level_targets(self, img_size, text_polys, ignore_polys):\n \n h, w = img_size\n lv_size_divs = self.level_size_divisors\n lv_proportion_range = self.level_proportion_range\n lv_text_polys = [[] for i in range(len(lv_size_divs))]\n lv_ignore_polys = [[] for i in", "d_id": 4536, "documentation": { "docstring": "Generate ground truth target on each level.\n\n Args:\n img_size (list[int]): Shape of input image.\n text_polys (list[list[ndarray]]): A list of ground truth polygons.\n ignore_polys (list[list[ndarray]]): A list of ignored polygons.\n Returns:\n level_maps (list(ndarray)): A list of ground target on each level.\n ", "n_words": 40, "vocab_size": 24, "n_whitespaces": 105, "language": "en" } }, { "id": 67961, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/stock_balance.py", "file_name": "stock_balance.py", "fun_name": "get_reserved_qty", "commit_message": "style: format code with black", "code": "def get_reserved_qty(item_code, warehouse):\n\treserved_qty = frappe.db.sql(\n\t\t,\n\t\t(item_code, warehouse, item_code, warehouse),\n\t)\n\n\treturn flt(reserved_qty[0][0]) if reserved_qty else 0\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 12, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 46, "token_counts": 43, "n_ast_nodes": 62, "n_identifiers": 8, "random_cut": "def get_reserved_qty(item_code, warehouse):\n\treserved_qty = frappe.db.sql(\n\t\t,\n\t\t(item_code, warehouse, item_code, warehouse),\n\t)\n\n\treturn flt(", "d_id": 14684, "documentation": { "docstring": "\n\t\tselect\n\t\t\tsum(dnpi_qty * ((so_item_qty - so_item_delivered_qty) / so_item_qty))\n\t\tfrom\n\t\t\t(\n\t\t\t\t(select\n\t\t\t\t\tqty as dnpi_qty,\n\t\t\t\t\t(\n\t\t\t\t\t\tselect qty from `tabSales Order Item`\n\t\t\t\t\t\twhere name = dnpi.parent_detail_docname\n\t\t\t\t\t\tand (delivered_by_supplier is null or delivered_by_supplier = 0)\n\t\t\t\t\t) as so_item_qty,\n\t\t\t\t\t(\n\t\t\t\t\t\tselect delivered_qty from `tabSales Order Item`\n\t\t\t\t\t\twhere name = dnpi.parent_detail_docname\n\t\t\t\t\t\tand delivered_by_supplier = 0\n\t\t\t\t\t) as so_item_delivered_qty,\n\t\t\t\t\tparent, name\n\t\t\t\tfrom\n\t\t\t\t(\n\t\t\t\t\tselect qty, parent_detail_docname, parent, name\n\t\t\t\t\tfrom `tabPacked Item` dnpi_in\n\t\t\t\t\twhere item_code = %s and warehouse = %s\n\t\t\t\t\tand parenttype=\"Sales Order\"\n\t\t\t\t\tand item_code != parent_item\n\t\t\t\t\tand exists (select * from `tabSales Order` so\n\t\t\t\t\twhere name = dnpi_in.parent and docstatus = 1 and status != 'Closed')\n\t\t\t\t) dnpi)\n\t\t\tunion\n\t\t\t\t(select stock_qty as dnpi_qty, qty as so_item_qty,\n\t\t\t\t\tdelivered_qty as so_item_delivered_qty, parent, name\n\t\t\t\tfrom `tabSales Order Item` so_item\n\t\t\t\twhere item_code = %s and warehouse = %s\n\t\t\t\tand (so_item.delivered_by_supplier is null or so_item.delivered_by_supplier = 0)\n\t\t\t\tand exists(select * from `tabSales 
Order` so\n\t\t\t\t\twhere so.name = so_item.parent and so.docstatus = 1\n\t\t\t\t\tand so.status != 'Closed'))\n\t\t\t) tab\n\t\twhere\n\t\t\tso_item_qty >= so_item_delivered_qty\n\t", "n_words": 163, "vocab_size": 69, "n_whitespaces": 124, "language": "en" } }, { "id": 108518, "commit_id": "032316bc6c7798fca6c82de24167c975f237687f", "repo": "matplotlib", "path": "lib/matplotlib/pyplot.py", "file_name": "pyplot.py", "fun_name": "cool", "commit_message": "Cleanup documentation generation for pyplot\n\n- remove the awkward `pyplot.plotting()` function, which only served\n as a namespace to take up the docs for pyplot and output them via\n `.. autofunction`\n- Instead generate the same information using `.. autosummary::`. We\n have to list the desired methods here explicitly. I've added a test\n that these are the same as previously auto-generated in the\n `plotting()` docstring. If we change anything in pyplot, we'll be\n notified through the test failure that we have to adapt the\n autosummary list.\n- Removed the docstring generation logic\n `_setup_pyplot_info_docstrings()`. Apart from generating the\n `plotting()` docstring, this added docstrings to the pyplot colormap\n setters. Instead, we now add these docstrings directly via\n boilerplate.py\n\nCo-authored-by: Elliott Sales de Andrade ", "code": "def cool():\n \n set_cmap('cool')\n\n\n# Autogenerated by boilerplate.py. Do not edit as changes will be lost.", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 2, "token_counts": 9, "n_ast_nodes": 22, "n_identifiers": 2, "random_cut": "def cool():\n \n set_cmap('cool", "d_id": 23229, "documentation": { "docstring": "\n Set the colormap to 'cool'.\n\n This changes the default colormap as well as the colormap of the current\n image if there is one. 
See ``help(colormaps)`` for more information.\n ", "n_words": 28, "vocab_size": 22, "n_whitespaces": 41, "language": "en" } }, { "id": 209158, "commit_id": "6d7184e8bec5102dfa66bcc10432a30a7e0dcf3a", "repo": "scapy", "path": "scapy/packet.py", "file_name": "packet.py", "fun_name": "add_parent", "commit_message": "Add parent field to Packet (#3607)\n\nCo-authored-by: Sergey Matsievskiy ", "code": "def add_parent(self, parent):\n # type: (Packet) -> None\n \n self.parent = parent\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 24, "n_identifiers": 3, "random_cut": "def add_parent(self, parent):\n ", "d_id": 52627, "documentation": { "docstring": "Set packet parent.\n When packet is an element in PacketListField, parent field would\n point to the list owner packet.", "n_words": 19, "vocab_size": 18, "n_whitespaces": 32, "language": "en" } }, { "id": 208534, "commit_id": "23276ac4770f380ce1d5808950dd412a35594af1", "repo": "ipython", "path": "IPython/testing/tools.py", "file_name": "tools.py", "fun_name": "make_tempfile", "commit_message": "Fix EncodingWarning on Python 3.10", "code": "def make_tempfile(name):\n \n open(name, 'w', encoding='utf-8').close()\n try:\n yield\n finally:\n os.unlink(name)\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "def make_tempfile(name):\n \n open(name, 'w', encoding='utf-8", "d_id": 52377, "documentation": { "docstring": " Create an empty, named, temporary file for the duration of the context.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 16, "language": "en" } }, { "id": 21319, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/cachecontrol/controller.py", "file_name": "controller.py", "fun_name": "update_cached_response", "commit_message": "Vendor in pip 22.1.2", "code": "def update_cached_response(self, request, response):\n \n cache_url = self.cache_url(request.url)\n\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\n\n if not cached_response:\n # we didn't have a cached response\n return response\n\n # Lets update our headers with the headers from the new request:\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\n #\n # The server isn't supposed to send headers that would make\n # the cached body invalid. But... 
just in case, we'll be sure\n # to strip out ones we know that might be problmatic due to\n # typical assumptions.\n excluded_headers = [\"content-length\"]\n\n cached_response.headers.update(\n dict(\n (k, v)\n for k, v in response.headers.items()\n if k.lower() not in excluded_headers\n )\n )\n\n # we want a 200 b/c we have content via the cache\n cached_response.status = 200\n\n # update our cache\n self._cache_set(cache_url, request, cached_response)\n\n return cached_response\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 342, "n_words": 120, "vocab_size": 79, "complexity": 4, "nloc": 16, "token_counts": 103, "n_ast_nodes": 172, "n_identifiers": 21, "random_cut": "def update_cached_response(self, request, response):\n \n cache_url = self.cache_url(request.url)\n\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\n\n if not cached_response:\n # we didn't have a cached response\n return response\n\n # Lets update our headers with the headers from the new request:\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\n #\n # The server isn't supposed to send headers that would make\n # the cached body invalid. But... just in case, we'll be sure\n # to strip out ones we know that might be problmatic due to\n # typical assumptions.\n excluded_headers = [\"content-length\"]\n\n cached_response.headers.update(\n dict(\n (k, v)\n for k, v in response.headers.items()\n if k.lower() not in excluded_headers\n )\n )\n\n # we want a 200 b/c we have content via the cache\n cached_response.status = ", "d_id": 3761, "documentation": { "docstring": "On a 304 we will get a new set of headers that we want to\n update our cached value with, assuming we have one.\n\n This should only ever be called when we've sent an ETag and\n gotten a 304 as the response.\n ", "n_words": 42, "vocab_size": 37, "n_whitespaces": 70, "language": "en" } }, { "id": 9099, "commit_id": "db307ffb12d6ba1f8eaeeafd29ee6d4a3fd6fa97", "repo": "insightface", "path": "parsing/dml_csr/loss/lovasz_softmax.py", "file_name": "lovasz_softmax.py", "fun_name": "binary_xloss", "commit_message": "Create lovasz_softmax.py", "code": "def binary_xloss(logits, labels, ignore=None):\n \n logits, labels = flatten_binary_scores(logits, labels, ignore)\n loss = StableBCELoss()(logits, Variable(labels.float()))\n return loss\n\n\n# --------------------------- MULTICLASS LOSSES ---------------------------\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 32, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 43, "n_ast_nodes": 69, "n_identifiers": 9, "random_cut": "def binary_xloss(logits, labels, ignore=None):\n \n l", "d_id": 1541, "documentation": { "docstring": "\n Binary Cross entropy loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n ignore: void class id\n ", "n_words": 33, "vocab_size": 30, "n_whitespaces": 55, "language": "en" } }, { "id": 283198, "commit_id": "ab4de1dd70fba866930150e440a03e461a6ca6a8", "repo": "OpenBBTerminal", "path": "build/pyinstaller/user_agent/base.py", "file_name": "base.py", "fun_name": "generate_navigator_js", "commit_message": "Create a packaged app bundle with Pyinstaller (#1525)\n\n* Add dashboard widget assets\r\n\r\n* Add ipywidgets and 
ipyflex to project\r\n\r\n* Add currencies dashboard notebook\r\n\r\n* Update docs and docstrings\r\n\r\n* Add pyinstaller to project deps\r\n\r\n* Add pyinstaller artifacts to gitignore\r\n\r\n* Fix linter errors in terminal.py\r\n\r\n* Update cspell hook and action with a pyinstaller specific word\r\n\r\n* Add pyinstaller specfile and artifacts\r\n\r\n* Add splashscreen image\r\n\r\n* Add app icon\r\n\r\n* adding splash screen support to terminal.spec and terminal.py\r\n\r\n* Restore the conda env build files\r\n\r\n* Sync deps\r\n\r\n* Add border to the splashscreen image\r\n\r\n* Clean up terminal launcher\r\n\r\n* Add support for default feature flags in packages apps\r\n\r\n* Fix types and linting\r\n\r\n* Add splashscreen management to app bootup\r\n\r\n* Check prediction feature flag when entering crypto/pred\r\n\r\n* Update pyinstaller spec file\r\n\r\n* fix .spec file to work for splash and icon - removed the \"..\"\r\n\r\n* Allows to export when using installer (#1568)\r\n\r\n* fix export for packaged apps\r\n\r\n* fix filename\r\n\r\n* Git : replace commit_hash when it is set in config_terminal\r\n\r\n* Add update of the git commit hash in gtff default during build\r\n\r\n* Add packaged app name and feature flag to logs\r\n\r\n* Add platform specific icon assignment\r\n\r\n* Add macOS build assets\r\n\r\n* Add tensorflow to hidden imports\r\n\r\n* Move LOGGING_COMMIT_HASH to gtff\r\n\r\n* Adding files/folders needed to .spec and pyinstaller folder. This will make certain commands work again.\r\n\r\n* Linting\r\n\r\n* Workflow : ignore ./build/pyinstaller from codespell\r\n\r\n* Workflow : exclude ./build/pyinstaller from flake8\r\n\r\n* Poetry + Workflow : add types-six\r\n\r\n* Pyinstaller : remove property_cached, user_agent and vaderSentiment\r\n\r\n* Revert \"Pyinstaller : remove property_cached, user_agent and vaderSentiment\"\r\n\r\nThis reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703.\r\n\r\n* Clean up local paths in specfile\r\n\r\n* Validate deps have correct Jinja version (they do)\r\n\r\n* Fix logging commit hash to be set correctly for the logger to see it\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: Chavithra PARANA ", "code": "def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None):\n \n\n config = generate_navigator(\n os=os, navigator=navigator, platform=platform, device_type=device_type\n )\n return {\n \"appCodeName\": config[\"app_code_name\"],\n \"appName\": config[\"app_name\"],\n \"appVersion\": config[\"app_version\"],\n \"platform\": config[\"platform\"],\n \"userAgent\": config[\"user_agent\"],\n \"oscpu\": config[\"oscpu\"],\n \"product\": config[\"product\"],\n \"productSub\": config[\"product_sub\"],\n \"vendor\": config[\"vendor\"],\n \"vendorSub\": config[\"vendor_sub\"],\n \"buildID\": config[\"build_id\"],\n }\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 137, "n_words": 38, "vocab_size": 38, "complexity": 1, "nloc": 17, "token_counts": 120, "n_ast_nodes": 207, "n_identifiers": 7, "random_cut": "def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None):\n \n\n config = generate_navigator(\n os=os, navigator=navigator, platform=platform, device_type=device_type\n )\n return {\n \"appCodeName\": config[\"app_code_name\"],\n \"appName\": config[\"app_name\"],\n \"appVersion\": config[\"", "d_id": 84464, "documentation": { "docstring": 
"\n Generates web navigator's config with keys corresponding\n to keys of `windows.navigator` JavaScript object.\n\n :param os: limit list of oses for generation\n :type os: string or list/tuple or None\n :param navigator: limit list of browser engines for generation\n :type navigator: string or list/tuple or None\n :param device_type: limit possible oses by device type\n :type device_type: list/tuple or None, possible values:\n \"desktop\", \"smartphone\", \"tablet\", \"all\"\n :return: User-Agent config\n :rtype: dict with keys (TODO)\n :raises InvalidOption: if could not generate user-agent for\n any combination of allowed oses and navigators\n :raise InvalidOption: if any of passed options is invalid\n ", "n_words": 95, "vocab_size": 60, "n_whitespaces": 149, "language": "en" } }, { "id": 49719, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/cn_clip/clip/bert_tokenizer.py", "file_name": "bert_tokenizer.py", "fun_name": "printable_text", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def printable_text(text):\n \n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 212, "n_words": 79, "vocab_size": 50, "complexity": 7, "nloc": 17, "token_counts": 103, "n_ast_nodes": 179, "n_identifiers": 13, "random_cut": "def printable_text(text):\n \n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n", "d_id": 9893, "documentation": { "docstring": "Returns text encoded in a way suitable for print or `tf.logging`.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 150599, "commit_id": "01232e9a1f8e28e3611e38af3816edb026600767", "repo": "freqtrade", "path": "freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py", "file_name": "RLPrediction_env_v2.py", "fun_name": "is_tradesignal", "commit_message": "callback function and TDQN model added", "code": "def is_tradesignal(self, action):\n # trade signal \n \n return not ((action == Actions.Neutral.value and self._position == Positions.Neutral)\n or (action == Actions.Short.value and self._position == Positions.Short)\n or (action == Actions.Long.value and self._position == Positions.Long))\n ", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 95, "n_words": 31, "vocab_size": 20, "complexity": 6, "nloc": 4, "token_counts": 65, "n_ast_nodes": 102, "n_identifiers": 10, "random_cut": "def is_tradesignal(self, action):\n # trade signal \n \n return not ((action == Actions.Neutral.value and self._position == Positions.Neutral)\n ", "d_id": 34797, "documentation": { "docstring": "\n not trade signal is :\n Action: 
Neutral, position: Neutral -> Nothing \n Action: Long, position: Long -> Hold Long\n Action: Short, position: Short -> Hold Short\n ", "n_words": 25, "vocab_size": 16, "n_whitespaces": 62, "language": "en" } }, { "id": 217880, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/server.py", "file_name": "server.py", "fun_name": "log_message", "commit_message": "add python 3.10.4 for windows", "code": "def log_message(self, format, *args):\n \n\n sys.stderr.write(\"%s - - [%s] %s\\n\" %\n (self.address_string(),\n self.log_date_time_string(),\n format%args))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 101, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 37, "n_ast_nodes": 62, "n_identifiers": 9, "random_cut": "def log_message(self, format, *args):\n \n\n sys.stderr.write(\"%s - - [%s] %s\\n\" %\n (self.address_string(),\n self.log_date_time_string(),\n ", "d_id": 54986, "documentation": { "docstring": "Log an arbitrary message.\n\n This is used by all other logging functions. Override\n it if you have specific logging wishes.\n\n The first argument, FORMAT, is a format string for the\n message to be logged. If the format string contains\n any % escapes requiring parameters, they should be\n specified as subsequent arguments (it's just like\n printf!).\n\n The client ip and current date/time are prefixed to\n every message.\n\n ", "n_words": 66, "vocab_size": 57, "n_whitespaces": 138, "language": "en" } }, { "id": 335005, "commit_id": "c3cc8eb23c8095217388d350409b454ea396c12b", "repo": "diffusers", "path": "src/diffusers/utils/logging.py", "file_name": "logging.py", "fun_name": "warning_advice", "commit_message": "changes comments and env vars in `utils/logging`\nremoves mentions of 🤗Transformers with 🤗Diffusers equivalent.", "code": "def warning_advice(self, *args, **kwargs):\n \n no_advisory_warnings = os.getenv(\"DIFFUSERS_NO_ADVISORY_WARNINGS\", False)\n if no_advisory_warnings:\n return\n self.warning(*args, **kwargs)\n\n\nlogging.Logger.warning_advice = warning_advice\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 34, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 36, "n_ast_nodes": 72, "n_identifiers": 10, "random_cut": "def warning_advice(self, *args, **kwargs):\n \n no_advisory_warnings = os.getenv(\"DIFFUSERS_NO_ADVISORY_WARNINGS\", False)\n if no_advisory_warnings:\n return\n self.warning(*args, **kwargs)\n\n\nlogging.Logger.warning_advice = warning_advice\n", "d_id": 120698, "documentation": { "docstring": "\n This method is identical to `logger.warninging()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this\n warning will not be printed\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 29, "language": "en" } }, { "id": 196395, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/repmatrix.py", "file_name": "repmatrix.py", "fun_name": "zip_row_op", "commit_message": "Moved imports to higher level", "code": "def zip_row_op(self, i, k, f):\n \n for j in range(self.cols):\n self[i, j] = f(self[i, j], self[k, j])\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 41, "n_words": 16, "vocab_size": 16, "complexity": 2, 
"nloc": 3, "token_counts": 45, "n_ast_nodes": 64, "n_identifiers": 8, "random_cut": "def zip_row_op(self, i, k, f):\n \n for j in range(self.cols):\n self[i, j] = f", "d_id": 47895, "documentation": { "docstring": "In-place operation on row ``i`` using two-arg functor whose args are\n interpreted as ``(self[i, j], self[k, j])``.\n\n Examples\n ========\n\n >>> from sympy import eye\n >>> M = eye(3)\n >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M\n Matrix([\n [1, 0, 0],\n [2, 1, 0],\n [0, 0, 1]])\n\n See Also\n ========\n row\n row_op\n col_op\n\n ", "n_words": 54, "vocab_size": 46, "n_whitespaces": 166, "language": "en" } }, { "id": 199700, "commit_id": "d1d46df73ebaad94089847558d00a8b7269f554d", "repo": "sympy", "path": "sympy/polys/orthopolys.py", "file_name": "orthopolys.py", "fun_name": "legendre_poly", "commit_message": "Run orthopolys and appellseqs through a common interface\n\nIncluding unifying the two Chebyshev generators into one function.\nThere are also two kinds of Hermite polynomials, and they too share the\nsame recurrence, but the second type He_n(x) (aka the probabilist,\nreduced or small polynomials) will not be added here.", "code": "def legendre_poly(n, x=None, polys=False):\n r\n return named_poly(n, dup_legendre, QQ, \"Legendre polynomial\", (x,), polys)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 13, "token_counts": 33, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def legendre_poly(n, x=None, polys=False):\n r\n return named_poly(n, dup_legendre, QQ, \"Legendre polynomial\", (x,), polys)\n\n", "d_id": 49356, "documentation": { "docstring": "Generates the Legendre polynomial `P_n(x)`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 63, "language": "en" } }, { "id": 322189, "commit_id": "621357338437ee420eabbbf5ab19065bc85e73a5", "repo": "PaddleNLP", "path": "paddlenlp/taskflow/knowledge_mining.py", "file_name": "knowledge_mining.py", "fun_name": "_concat_short_text_reuslts", "commit_message": "Update neural search readme and Add Paddle Serving Support (#1558)\n\n* add recall inference similarity\r\n\r\n* update examples\r\n\r\n* updatea readme\r\n\r\n* update dir name\r\n\r\n* update neural search readme\r\n\r\n* update milvus readme\r\n\r\n* update domain adaptive pretraining readme\r\n\r\n* fix the mistakes\r\n\r\n* update readme\r\n\r\n* add recall Paddle Serving Support\r\n\r\n* update readme\r\n\r\n* update readme and format the code\r\n\r\n* reformat the files\r\n\r\n* move the files\r\n\r\n* reformat the code\r\n\r\n* remove redundant code\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: tianxin ", "code": "def _concat_short_text_reuslts(self, input_texts, results):\n \n long_text_lens = [len(text) for text in input_texts]\n concat_results = []\n single_results = {}\n count = 0\n for text in input_texts:\n text_len = len(text)\n while True:\n if len(single_results) == 0 or len(single_results[\n \"text\"]) < text_len:\n if len(single_results) == 0:\n single_results = copy.deepcopy(results[count])\n else:\n single_results[\"text\"] += results[count][\"text\"]\n single_results[\"items\"].extend(results[count][\"items\"])\n count += 1\n elif len(single_results[\"text\"]) == text_len:\n 
concat_results.append(single_results)\n single_results = {}\n break\n else:\n raise Exception(\n \"The length of input text and raw text is not equal.\")\n for result in concat_results:\n pred_words = result['items']\n pred_words = self._reset_offset(pred_words)\n result['items'] = pred_words\n return concat_results\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 495, "n_words": 91, "vocab_size": 59, "complexity": 9, "nloc": 28, "token_counts": 172, "n_ast_nodes": 289, "n_identifiers": 19, "random_cut": "def _concat_short_text_reuslts(self, input_texts, results):\n \n long_text_lens = [len(text) for text in input_texts]\n concat_results = []\n single_results = {}\n count = 0\n for text in input_texts:\n text_len = len(text)\n while True:\n if len(single_results) == 0 or len(single_results[\n \"text\"]) < text_len:\n if len(single_results) == 0:\n single_results = copy.deepcopy(results[count])\n else:\n single_results[\"text\"] += results[count][\"text\"]\n single_results[\"items\"].extend(results[count][\"items\"])\n count += 1\n elif len(single_results[\"text\"]) == text_len:\n concat_results.append(single_results)\n single_results = {}\n break\n else:\n raise Exception(\n \"The length of input text and raw text is not equal.\")\n ", "d_id": 118084, "documentation": { "docstring": "\n Concat the model output of short texts to the total result of long text.\n ", "n_words": 14, "vocab_size": 12, "n_whitespaces": 29, "language": "en" } }, { "id": 211216, "commit_id": "b4727677751081b257c6fa23c3c124ab9e5a32a1", "repo": "PaddleDetection", "path": "ppdet/modeling/heads/s2anet_head.py", "file_name": "s2anet_head.py", "fun_name": "get_pred", "commit_message": "refactor s2anet (#6604)\n\n* refactor s2anet to support batch_size > 1\r\n\r\n* fix problem of inference\r\n\r\n* support batch_size > 1 for training\r\n\r\n* fix empty results\r\n\r\n* fix dota eval\r\n\r\n* fix configs of s2anet_head\r\n\r\n* modify s2anet_spine_1x to 73 mAP", "code": "def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):\n \n origin_shape = paddle.floor(im_shape / scale_factor + 0.5)\n\n origin_shape_list = []\n scale_factor_list = []\n # scale_factor: scale_y, scale_x\n for i in range(bbox_num.shape[0]):\n expand_shape = paddle.expand(origin_shape[i:i + 1, :],\n [bbox_num[i], 2])\n scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]\n scale = paddle.concat([\n scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,\n scale_y\n ])\n expand_scale = paddle.expand(scale, [bbox_num[i], 8])\n origin_shape_list.append(expand_shape)\n scale_factor_list.append(expand_scale)\n\n origin_shape_list = paddle.concat(origin_shape_list)\n scale_factor_list = paddle.concat(scale_factor_list)\n\n # bboxes: [N, 10], label, score, bbox\n pred_label_score = bboxes[:, 0:2]\n pred_bbox = bboxes[:, 2:]\n\n # rescale bbox to original image\n pred_bbox = pred_bbox.reshape([-1, 8])\n scaled_bbox = pred_bbox / scale_factor_list\n origin_h = origin_shape_list[:, 0]\n origin_w = origin_shape_list[:, 1]\n\n bboxes = scaled_bbox\n zeros = paddle.zeros_like(origin_h)\n x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)\n y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)\n x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)\n y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)\n x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)\n 
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)\n x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)\n y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)\n pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)\n pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)\n return pred_result\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 541, "n_words": 191, "vocab_size": 103, "complexity": 2, "nloc": 36, "token_counts": 466, "n_ast_nodes": 682, "n_identifiers": 43, "random_cut": "def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):\n \n origin_shape = paddle.floor(im_shape / scale_factor + 0.5)\n\n origin_shape_list = []\n scale_factor_list = []\n # scale_factor: scale_y, scale_x\n for i in range(bbox_num.shape[0]):\n expand_shape = paddle.expand(origin_shape[i:i + 1, :],\n [b", "d_id": 53046, "documentation": { "docstring": "\n Rescale, clip and filter the bbox from the output of NMS to\n get final prediction.\n Args:\n bboxes(Tensor): bboxes [N, 10]\n bbox_num(Tensor): bbox_num\n im_shape(Tensor): [1 2]\n scale_factor(Tensor): [1 2]\n Returns:\n bbox_pred(Tensor): The output is the prediction with shape [N, 8]\n including labels, scores and bboxes. The size of\n bboxes are corresponding to the original image.\n ", "n_words": 54, "vocab_size": 42, "n_whitespaces": 205, "language": "en" } }, { "id": 64834, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/chart_of_accounts_importer/chart_of_accounts_importer.py", "file_name": "chart_of_accounts_importer.py", "fun_name": "unset_existing_data", "commit_message": "style: format code with black", "code": "def unset_existing_data(company):\n\tlinked = frappe.db.sql(\n\t\t,\n\t\tas_dict=True,\n\t)\n\n\t# remove accounts data from company\n\tupdate_values = {d.fieldname: \"\" for d in linked}\n\tfrappe.db.set_value(\"Company\", company, update_values, update_values)\n\n\t# remove accounts data from various doctypes\n\tfor doctype in [\n\t\t\"Account\",\n\t\t\"Party Account\",\n\t\t\"Mode of Payment Account\",\n\t\t\"Tax Withholding Account\",\n\t\t\"Sales Taxes and Charges Template\",\n\t\t\"Purchase Taxes and Charges Template\",\n\t]:\n\t\tfrappe.db.sql(\n\t\t\t.format(doctype) % (company) # nosec\n\t\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 65, "vocab_size": 48, "complexity": 3, "nloc": 19, "token_counts": 82, "n_ast_nodes": 140, "n_identifiers": 13, "random_cut": "def unset_existing_data(company):\n\tlinked = frappe.db.sql(\n\t\t,\n\t\tas_dict=True,\n\t)\n\n\t# remove accounts data from company\n\tupdate_values = {d.fieldname: \"\" for d in linked}\n\tfrappe.db.set_value(\"Company\", company, update_values, update_values)\n\n\t# remove accounts data from various doctypes\n\tfor doctype in [\n\t\t\"Account\",\n\t\t\"Party Account\",\n\t\t\"Mode of Payment Account\",\n\t\t\"Tax Withholding Account\",\n\t\t\"Sales Taxes and Charges Template\",\n\t\t\"Purchase Taxes and Charges Template\",\n\t]:\n\t\tfrappe.db.sql(", "d_id": 13733, "documentation": { "docstring": "select fieldname from tabDocField\n\t\twhere fieldtype=\"Link\" and options=\"Account\" and parent=\"Company\"delete from `tab{0}` where `company`=\"%s\"", "n_words": 14, "vocab_size": 11, 
"n_whitespaces": 12, "language": "en" } }, { "id": 244465, "commit_id": "924c381a78eb70cede198e042ef34e038e05c15a", "repo": "mmdetection", "path": "mmdet/models/dense_heads/base_dense_head.py", "file_name": "base_dense_head.py", "fun_name": "simple_test", "commit_message": "Modify RetinaNet model interface", "code": "def simple_test(self, feats, batch_img_metas, rescale=False):\n \n outs = self.forward(feats)\n results_list = self.get_results(\n *outs, batch_img_metas=batch_img_metas, rescale=rescale)\n return results_list\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 41, "n_ast_nodes": 63, "n_identifiers": 9, "random_cut": "def simple_test(self, feats, batch_img_metas, rescale=False):\n \n outs = self.forward(feats)\n results_list = self.get_results(\n *outs, batch_img_metas=batch_img_metas, rescale=rescale)\n return results_list\n", "d_id": 70390, "documentation": { "docstring": "Test function without test-time augmentation.\n\n Args:\n feats (tuple[torch.Tensor]): Multi-level features from the\n upstream network, each is a 4D-tensor.\n batch_img_metas (list[dict]): List of image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[obj:`InstanceData`]: Detection results of each image\n after the post process.\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance, )\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances, ).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n ", "n_words": 91, "vocab_size": 71, "n_whitespaces": 280, "language": "en" } }, { "id": 31086, "commit_id": "78c695eb624bc863ea165b6fb0a8850bfd9fcefa", "repo": "transformers", "path": "src/transformers/commands/pt_to_tf.py", "file_name": "pt_to_tf.py", "fun_name": "compare_pt_tf_models", "commit_message": "CLI: add stricter automatic checks to `pt-to-tf` (#17588)\n\n* Stricter pt-to-tf checks; Update docker image for related tests\r\n\r\n* check all attributes in the output\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input):\n \n pt_outputs = pt_model(**pt_input, output_hidden_states=True)\n tf_outputs = tf_model(**tf_input, output_hidden_states=True)\n\n # 1. All output attributes must be the same\n pt_out_attrs = set(pt_outputs.keys())\n tf_out_attrs = set(tf_outputs.keys())\n if pt_out_attrs != tf_out_attrs:\n raise ValueError(\n f\"The model outputs have different attributes, aborting. (Pytorch: {pt_out_attrs}, TensorFlow:\"\n f\" {tf_out_attrs})\"\n )\n\n # 2. For each output attribute, ALL values must be the same", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 167, "n_words": 59, "vocab_size": 48, "complexity": 2, "nloc": 12, "token_counts": 76, "n_ast_nodes": 119, "n_identifiers": 13, "random_cut": "def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input):\n \n pt_outputs = pt_model(**pt_input, output_hidden_states=True)\n tf_outputs = tf_model(**tf_input, output_hidden_states=True)\n\n # 1. 
All output attributes must be the same\n pt_out_attrs = set(pt_outputs.keys())\n ", "d_id": 5678, "documentation": { "docstring": "\n Compares the TensorFlow and PyTorch models, given their inputs, returning a tuple with the maximum observed\n difference and its source.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 42, "language": "en" } }, { "id": 165035, "commit_id": "08104e8c0a80579dfe3e984b32b35ddc94aafa01", "repo": "pandas", "path": "pandas/tests/io/parser/conftest.py", "file_name": "conftest.py", "fun_name": "_get_all_parser_float_precision_combinations", "commit_message": "CI: Add single_cpu build (#45995)", "code": "def _get_all_parser_float_precision_combinations():\n \n params = []\n ids = []\n for parser, parser_id in zip(_all_parsers, _all_parser_ids):\n if hasattr(parser, \"values\"):\n # Wrapped in pytest.param, get the actual parser back\n parser = parser.values[0]\n for precision in parser.float_precision_choices:\n # Re-wrap in pytest.param for pyarrow\n mark = pytest.mark.single_cpu if parser.engine == \"pyarrow\" else ()\n param = pytest.param((parser(), precision), marks=mark)\n params.append(param)\n ids.append(f\"{parser_id}-{precision}\")\n\n return {\"params\": params, \"ids\": ids}\n\n\n@pytest.fixture(\n params=_get_all_parser_float_precision_combinations()[\"params\"],\n ids=_get_all_parser_float_precision_combinations()[\"ids\"],\n)", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture(\n params=_get_all_parser_float_precision_combinations()[\"params\"],\n ids=_get_all_parser_float_precision_combinations()[\"ids\"],\n)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 174, "n_words": 64, "vocab_size": 51, "complexity": 5, "nloc": 12, "token_counts": 105, "n_ast_nodes": 223, "n_identifiers": 20, "random_cut": "def _get_all_parser_float_precision_combinations():\n \n params = []\n ids = []\n for parser, parser_id in zip(_all_parsers, _all_parser_ids):\n if hasattr(parser, \"values\"):\n # Wrapped in pytest.param, get the actual parser back\n parser = parser.values[0]\n for precision in parser.float_precision_choices:\n # Re-wrap in pytest.param for pyarrow\n mark = pytest.mark.single_cpu if parser.engine == \"pyarrow\" els", "d_id": 39627, "documentation": { "docstring": "\n Return all allowable parser and float precision\n combinations and corresponding ids.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 21, "language": "en" } }, { "id": 201071, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/app_loading/tests.py", "file_name": "tests.py", "fun_name": "test_egg3", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_egg3(self):\n \n egg_name = \"%s/omelet.egg\" % self.egg_dir\n with extend_sys_path(egg_name):\n with self.settings(INSTALLED_APPS=[\"omelet.app_with_models\"]):\n models_module = apps.get_app_config(\"app_with_models\").models_module\n self.assertIsNotNone(models_module)\n del apps.all_models[\"app_with_models\"]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 86, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 54, "n_ast_nodes": 101, "n_identifiers": 12, "random_cut": "def test_egg3(self):\n \n egg_name = \"%s/omelet.egg\" % self.egg_dir\n with extend_sys_path(egg_name):\n ", "d_id": 49846, "documentation": { "docstring": "Models module can be loaded from an app located under an 
egg's top-level package", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 216155, "commit_id": "0e69e2317dfa06f114c6dd63bc70c68fc81d19b1", "repo": "salt", "path": "tests/pytests/functional/modules/win_lgpo/test_audit_settings_module.py", "file_name": "test_audit_settings_module.py", "fun_name": "test_auditing_case_names", "commit_message": "Add and update tests", "code": "def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing):\n \n lgpo.set_computer_policy(setting_name, setting)\n result = lgpo.get_policy(setting_name, \"machine\")\n assert result == setting\n\n\n@pytest.mark.parametrize(\"setting\", [\"Enabled\", \"Disabled\"])", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"setting\", [\"Enabled\", \"Disabled\"])", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 29, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 82, "n_identifiers": 11, "random_cut": "def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing):\n \n lgpo.set_computer_policy(setting_name, setting)\n result = lgpo.get_policy(setting_name, \"machine\")\n assert result == setting\n\n\n@pytest.mark.parametrize(\"setting\", [\"Enabled\",", "d_id": 54437, "documentation": { "docstring": "\n Helper function to set an audit setting and assert that it was successful\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 20, "language": "en" } }, { "id": 75348, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/tests.py", "file_name": "tests.py", "fun_name": "test_get", "commit_message": "Reformat with black", "code": "def test_get(self):\n \n # Generate signature\n signature = generate_signature(self.image.id, \"fill-800x600\")\n\n # Get the image\n response = self.client.get(\n reverse(\n \"wagtailimages_serve\", args=(signature, self.image.id, \"fill-800x600\")\n )\n )\n\n # Check response\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.streaming)\n self.assertEqual(response[\"Content-Type\"], \"image/png\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 138, "n_words": 31, "vocab_size": 24, "complexity": 1, "nloc": 10, "token_counts": 74, "n_ast_nodes": 127, "n_identifiers": 15, "random_cut": "def test_get(self):\n \n # Generate signature\n signature = generate_signature(self.image.id, \"fill-800x600\")\n\n # Get the image\n response = self.client.get(\n reverse(\n \"wagtailimages_serve\", args=(signature, self.image.id, \"fill-800x600\")\n )\n )\n\n # Check response\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.streaming)\n self.asse", "d_id": 16397, "documentation": { "docstring": "\n Test a valid GET request to the view\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 2705, "commit_id": "e5bfcd8016813b1d253a72da5c5071b0e0965644", "repo": "PySyft", "path": "packages/syft/src/syft/lib/python/slice.py", "file_name": "slice.py", "fun_name": "_object2proto", "commit_message": "change[syft.lib.python] syft import absolute -> relative", "code": "def _object2proto(self) -> Slice_PB:\n \n slice_pb = Slice_PB()\n if self.start:\n slice_pb.start = self.start\n slice_pb.has_start = True\n\n if self.stop:\n slice_pb.stop = self.stop\n slice_pb.has_stop = True\n\n if self.step:\n slice_pb.step = 
self.step\n slice_pb.has_step = True\n\n slice_pb.id.CopyFrom(serialize(obj=self._id))\n\n return slice_pb\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 149, "n_words": 34, "vocab_size": 23, "complexity": 4, "nloc": 19, "token_counts": 81, "n_ast_nodes": 133, "n_identifiers": 15, "random_cut": "def _object2proto(self) -> Slice_PB:\n \n slice_pb ", "d_id": 341, "documentation": { "docstring": "\n Serialize the Slice object instance returning a protobuf.\n\n Returns:\n Slice_PB: returns a protobuf object class representing this Slice object.\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 53, "language": "en" } }, { "id": 101599, "commit_id": "98d01760e469fd2108eed8d0b0a1ba6297c3177c", "repo": "faceswap", "path": "plugins/extract/recognition/vgg_face2_keras.py", "file_name": "vgg_face2_keras.py", "fun_name": "__call__", "commit_message": "Overhaul sort:\n - Standardize image data reading and writing\n - Optimize loading (just one pass required)\n - Make all sort groups binnable (to greater or lesser results)\n - Add sort by pitch\n - Deprecate multiple options\n - linting, docs + locales", "code": "def __call__(self) -> List[Tuple[int, int]]:\n \n logger.info(\"Sorting face distances. Depending on your dataset this may take some time...\")\n if self._threshold:\n self._threshold = self._result_linkage[:, 2].max() * self._threshold\n result_order = self._seriation(self._result_linkage,\n self._num_predictions,\n self._num_predictions + self._num_predictions - 2)\n return result_order\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 158, "n_words": 36, "vocab_size": 32, "complexity": 2, "nloc": 19, "token_counts": 73, "n_ast_nodes": 115, "n_identifiers": 13, "random_cut": "def __call__(self) -> List[Tuple[int, int]]:\n \n logger.info(\"Sorting face distances. 
Depending on your dataset this may take some time...\")\n if self._threshold:\n self._threshold = self._result_linkage[:, 2].max() * self._threshold\n result_order = self._seriation(self._result_linkage,\n self._num_pre", "d_id": 21008, "documentation": { "docstring": " Process the linkages.\n\n Transforms a distance matrix into a sorted distance matrix according to the order implied\n by the hierarchical tree (dendrogram).\n\n Returns\n -------\n list:\n List of indices with the order implied by the hierarchical tree or list of tuples of\n (`index`, `bin`) if a binning threshold was provided\n ", "n_words": 49, "vocab_size": 34, "n_whitespaces": 114, "language": "en" } }, { "id": 211402, "commit_id": "b41194eaed10a01409451e4d3ea7f8b4812cdd23", "repo": "PaddleDetection", "path": "ppdet/modeling/post_process.py", "file_name": "post_process.py", "fun_name": "get_pred", "commit_message": "add flag skipping postprocess to support edgeboard hardware (#6719)\n\n* add flag skipping postprocess to support edgeboard hardware\r\n\r\n* add flag skipping postprocess to support edgeboard hardware\r\n\r\n* add flag skipping postprocess to support edgeboard hardware\r\n\r\n* add comment for the flag export_eb", "code": "def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):\n \n if self.export_eb:\n # enable rcnn models for edgeboard hw to skip the following postprocess.\n return bboxes, bboxes, bbox_num\n\n if not self.export_onnx:\n bboxes_list = []\n bbox_num_list = []\n id_start = 0\n fake_bboxes = paddle.to_tensor(\n np.array(\n [[0., 0.0, 0.0, 0.0, 1.0, 1.0]], dtype='float32'))\n fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\n\n # add fake bbox when output is empty for each batch\n for i in range(bbox_num.shape[0]):\n if bbox_num[i] == 0:\n bboxes_i = fake_bboxes\n bbox_num_i = fake_bbox_num\n else:\n bboxes_i = bboxes[id_start:id_start + bbox_num[i], :]\n bbox_num_i = bbox_num[i]\n id_start += bbox_num[i]\n bboxes_list.append(bboxes_i)\n bbox_num_list.append(bbox_num_i)\n bboxes = paddle.concat(bboxes_list)\n bbox_num = paddle.concat(bbox_num_list)\n\n origin_shape = paddle.floor(im_shape / scale_factor + 0.5)\n\n if not self.export_onnx:\n origin_shape_list = []\n scale_factor_list = []\n # scale_factor: scale_y, scale_x\n for i in range(bbox_num.shape[0]):\n expand_shape = paddle.expand(origin_shape[i:i + 1, :],\n [bbox_num[i], 2])\n scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]\n scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])\n expand_scale = paddle.expand(scale, [bbox_num[i], 4])\n origin_shape_list.append(expand_shape)\n scale_factor_list.append(expand_scale)\n\n self.origin_shape_list = paddle.concat(origin_shape_list)\n scale_factor_list = paddle.concat(scale_factor_list)\n\n else:\n # simplify the computation for bs=1 when exporting onnx\n scale_y, scale_x = scale_factor[0][0], scale_factor[0][1]\n scale = paddle.concat(\n [scale_x, scale_y, scale_x, scale_y]).unsqueeze(0)\n self.origin_shape_list = paddle.expand(origin_shape,\n [bbox_num[0], 2])\n scale_factor_list = paddle.expand(scale, [bbox_num[0], 4])\n\n # bboxes: [N, 6], label, score, bbox\n pred_label = bboxes[:, 0:1]\n pred_score = bboxes[:, 1:2]\n pred_bbox = bboxes[:, 2:]\n # rescale bbox to original image\n scaled_bbox = pred_bbox / scale_factor_list\n origin_h = self.origin_shape_list[:, 0]\n origin_w = self.origin_shape_list[:, 1]\n zeros = paddle.zeros_like(origin_h)\n # clip bbox to [0, original_size]\n x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)\n 
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)\n x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)\n y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)\n pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)\n # filter empty bbox\n keep_mask = nonempty_bbox(pred_bbox, return_mask=True)\n keep_mask = paddle.unsqueeze(keep_mask, [1])\n pred_label = paddle.where(keep_mask, pred_label,\n paddle.ones_like(pred_label) * -1)\n pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)\n return bboxes, pred_result, bbox_num\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1144, "n_words": 292, "vocab_size": 171, "complexity": 7, "nloc": 62, "token_counts": 651, "n_ast_nodes": 961, "n_identifiers": 58, "random_cut": "def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):\n \n if self.export_eb:\n # enable rcnn models for edgeboard hw to skip the following postprocess.\n return bboxes, bboxes, bbox_num\n\n if not self.export_onnx:\n bboxes_list = []\n bbox_num_list = []\n id_start = 0\n fake_bboxes = paddle.to_tensor(\n np.array(\n [[0., 0.0, 0.0, 0.0, 1.0, 1.0]], dtype='float32'))\n fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\n\n # add fake bbox when output is empty for each batch\n for i in range(bbox_num.shape[0]):\n if bbox_num[i] == 0:\n bboxes_i = fake_bboxes\n bbox_num_i = fake_bbox_num\n else:\n bboxes_i = bboxes[id_start:id_start + bbox_num[i], :]\n bbox_num_i = bbox_num[i]\n id_start += bbox_num[i]\n bboxes_list.append(bboxes_i)\n bbox_num_list.append(bbox_num_i)\n bboxes = paddle.concat(bboxes_list)\n bbox_num = paddle.concat(bbox_num_list)\n\n origin_shape = paddle.floor(im_shape / scale_factor + 0.5)\n\n if not self.export_onnx:\n origin_shape_list = []\n scale_factor_list = []\n # scale_factor: scale_y, scale_x\n for i in range(bbox_num.shape[0]):\n expand_shape = paddle.expand(origin_shape[i:i + 1, :],\n [bbox_num[i], 2])\n scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]\n scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])\n expand_scale = paddle.expand(scale, [bbox_num[i], 4])\n origin_shape_list.append(expand_shape)\n scale_factor_list.append(expand_scale)\n\n self.origin_shape_list = paddle.concat(origin_shape_list)\n scale_factor_list = paddle.concat(scale_factor_list)\n\n else:\n # simplify the computation for bs=1 when exporting onnx\n scale_y, scale_x = scale_factor[0][0], scale_factor[0][1]\n scale = paddle.concat(\n [scale_x, scale_y, scale_x, scale_y]).unsqueeze(0)\n self.origin_shape_list = paddle.expand(origin_shape,\n [bbox_num[0], 2])\n scale_factor_list = paddle.expand(scale, [bbox_num[0], 4])\n\n # bboxes: [N, 6], label, score, bbox\n pred_label = bboxes[:, 0:1]\n pred_score = bboxes[:, 1:2]\n pred_bbox = bboxes[:, 2:]\n # rescale bbox to original image\n scaled_bbox = pred_bbox / scale_factor_list\n origin_h = self.origin_shape_list[:, 0]\n origin_w = self.origin_shape_list[:, 1]\n zeros = paddle.zeros_like(origin_h)\n # clip bbox to [0, original_size]\n x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)\n y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)\n x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)\n y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)\n pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)\n # filter empty bbox\n keep_mask = 
nonempty_bbox(pred_bbox, return_mask=True)\n keep_mask = paddle.unsqueeze(keep_mask, [1])\n pred_label = paddle.where(keep_mask, pred_label,\n paddle.ones_like(pred_label) * -1)\n pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)\n retur", "d_id": 53084, "documentation": { "docstring": "\n Rescale, clip and filter the bbox from the output of NMS to \n get final prediction. \n\n Notes:\n Currently only support bs = 1.\n\n Args:\n bboxes (Tensor): The output bboxes with shape [N, 6] after decode\n and NMS, including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n pred_result (Tensor): The final prediction results with shape [N, 6]\n including labels, scores and bboxes.\n ", "n_words": 90, "vocab_size": 54, "n_whitespaces": 242, "language": "en" } }, { "id": 301046, "commit_id": "d8a580a90f8bf3206b31619493f4e653fceb3f4b", "repo": "core", "path": "homeassistant/components/nexia/entity.py", "file_name": "entity.py", "fun_name": "_signal_zone_update", "commit_message": "Update nexia to use asyncio (#72108)", "code": "def _signal_zone_update(self):\n \n async_dispatcher_send(self.hass, f\"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}\")\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 41, "n_identifiers": 7, "random_cut": "def _signal_zone_update(self):\n \n async_dispatcher_send(self.hass, f\"{SIGNAL_ZONE_UPD", "d_id": 99894, "documentation": { "docstring": "Signal a zone update.\n\n Whenever the underlying library does an action against\n a zone, the data for the zone is updated.\n\n Update a single zone.\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 53, "language": "en" } }, { "id": 259003, "commit_id": "5ad3421a5b5759ecfaaab93406592d988f5d487f", "repo": "scikit-learn", "path": "sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py", "file_name": "test_gradient_boosting.py", "fun_name": "test_asymmetric_error", "commit_message": "FEA add quantile HGBT (#21800)", "code": "def test_asymmetric_error(quantile):\n \n n_samples = 10_000\n rng = np.random.RandomState(42)\n # take care that X @ coef + intercept > 0\n X = np.concatenate(\n (\n np.abs(rng.randn(n_samples)[:, None]),\n -rng.randint(2, size=(n_samples, 1)),\n ),\n axis=1,\n )\n intercept = 1.23\n coef = np.array([0.5, -2])\n # For an exponential distribution with rate lambda, e.g. 
exp(-lambda * x),\n # the quantile at level q is:\n # quantile(q) = - log(1 - q) / lambda\n # scale = 1/lambda = -quantile(q) / log(1-q)\n y = rng.exponential(\n scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples\n )\n model = HistGradientBoostingRegressor(\n loss=\"quantile\",\n quantile=quantile,\n max_iter=25,\n random_state=0,\n max_leaf_nodes=10,\n ).fit(X, y)\n assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2)\n\n pinball_loss = PinballLoss(quantile=quantile)\n loss_true_quantile = pinball_loss(y, X @ coef + intercept)\n loss_pred_quantile = pinball_loss(y, model.predict(X))\n # we are overfitting\n assert loss_pred_quantile <= loss_true_quantile\n\n\n@pytest.mark.parametrize(\"y\", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"y\", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 287, "n_words": 133, "vocab_size": 96, "complexity": 1, "nloc": 27, "token_counts": 209, "n_ast_nodes": 361, "n_identifiers": 39, "random_cut": "def test_asymmetric_error(quantile):\n \n n_samples = 10_000\n rng = np.random.RandomState(42)\n # take care that X @ coef + intercept > 0\n X = np.concatenate(\n (\n np.abs(rng.randn(n_samples)[:, None]),\n -rng.randint(2, size=(n_samples, 1)),\n ),\n a", "d_id": 75518, "documentation": { "docstring": "Test quantile regression for asymmetric distributed targets.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 289281, "commit_id": "c717fd19de01fc822d146cc5e353959dfa86d5f7", "repo": "core", "path": "homeassistant/components/gtfs/sensor.py", "file_name": "sensor.py", "fun_name": "update", "commit_message": "Move attribution to standalone attribute [e-g] (#80513)", "code": "def update(self) -> None:\n \n with self.lock:\n # Fetch valid stop information once\n if not self._origin:\n stops = self._pygtfs.stops_by_id(self.origin)\n if not stops:\n self._available = False\n _LOGGER.warning(\"Origin stop ID %s not found\", self.origin)\n return\n self._origin = stops[0]\n\n if not self._destination:\n stops = self._pygtfs.stops_by_id(self.destination)\n if not stops:\n self._available = False\n _LOGGER.warning(\n \"Destination stop ID %s not found\", self.destination\n )\n return\n self._destination = stops[0]\n\n self._available = True\n\n # Fetch next departure\n self._departure = get_next_departure(\n self._pygtfs,\n self.origin,\n self.destination,\n self._offset,\n self._include_tomorrow,\n )\n\n # Define the state as a UTC timestamp with ISO 8601 format\n if not self._departure:\n self._state = None\n else:\n self._state = self._departure[\"departure_time\"].replace(\n tzinfo=dt_util.UTC\n )\n\n # Fetch trip and route details once, unless updated\n if not self._departure:\n self._trip = None\n else:\n trip_id = self._departure[\"trip_id\"]\n if not self._trip or self._trip.trip_id != trip_id:\n _LOGGER.debug(\"Fetching trip details for %s\", trip_id)\n self._trip = self._pygtfs.trips_by_id(trip_id)[0]\n\n route_id = self._departure[\"route_id\"]\n if not self._route or self._route.route_id != route_id:\n _LOGGER.debug(\"Fetching route details for %s\", route_id)\n self._route = self._pygtfs.routes_by_id(route_id)[0]\n\n # Fetch agency details exactly once\n if self._agency is None and self._route:\n _LOGGER.debug(\"Fetching agency details for %s\", self._route.agency_id)\n try:\n self._agency = 
self._pygtfs.agencies_by_id(self._route.agency_id)[0]\n except IndexError:\n _LOGGER.warning(\n \"Agency ID '%s' was not found in agency table, \"\n \"you may want to update the routes database table \"\n \"to fix this missing reference\",\n self._route.agency_id,\n )\n self._agency = False\n\n # Assign attributes, icon and name\n self.update_attributes()\n\n if self._agency:\n self._attr_attribution = self._agency.agency_name\n else:\n self._attr_attribution = None\n\n if self._route:\n self._icon = ICONS.get(self._route.route_type, ICON)\n else:\n self._icon = ICON\n\n name = (\n f\"{getattr(self._agency, 'agency_name', DEFAULT_NAME)} \"\n f\"{self.origin} to {self.destination} next departure\"\n )\n if not self._departure:\n name = f\"{DEFAULT_NAME}\"\n self._name = self._custom_name or name\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1402, "n_words": 259, "vocab_size": 141, "complexity": 18, "nloc": 72, "token_counts": 420, "n_ast_nodes": 727, "n_identifiers": 46, "random_cut": "def update(self) -> None:\n \n with self.lock:\n # Fetch valid stop information once\n if not self._origin:\n stops = self._pygtfs.stops_by_id(self.origin)\n if not stops:\n self._available = False\n _LOGGER.warning(\"Origin stop ID %s not found\", self.origin)\n return\n self._origin = stops[0]\n\n if not self._destination:\n stops = self._pygtfs.stops_by_id(self.destination)\n if not stops:\n self._available = False\n _LOGGER.warning(\n ", "d_id": 88424, "documentation": { "docstring": "Get the latest data from GTFS and update the states.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 296245, "commit_id": "ad5d7a845b73b6ef09b111597d6c542be4781b07", "repo": "core", "path": "tests/components/homekit_controller/test_binary_sensor.py", "file_name": "test_binary_sensor.py", "fun_name": "test_carbon_monoxide_sensor_read_state", "commit_message": "Fix HomeKit Controller device class for CO Sensors (#69949)", "code": "async def test_carbon_monoxide_sensor_read_state(hass, utcnow):\n \n helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service)\n\n await helper.async_update(\n ServicesTypes.CARBON_MONOXIDE_SENSOR,\n {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0},\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"off\"\n\n await helper.async_update(\n ServicesTypes.CARBON_MONOXIDE_SENSOR,\n {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 1},\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"on\"\n\n assert state.attributes[\"device_class\"] == BinarySensorDeviceClass.CO\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 102, "n_words": 41, "vocab_size": 24, "complexity": 1, "nloc": 15, "token_counts": 92, "n_ast_nodes": 152, "n_identifiers": 16, "random_cut": "async def test_carbon_monoxide_sensor_read_state(hass, utcnow):\n \n helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service)\n\n await helper.async_update(\n ServicesTypes.CARBON_MONOXIDE_SENSOR,\n {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0},\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"off\"\n\n await helper.async_update(\n ServicesTypes.CARBON_MONOXIDE_SENSOR,\n {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 1},\n )\n state = await helper.poll_and_get_state()\n assert state.state == 
\"on\"\n\n assert state.attributes[\"d", "d_id": 95240, "documentation": { "docstring": "Test that we can read the state of a HomeKit contact accessory.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 7512, "commit_id": "ed8d9cf20843744f18593b22fb6a30eaf5f325eb", "repo": "ludwig", "path": "ludwig/utils/triton_utils.py", "file_name": "triton_utils.py", "fun_name": "save_config", "commit_message": "Triton ensemble export (#2251)", "code": "def save_config(self) -> TritonArtifact:\n \n device = self.device\n if self.inference_stage != PREDICTOR:\n device = \"cpu\"\n self.config = TritonConfig(\n self.full_model_name,\n self.input_features,\n self.output_features,\n self.max_batch_size,\n self.max_queue_delay_microseconds,\n device,\n self.model_instance_count,\n self.inference_stage,\n )\n\n config_path = os.path.join(self.base_path, \"config.pbtxt\")\n with open(config_path, \"w\") as f:\n formatted_config = remove_empty_lines(self.config.get_model_config())\n f.write(formatted_config)\n\n config_artifact = TritonArtifact(\n model_name=self.full_model_name,\n model_version=self.model_version,\n platform=\"pytorch_libtorch\",\n path=config_path,\n content_type=\"text/x-protobuf\",\n content_length=os.path.getsize(config_path),\n )\n\n return config_artifact\n\n\n@dataclass", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@dataclass", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 308, "n_words": 52, "vocab_size": 44, "complexity": 2, "nloc": 31, "token_counts": 144, "n_ast_nodes": 231, "n_identifiers": 33, "random_cut": "def save_config(self) -> TritonArtifact:\n \n device = self.device\n if self.inference_stage != PREDICTOR:\n device = \"cpu\"\n self.config = TritonConfig(\n self.full_model_name,\n self.input_features,\n self.output_features,\n self.max_batch_size,\n self.max_queue_delay_microseconds,\n device,\n self.model_instance_count,\n self.inference_stage,\n )\n\n config_path = os.path.join(self.base_path, \"config.pbtxt\")\n with open(config_path, \"w\") as f:\n formatted_config = remove_empty_lines(self.config.get_model_conf", "d_id": 1224, "documentation": { "docstring": "Save the Triton config.\n\n Return the appropriate artifact.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 249236, "commit_id": "1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b", "repo": "synapse", "path": "tests/rest/admin/test_device.py", "file_name": "test_device.py", "fun_name": "test_unknown_device", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13479)\n\nReplace\r\n- `HTTPStatus.NOT_FOUND`\r\n- `HTTPStatus.FORBIDDEN`\r\n- `HTTPStatus.UNAUTHORIZED`\r\n- `HTTPStatus.CONFLICT`\r\n- `HTTPStatus.CREATED`\r\n\r\nSigned-off-by: Dirk Klimpel ", "code": "def test_unknown_device(self) -> None:\n \n url = \"/_synapse/admin/v2/users/%s/devices/unknown_device\" % urllib.parse.quote(\n self.other_user\n )\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n\n channel = self.make_request(\n \"PUT\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n\n channel = self.make_request(\n \"DELETE\",\n url,\n access_token=self.admin_user_tok,\n )\n\n # Delete unknown device returns status 200\n self.assertEqual(200, channel.code, 
msg=channel.json_body)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 258, "n_words": 50, "vocab_size": 31, "complexity": 1, "nloc": 26, "token_counts": 136, "n_ast_nodes": 212, "n_identifiers": 17, "random_cut": "def test_unknown_device(self) -> None:\n \n url = \"/_synapse/admin/v2/users/%s/devices/unknown_device\" % urllib.parse.quote(\n self.other_user\n )\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_F", "d_id": 72740, "documentation": { "docstring": "\n Tests that a lookup for a device that does not exist returns either 404 or 200.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 31, "language": "en" } }, { "id": 37029, "commit_id": "4868a830db5f19f56712f540979d637368221d50", "repo": "transformers", "path": "examples/research_projects/codeparrot/scripts/human_eval.py", "file_name": "human_eval.py", "fun_name": "complete_code", "commit_message": "Jia multi gpu eval (#16428)\n\n* add simple multi gpu complet\r\n\r\n* add human_eval_multi_gpu\r\n\r\n* use copy strategy to distribute across gpu, to avoid padding\r\n\r\n* add doc string\r\n\r\n* update code style\r\n\r\n* use task id to arrange output\r\n\r\n* truncate input to avoid zero pad\r\n\r\n* Stop the copy mechanism\r\n\r\n* update style\r\n\r\n* restore copies to scale better in distributed mode\r\n\r\n* update style\r\n\r\n* replace human eval\r\n\r\n* Apply suggestions from code review\r\n\r\n1. Tokenize all input at the same time\r\n2. use attention_mask to get the input length\r\n3. other small fixes\r\n\r\nCo-authored-by: Leandro von Werra \r\n\r\n* correct typo and update docstring\r\n\r\n* update code style\r\n\r\n* remove num sample division constraint\r\n\r\n* remove max len calculation\r\n\r\n* use accelerator.gather once to speed up\r\n\r\n* use accelerate set_seed; update accelerate version\r\n\r\n* correct gather bug\r\n\r\nCo-authored-by: Leandro von Werra ", "code": "def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):\n \n gen_token_dict = defaultdict(list) # dict of list of generated tokens\n for step, batch in tqdm(enumerate(dataloader)):\n with torch.no_grad():\n gen_kwargs[\"stopping_criteria\"][0].start_length = batch[\"ids\"].shape[-1]\n generated_tokens = accelerator.unwrap_model(model).generate(\n input_ids=batch[\"ids\"][:, : batch[\"input_len\"]], num_return_sequences=batch_size, **gen_kwargs\n )\n # each task is generated batch_size times\n generated_tasks = batch[\"task_id\"].repeat(batch_size)\n generated_tokens = accelerator.pad_across_processes(\n generated_tokens, dim=1, pad_index=tokenizer.pad_token_id\n )\n\n generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))\n generated_tokens = generated_tokens.cpu().numpy()\n generated_tasks = generated_tasks.cpu().numpy()\n\n for task, generated_tokens in zip(generated_tasks, generated_tokens):\n gen_token_dict[task].append(generated_tokens)\n\n code_gens = [[] for _ in range(n_tasks)]\n for task, generated_tokens in gen_token_dict.items():\n for s in generated_tokens:\n gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n code_gens[task].append(remove_last_block(gen_code))\n return code_gens\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 317, "n_words": 96, "vocab_size": 66, "complexity": 6, "nloc": 23, "token_counts": 246, "n_ast_nodes": 387, "n_identifiers": 46, "random_cut": "def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):\n \n gen_token_dict = defaultdict(list) # dict of list of generated tokens\n for step, batch in tqdm(enumerate(dataloader)):\n with torch.no_grad():\n gen_kwargs[\"stopping_criteria\"][0].start_length = batch[\"ids\"].shape[-1]\n generated_tokens = accelerator.unwrap_model(model).generate(\n input_ids=batch[\"ids\"][:, : batch[\"input_len\"]], num_return_sequences=batch_size, **gen_kwargs\n )\n # each task is generated batch_size times\n generated_tasks = batch[\"task_id\"].repeat(batch_size)\n generated_tokens = accelerator.pad_across_processes(\n generated_tokens, dim=1, pad_index=tokenizer.pad_token_id\n )\n\n generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))\n generated_tokens = generated_tokens.cpu().numpy()\n generated_tasks = generated_tasks.cpu().numpy()\n\n for task, generated_tokens in zip(generated_tasks, generated_tokens):\n gen_token_dict[task].append(generated_tokens)\n\n code_gens = [[] for _ in range(n_tasks)]\n for task, generated_tokens in gen_token_dict.items():\n for s in generated_tokens:\n gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n code_gens[task].append(remove_last_block(gen_code))\n return code_gens\n\n", "d_id": 6716, "documentation": { "docstring": "Generate multiple codes for each task in the dataset. This function leverage accelerator to distribute\n the processing to multiple GPUs.\n dataloader, a wrapper around a TokenizeDataset objectm is supposed to send all the prompts from\n the evalution dataset to the modelm as the following:\n [p_0_0, p_0_1, ..., p_0_nc-1, p_1_0, ..., p_nt-1_nc-1]\n where nc is the number of copies of the prompt, and nt is the number of tasks.\n nc is such that num_sample = nc * batch_size\n\n Parameters\n ----------\n accelerator: Accelerator\n\n model: transformers.PreTrainedModel\n Code generation model. AutoTokenizer.from_pretrained(model_ckpt), ex model_ckpt = \"lvwerra/codeparrot\"\n\n tokenizer: transformers.AutoTokenizer\n The tokenizer used to train model\n\n dataloader: DataLoader\n The dataloader is a wrapper around a TokenizeDataset object. It is designed to be used with multiple GPUs.\n\n n_tasks: int\n The number of tasks in the dataset. 
It is used to determine the length of the output.\n Should be aligned with the number of tasks in the TokenizeDataset.\n\n batch_size: int\n num_return_sequences per copy of the prompt such that num_sample = batch_size * n_copies\n\n gen_kwargs: dict\n Keyword arguments for the generation function of the model.\n\n Returns\n -------\n code_gens: list of list of str, of length n_tasks\n List of generated codes for each task.\n Each element is a list of generated codes for each task, with length num_samples\n ", "n_words": 207, "vocab_size": 115, "n_whitespaces": 327, "language": "en" } }, { "id": 65710, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/crm/doctype/contract/contract.py", "file_name": "contract.py", "fun_name": "get_status", "commit_message": "style: format code with black", "code": "def get_status(start_date, end_date):\n\t\n\n\tif not end_date:\n\t\treturn \"Active\"\n\n\tstart_date = getdate(start_date)\n\tend_date = getdate(end_date)\n\tnow_date = getdate(nowdate())\n\n\treturn \"Active\" if start_date <= now_date <= end_date else \"Inactive\"\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 20, "n_words": 27, "vocab_size": 18, "complexity": 3, "nloc": 7, "token_counts": 44, "n_ast_nodes": 78, "n_identifiers": 6, "random_cut": "def get_status(start_date, end_date):\n\t\n\n\tif not end_date:\n\t\treturn \"Active\"\n\n\tstart_date = getdate(start_date)\n\tend_date = getdate(end_date)\n\tnow_date = getdate(nowdate())\n\n\treturn \"Active\" if start_date <= now_date <= end_date els", "d_id": 13993, "documentation": { "docstring": "\n\tGet a Contract's status based on the start, current and end dates\n\n\tArgs:\n\t start_date (str): The start date of the contract\n\t end_date (str): The end date of the contract\n\n\tReturns:\n\t str: 'Active' if within range, otherwise 'Inactive'\n\t", "n_words": 37, "vocab_size": 29, "n_whitespaces": 55, "language": "en" } }, { "id": 19358, "commit_id": "def289b723e9216830c2a7b2577cb31b55710167", "repo": "PythonRobotics", "path": "PathPlanning/CubicSpline/cubic_spline_planner.py", "file_name": "cubic_spline_planner.py", "fun_name": "calc_position", "commit_message": "enhance cubic spline path doc (#698)\n\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc", "code": "def calc_position(self, s):\n \n x = self.sx.calc_position(s)\n y = self.sy.calc_position(s)\n\n return x, y\n", "url": "https://github.com/AtsushiSakai/PythonRobotics.git", "language": "Python", "ast_errors": "", "n_ast_errors": 
0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 7, "random_cut": "def calc_position(self, s):\n \n x = self.sx.calc_position(s)\n y = self.sy.calc_positi", "d_id": 2946, "documentation": { "docstring": "\n calc position\n\n Parameters\n ----------\n s : float\n distance from the start point. if `s` is outside the data point's\n range, return None.\n\n Returns\n -------\n x : float\n x position for given s.\n y : float\n y position for given s.\n ", "n_words": 40, "vocab_size": 28, "n_whitespaces": 148, "language": "en" } }, { "id": 221712, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/contextlib.py", "file_name": "contextlib.py", "fun_name": "push", "commit_message": "add python 3.10.4 for windows", "code": "def push(self, exit):\n \n # We use an unbound method rather than a bound method to follow\n # the standard lookup behaviour for special methods.\n _cb_type = type(exit)\n\n try:\n exit_method = _cb_type.__exit__\n except AttributeError:\n # Not a context manager, so assume it's a callable.\n self._push_exit_callback(exit)\n else:\n self._push_cm_exit(exit, exit_method)\n return exit # Allow use as a decorator.\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 156, "n_words": 55, "vocab_size": 46, "complexity": 2, "nloc": 9, "token_counts": 42, "n_ast_nodes": 75, "n_identifiers": 10, "random_cut": "def push(self, exit):\n \n # We use an unbound method rather than a bound method to follow\n # the standard lookup behaviour for special methods.\n _cb_type = type(exit)\n\n try:\n exit_method = _cb_type.__exit__\n except AttributeError:\n # Not a context manager, so assume it's a callable.\n sel", "d_id": 56482, "documentation": { "docstring": "Registers a callback with the standard __exit__ method signature.\n\n Can suppress exceptions the same way __exit__ method can.\n Also accepts any object with an __exit__ method (registering a call\n to the method instead of the object itself).\n ", "n_words": 37, "vocab_size": 26, "n_whitespaces": 65, "language": "en" } }, { "id": 305647, "commit_id": "6355e682fa4aeb526570597d919ad1fb76755b9a", "repo": "core", "path": "homeassistant/components/mpd/media_player.py", "file_name": "media_player.py", "fun_name": "async_media_play", "commit_message": "Improve entity type hints [m] (#77816)", "code": "async def async_media_play(self) -> None:\n \n if self._status[\"state\"] == \"pause\":\n await self._client.pause(0)\n else:\n await self._client.play()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 57, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 37, "n_ast_nodes": 69, "n_identifiers": 6, "random_cut": "async def async_media_play(self) -> None:\n \n if se", "d_id": 104431, "documentation": { "docstring": "Service to send the MPD the command for play/pause.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 67193, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/datev/datev.py", "file_name": "datev.py", "fun_name": "download_datev_csv", "commit_message": "style: format code with black", "code": "def download_datev_csv(filters):\n\t\n\tif isinstance(filters, str):\n\t\tfilters = 
json.loads(filters)\n\n\tvalidate(filters)\n\tcompany = filters.get(\"company\")\n\n\tfiscal_year = get_fiscal_year(date=filters.get(\"from_date\"), company=company)\n\tfilters[\"fiscal_year_start\"] = fiscal_year[1]\n\n\t# set chart of accounts used\n\tcoa = frappe.get_value(\"Company\", company, \"chart_of_accounts\")\n\tfilters[\"skr\"] = \"04\" if \"SKR04\" in coa else (\"03\" if \"SKR03\" in coa else \"\")\n\n\tdatev_settings = frappe.get_doc(\"DATEV Settings\", company)\n\tfilters[\"account_number_length\"] = datev_settings.account_number_length\n\tfilters[\"temporary_against_account_number\"] = datev_settings.temporary_against_account_number\n\n\ttransactions = get_transactions(filters)\n\taccount_names = get_account_names(filters)\n\tcustomers = get_customers(filters)\n\tsuppliers = get_suppliers(filters)\n\n\tzip_name = \"{} DATEV.zip\".format(frappe.utils.datetime.date.today())\n\tzip_and_download(\n\t\tzip_name,\n\t\t[\n\t\t\t{\n\t\t\t\t\"file_name\": \"EXTF_Buchungsstapel.csv\",\n\t\t\t\t\"csv_data\": get_datev_csv(transactions, filters, csv_class=Transactions),\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"file_name\": \"EXTF_Kontenbeschriftungen.csv\",\n\t\t\t\t\"csv_data\": get_datev_csv(account_names, filters, csv_class=AccountNames),\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"file_name\": \"EXTF_Kunden.csv\",\n\t\t\t\t\"csv_data\": get_datev_csv(customers, filters, csv_class=DebtorsCreditors),\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"file_name\": \"EXTF_Lieferanten.csv\",\n\t\t\t\t\"csv_data\": get_datev_csv(suppliers, filters, csv_class=DebtorsCreditors),\n\t\t\t},\n\t\t],\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 70, "n_words": 109, "vocab_size": 74, "complexity": 4, "nloc": 38, "token_counts": 248, "n_ast_nodes": 421, "n_identifiers": 38, "random_cut": "def download_datev_csv(filters):\n\t\n\tif isinstance(filters, str):\n\t\tfilters = json.loads(filters)\n\n\tvalidate(filters)\n\tcompany = filters.get(\"company\")\n\n\tfiscal_year = get_fiscal_year(date=filters.get(\"from_date\"), company=company)\n\tfilters[\"fiscal_year_start\"] = fiscal_year[1]\n\n\t# set chart of accounts used\n\tcoa = frappe.get_value(\"Company\", company, \"chart_of_accounts\")\n\tfilters[\"skr\"] = \"04\" if \"SKR04\" in coa else (\"03\" if \"SKR03\" in coa else \"\")\n\n\tdatev_settings = frappe.get_doc(\"DATEV Settings\", company)\n\tfilters[\"account_number_length\"] = datev_settings.account_number_length\n\tfilters[\"temporary_against_account_number\"] = datev_settings.temporary_against_account_number\n\n\ttransactions = get_transactions(filters)\n\taccount_names = get_account_names(filters)\n\tcustomers = get_customers(filters)\n\tsuppliers = get_suppliers(filters)\n\n\tzip_name = \"{} DATEV.zip\".format(frappe.utils.datetime.date.today())\n\tzip_and_download(\n\t\tzip_name,\n\t\t[\n\t\t\t{\n\t\t\t\t\"file_name\": \"EXTF_Buchungsstapel.csv\",\n\t\t\t\t\"csv_data\": get_datev_csv(transactions, filters, csv_class=Transactions),\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"file_name\": \"EXTF_Kontenbeschriftungen.csv\",\n\t\t\t\t\"csv_data\": get_datev_csv(account_names, filters, csv_class=Accoun", "d_id": 14439, "documentation": { "docstring": "\n\tProvide accounting entries for download in DATEV format.\n\n\tValidate the filters, get the data, produce the CSV file and provide it for\n\tdownload. 
Can be called like this:\n\n\tGET /api/method/erpnext.regional.report.datev.datev.download_datev_csv\n\n\tArguments / Params:\n\tfilters -- dict of filters to be passed to the sql query\n\t", "n_words": 45, "vocab_size": 38, "n_whitespaces": 39, "language": "en" } }, { "id": 42544, "commit_id": "8a4cf5d94eb94b6427c5d1d7907ba07b119932c5", "repo": "nltk", "path": "nltk/parse/util.py", "file_name": "util.py", "fun_name": "taggedsent_to_conll", "commit_message": "Docstring tests (#3050)\n\n* fixed pytests\r\n\r\n* fixed more pytests\r\n\r\n* fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py\r\n\r\n* fixed pytests (mainly multiline or rounding issues)\r\n\r\n* fixed treebank pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed destructive.py pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed pytest (rounding issues)\r\n\r\n* fixed pytest (initialised missing object)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* added pytest +SKIP for deprecated module stanford\r\n\r\n* updated AUTHORS.md\r\n\r\n* changed docstring corrections by usage of ELLIPSIS and different roundings\r\n\r\n* fixed AUTHORS.md to be consistent\r\n\r\n* Fix framenet doctest formatting with pprint\r\n\r\n* Change docstring on MultiListBox.__init__\r\n\r\nI believe the original typo was misinterpreted and changed to something that was not originally intended.\r\n\r\nCo-authored-by: Jan Lennartz \r\nCo-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>\r\nCo-authored-by: Tom Aarsen ", "code": "def taggedsent_to_conll(sentence):\n \n for (i, (word, tag)) in enumerate(sentence, start=1):\n input_str = [str(i), word, \"_\", tag, tag, \"_\", \"0\", \"a\", \"_\", \"_\"]\n input_str = \"\\t\".join(input_str) + \"\\n\"\n yield input_str\n\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 55, "n_words": 28, "vocab_size": 22, "complexity": 2, "nloc": 5, "token_counts": 64, "n_ast_nodes": 109, "n_identifiers": 10, "random_cut": "def taggedsent_to_conll(sentence):\n ", "d_id": 7606, "documentation": { "docstring": "\n A module to convert a single POS tagged sentence into CONLL format.\n\n >>> from nltk import word_tokenize, pos_tag\n >>> text = \"This is a foobar sentence.\"\n >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE\n ... 
\tprint(line, end=\"\")\n 1\tThis\t_\tDT\tDT\t_\t0\ta\t_\t_\n 2\tis\t_\tVBZ\tVBZ\t_\t0\ta\t_\t_\n 3\ta\t_\tDT\tDT\t_\t0\ta\t_\t_\n 4\tfoobar\t_\tJJ\tJJ\t_\t0\ta\t_\t_\n 5\tsentence\t_\tNN\tNN\t_\t0\ta\t_\t_\n 6\t.\t\t_\t.\t.\t_\t0\ta\t_\t_\n\n :param sentence: A single input sentence to parse\n :type sentence: list(tuple(str, str))\n :rtype: iter(str)\n :return: a generator yielding a single sentence in CONLL format.\n ", "n_words": 121, "vocab_size": 60, "n_whitespaces": 140, "language": "en" } }, { "id": 206637, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/encoding.py", "file_name": "encoding.py", "fun_name": "get_system_encoding", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_system_encoding():\n \n try:\n encoding = locale.getdefaultlocale()[1] or \"ascii\"\n codecs.lookup(encoding)\n except Exception:\n encoding = \"ascii\"\n return encoding\n\n\nDEFAULT_LOCALE_ENCODING = get_system_encoding()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 51, "n_words": 19, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 33, "n_ast_nodes": 71, "n_identifiers": 8, "random_cut": "def get_system_encoding():\n \n try:\n encoding = locale.getdefaultlocale()[1] or \"ascii\"\n codecs.lookup(en", "d_id": 51596, "documentation": { "docstring": "\n The encoding of the default system locale. Fallback to 'ascii' if the\n #encoding is unsupported by Python or could not be determined. See tickets\n #10335 and #5846.\n ", "n_words": 27, "vocab_size": 26, "n_whitespaces": 40, "language": "en" } }, { "id": 77977, "commit_id": "b8a9a2d319b06fc2318d68d05b5a6cdf85b5b33d", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/options.py", "file_name": "options.py", "fun_name": "get_menu_item", "commit_message": "Deprecate wagtail.contrib.modeladmin.menus.SubMenu in favour of wagtail.admin.menu.Menu\n\nThe Menu class was not originally designed to accept menu items at constructor time (instead requiring them to be passed via hooks); ModelAdmin's SubMenu class patched this functionality in, and the documentation for extending admin views piggybacked on this. 
Add this functionality to the base Menu class so that we don't have this unnecessary dependency on ModelAdmin.", "code": "def get_menu_item(self):\n \n if self.modeladmin_instances:\n submenu = Menu(items=self.get_submenu_items())\n return GroupMenuItem(self, self.get_menu_order(), submenu)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 47, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 36, "n_ast_nodes": 60, "n_identifiers": 9, "random_cut": "def get_menu_item(self):\n \n if self.modeladmin_instances:\n submenu = Menu(items=self.g", "d_id": 16728, "documentation": { "docstring": "\n Utilised by Wagtail's 'register_menu_item' hook to create a menu\n for this group with a submenu linking to listing pages for any\n associated ModelAdmin instances\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 53, "language": "en" } }, { "id": 110702, "commit_id": "9b8a598d00a4fcf9579415586053583ef80a1add", "repo": "matplotlib", "path": "lib/matplotlib/backend_bases.py", "file_name": "backend_bases.py", "fun_name": "_draw_text_as_path", "commit_message": "Soft deprecate the textpath module (import from text instead)\n\nThe textpath module was created in 2009, but the status has\nbeen a bit vague with many examples and exisiting code found\non the internet importing from text instead.\n\nIn this PR everything is changed to point at text, although textpath\nis still available for backwards compatibility.", "code": "def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):\n \n path, transform = self._get_text_path_transform(\n x, y, s, prop, angle, ismath)\n color = gc.get_rgb()\n gc.set_linewidth(0.0)\n self.draw_path(gc, path, transform, rgbFace=color)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 73, "n_words": 27, "vocab_size": 20, "complexity": 1, "nloc": 6, "token_counts": 69, "n_ast_nodes": 94, "n_identifiers": 17, "random_cut": "def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):\n \n path, transform = self._get_text_path_transform(\n ", "d_id": 24258, "documentation": { "docstring": "\n Draw the text by converting them to paths using `.TextToPath`.\n\n Parameters\n ----------\n x : float\n The x location of the text in display coords.\n y : float\n The y location of the text baseline in display coords.\n s : str\n The text to be converted.\n prop : `~matplotlib.font_manager.FontProperties`\n The font property.\n angle : float\n Angle in degrees to render the text at.\n ismath : bool or \"TeX\"\n If True, use mathtext parser. 
If \"TeX\", use tex for rendering.\n ", "n_words": 78, "vocab_size": 49, "n_whitespaces": 215, "language": "en" } }, { "id": 38114, "commit_id": "afe5d42d8d1d80af911ed980c2936bfe887078f6", "repo": "transformers", "path": "examples/research_projects/lxmert/modeling_frcnn.py", "file_name": "modeling_frcnn.py", "fun_name": "__call__", "commit_message": "Black preview (#17217)\n\n* Black preview\r\n\r\n* Fixup too!\r\n\r\n* Fix check copies\r\n\r\n* Use the same version as the CI\r\n\r\n* Bump black", "code": "def __call__(self, match_quality_matrix):\n \n assert match_quality_matrix.dim() == 2\n if match_quality_matrix.numel() == 0:\n default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64)\n # When no gt boxes exist, we define IOU = 0 and therefore set labels\n # to `self.labels[0]`, which usually defaults to background class 0\n # To choose to ignore instead,\n # can make labels=[-1,0,-1,1] + set appropriate thresholds\n default_match_labels = match_quality_matrix.new_full(\n (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8\n )\n return default_matches, default_match_labels\n\n assert torch.all(match_quality_matrix >= 0)\n\n # match_quality_matrix is M (gt) x N (predicted)\n # Max over gt elements (dim 0) to find best gt candidate for each prediction\n matched_vals, matches = match_quality_matrix.max(dim=0)\n\n match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)\n\n for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):\n low_high = (matched_vals >= low) & (matched_vals < high)\n match_labels[low_high] = l\n\n if self.allow_low_quality_matches:\n self.set_low_quality_matches_(match_labels, match_quality_matrix)\n\n return matches, match_labels\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 341, "n_words": 128, "vocab_size": 100, "complexity": 4, "nloc": 17, "token_counts": 190, "n_ast_nodes": 294, "n_identifiers": 27, "random_cut": "def __call__(self, match_quality_matrix):\n \n assert match_quality_matrix.dim() == 2\n if match_quality_matrix.numel() == 0:\n default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64)\n # When no gt boxes exist, we define IOU = 0 and therefore set labels\n # to `self.labels[0]`, which usually defaults to background class 0\n # To choose to ignore instead,\n # can make labels=[-1,0,-1,1] + set appropriate thresholds\n default_match_labels = match_quality_matrix.new_full(\n ", "d_id": 6912, "documentation": { "docstring": "\n Args:\n match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted\n elements. 
All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`).\n Returns:\n matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M)\n match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored\n ", "n_words": 69, "vocab_size": 56, "n_whitespaces": 139, "language": "en" } }, { "id": 218453, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "ismemberdescriptor", "commit_message": "add python 3.10.4 for windows", "code": "def ismemberdescriptor(object):\n \n return isinstance(object, types.MemberDescriptorType)\nelse:\n # Other implementations", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "else:", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 25, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 30, "n_identifiers": 6, "random_cut": "def ismemberdescriptor(object):\n \n return isinstance(object, types.MemberDescriptorType)\nelse:\n #", "d_id": 55318, "documentation": { "docstring": "Return true if the object is a member descriptor.\n\n Member descriptors are specialized descriptors defined in extension\n modules.", "n_words": 18, "vocab_size": 17, "n_whitespaces": 31, "language": "en" } }, { "id": 153268, "commit_id": "fc539c3d70a40c9d7aabc5c50dd7280aa5e4637e", "repo": "modin", "path": "modin/core/dataframe/base/exchange/dataframe_protocol/utils.py", "file_name": "utils.py", "fun_name": "pandas_dtype_to_arrow_c", "commit_message": "FEAT-#4245: Define base interface for dataframe exchange protocol (#4246)\n\nSigned-off-by: Igoshev, Yaroslav \r\nCo-authored-by: Dmitry Chigarev ", "code": "def pandas_dtype_to_arrow_c(dtype) -> str:\n \n if isinstance(dtype, pandas.CategoricalDtype):\n return ArrowCTypes.INT64\n elif dtype == np.dtype(\"O\"):\n return ArrowCTypes.STRING\n\n format_str = getattr(ArrowCTypes, dtype.name.upper(), None)\n if format_str is not None:\n return format_str\n\n if is_datetime64_dtype(dtype):\n # Selecting the first char of resolution string:\n # dtype.str -> ' str:\n \n if isinstance(dtype, pandas.CategoricalDtype):\n return ArrowCTypes.INT64\n elif dtype == np.dtype(\"O\"):\n return ArrowCTypes.STRING\n\n format_str = getattr(ArrowCTypes, dtype.name.upper(), None)\n if format_str is not None:\n return format_str\n\n if is_datetime64_dtype(dtype):\n # Selecting the first char of resolution string:\n # dtype.str -> ' bool:\n \n if not (hasattr(obj, \"read\") or hasattr(obj, \"write\")):\n return False\n\n return bool(hasattr(obj, \"__iter__\"))\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 16, "vocab_size": 15, "complexity": 3, "nloc": 32, "token_counts": 38, "n_ast_nodes": 67, "n_identifiers": 4, "random_cut": "def is_file_like(obj) -> bool:\n \n if not (hasattr(obj, \"read\") or hasattr(obj, \"write\")):\n return False\n\n ", "d_id": 40759, "documentation": { "docstring": "\n Check if the object is a file-like object.\n\n For objects to be considered file-like, they must\n be an iterator AND have either a `read` and/or `write`\n method as an attribute.\n\n Note: file-like objects must be iterable, but\n iterable objects need not be file-like.\n\n Parameters\n ----------\n obj : The object to check\n\n 
Returns\n -------\n bool\n Whether `obj` has file-like properties.\n\n Examples\n --------\n >>> import io\n >>> buffer = io.StringIO(\"data\")\n >>> is_file_like(buffer)\n True\n >>> is_file_like([1, 2, 3])\n False\n ", "n_words": 76, "vocab_size": 61, "n_whitespaces": 147, "language": "en" } }, { "id": 4252, "commit_id": "56bf982cb96f831fe04f5e44a92ee4a669b9e16a", "repo": "airbyte", "path": "octavia-cli/octavia_cli/apply/resources.py", "file_name": "resources.py", "fun_name": "update", "commit_message": "🐙 octavia-cli: `apply` connections (#10881)", "code": "def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]:\n \n return self._create_or_update(self._update_fn, self.update_payload)\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 7, "token_counts": 28, "n_ast_nodes": 43, "n_identifiers": 9, "random_cut": "def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]:\n \n return self._create_or_update(self._update_fn, self.update_payload)\n", "d_id": 645, "documentation": { "docstring": "Public function to update the resource on the remote Airbyte instance.\n\n Returns:\n Union[SourceRead, DestinationRead, ConnectionRead]: The updated resource.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 43, "language": "en" } }, { "id": 247638, "commit_id": "5dd949bee6158a8b651db9f2ae417a62c8184bfd", "repo": "synapse", "path": "tests/handlers/test_oidc.py", "file_name": "test_oidc.py", "fun_name": "test_callback_session", "commit_message": "Add type hints to some tests/handlers files. (#12224)", "code": "def test_callback_session(self) -> None:\n \n request = Mock(spec=[\"args\", \"getCookie\", \"cookies\"])\n\n # Missing cookie\n request.args = {}\n request.getCookie.return_value = None\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"missing_session\", \"No session cookie found\")\n\n # Missing session parameter\n request.args = {}\n request.getCookie.return_value = \"session\"\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"invalid_request\", \"State parameter is missing\")\n\n # Invalid cookie\n request.args = {}\n request.args[b\"state\"] = [b\"state\"]\n request.getCookie.return_value = \"session\"\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"invalid_session\")\n\n # Mismatching session\n session = self._generate_oidc_session_token(\n state=\"state\",\n nonce=\"nonce\",\n client_redirect_url=\"http://client/redirect\",\n )\n request.args = {}\n request.args[b\"state\"] = [b\"mismatching state\"]\n request.getCookie.return_value = session\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"mismatching_session\")\n\n # Valid session\n request.args = {}\n request.args[b\"state\"] = [b\"state\"]\n request.getCookie.return_value = session\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"invalid_request\")\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 347, "n_words": 90, "vocab_size": 42, "complexity": 1, "nloc": 31, "token_counts": 241, "n_ast_nodes": 419, "n_identifiers": 17, "random_cut": "def test_callback_session(self) -> None:\n \n request = Mock(spec=[\"args\", \"getCookie\", 
\"cookies\"])\n\n # Missing cookie\n request.args = {}\n request.getCookie.return_value = None\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"missing_session\", \"No session cookie found\")\n\n # Missing session parameter\n request.args = {}\n request.getCookie.return_value = \"session\"\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"invalid_request\", \"State parameter is missing\")\n\n # Invalid cookie\n request.args = {}\n request.args[b\"state\"] = [b\"state\"]\n request.getCookie.return_value = \"session\"\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"invalid_session\")\n\n # Mismatching session\n session = self._generate_oidc_session_token(\n state=\"state\",\n nonce=\"nonce\",\n client_redirect_url=\"http://client/redirect\",\n )\n request.args = {}\n request.args[b\"state\"] = [b\"mismatching state\"]\n request.getCookie.return_value = session\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"mismatching_session\")\n\n # Valid session\n request.args = {}\n request.args[b\"state\"] = [b\"state\"]\n request.getCookie.return_value = session\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assert", "d_id": 71802, "documentation": { "docstring": "The callback verifies the session presence and validity", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 59100, "commit_id": "bbee097653559003fa0db61ab00f1ff8567eea9a", "repo": "prefect", "path": "src/prefect/filesystems.py", "file_name": "filesystems.py", "fun_name": "_create_repo_url", "commit_message": "Add private repos", "code": "def _create_repo_url(self) -> str:\n \n url_components = urllib.parse.urlparse(self.repository_url)\n if url_components.scheme == \"https\" and self.credentials is not None:\n repo_url = url_components.netloc + url_components.path\n updated_components = url_components._replace(\n netloc=f\"{self.credentials.get_secret_value()}@{url_components.netloc}\"\n )\n full_url = urllib.parse.urlunparse(updated_components)\n else:\n full_url = self.repository_url\n\n return full_url\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 140, "n_words": 35, "vocab_size": 29, "complexity": 3, "nloc": 16, "token_counts": 73, "n_ast_nodes": 142, "n_identifiers": 18, "random_cut": "def _create_repo_url(self) -> str:\n \n url_components = urllib.parse.urlparse(self.repository_url)\n if url_components.scheme == \"https\" and self.credentials is not None:\n repo_url = url_components.netloc + url_components.path\n updated_components = url_components._replace(\n netloc=f\"{self.credentials.get_se", "d_id": 11867, "documentation": { "docstring": "Format the URL provided to the `git clone` command.\n\n For private repos: https://@github.com//.git\n All other repos should be the same as `self.repository`.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 43, "language": "en" } }, { "id": 207149, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_filters/tests.py", "file_name": "tests.py", "fun_name": "test_lookup_with_dynamic_value", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_lookup_with_dynamic_value(self):\n \n modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)\n", "url": "https://github.com/django/django.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 14, "token_counts": 86, "n_ast_nodes": 25, "n_identifiers": 6, "random_cut": "def test_lookup_with_dynamic_value(self):\n ", "d_id": 51882, "documentation": { "docstring": "\n Ensure SimpleListFilter can access self.value() inside the lookup.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 33821, "commit_id": "f7ce4f1ff789c11f129597a1171b5d549d102e09", "repo": "transformers", "path": "tests/test_tokenization_common.py", "file_name": "test_tokenization_common.py", "fun_name": "test_batch_encode_dynamic_overflowing", "commit_message": "Fix custom tokenizers test (#19052)\n\n* Fix CI for custom tokenizers\r\n\r\n* Add nightly tests\r\n\r\n* Run CI, run!\r\n\r\n* Fix paths\r\n\r\n* Typos\r\n\r\n* Fix test", "code": "def test_batch_encode_dynamic_overflowing(self):\n \n for tokenizer, pretrained_name, kwargs in self.tokenizers_list:\n tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)\n\n with self.subTest(f\"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})\"):\n\n if is_torch_available():\n returned_tensor = \"pt\"\n elif is_tf_available():\n returned_tensor = \"tf\"\n elif is_flax_available():\n returned_tensor = \"jax\"\n else:\n return\n\n if not tokenizer.pad_token or tokenizer.pad_token_id < 0:\n return\n\n tokens = tokenizer.encode_plus(\n \"HuggingFace is solving NLP one commit at a time\",\n max_length=6,\n padding=True,\n truncation=True,\n return_tensors=returned_tensor,\n return_overflowing_tokens=True,\n )\n\n for key in filter(lambda x: \"overflow_to_sample_mapping\" not in x, tokens.keys()):\n self.assertEqual(len(tokens[key].shape), 2)\n\n # Mono sample\n tokens = tokenizer.batch_encode_plus(\n [\"HuggingFace is solving NLP one commit at a time\"],\n max_length=6,\n padding=True,\n truncation=\"only_first\",\n return_tensors=returned_tensor,\n return_overflowing_tokens=True,\n )\n\n for key in filter(lambda x: \"overflow_to_sample_mapping\" not in x, tokens.keys()):\n self.assertEqual(len(tokens[key].shape), 2)\n self.assertEqual(tokens[key].shape[-1], 6)\n\n # Multi sample\n tokens = tokenizer.batch_encode_plus(\n [\"HuggingFace is solving NLP one commit at a time\", \"Very tiny input\"],\n max_length=6,\n padding=True,\n truncation=\"only_first\",\n return_tensors=returned_tensor,\n return_overflowing_tokens=True,\n )\n\n for key in filter(lambda x: \"overflow_to_sample_mapping\" not in x, tokens.keys()):\n self.assertEqual(len(tokens[key].shape), 2)\n self.assertEqual(tokens[key].shape[-1], 6)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 952, "n_words": 144, "vocab_size": 71, "complexity": 10, "nloc": 46, "token_counts": 314, "n_ast_nodes": 521, "n_identifiers": 32, "random_cut": "def test_batch_encode_dynamic_overflowing(self):\n \n for tokenizer, pretrained_name, kwargs in self.tokenizers_list:\n tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)\n\n with self.subTest(f\"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})\"):\n\n if is_torch_available():\n returned_tensor = \"pt\"\n elif is_tf_available():\n returned_tensor = \"tf\"\n elif is_flax_available():\n returned_tensor = \"jax\"\n else:\n return\n\n if not tokenizer.pad_token or 
tokenizer.pad_token_id < 0:\n return\n\n tokens =", "d_id": 6164, "documentation": { "docstring": "\n When calling batch_encode with multiple sequence it can returns different number of\n overflowing encoding for each sequence:\n [\n Sequence 1: [Encoding 1, Encoding 2],\n Sequence 2: [Encoding 1],\n Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]\n ]\n This needs to be padded so that it can represented as a tensor\n ", "n_words": 51, "vocab_size": 42, "n_whitespaces": 121, "language": "en" } }, { "id": 275215, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/__init__.py", "file_name": "__init__.py", "fun_name": "deserialize", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def deserialize(config, custom_objects=None):\n \n # loss_scale_optimizer has a direct dependency of optimizer, import here\n # rather than top to avoid the cyclic dependency.\n from keras.mixed_precision import (\n loss_scale_optimizer,\n ) # pylint: disable=g-import-not-at-top\n\n all_classes = {\n \"adadelta\": adadelta_v2.Adadelta,\n \"adagrad\": adagrad_v2.Adagrad,\n \"adam\": adam_v2.Adam,\n \"adamax\": adamax_v2.Adamax,\n \"experimentaladadelta\": adadelta_experimental.Adadelta,\n \"experimentaladagrad\": adagrad_experimental.Adagrad,\n \"experimentaladam\": adam_experimental.Adam,\n \"experimentalsgd\": sgd_experimental.SGD,\n \"nadam\": nadam_v2.Nadam,\n \"rmsprop\": rmsprop_v2.RMSprop,\n \"sgd\": gradient_descent_v2.SGD,\n \"ftrl\": ftrl.Ftrl,\n \"lossscaleoptimizer\": loss_scale_optimizer.LossScaleOptimizer,\n \"lossscaleoptimizerv3\": loss_scale_optimizer.LossScaleOptimizerV3,\n # LossScaleOptimizerV1 was an old version of LSO that was removed.\n # Deserializing it turns it into a LossScaleOptimizer\n \"lossscaleoptimizerv1\": loss_scale_optimizer.LossScaleOptimizer,\n }\n\n # Make deserialization case-insensitive for built-in optimizers.\n if config[\"class_name\"].lower() in all_classes:\n config[\"class_name\"] = config[\"class_name\"].lower()\n return deserialize_keras_object(\n config,\n module_objects=all_classes,\n custom_objects=custom_objects,\n printable_module_name=\"optimizer\",\n )\n\n\n@keras_export(\"keras.optimizers.get\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.optimizers.get\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 300, "n_words": 106, "vocab_size": 92, "complexity": 2, "nloc": 29, "token_counts": 156, "n_ast_nodes": 275, "n_identifiers": 34, "random_cut": "def deserialize(config, custom_objects=None):\n \n # loss_scale_optimizer has a direct dependency of optimizer, import here\n # rather than top to avoid the cyclic dependency.\n from keras.mixed_precision import (\n loss_scale_optimizer,\n ) # pylint: disable=g-import-not-at-top\n\n all_classes = {\n \"adadelta\": adadelta_v2.Adadelta,\n \"adagrad\": adagrad_v2.Adagrad,\n \"adam\": adam_v2.Adam,\n \"adamax\": adamax_v2.Adamax,\n \"experimentaladadelta\": adadelta_experimental.Adadelta,\n \"experimentaladagrad\": adagrad_experimental.Adagrad,\n \"experimentaladam\": adam_experimental.Adam,\n \"experimentalsgd\": sgd_experimental.SGD,\n \"nadam\": nadam_v2.Nadam,\n \"rmsprop\": rmsprop_v2.RMSprop,\n \"sgd\": gradient_descent_v2.SGD,\n \"ftrl\": ftrl.Ftrl,\n \"lossscaleoptimizer\": loss_scale_optimizer.LossScaleOptimizer,\n \"lossscaleoptimizerv3\": loss_scale_optimizer.LossScaleOptimizerV3,\n # LossScaleOptimizerV1 was an old version of LSO that 
was removed.\n # Deserializing it turns it into a LossScaleOptimizer\n \"lossscaleoptimizerv1\": loss_scale_optimizer.LossScaleOptimizer,\n }\n\n # Make deserialization case-insensitive for built-in optimizers.\n if config[\"class_name\"].lower() in all_classes:\n config[\"class_name\"] = config[\"class_name\"].lower()\n return deserialize_keras_object(\n config,\n module_objects=all_classes,\n custom_objects=custom_objects,\n printable_module_name=\"optimizer\",\n )\n\n\n@keras_export(\"keras", "d_id": 81339, "documentation": { "docstring": "Inverse of the `serialize` function.\n\n Args:\n config: Optimizer configuration dictionary.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras Optimizer instance.\n ", "n_words": 32, "vocab_size": 30, "n_whitespaces": 71, "language": "en" } }, { "id": 196189, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/permutations.py", "file_name": "permutations.py", "fun_name": "commutes_with", "commit_message": "Updated import locations", "code": "def commutes_with(self, other):\n \n a = self.array_form\n b = other.array_form\n return _af_commutes_with(a, b)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 25, "n_ast_nodes": 41, "n_identifiers": 7, "random_cut": "def commutes_with(self, other):\n \n a = s", "d_id": 47689, "documentation": { "docstring": "\n Checks if the elements are commuting.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> a = Permutation([1, 4, 3, 0, 2, 5])\n >>> b = Permutation([0, 1, 2, 3, 4, 5])\n >>> a.commutes_with(b)\n True\n >>> b = Permutation([2, 3, 5, 4, 1, 0])\n >>> a.commutes_with(b)\n False\n ", "n_words": 46, "vocab_size": 30, "n_whitespaces": 131, "language": "en" } }, { "id": 20492, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/styles/__init__.py", "file_name": "__init__.py", "fun_name": "get_all_styles", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def get_all_styles():\n \n yield from STYLE_MAP\n for name, _ in find_plugin_styles():\n yield name\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 28, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def get_all_styles():\n \n yield from STYLE_MAP\n for name, _ in find_plugin_styles():\n yield", "d_id": 3396, "documentation": { "docstring": "Return a generator for all styles by name,\n both builtin and plugin.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 14, "language": "en" } 
}, { "id": 39439, "commit_id": "1d7341e93d1f03387699fb3c6ae0b6c0e464296f", "repo": "recommenders", "path": "recommenders/utils/python_utils.py", "file_name": "python_utils.py", "fun_name": "lift", "commit_message": "Add new item similarity metrics for SAR (#1754)\n\n* Add mutual information similarity in SAR\r\n\r\n* Add lexicographers mutual information similarity for SAR\r\n\r\n* Add cosine similarity for SAR\r\n\r\n* Add inclusion index for SAR\r\n\r\n* Typos\r\n\r\n* Change SARSingleNode to SAR\r\n\r\n* Convert item similarity matrix to np.array\r\n\r\n* Update\r\n\r\n* Update SAR tests\r\n\r\n* Remove unused imports\r\n\r\n* Add explanations for new similarity metrics", "code": "def lift(cooccurrence):\n \n\n diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal())\n\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n result = cooccurrence / (diag_rows * diag_cols)\n\n return np.array(result)\n\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 48, "n_ast_nodes": 85, "n_identifiers": 12, "random_cut": "def lift(cooccurrence):\n \n\n diag_rows, diag_cols = _get_row_and_column_matrix(co", "d_id": 7231, "documentation": { "docstring": "Helper method to calculate the Lift of a matrix of\n co-occurrences. In comparison with basic co-occurrence and Jaccard\n similarity, lift favours discoverability and serendipity, as\n opposed to co-occurrence that favours the most popular items, and\n Jaccard that is a compromise between the two.\n\n Args:\n cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items.\n\n Returns:\n numpy.ndarray: The matrix of Lifts between any two items.\n\n ", "n_words": 63, "vocab_size": 44, "n_whitespaces": 98, "language": "en" } }, { "id": 107629, "commit_id": "d69be2554cf6d1ac711bf433b1d6f176e3290d4f", "repo": "matplotlib", "path": "lib/matplotlib/artist.py", "file_name": "artist.py", "fun_name": "update", "commit_message": "Clarify error message for bad keyword arguments.\n\n`plot([], [], foo=42)` previously emitted\n```\n'Line2D' object has no property 'foo'\n```\nwhich refers to the Matplotlib-specific concept of \"properties\". It now\ninstead emits\n```\nLine2D.set() got an unexpected keyword argument 'foo'\n```\nwhich is modeled after the standard error message for unknown keyword\narguments.\n\n(To maximize backcompat, the implementation goes through a new\n_internal_update, which does *not* error when the same prop is passed\nunder different aliases. 
This could be changed later, but is not the\ngoal of this PR.)", "code": "def update(self, props):\n \n return self._update_props(\n props, \"{cls.__name__!r} object has no property {prop_name!r}\")\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 37, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 30, "n_identifiers": 4, "random_cut": "def update(self, props):\n \n return self._update_props(\n props, \"{cls.__name__!r} object has no property {prop_name!r}\")\n", "d_id": 22841, "documentation": { "docstring": "\n Update this artist's properties from the dict *props*.\n\n Parameters\n ----------\n props : dict\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 49, "language": "en" } }, { "id": 133396, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/torch/worker_group.py", "file_name": "worker_group.py", "fun_name": "new_workers_size", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def new_workers_size(self):\n \n remote_resources = ray.available_resources()\n max_remote_workers = self._max_workers\n new_remote_workers = min(remote_resources.get(\"CPU\", 0), max_remote_workers)\n if self._use_gpu:\n new_remote_workers = min(remote_resources.get(\"GPU\", 0), new_remote_workers)\n return new_remote_workers\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 75, "n_words": 22, "vocab_size": 16, "complexity": 2, "nloc": 7, "token_counts": 55, "n_ast_nodes": 92, "n_identifiers": 11, "random_cut": "def new_workers_size(self):\n \n remote_resources = ray.available_resources()\n max_remote_workers = self._max_workers\n new_remote_workers = min(remote_resources.get(\"CPU\"", "d_id": 30007, "documentation": { "docstring": "Returns number of workers to create based on available resources.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 209787, "commit_id": "a2b7a28faff1db058dd22ce097a268e0ad5d1d33", "repo": "scapy", "path": "scapy/arch/windows/__init__.py", "file_name": "__init__.py", "fun_name": "setmonitor", "commit_message": "[Hinty] Core typing: windows (#3684)\n\n* Core typing: windows\r\n\r\nCo-authored-by: Pierre ", "code": "def setmonitor(self, enable=True):\n # type: (bool) -> bool\n \n # We must reset the monitor cache\n if enable:\n res = self.setmode('monitor')\n else:\n res = self.setmode('managed')\n if not res:\n log_runtime.error(\"Npcap WlanHelper returned with an error code !\")\n self.cache_mode = None\n tmp = self.cache_mode = self.ismonitor()\n return tmp if enable else (not tmp)\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 146, "n_words": 50, "vocab_size": 40, "complexity": 4, "nloc": 10, "token_counts": 66, "n_ast_nodes": 117, "n_identifiers": 10, "random_cut": "def setmonitor(self, enable=True):\n # type: (bool) -> bool\n \n # We must reset the monitor cache\n if enable:\n res = self.setmode('monitor')\n else:\n res = self.setmode('managed')\n if not res:\n log_runtime.error(\"Npcap WlanHelper returned with an error code !\")\n self.cache_mode = None\n t", "d_id": 52779, "documentation": { "docstring": "Alias for setmode('monitor') or setmode('managed')\n 
Only available with Npcap", "n_words": 9, "vocab_size": 9, "n_whitespaces": 15, "language": "en" } }, { "id": 225607, "commit_id": "a4d33e180c4407990afa1fc03aa079718d738ebd", "repo": "albumentations", "path": "albumentations/augmentations/geometric/rotate.py", "file_name": "rotate.py", "fun_name": "_rotated_rect_with_max_area", "commit_message": "add `crop_border` option to Rotate (#1214)", "code": "def _rotated_rect_with_max_area(h, w, angle):\n \n\n angle = math.radians(angle)\n width_is_longer = w >= h\n side_long, side_short = (w, h) if width_is_longer else (h, w)\n\n # since the solutions for angle, -angle and 180-angle are all the same,\n # it is sufficient to look at the first quadrant and the absolute values of sin,cos:\n sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))\n if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:\n # half constrained case: two crop corners touch the longer side,\n # the other two corners are on the mid-line parallel to the longer line\n x = 0.5 * side_short\n wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)\n else:\n # fully constrained case: crop touches all 4 sides\n cos_2a = cos_a * cos_a - sin_a * sin_a\n wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a\n\n return dict(\n x_min=max(0, int(w / 2 - wr / 2)),\n x_max=min(w, int(w / 2 + wr / 2)),\n y_min=max(0, int(h / 2 - hr / 2)),\n y_max=min(h, int(h / 2 + hr / 2)),\n )\n", "url": "https://github.com/albumentations-team/albumentations.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 393, "n_words": 195, "vocab_size": 102, "complexity": 5, "nloc": 17, "token_counts": 233, "n_ast_nodes": 347, "n_identifiers": 26, "random_cut": "def _rotated_rect_with_max_area(h, w, angle):\n \n\n angle = math.radians(angle)\n width_is_longer = w >= h\n side_long, side_short = (w, h) if width_is_longer else (h, w)\n\n # since the solutions for angle, -angl", "d_id": 57477, "documentation": { "docstring": "\n Given a rectangle of size wxh that has been rotated by 'angle' (in\n degrees), computes the width and height of the largest possible\n axis-aligned rectangle (maximal area) within the rotated rectangle.\n\n Code from: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 70, "language": "en" } }, { "id": 20426, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/lexer.py", "file_name": "lexer.py", "fun_name": "using", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def using(_other, **kwargs):\n \n gt_kwargs = {}\n if 'state' in kwargs:\n s = kwargs.pop('state')\n if isinstance(s, (list, tuple)):\n gt_kwargs['stack'] = s\n else:\n gt_kwargs['stack'] = ('root', s)\n\n if _other is this:", "url": "https://github.com/pypa/pipenv.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 84, "n_words": 29, "vocab_size": 22, "complexity": 4, "nloc": 13, "token_counts": 69, "n_ast_nodes": 107, "n_identifiers": 10, "random_cut": "def using(_other, **kwargs):\n \n gt_kwargs = {}\n if 'state' in kwargs:\n s = kwargs.pop('state')\n ", "d_id": 3362, "documentation": { "docstring": "\n Callback that processes the match with a different lexer.\n\n The keyword arguments are forwarded to the lexer, except `state` which\n is handled separately.\n\n `state` specifies the state that the new lexer will start in, and can\n be an enumerable such as ('root', 'inline', 'string') or a simple\n string which is assumed to be on top of the root state.\n\n Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.\n ", "n_words": 70, "vocab_size": 55, "n_whitespaces": 95, "language": "en" } }, { "id": 319613, "commit_id": "69ef26dab04d51e7e102dcb33cd98ddc6ad975fd", "repo": "paperless-ngx", "path": "src/documents/tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_unset_document_storage_path", "commit_message": "Feature: Dynamic document storage pathes (#916)\n\n* Added devcontainer\r\n\r\n* Add feature storage pathes\r\n\r\n* Exclude tests and add versioning\r\n\r\n* Check escaping\r\n\r\n* Check escaping\r\n\r\n* Check quoting\r\n\r\n* Echo\r\n\r\n* Escape\r\n\r\n* Escape :\r\n\r\n* Double escape \\\r\n\r\n* Escaping\r\n\r\n* Remove if\r\n\r\n* Escape colon\r\n\r\n* Missing \\\r\n\r\n* Esacpe :\r\n\r\n* Escape all\r\n\r\n* test\r\n\r\n* Remove sed\r\n\r\n* Fix exclude\r\n\r\n* Remove SED command\r\n\r\n* Add LD_LIBRARY_PATH\r\n\r\n* Adjusted to v1.7\r\n\r\n* Updated test-cases\r\n\r\n* Remove devcontainer\r\n\r\n* Removed internal build-file\r\n\r\n* Run pre-commit\r\n\r\n* Corrected flak8 error\r\n\r\n* Adjusted to v1.7\r\n\r\n* Updated test-cases\r\n\r\n* Corrected flak8 error\r\n\r\n* Adjusted to new plural translations\r\n\r\n* Small adjustments due to code-review backend\r\n\r\n* Adjusted line-break\r\n\r\n* Removed PAPERLESS prefix from settings variables\r\n\r\n* Corrected style change due to search+replace\r\n\r\n* First documentation draft\r\n\r\n* Revert changes to Pipfile\r\n\r\n* Add sphinx-autobuild with keep-outdated\r\n\r\n* Revert merge error that results in wrong storage path is evaluated\r\n\r\n* Adjust styles of generated files ...\r\n\r\n* Adds additional testing to cover dynamic storage path functionality\r\n\r\n* Remove unnecessary condition\r\n\r\n* Add hint to edit storage path dialog\r\n\r\n* Correct spelling of pathes to paths\r\n\r\n* Minor documentation tweaks\r\n\r\n* Minor typo\r\n\r\n* improving wrapping of filter editor buttons with new storage path button\r\n\r\n* Update .gitignore\r\n\r\n* Fix select border radius in non input-groups\r\n\r\n* Better storage path edit hint\r\n\r\n* Add note to edit storage path dialog re document_renamer\r\n\r\n* Add note to bulk edit storage path re document_renamer\r\n\r\n* Rename FILTER_STORAGE_DIRECTORY to PATH\r\n\r\n* Fix broken filter rule parsing\r\n\r\n* Show default storage if unspecified\r\n\r\n* Remove note re storage path on bulk edit\r\n\r\n* Add basic validation of filename variables\r\n\r\nCo-authored-by: Markus Kling \r\nCo-authored-by: Trenton Holmes \r\nCo-authored-by: Michael Shamoon <4887959+shamoon@users.noreply.github.com>\r\nCo-authored-by: Quinn Casey ", "code": "def test_unset_document_storage_path(self):\n \n self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)\n\n 
bulk_edit.set_storage_path(\n [self.doc1.id],\n self.sp1.id,\n )\n\n self.assertEqual(Document.objects.filter(storage_path=None).count(), 4)\n\n bulk_edit.set_storage_path(\n [self.doc1.id],\n None,\n )\n\n self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)\n\n self.async_task.assert_called()\n args, kwargs = self.async_task.call_args\n\n self.assertCountEqual(kwargs[\"document_ids\"], [self.doc1.id])\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 144, "n_words": 23, "vocab_size": 17, "complexity": 1, "nloc": 15, "token_counts": 136, "n_ast_nodes": 215, "n_identifiers": 19, "random_cut": "def test_unset_document_storage_path(self):\n \n self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)\n\n bulk_edit.set_storage_path(\n [self.doc1.id],\n self.sp1.id,\n )\n\n self.assertEqual(Document.objects.filter(storage_path=None).count(), 4)\n\n bulk_edit.set_storage_path(\n [self.doc1.id],\n None,\n )\n\n self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)\n\n self.async_task", "d_id": 116975, "documentation": { "docstring": "\n GIVEN:\n - 4 documents without defined storage path\n - 1 document with a defined storage\n WHEN:\n - Bulk edit called to remove storage path from 1 document\n THEN:\n - Single document storage path removed\n ", "n_words": 34, "vocab_size": 22, "n_whitespaces": 107, "language": "en" } }, { "id": 118602, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/tests/streamlit/cache_spinner_test.py", "file_name": "cache_spinner_test.py", "fun_name": "test_with_spinner", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def test_with_spinner(self):\n \n function_with_spinner()\n self.assertFalse(self.forward_msg_queue.is_empty())\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 25, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 39, "n_identifiers": 6, "random_cut": "def test_with_spinner(self):\n ", "d_id": 26314, "documentation": { "docstring": "If the show_spinner flag is set, there should be one element in the\n report queue.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 249079, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_device.py", "file_name": "test_device.py", "fun_name": "test_update_device_too_long_display_name", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_update_device_too_long_display_name(self) -> None:\n \n # Set iniital display name.\n update = {\"display_name\": \"new display\"}\n self.get_success(\n self.handler.update_device(\n self.other_user, self.other_user_device_id, update\n )\n )\n\n # Request to update a device display name with a new value that is longer than allowed.\n update = {\n \"display_name\": \"a\"\n * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)\n }\n\n channel = self.make_request(\n \"PUT\",\n self.url,\n access_token=self.admin_user_tok,\n content=update,\n )\n\n 
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.TOO_LARGE, channel.json_body[\"errcode\"])\n\n # Ensure the display name was not updated.\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(\"new display\", channel.json_body[\"display_name\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 337, "n_words": 82, "vocab_size": 61, "complexity": 1, "nloc": 29, "token_counts": 159, "n_ast_nodes": 257, "n_identifiers": 26, "random_cut": "def test_update_device_too_long_display_name(self) -> None:\n \n ", "d_id": 72586, "documentation": { "docstring": "\n Update a device with a display name that is invalid (too long).\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 217975, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/imaplib.py", "file_name": "imaplib.py", "fun_name": "xatom", "commit_message": "add python 3.10.4 for windows", "code": "def xatom(self, name, *args):\n \n name = name.upper()\n #if not name in self.capabilities: # Let the server decide!\n # raise self.error('unknown extension command: %s' % name)\n if not name in Commands:\n Commands[name] = (self.state,)\n return self._simple_command(name, *args)\n\n\n\n # Private methods\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 109, "n_words": 39, "vocab_size": 32, "complexity": 2, "nloc": 5, "token_counts": 45, "n_ast_nodes": 75, "n_identifiers": 8, "random_cut": "def xatom(self, name, *args):\n \n name = name.upper()\n #if not name in self.capabilities: # Let the server decide!\n # raise self.error('unknown extension command: %s' % name)\n if not name in Commands:\n Commands[name] = (self.state,)\n return self._simple_command(name, *args)\n\n\n\n # Private", "d_id": 55049, "documentation": { "docstring": "Allow simple extension commands\n notified by server in CAPABILITY response.\n\n Assumes command is legal in current state.\n\n (typ, [data]) = .xatom(name, arg, ...)\n\n Returns response appropriate to extension command `name'.\n ", "n_words": 30, "vocab_size": 27, "n_whitespaces": 73, "language": "en" } }, { "id": 1493, "commit_id": "f3b8f6f1196e6f8a92620b4efc190715273fecab", "repo": "PySyft", "path": "packages/syft/src/syft/core/tensor/nn/loss.py", "file_name": "loss.py", "fun_name": "forward", "commit_message": "Moved all code from notebook to codebase\n\nTook 19 minutes", "code": "def forward(self, outputs, targets):\n \n outputs = outputs.clip(self.epsilon, 1 - self.epsilon)\n log_loss = targets * dp_log(outputs) + ((targets * -1) + 1) * dp_log((outputs * -1) + 1)\n log_loss = log_loss.sum(axis=1) * -1\n return log_loss.mean()\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 69, "n_words": 34, "vocab_size": 23, "complexity": 1, "nloc": 5, "token_counts": 76, "n_ast_nodes": 123, "n_identifiers": 11, "random_cut": "def forward(self, outputs, targets):\n \n outputs = outputs.clip(self.epsilon, 1 - self.epsilon)\n log_loss = targets * dp_log(outputs) + ((targets * -1) + 1) * dp_log((outputs * -1) + 1)\n log_loss = log_loss.sum(axi", "d_id": 199, "documentation": { 
"docstring": "Forward pass.\n\n .. math:: L = -t \\\\log(p) - (1 - t) \\\\log(1 - p)\n\n Parameters\n ----------\n outputs : numpy.array\n Predictions in (0, 1), such as sigmoidal output of a neural network.\n targets : numpy.array\n Targets in [0, 1], such as ground truth labels.\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 108, "language": "en" } }, { "id": 176320, "commit_id": "34d9d630bb02426d297d3e20fedb7da8c3ced03a", "repo": "networkx", "path": "networkx/algorithms/assortativity/correlation.py", "file_name": "correlation.py", "fun_name": "numeric_assortativity_coefficient", "commit_message": "MAINT: Cleanup assortativity module, remove unused variables (#5301)\n\nRemove unused variables, sort imports,\r\nraise errors instead of accepting invalid arguments silently\r\n\r\nCo-authored-by: Dan Schult ", "code": "def numeric_assortativity_coefficient(G, attribute, nodes=None):\n \n if nodes is None:\n nodes = G.nodes\n vals = {G.nodes[n][attribute] for n in nodes}\n mapping = {d: i for i, d, in enumerate(vals)}\n M = attribute_mixing_matrix(G, attribute, nodes, mapping)\n return _numeric_ac(M, mapping)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 36, "vocab_size": 28, "complexity": 4, "nloc": 7, "token_counts": 75, "n_ast_nodes": 111, "n_identifiers": 13, "random_cut": "def numeric_assortativity_coefficient(G, attribute, nodes=None):\n \n if nodes is None:\n nodes ", "d_id": 41834, "documentation": { "docstring": "Compute assortativity for numerical node attributes.\n\n Assortativity measures the similarity of connections\n in the graph with respect to the given numeric attribute.\n\n Parameters\n ----------\n G : NetworkX graph\n\n attribute : string\n Node attribute key.\n\n nodes: list or iterable (optional)\n Compute numeric assortativity only for attributes of nodes in\n container. The default is all nodes.\n\n Returns\n -------\n r: float\n Assortativity of graph for given attribute\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_nodes_from([0, 1], size=2)\n >>> G.add_nodes_from([2, 3], size=3)\n >>> G.add_edges_from([(0, 1), (2, 3)])\n >>> print(nx.numeric_assortativity_coefficient(G, \"size\"))\n 1.0\n\n Notes\n -----\n This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation\n coefficient of the specified (scalar valued) attribute across edges.\n\n References\n ----------\n .. [1] M. E. J. 
Newman, Mixing patterns in networks\n Physical Review E, 67 026126, 2003\n ", "n_words": 129, "vocab_size": 99, "n_whitespaces": 244, "language": "en" } }, { "id": 270311, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/distributed_file_utils.py", "file_name": "distributed_file_utils.py", "fun_name": "write_filepath", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def write_filepath(filepath, strategy):\n \n dirpath = os.path.dirname(filepath)\n base = os.path.basename(filepath)\n return os.path.join(write_dirpath(dirpath, strategy), base)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 44, "n_ast_nodes": 70, "n_identifiers": 11, "random_cut": "def write_filepath(filepath, strategy):\n \n dir", "d_id": 80425, "documentation": { "docstring": "Returns the writing file path to be used to save file distributedly.\n\n Directory to contain `filepath` would be created if it doesn't exist.\n\n Args:\n filepath: Original filepath that would be used without distribution.\n strategy: The tf.distribute strategy object currently used.\n\n Returns:\n The writing filepath that should be used to save file with distribution.\n ", "n_words": 53, "vocab_size": 36, "n_whitespaces": 80, "language": "en" } }, { "id": 32423, "commit_id": "8e8384663d716d4b5a4f510070ff954fc0ba4a52", "repo": "transformers", "path": "src/transformers/models/convnext/modeling_tf_convnext.py", "file_name": "modeling_tf_convnext.py", "fun_name": "serving", "commit_message": "Update serving code to enable `saved_model=True` (#18153)\n\n* Add serving_output and serving methods to some vision models\r\n\r\n* Add serving outputs for DeiT\r\n\r\n* Don't convert hidden states - differing shapes\r\n\r\n* Make saveable\r\n\r\n* Fix up\r\n\r\n* Make swin saveable\r\n\r\n* Add in tests\r\n\r\n* Fix funnel tests (can't convert to tensor)\r\n\r\n* Fix numpy call\r\n\r\n* Tidy up a bit\r\n\r\n* Add in hidden states - resnet\r\n\r\n* Remove numpy\r\n\r\n* Fix failing tests - tensor shape and skipping tests\r\n\r\n* Remove duplicated function\r\n\r\n* PR comments - formatting and var names\r\n\r\n* PR comments\r\nAdd suggestions made by Joao Gante:\r\n* Use tf.shape instead of shape_list\r\n* Use @tooslow decorator on tests\r\n* Simplify some of the logic\r\n\r\n* PR comments\r\nAddress Yih-Dar Sheih comments - making tensor names consistent and make types float\r\n\r\n* Types consistent with docs; disable test on swin (slow)\r\n\r\n* CI trigger\r\n\r\n* Change input_features to float32\r\n\r\n* Add serving_output for segformer\r\n\r\n* Fixup\r\n\r\nCo-authored-by: Amy Roberts ", "code": "def serving(self, inputs):\n \n output = self.call(inputs)\n return self.serving_output(output)\n\n\nCONVNEXT_START_DOCSTRING = r\n\nCONVNEXT_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare ConvNext model outputting raw features without any specific head on top.\",\n CONVNEXT_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"The bare ConvNext model outputting raw features without any specific head on top.\",\n CONVNEXT_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 53, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 3, "token_counts": 23, 
"n_ast_nodes": 67, "n_identifiers": 9, "random_cut": "def serving(self, inputs):\n \n output = self.call(inputs)\n return self.serving_output(output)\n\n\nCONVNEXT_START_DOCSTRING = r\n\nCONVNEXT_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare ", "d_id": 5922, "documentation": { "docstring": "\n Method used for serving the model.\n\n Args:\n inputs (`Dict[str, tf.Tensor]`):\n The input of the saved model as a dictionary of tensors.\n \n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n \n\n Parameters:\n config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ConvNextFeatureExtractor`]. See\n [`ConvNextFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n", "n_words": 298, "vocab_size": 171, "n_whitespaces": 518, "language": "en" } }, { "id": 218477, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getclasstree", "commit_message": "add python 3.10.4 for windows", "code": "def getclasstree(classes, unique=False):\n \n children = {}\n roots = []\n for c in classes:\n if c.__bases__:\n for parent in c.__bases__:\n if parent not in children:\n children[parent] = []\n if c not in children[parent]:\n children[parent].append(c)\n if unique and parent in classes: break\n elif c not in roots:\n roots.append(c)\n for parent in children:\n if parent not in classes:\n roots.append(parent)\n return walktree(roots, children, None)\n\n# ------------------------------------------------ argument list extraction\nArguments = namedtuple('Arguments', 'args, varargs, varkw')\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 224, "n_words": 71, "vocab_size": 41, "complexity": 11, "nloc": 17, "token_counts": 112, "n_ast_nodes": 191, "n_identifiers": 12, "random_cut": "def getclasstree(classes, unique=False):\n \n children = {}\n roots = []\n for c in classes:\n if c.__bases__:\n for parent in c.__bases__:\n if parent not in children:\n children[parent] = []\n if c not in children[parent]:\n children[parent].append(c)\n if unique and parent in classes: break\n elif c not in roots:\n roots.append(c)\n for parent in", "d_id": 55333, "documentation": { "docstring": "Arrange the given list of classes into a hierarchy of nested lists.\n\n Where a nested list appears, it contains classes derived from the class\n whose entry immediately precedes the list. Each entry is a 2-tuple\n containing a class and a tuple of its base classes. If the 'unique'\n argument is true, exactly one entry appears in the returned structure\n for each class in the given list. 
Otherwise, classes using multiple\n inheritance and their descendants will appear multiple times.", "n_words": 78, "vocab_size": 53, "n_whitespaces": 98, "language": "en" } }, { "id": 61435, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py", "file_name": "versioncontrol.py", "fun_name": "get_revision", "commit_message": "upd; format", "code": "def get_revision(cls, location):\n # type: (str) -> str\n \n raise NotImplementedError\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 4, "random_cut": "def get_revision(cls, location):\n # type: (s", "d_id": 12574, "documentation": { "docstring": "\n Return the current commit id of the files at the given location.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 27, "language": "en" } }, { "id": 55038, "commit_id": "95b47e807fa5ccc626a06efc2cced0d8ff8eadfa", "repo": "prefect", "path": "src/prefect/settings.py", "file_name": "settings.py", "fun_name": "get_current_settings", "commit_message": "Rewrite temporary settings to use copy_with_update", "code": "def get_current_settings() -> Settings:\n \n from prefect.context import ProfileContext\n\n profile = ProfileContext.get()\n if profile is not None:\n return profile.settings\n\n return get_settings_from_env()\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 42, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 10, "token_counts": 34, "n_ast_nodes": 58, "n_identifiers": 9, "random_cut": "def get_current_settings() -> Settings:\n \n from prefect.context import ProfileContext\n\n profile = ProfileCo", "d_id": 11185, "documentation": { "docstring": "\n Returns a settings object populated with values from the current profile or, if no\n profile is active, the environment.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 29, "language": "en" } }, { "id": 189215, "commit_id": "8a16d7d8ce5e3f97fb100af7a960224f7f80137d", "repo": "aws-cli", "path": "tests/unit/customizations/s3/test_comparator.py", "file_name": "test_comparator.py", "fun_name": "test_compare_key_greater", "commit_message": "Delete extra whitespace\n\nA correction that does not affect the operation.", "code": "def test_compare_key_greater(self):\n \n self.not_at_dest_sync_strategy.determine_should_sync.return_value = False\n\n # Try when the sync strategy says to sync the file.\n self.not_at_src_sync_strategy.determine_should_sync.return_value = True\n\n src_files = []\n dest_files = []\n ref_list = []\n result_list = []\n time = datetime.datetime.now()\n src_file = FileStat(src='', dest='',\n compare_key='domparator_test.py', size=10,\n last_update=time, src_type='local',\n dest_type='s3', operation_name='upload')\n dest_file = FileStat(src='', dest='',\n compare_key='comparator_test.py', size=10,\n last_update=time, src_type='s3',\n dest_type='local', operation_name='')\n src_files.append(src_file)\n dest_files.append(dest_file)\n ref_list.append(dest_file)\n files = self.comparator.call(iter(src_files), iter(dest_files))\n for filename in files:\n result_list.append(filename)\n self.assertEqual(result_list, ref_list)\n\n # Now try when the sync strategy says not to sync the file.\n 
self.not_at_src_sync_strategy.determine_should_sync.return_value = False\n result_list = []\n ref_list = []\n files = self.comparator.call(iter(src_files), iter(dest_files))\n for filename in files:\n result_list.append(filename)\n self.assertEqual(result_list, ref_list)\n\n", "url": "https://github.com/aws/aws-cli.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 456, "n_words": 101, "vocab_size": 53, "complexity": 3, "nloc": 30, "token_counts": 230, "n_ast_nodes": 378, "n_identifiers": 31, "random_cut": "def test_compare_key_greater(self):\n \n self.not_at_dest_sync_strategy.determine_should_sync.return_value = False\n\n # Try when the sync strategy says to sync the file.\n self.not_at_src_syn", "d_id": 46020, "documentation": { "docstring": "\n Confirm the appropriate action is taken when the soruce compare key\n is greater than the destination compare key.\n ", "n_words": 18, "vocab_size": 14, "n_whitespaces": 40, "language": "en" } }, { "id": 202469, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/custom_lookups/tests.py", "file_name": "tests.py", "fun_name": "test_custom_exact_lookup_none_rhs", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_custom_exact_lookup_none_rhs(self):\n \n field = Author._meta.get_field(\"birthdate\")\n OldExactLookup = field.get_lookup(\"exact\")\n author = Author.objects.create(name=\"author\", birthdate=None)\n try:\n field.register_lookup(Exactly, \"exact\")\n self.assertEqual(Author.objects.get(birthdate__exact=None), author)\n finally:\n field.register_lookup(OldExactLookup, \"exact\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 95, "n_words": 20, "vocab_size": 17, "complexity": 2, "nloc": 9, "token_counts": 77, "n_ast_nodes": 134, "n_identifiers": 18, "random_cut": "def test_custom_exact_lookup_none_rhs(self):\n \n field = Author._meta.get_field(\"birthdate\")\n OldExactLookup = field.get_lookup(\"exact\")\n author = Author.objects.create(name=\"author\", birthdate=None)\n try:\n field.register_lookup(Exactly, \"exact\"", "d_id": 50133, "documentation": { "docstring": "\n __exact=None is transformed to __isnull=True if a custom lookup class\n with lookup_name != 'exact' is registered as the `exact` lookup.\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 42, "language": "en" } }, { "id": 9461, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/metrics/precision_recall.py", "file_name": "precision_recall.py", "fun_name": "batch_pairwise_distances", "commit_message": "initialize ostec", "code": "def batch_pairwise_distances(U, V):\n \n with tf.variable_scope('pairwise_dist_block'):\n # Squared norms of each row in U and V.\n norm_u = tf.reduce_sum(tf.square(U), 1)\n norm_v = tf.reduce_sum(tf.square(V), 1)\n\n # norm_u as a row and norm_v as a column vectors.\n norm_u = tf.reshape(norm_u, [-1, 1])\n norm_v = tf.reshape(norm_v, [1, -1])\n\n # Pairwise squared Euclidean distances.\n D = tf.maximum(norm_u - 2*tf.matmul(U, V, False, True) + norm_v, 0.0)\n\n return D\n\n#----------------------------------------------------------------------------\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 127, "n_words": 63, "vocab_size": 47, "complexity": 1, 
"nloc": 8, "token_counts": 107, "n_ast_nodes": 167, "n_identifiers": 13, "random_cut": "def batch_pairwise_distances(U, V):\n \n with tf.variable_scope('pairwise_dist_block'):\n # Squared norms of each row in U and V.\n norm_u = tf.reduce_sum(tf.square(U), 1)\n norm_v = tf.reduce_sum(tf.square(V), 1)\n\n # norm_u as a row and norm_v as a column vectors.\n norm_u = tf.reshape(norm_u, [-1, 1])\n norm_v = tf.reshape(norm_v, [1, -1])\n\n # Pairwise squared Euclidean distances.\n D = tf.maximum(norm_u - 2*tf.matmul(U, V, False, True) + norm_v, 0.0)\n\n return D\n\n#-----------------------------", "d_id": 1621, "documentation": { "docstring": " Compute pairwise distances between two batches of feature vectors.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 31219, "commit_id": "49becbaa5549b477b0d96c55f207614773c0ab42", "repo": "transformers", "path": "src/transformers/image_utils.py", "file_name": "image_utils.py", "fun_name": "expand_dims", "commit_message": "Enable crop_center method to handle (W, H, C) images (#17626)\n\n* enable crop_center method to handle (W, H, C) images\r\n\r\n* minor style and comment edits", "code": "def expand_dims(self, image):\n \n self._ensure_format_supported(image)\n\n # Do nothing if PIL image\n if isinstance(image, PIL.Image.Image):\n return image\n\n if is_torch_tensor(image):\n image = image.unsqueeze(0)\n else:\n image = np.expand_dims(image, axis=0)\n return image\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 109, "n_words": 27, "vocab_size": 19, "complexity": 3, "nloc": 9, "token_counts": 58, "n_ast_nodes": 95, "n_identifiers": 11, "random_cut": "def expand_dims(self, image):\n \n self._ensure_format", "d_id": 5702, "documentation": { "docstring": "\n Expands 2-dimensional `image` to 3 dimensions.\n\n Args:\n image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):\n The image to expand.\n ", "n_words": 17, "vocab_size": 14, "n_whitespaces": 65, "language": "en" } }, { "id": 47487, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_find_executable_task_instances_order_execution_date", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_find_executable_task_instances_order_execution_date(self, dag_maker):\n \n dag_id_1 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-a'\n dag_id_2 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-b'\n task_id = 'task-a'\n session = settings.Session()\n with dag_maker(dag_id=dag_id_1, max_active_tasks=16, session=session):\n EmptyOperator(task_id=task_id)\n dr1 = dag_maker.create_dagrun(execution_date=DEFAULT_DATE + timedelta(hours=1))\n\n with dag_maker(dag_id=dag_id_2, max_active_tasks=16, session=session):\n EmptyOperator(task_id=task_id)\n dr2 = dag_maker.create_dagrun()\n\n dr1 = session.merge(dr1, load=False)\n\n self.scheduler_job = SchedulerJob(subdir=os.devnull)\n\n tis = dr1.task_instances + dr2.task_instances\n for ti in tis:\n ti.state = State.SCHEDULED\n session.merge(ti)\n session.flush()\n\n res = self.scheduler_job._executable_task_instances_to_queued(max_tis=1, session=session)\n session.flush()\n assert [ti.key for ti in res] == [tis[1].key]\n session.rollback()\n", "url": 
"https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 238, "n_words": 68, "vocab_size": 48, "complexity": 3, "nloc": 22, "token_counts": 193, "n_ast_nodes": 314, "n_identifiers": 38, "random_cut": "def test_find_executable_task_instances_order_execution_date(self, dag_maker):\n \n dag_id_1 = 'SchedulerJobTest.test_find_executable_task_instances_or", "d_id": 9128, "documentation": { "docstring": "\n Test that task instances follow execution_date order priority. If two dagruns with\n different execution dates are scheduled, tasks with earliest dagrun execution date will first\n be executed\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 56, "language": "en" } }, { "id": 87377, "commit_id": "a882713d1b8fc6f30ba7e8717252334d6720caa9", "repo": "sentry", "path": "src/sentry/web/frontend/base.py", "file_name": "base.py", "fun_name": "dispatch", "commit_message": "chore(hybrid-cloud): Refactor Organization ORM out of views and auth (#40362)\n\nFor hybrid cloud, the organization and related models will not exist in the control silo, but will be necessary for certain auth related flows. This change is the first of many to make the core auth flows compatible with a split silo world by introducing a service object that captures existing needs for an organization arond the `get_active_organization` method. Behavior should remain identical, except that the pure ORM object is not available in many places. Those places have been updated to use a new thinner model object that corresponds with future control silo's data availability.\r\n\r\nCo-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>", "code": "def dispatch(self, request, *args, **kwargs):\n \n\n self.determine_active_organization(request, kwargs.get(\"organization_slug\", None))\n\n if self.csrf_protect:\n if hasattr(self.dispatch.__func__, \"csrf_exempt\"):\n delattr(self.dispatch.__func__, \"csrf_exempt\")\n response = self.test_csrf(request)\n if response:\n return response\n\n if self.is_auth_required(request, *args, **kwargs):\n return self.handle_auth_required(request, *args, **kwargs)\n\n if self.is_sudo_required(request, *args, **kwargs):\n return self.handle_sudo_required(request, *args, **kwargs)\n\n args, kwargs = self.convert_args(request, *args, **kwargs)\n\n request.access = self.get_access(request, *args, **kwargs)\n\n if not self.has_permission(request, *args, **kwargs):\n return self.handle_permission_required(request, *args, **kwargs)\n\n if \"organization\" in kwargs:\n org = kwargs[\"organization\"]\n if self.is_member_disabled_from_limit(request, org):\n return self.handle_disabled_member(org)\n if self.is_not_2fa_compliant(request, org):\n return self.handle_not_2fa_compliant(request, *args, **kwargs)\n\n self.request = request\n self.default_context = self.get_context_data(request, *args, **kwargs)\n\n return self.handle(request, *args, **kwargs)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 332, "n_words": 89, "vocab_size": 47, "complexity": 10, "nloc": 25, "token_counts": 268, "n_ast_nodes": 415, "n_identifiers": 30, "random_cut": "def dispatch(self, request, *args, **kwargs):\n \n\n self.determine_active_organization(request, kwargs.get(\"organization_slug\", None))\n\n if self.csrf_protect:\n if hasattr(self.dispatch.__func__, \"csrf_exempt\"):\n delattr(self.dispatch.__func__, \"csrf_exempt\")\n response 
= self.test_csrf(request)\n if response:\n return response\n\n if self.is_auth_required(request, *args, **kwargs):\n return self.handle_auth_required(request, *args, **kwargs)\n\n if self.is_sudo_required(request, *args, **kwargs):\n return self.handle_sudo_required(request, *a", "d_id": 18290, "documentation": { "docstring": "\n A note on the CSRF protection process.\n\n Because the CSRF decorators don't work well with view subclasses, we\n allow them to control whether a CSRF check is done by setting\n self.csrf_protect. This has a couple of implications:\n\n 1. We need to mark this method as @csrf_exempt so that when the CSRF\n middleware checks it as part of the regular middleware sequence, it\n always passes.\n 2. If self.csrf_protect is set, we will re-run the CSRF check ourselves\n using CsrfViewMiddleware().process_view()\n 3. But first we must remove the csrf_exempt attribute that was set by\n the decorator so that the middleware doesn't shortcut and pass the\n check unconditionally again.\n\n ", "n_words": 105, "vocab_size": 77, "n_whitespaces": 212, "language": "en" } }, { "id": 101220, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "lib/align/detected_face.py", "file_name": "detected_face.py", "fun_name": "interpolator", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def interpolator(self) -> int:\n \n assert self._interpolator is not None\n return self._interpolator\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 32, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 32, "n_identifiers": 4, "random_cut": "def interpolator(self) -> int:\n \n assert self._interpolator is", "d_id": 20640, "documentation": { "docstring": " int: The cv2 interpolator required to transpose the mask to a full frame. ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 14, "language": "en" } }, { "id": 128590, "commit_id": "e142be077f0c727ab11ba51ecaba9a98b7bfe474", "repo": "ray", "path": "python/ray/tune/tests/test_cluster.py", "file_name": "test_cluster.py", "fun_name": "test_cluster_interrupt", "commit_message": "[tune] Store sync config/checkpoint config in experiment, trial (#29019)\n\nThis is some clean-up required for future changes to the syncing/checkpointing behavior. At the moment we pass single attributes of these configs to the Experiment class, and then subsequently to the Trial class, from which it is passed on to the trainable. If we extend the configurability in the future (e.g. provide fallback mechanisms in the checkpoint config, or make retry wait times configurable in the sync config), we would have to add more and more attributes to these intermediate classes. 
Instead, we should just pass and store the full config.\r\n\r\nAs a next follow-up, we can pass these configs to the Trainable.\r\n\r\nSigned-off-by: Kai Fricke ", "code": "def test_cluster_interrupt(start_connected_cluster, tmpdir):\n \n cluster = start_connected_cluster\n dirpath = str(tmpdir)\n\n # Needs to be in scope for pytest", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 17, "vocab_size": 16, "complexity": 11, "nloc": 75, "token_counts": 335, "n_ast_nodes": 31, "n_identifiers": 6, "random_cut": "def test_cluster_interrupt(start_connected_cluster, tmpdir):\n \n cluster = start_connected_cluster\n dirpath = str(tmpdir)\n\n # Needs to be in scope for pytest", "d_id": 28754, "documentation": { "docstring": "Tests run_experiment on cluster shutdown with actual interrupt.\n\n This is an end-to-end test.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 247400, "commit_id": "7e91107be1a4287873266e588a3c5b415279f4c8", "repo": "synapse", "path": "tests/rest/media/v1/test_oembed.py", "file_name": "test_oembed.py", "fun_name": "test_version", "commit_message": "Add type hints to `tests/rest` (#12146)\n\n* Add type hints to `tests/rest`\r\n\r\n* newsfile\r\n\r\n* change import from `SigningKey`", "code": "def test_version(self) -> None:\n \n for version in (\"1.0\", 1.0, 1):\n result = self.parse_response({\"version\": version, \"type\": \"link\"})\n # An empty Open Graph response is an error, ensure the URL is included.\n self.assertIn(\"og:url\", result.open_graph_result)\n\n # A missing version should be treated as 1.0.\n result = self.parse_response({\"type\": \"link\"})\n self.assertIn(\"og:url\", result.open_graph_result)\n\n # Invalid versions should be rejected.\n for version in (\"2.0\", \"1\", 1.1, 0, None, {}, []):\n result = self.parse_response({\"version\": version, \"type\": \"link\"})\n # An empty Open Graph response is an error, ensure the URL is included.\n self.assertEqual({}, result.open_graph_result)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 200, "n_words": 85, "vocab_size": 50, "complexity": 3, "nloc": 10, "token_counts": 119, "n_ast_nodes": 200, "n_identifiers": 8, "random_cut": "def test_version(self) -> None:\n \n for version in (\"1.0\", 1.0, 1):\n result = self.parse_response({\"version\": version, \"type\": \"link\"})\n # An empty Open Graph response is an error, ensure the URL is included.\n self.assertIn(\"og:url\", result.open_graph_result)\n\n # A missing version should be treated as 1.0.\n result = self.parse_response({\"type\": \"link\"})\n ", "d_id": 71656, "documentation": { "docstring": "Accept versions that are similar to 1.0 as a string or int (or missing).", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 72190, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_userbar.py", "file_name": "test_userbar.py", "fun_name": "test_page_allowing_subpages", "commit_message": "Reformat with black", "code": "def test_page_allowing_subpages(self):\n response = self.client.get(\n reverse(\"wagtailadmin_userbar_frontend\", args=(self.event_index.id,))\n )\n\n # page allows subpages, so the 'add page' button should show\n expected_url = reverse(\n \"wagtailadmin_pages:add_subpage\", args=(self.event_index.id,)\n )\n needle 
= f\n self.assertTagInHTML(needle, str(response.content))\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 100, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 16, "token_counts": 63, "n_ast_nodes": 106, "n_identifiers": 14, "random_cut": "def test_page_allowing_subpages(self):\n response = self.client.get(\n reverse(\"wagtailadmin_userbar_frontend\", args=(self.event_index.id,))\n )\n\n # page allows subpages, so the 'add page' button should show\n expected_url = reverse(\n ", "d_id": 15845, "documentation": { "docstring": "\n \n \n \n \n Add a child page\n \n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 116, "language": "en" } }, { "id": 161199, "commit_id": "c5d03fb3cbf5105aa45dc131474260cf140b748b", "repo": "MockingBird", "path": "mkgui/app.py", "file_name": "app.py", "fun_name": "render_output_ui", "commit_message": "Upgrade to new web service (#529)\n\n* Init new GUI\r\n\r\n* Remove unused codes\r\n\r\n* Reset layout\r\n\r\n* Add samples\r\n\r\n* Make framework to support multiple pages\r\n\r\n* Add vc mode\r\n\r\n* Add preprocessing mode\r\n\r\n* Add training mode\r\n\r\n* Remove text input in vc mode\r\n\r\n* Add entry for GUI and revise readme\r\n\r\n* Move requirement together\r\n\r\n* Add error raise when no model folder found\r\n\r\n* Add readme", "code": "def render_output_ui(self, streamlit_app, input) -> None: # type: ignore\n \n src, result = self.__root__\n \n streamlit_app.subheader(\"Synthesized Audio\")\n streamlit_app.audio(result.content, format=\"audio/wav\")\n\n fig, ax = plt.subplots()\n ax.imshow(src.mel, aspect=\"equal\", interpolation=\"none\")\n ax.set_title(\"mel spectrogram(Source Audio)\")\n streamlit_app.pyplot(fig)\n fig, ax = plt.subplots()\n ax.imshow(result.mel, aspect=\"equal\", interpolation=\"none\")\n ax.set_title(\"mel spectrogram(Result Audio)\")\n streamlit_app.pyplot(fig)\n\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 132, "n_words": 39, "vocab_size": 29, "complexity": 1, "nloc": 15, "token_counts": 111, "n_ast_nodes": 192, "n_identifiers": 21, "random_cut": "def render_output_ui(self, streamlit_app, input) -> None: # type: ignore\n \n src, result = self.__root__\n \n streamlit_app.subheader(\"Synthesized Audio\")\n streamlit_app.audio(result.content, format=\"audio/wav\")\n\n fig, ax = plt.subplots()\n ax.imshow(src.mel, aspect=\"equal\", interpolation=\"none\")\n ax.set_title(\"mel spectrogram(Source Audio)\")\n streamlit_app.pyplot(fig)\n fig, ax = plt.subplots()\n ax.imshow(result.mel, aspect=\"equal\", interpolation=\"none\")\n ax.set_title(\"mel spectrogram(Result", "d_id": 38944, "documentation": { "docstring": "Custom output UI.\n If this method is implmeneted, it will be used instead of the default Output UI renderer.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 33, "language": "en" } }, { "id": 270593, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/dtensor/layout_map.py", "file_name": "layout_map.py", "fun_name": "get_default_mesh", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_default_mesh(self):\n \n return self._default_mesh\n\n\nLayoutMap.get.__doc__ = LayoutMap.__getitem__.__doc__\n\n\n@keras_export(\"keras.dtensor.experimental.layout_map_scope\", v1=[])\n@contextlib.contextmanager", "url": 
"https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.dtensor.experimental.layout_map_scope\", v1=[])\n@contextlib.contextmanager", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 21, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 60, "n_identifiers": 11, "random_cut": "def get_default_mesh(self):\n \n return self._default_mesh\n\n\nLayoutMap.get.__doc__ = LayoutMap", "d_id": 80492, "documentation": { "docstring": "Return the default `Mesh` set at instance creation.\n\n The `Mesh` can be used to create default replicated `Layout` when there\n isn't a match of the input string query.\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 49, "language": "en" } }, { "id": 154186, "commit_id": "8e1190c9979a1df26ea570f3ad2ccd822ad54c8e", "repo": "modin", "path": "modin/pandas/indexing.py", "file_name": "indexing.py", "fun_name": "__setitem__", "commit_message": "REFACTOR-#4730: make Indexers immutable (#4731)\n\nSigned-off-by: Brock Mendel ", "code": "def __setitem__(self, key, item): # pragma: no cover\n \n raise NotImplementedError(\"Implemented by subclasses\")\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 5, "random_cut": "def __setitem__(self, key, item): # pragma: no cover\n \n raise NotImplementedError(\"Implemented by subclasses\")\n", "d_id": 35845, "documentation": { "docstring": "\n Assign `item` value to dataset located by `key`.\n\n Parameters\n ----------\n key : callable or tuple\n The global row numbers to assign data to.\n item : modin.pandas.DataFrame, modin.pandas.Series or scalar\n Value that should be assigned to located dataset.\n\n See Also\n --------\n pandas.DataFrame.iloc\n ", "n_words": 41, "vocab_size": 36, "n_whitespaces": 127, "language": "en" } }, { "id": 202407, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_bad_origin_cannot_be_parsed", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_bad_origin_cannot_be_parsed(self):\n \n req = self._get_POST_request_with_token()\n req.META[\"HTTP_HOST\"] = \"www.example.com\"\n req.META[\"HTTP_ORIGIN\"] = \"https://[\"\n mw = CsrfViewMiddleware(post_form_view)\n self._check_referer_rejects(mw, req)\n self.assertIs(mw._origin_verified(req), False)\n with self.assertLogs(\"django.security.csrf\", \"WARNING\") as cm:\n response = mw.process_view(req, post_form_view, (), {})\n self.assertEqual(response.status_code, 403)\n msg = REASON_BAD_ORIGIN % req.META[\"HTTP_ORIGIN\"]\n self.assertEqual(cm.records[0].getMessage(), \"Forbidden (%s): \" % msg)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 130, "n_words": 42, "vocab_size": 35, "complexity": 1, "nloc": 12, "token_counts": 123, "n_ast_nodes": 210, "n_identifiers": 21, "random_cut": "def test_bad_origin_cannot_be_parsed(self):\n \n req = self._get_POST_request_with_token()\n req.META[\"HTTP_HOST\"] = \"www.example.com\"\n req.META[\"HTTP_ORIGIN\"] = \"https://[\"\n mw = CsrfViewMiddleware(post_form_view)\n self._check_referer_rejects(mw, req)\n self.assertIs(mw._origin_verified(req), False)\n with 
self.assertLogs(\"django.security.csrf\", \"WARNING\") as cm:\n response = mw.process_view(req, post_form_view, (), {})\n self.assertEqual(response.status_code, 403)\n msg = REASON_BAD_ORIGIN % req.META[", "d_id": 50113, "documentation": { "docstring": "\n A POST request with an origin that can't be parsed by urlparse() is\n rejected.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 109299, "commit_id": "b89ed5752c2a3b4eb9c9a3bf57848f543765fd6d", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/axis3d.py", "file_name": "axis3d.py", "fun_name": "move_from_center", "commit_message": "Deprecate helper functions in axis3d", "code": "def move_from_center(coord, centers, deltas, axmask=(True, True, True)):\n \n return _move_from_center(coord, centers, deltas, axmask=axmask)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 33, "n_ast_nodes": 46, "n_identifiers": 6, "random_cut": "def move_from_center(coord, centers, deltas, axmask=(True, True, True)):\n \n return _move_from_center", "d_id": 23516, "documentation": { "docstring": "\n For each coordinate where *axmask* is True, move *coord* away from\n *centers* by *deltas*.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 24, "language": "en" } }, { "id": 130122, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/function_manager.py", "file_name": "function_manager.py", "fun_name": "get_execution_info", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_execution_info(self, job_id, function_descriptor):\n \n function_id = function_descriptor.function_id\n # If the function has already been loaded,\n # There's no need to load again\n if function_id in self._function_execution_info:\n return self._function_execution_info[function_id]\n if self._worker.load_code_from_local:\n # Load function from local code.\n if not function_descriptor.is_actor_method():\n # If the function is not able to be loaded,\n # try to load it from GCS,\n # even if load_code_from_local is set True\n if self._load_function_from_local(function_descriptor) is True:\n return self._function_execution_info[function_id]\n # Load function from GCS.\n # Wait until the function to be executed has actually been\n # registered on this worker. We will push warnings to the user if\n # we spend too long in this loop.\n # The driver function may not be found in sys.path. Try to load\n # the function from GCS.\n with profiling.profile(\"wait_for_function\"):\n self._wait_for_function(function_descriptor, job_id)\n try:\n function_id = function_descriptor.function_id\n info = self._function_execution_info[function_id]\n except KeyError as e:\n message = (\n \"Error occurs in get_execution_info: \"\n \"job_id: %s, function_descriptor: %s. 
Message: %s\"\n % (job_id, function_descriptor, e)\n )\n raise KeyError(message)\n return info\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 497, "n_words": 162, "vocab_size": 99, "complexity": 6, "nloc": 21, "token_counts": 118, "n_ast_nodes": 206, "n_identifiers": 17, "random_cut": "def get_execution_info(self, job_id, function_descriptor):\n \n function_id = function_descriptor.function_id\n # If the function has already been loaded,\n # There's no need to load again\n if function_id in self._function_execution_info:\n return self._function_execution_info[function_id]\n if self._worker.load_code_from_local:\n # Load function from local code.\n if not function_descriptor.is_actor_method():\n # If the function is not able to be loaded,\n # try to load it from GCS,\n # even if load_code_from_local is set True\n if self._load_function_from_local(function_descriptor) is True:\n return self._function_execution_info[function_id]\n # Load function from GCS.\n # Wait until the function to be executed has actually been\n # registered on this worker. We will push w", "d_id": 29113, "documentation": { "docstring": "Get the FunctionExecutionInfo of a remote function.\n Args:\n job_id: ID of the job that the function belongs to.\n function_descriptor: The FunctionDescriptor of the function to get.\n Returns:\n A FunctionExecutionInfo object.\n ", "n_words": 30, "vocab_size": 23, "n_whitespaces": 84, "language": "en" } }, { "id": 100533, "commit_id": "bdbbad4d310fb606b6f412aa81e9f57ccd994e97", "repo": "faceswap", "path": "lib/gpu_stats/_base.py", "file_name": "_base.py", "fun_name": "exclude_all_devices", "commit_message": "Refactor lib.gpu_stats (#1218)\n\n* inital gpu_stats refactor\r\n\r\n* Add dummy CPU Backend\r\n\r\n* Update Sphinx documentation", "code": "def exclude_all_devices(self) -> bool:\n \n return all(idx in _EXCLUDE_DEVICES for idx in range(self._device_count))\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 26, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 24, "n_ast_nodes": 40, "n_identifiers": 8, "random_cut": "def exclude_all_devices(self) -> bool:\n \n return all(idx in _EXCLUDE_DEVICES for idx in range(self._device_count))\n", "d_id": 19997, "documentation": { "docstring": " bool: ``True`` if all GPU devices have been explicitly disabled otherwise ``False`` ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 294430, "commit_id": "53245c65238e3009dd1f3412f7f9bef10385f64e", "repo": "core", "path": "homeassistant/components/alexa/resources.py", "file_name": "resources.py", "fun_name": "serialize_labels", "commit_message": "Update pylint to 2.13.0 (#68656)", "code": "def serialize_labels(self, resources):\n \n labels = []\n for label in resources:\n if label in AlexaGlobalCatalog.__dict__.values():\n label = {\"@type\": \"asset\", \"value\": {\"assetId\": label}}\n else:\n label = {\"@type\": \"text\", \"value\": {\"text\": label, \"locale\": \"en-US\"}}\n\n labels.append(label)\n\n return {\"friendlyNames\": labels}\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 126, "n_words": 35, "vocab_size": 27, "complexity": 3, "nloc": 9, "token_counts": 76, "n_ast_nodes": 141, "n_identifiers": 9, "random_cut": "def 
serialize_labels(self, resources):\n \n labels = []\n for label in resources:\n if label in AlexaGlobalCatalog.__dict__.values():\n label = {\"@type\": \"asset\", \"value\": {\"assetId\": label}}\n else:\n label = {\"@type\": \"text\", \"va", "d_id": 93467, "documentation": { "docstring": "Return resource label objects for friendlyNames serialized for an API response.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 243007, "commit_id": "c4d51fb2681c2434fd324098d116a66013549de7", "repo": "Pillow", "path": "src/PIL/PpmImagePlugin.py", "file_name": "PpmImagePlugin.py", "fun_name": "_decode_bitonal", "commit_message": "Added support for PPM arbitrary maxval in plain formats", "code": "def _decode_bitonal(self):\n \n data = bytearray()\n total_bytes = self.state.xsize * self.state.ysize\n\n comment_spans = False\n while len(data) != total_bytes:\n block = self._read_block() # read next block\n if not block:\n # eof\n break\n\n while block and comment_spans:\n comment_end = self._find_comment_end(block)\n if comment_end != -1: # comment ends in this block\n block = block[comment_end + 1 :] # delete tail of previous comment\n break\n else: # comment spans whole block\n block = self._read_block()\n\n block, comment_spans = self._ignore_comments(block)\n\n tokens = b\"\".join(block.split())\n for token in tokens:\n if token not in (48, 49):\n raise ValueError(f\"Invalid token for this mode: {bytes([token])}\")\n data = (data + tokens)[:total_bytes]\n invert = bytes.maketrans(b\"01\", b\"\\xFF\\x00\")\n return data.translate(invert)\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 400, "n_words": 104, "vocab_size": 65, "complexity": 8, "nloc": 23, "token_counts": 159, "n_ast_nodes": 279, "n_identifiers": 24, "random_cut": "def _decode_bitonal(self):\n \n data = bytearray()\n total_bytes = self.state.xsize * self.state.ysize\n\n comment_spans = False\n while len(data) != total_bytes:\n block = self._read_block() # read next block\n if not block:\n # eof\n break\n\n while block and comment_spans:\n comment_end = self._find_comment_end(block)\n if comment_end != -1: # comment ends in this block\n block = block[comment_end + 1 :] # delete tail of previous comment\n break\n ", "d_id": 69953, "documentation": { "docstring": "\n This is a separate method because in the plain PBM format, all data tokens are\n exactly one byte, so the inter-token whitespace is optional.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 46, "language": "en" } }, { "id": 198543, "commit_id": "883f3c95de8eaa79e04a6b78199e07f0d9fbba6c", "repo": "sympy", "path": "sympy/solvers/solvers.py", "file_name": "solvers.py", "fun_name": "recast_to_symbols", "commit_message": "ordered swaps and dict", "code": "def recast_to_symbols(eqs, symbols):\n \n if not iterable(eqs) and iterable(symbols):\n raise ValueError('Both eqs and symbols must be iterable')\n orig = list(symbols)\n symbols = list(ordered(symbols))\n swap_sym = {}\n i = 0\n for j, s in enumerate(symbols):\n if not isinstance(s, Symbol) and s not in swap_sym:\n swap_sym[s] = Dummy('X%d' % i)\n i += 1\n new_f = []\n for i in eqs:\n isubs = getattr(i, 'subs', None)\n if isubs is not None:\n new_f.append(isubs(swap_sym))\n else:\n new_f.append(i)\n restore = {v: k for k, v in swap_sym.items()}\n return new_f, [swap_sym.get(i, i) for i in orig], restore\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 200, "n_words": 88, "vocab_size": 59, "complexity": 10, "nloc": 20, "token_counts": 163, "n_ast_nodes": 262, "n_identifiers": 25, "random_cut": "def recast_to_symbols(eqs, symbols):\n \n if not iterable(eqs) and iterable(symbols):\n raise ValueError('Both eqs and symbols must be iterable')\n orig = list(symbols)\n symbols = list(ordered(symbols))\n swap_sym = {}\n i = 0\n for j, s in e", "d_id": 48995, "documentation": { "docstring": "\n Return (e, s, d) where e and s are versions of *eqs* and\n *symbols* in which any non-Symbol objects in *symbols* have\n been replaced with generic Dummy symbols and d is a dictionary\n that can be used to restore the original expressions.\n\n Examples\n ========\n\n >>> from sympy.solvers.solvers import recast_to_symbols\n >>> from sympy import symbols, Function\n >>> x, y = symbols('x y')\n >>> fx = Function('f')(x)\n >>> eqs, syms = [fx + 1, x, y], [fx, y]\n >>> e, s, d = recast_to_symbols(eqs, syms); (e, s, d)\n ([_X0 + 1, x, y], [_X0, y], {_X0: f(x)})\n\n The original equations and symbols can be restored using d:\n\n >>> assert [i.xreplace(d) for i in eqs] == eqs\n >>> assert [d.get(i, i) for i in s] == syms\n\n ", "n_words": 124, "vocab_size": 85, "n_whitespaces": 176, "language": "en" } }, { "id": 79676, "commit_id": "c8689acb3724dc12fb09a0bfc14d7e4755a1ea0f", "repo": "wagtail", "path": "wagtail/models/reference_index.py", "file_name": "reference_index.py", "fun_name": "model_is_indexable", "commit_message": "Check field for .extract_references method instead of field type\n\nCo-authored-by: Matt Westcott ", "code": "def model_is_indexable(cls, model, allow_child_models=False):\n \n if getattr(model, \"wagtail_reference_index_ignore\", False):\n return False\n\n # Don't check any models that have a parental key, references from these will be collected from the parent\n if not allow_child_models and any(\n [isinstance(field, ParentalKey) for field in model._meta.get_fields()]\n ):\n return False\n\n for field in model._meta.get_fields():\n if field.is_relation and field.many_to_one:\n if getattr(field, \"wagtail_reference_index_ignore\", False):\n continue\n\n if getattr(\n field.related_model, \"wagtail_reference_index_ignore\", False\n ):\n continue\n\n if isinstance(field, (ParentalKey, GenericRel)):\n continue\n\n return True\n\n if hasattr(field, \"extract_references\"):\n return True\n\n if issubclass(model, ClusterableModel):\n for child_relation in get_all_child_relations(model):\n if cls.model_is_indexable(\n child_relation.related_model,\n allow_child_models=True,\n ):\n return True\n\n return False\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 466, "n_words": 91, "vocab_size": 59, "complexity": 15, "nloc": 28, "token_counts": 156, "n_ast_nodes": 244, "n_identifiers": 20, "random_cut": "def model_is_indexable(cls, model, allow_child_models=False):\n \n if getattr(model, \"wagtail_reference_index_ignore\", False):\n return False\n\n # Don't check any models that have a parental key, references from these will be collected from the parent\n if not allow_child_models and any(\n [isinstance(field, ParentalKey) for field in model._meta.get_fields()]\n ):\n return False\n\n for field in model._meta.get_fields():\n if field.is_relation and field.many_to_one:\n if getattr(field, \"wagtail_reference_index_ignore\", False):\n continue\n\n if getattr(\n field.related_model, 
\"wagtail_reference_index_ignore\", False\n ):\n continue\n\n if isinstance(field, (ParentalKey, GenericRel)):\n continue\n\n return True\n\n if hasattr(field, \"extract_references\"):\n return True\n\n if issubclass(model, ClusterableModel):\n for child_relation in get_all_child_relations(model):\n if cls.model_is_indexable(\n child_relation.related_model,\n allow_child_models=True,\n ):\n return True\n\n return False\n", "d_id": 16955, "documentation": { "docstring": "\n Returns True if the given model may have outbound references that we would be interested in recording in the index.\n\n\n Args:\n model (type): a Django model class\n allow_child_models (boolean): Child models are not indexable on their own. If you are looking at\n a child model from the perspective of indexing it through its parent,\n set this to True to disable checking for this. Default False.\n ", "n_words": 65, "vocab_size": 55, "n_whitespaces": 191, "language": "en" } }, { "id": 22132, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "get_encodings_from_content", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def get_encodings_from_content(content):\n \n warnings.warn(\n (\n \"In requests 3.0, get_encodings_from_content will be removed. For \"\n \"more information, please see the discussion on issue #2266. (This\"\n \" warning should only appear once.)\"\n ),\n DeprecationWarning,\n )\n\n charset_re = re.compile(r']', flags=re.I)\n pragma_re = re.compile(r']', flags=re.I)\n xml_re = re.compile(r'^<\\?xml.*?encoding=[\"\\']*(.+?)[\"\\'>]')\n\n return (\n charset_re.findall(content)\n + pragma_re.findall(content)\n + xml_re.findall(content)\n )\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 150, "n_words": 51, "vocab_size": 44, "complexity": 1, "nloc": 17, "token_counts": 81, "n_ast_nodes": 135, "n_identifiers": 13, "random_cut": "def get_encodings_from_content(content):\n \n warnings.warn(\n (\n \"In requests 3.0, get_encodings_from_content will be removed. For \"\n \"more information, please see the discussion on issue #2266. (This\"\n \" warning should only appear once.)\"\n ),\n DeprecationWarning,\n )\n\n charset_re = re.compile(r']', flags=re.I", "d_id": 4204, "documentation": { "docstring": "Returns encodings from given content string.\n\n :param content: bytestring to extract encodings from.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 109752, "commit_id": "4896ec1a2cfb8c454e385632d8df213c915ced52", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/axes3d.py", "file_name": "axes3d.py", "fun_name": "_scale_axis_limits", "commit_message": "Add pan and zoom toolbar handling to 3D Axes (Replaces PR#22614) (#23449)\n\n* ENH: Add pan and zoom toolbar handling to 3D Axes\r\n\r\n1) This moves the pan logic that was already in the mouse move handler\r\ninto the \"drag_pan\" method to make it available from the toolbar.\r\n\r\n2) This expands upon the panning logic to enable a zoom-to-box feature.\r\nThe zoom-to-box is done relative to the Axes, so it shrinks/expands\r\nthe box as a fraction of each delta, from lower-left Axes to lower-left\r\nzoom-box. 
Thus, it tries to handle non-centered zooms, which adds more\r\ncases to handle versus the current right-click zoom only scaling from\r\nthe center of the projection.\r\n\r\n* Rewrite zooming with bounding box\r\n\r\n* Rewrite 3d panning to work with a roll angle\r\n\r\n* Whats new for zoom and pan buttons\r\n\r\n* Make pan button configurable\r\n\r\n* Do not jump when zooming and mouse goes over other subplot\r\n\r\n* Rework zooming for 3d plots\r\n\r\n* Handle x/y lock when zooming and panning\r\n\r\n* Update tests\r\n\r\n* Docstrings\r\n\r\n* Dont assume a scale_z\r\n\r\n* Limit zoom box\r\n\r\n* Test zoom pan key modifiers\r\n\r\n* Save some calculation by saving view axes\r\n\r\n* Deprecation warnings for Axes3D.eye, .vvec\r\n\r\n* Remove Axes3D._prepare_view_from_bbox for now\r\n\r\n* Comments and docstrings\r\n\r\n* Switch from uvn to uvw\r\n\r\n* Save aspect to axes\r\n\r\n* Constrain zooming with mouse when one of the equal aspect ratios is set\r\n\r\n* Cleanup\r\n\r\n* Cleanup\r\n\r\n* Consolidate finding equal aspect axis indices\r\n\r\n* linting\r\n\r\n* More intuitive scaling\r\n\r\n* Box zoom keeps existing aspect ratios\r\n\r\n* Linting\r\n\r\n* Code review comments\r\n\r\n* Revert parameters for view_transformation\r\n\r\n* Fix new 3d pan/zoom view going on view stack twice\r\n\r\n* Better clipping\r\n\r\n* Test 3d toolbar navigation\r\n\r\n* Privatize helper functions\r\n\r\n* Deprecations\r\n\r\n* Code review changes\r\n\r\n* Deprecation note\r\n\r\n* Undeprecate proj3d.view_transformation\r\n\r\n* Undeprecate proj3d.view_transformation\r\n\r\n* Update doc/api/next_api_changes/deprecations/23449-SS.rst\r\n\r\n\r\nCo-authored-by: Greg Lucas \r\nCo-authored-by: Scott Shambaugh \r\nCo-authored-by: Oscar Gustafsson ", "code": "def _scale_axis_limits(self, scale_x, scale_y, scale_z):\n \n # Get the axis limits and centers\n minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()\n cx = (maxx + minx)/2\n cy = (maxy + miny)/2\n cz = (maxz + minz)/2\n\n # Scale the data range\n dx = (maxx - minx)*scale_x\n dy = (maxy - miny)*scale_y\n dz = (maxz - minz)*scale_z\n\n # Set the scaled axis limits\n self.set_xlim3d(cx - dx/2, cx + dx/2)\n self.set_ylim3d(cy - dy/2, cy + dy/2)\n self.set_zlim3d(cz - dz/2, cz + dz/2)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 177, "n_words": 79, "vocab_size": 51, "complexity": 1, "nloc": 11, "token_counts": 131, "n_ast_nodes": 201, "n_identifiers": 21, "random_cut": "def _scale_axis_limits(self, scale_x, scale_y, scale_z):\n \n # Get the axis limits and centers\n minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()\n cx = (maxx + minx)/2\n cy = (maxy + miny)/2\n cz = (maxz + minz)/2\n\n # Scale the data range\n dx = (maxx - minx)*scale_x\n dy = (maxy - miny)*scale_y\n dz = (maxz - minz)*scale_z\n\n # Set the scaled axis limits\n self.set_xlim3d(cx - dx/2, cx + dx/2)\n self.set_ylim3d(cy - dy/2, cy + dy/2)\n ", "d_id": 23734, "documentation": { "docstring": "\n Keeping the center of the x, y, and z data axes fixed, scale their\n limits by scale factors. 
A scale factor > 1 zooms out and a scale\n factor < 1 zooms in.\n\n Parameters\n ----------\n scale_x : float\n Scale factor for the x data axis.\n scale_y : float\n Scale factor for the y data axis.\n scale_z : float\n Scale factor for the z data axis.\n ", "n_words": 65, "vocab_size": 37, "n_whitespaces": 162, "language": "en" } }, { "id": 184113, "commit_id": "c98e1b96049369f6af013a133f204ae0a286f2c7", "repo": "textual", "path": "src/textual/widget.py", "file_name": "widget.py", "fun_name": "layers", "commit_message": "layers and docks", "code": "def layers(self) -> tuple[str, ...]:\n \n for node in self.ancestors:\n if not isinstance(node, Widget):\n break\n if node.styles.has_rule(\"layers\"):\n return node.styles.layers\n return (\"default\",)\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 93, "n_words": 20, "vocab_size": 18, "complexity": 4, "nloc": 12, "token_counts": 51, "n_ast_nodes": 84, "n_identifiers": 10, "random_cut": "def layers(self) -> tuple[str, ...]:\n \n for node in se", "d_id": 44489, "documentation": { "docstring": "Layers of from parent.\n\n Returns:\n tuple[str, ...]: Tuple of layer names.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 36, "language": "en" } }, { "id": 245114, "commit_id": "cd4e9ed8269b0c767e129169b7268b0ced7e60c9", "repo": "mmdetection", "path": "mmdet/testing/_utils.py", "file_name": "_utils.py", "fun_name": "get_roi_head_cfg", "commit_message": "Refactor Double Head, MS, Dynamic, Trident.", "code": "def get_roi_head_cfg(fname):\n \n config = _get_config_module(fname)\n model = copy.deepcopy(config.model)\n\n roi_head = model.roi_head\n train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn\n test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn\n roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg))\n return roi_head\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 57, "n_words": 33, "vocab_size": 22, "complexity": 3, "nloc": 8, "token_counts": 74, "n_ast_nodes": 117, "n_identifiers": 13, "random_cut": "def get_roi_head_cfg(fname):\n \n config = _get_config_module(fname)\n model = copy.deepcopy(config.model)\n\n roi_head = model.roi_head\n train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn\n test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn\n roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg))\n return roi_head\n\n", "d_id": 70663, "documentation": { "docstring": "Grab configs necessary to create a roi_head.\n\n These are deep copied to allow for safe modification of parameters without\n influencing other tests.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 31, "language": "en" } }, { "id": 59935, "commit_id": "298554b26fa5d866d34fed5f6e8646edb56984a4", "repo": "prefect", "path": "src/prefect/logging/loggers.py", "file_name": "loggers.py", "fun_name": "print_as_log", "commit_message": "Add `log_prints` option to redirect print to logs (#7580)\n\nCo-authored-by: Will Raphaelson <26799928+WillRaphaelson@users.noreply.github.com>\r\nCo-authored-by: Will Raphaelson \r\nCo-authored-by: Nathan Nowack \r\nCo-authored-by: Terrence Dorsey ", "code": "def print_as_log(*args, **kwargs):\n \n from prefect.context import FlowRunContext, TaskRunContext\n\n context = TaskRunContext.get() or FlowRunContext.get()\n if not context or not context.log_prints:\n 
return print(*args, **kwargs)\n\n logger = get_run_logger()\n\n # Print to an in-memory buffer; so we do not need to implement `print`\n buffer = io.StringIO()\n kwargs[\"file\"] = buffer\n print(*args, **kwargs)\n\n # Remove trailing whitespace to prevent duplicates\n logger.info(buffer.getvalue().rstrip())\n\n\n@contextmanager", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@contextmanager", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 95, "n_words": 56, "vocab_size": 43, "complexity": 4, "nloc": 10, "token_counts": 89, "n_ast_nodes": 157, "n_identifiers": 19, "random_cut": "def print_as_log(*args, **kwargs):\n \n from prefect.context import FlowRunContext, T", "d_id": 11963, "documentation": { "docstring": "\n A patch for `print` to send printed messages to the Prefect run logger.\n\n If no run is active, `print` will behave as if it were not patched.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 37, "language": "en" } }, { "id": 230975, "commit_id": "1d82b8822120db088bfeb6c8eae7ec8df9703783", "repo": "plotly.py", "path": "packages/python/plotly/plotly/matplotlylib/mplexporter/tests/test_basic.py", "file_name": "test_basic.py", "fun_name": "test_image", "commit_message": "Updated distutils.Version to packaging.Version", "code": "def test_image():\n # Test fails for matplotlib 1.5+ because the size of the image\n # generated by matplotlib has changed.\n if Version(matplotlib.__version__) == Version(\"3.4.1\"):\n image_size = 432\n else:\n pytest.skip(\"Test fails for older matplotlib\")\n np.random.seed(0) # image size depends on the seed\n fig, ax = plt.subplots(figsize=(2, 2))\n ax.imshow(np.random.random((10, 10)), cmap=plt.cm.jet, interpolation=\"nearest\")\n _assert_output_equal(\n fake_renderer_output(fig, FakeRenderer),\n f,\n )\n\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 110, "n_words": 55, "vocab_size": 45, "complexity": 2, "nloc": 18, "token_counts": 94, "n_ast_nodes": 159, "n_identifiers": 23, "random_cut": "def test_image():\n # Test fails for mat", "d_id": 62617, "documentation": { "docstring": "\n opening figure\n opening axes\n draw image of size {image_size} \n closing axes\n closing figure\n ", "n_words": 13, "vocab_size": 9, "n_whitespaces": 159, "language": "en" } }, { "id": 216055, "commit_id": "a5679caf65c7c79cd72841b6e5793b9b693744c9", "repo": "salt", "path": "salt/cloud/clouds/proxmox.py", "file_name": "proxmox.py", "fun_name": "ignore_cidr", "commit_message": "Add support for get IP-address from agent", "code": "def ignore_cidr(vm_, ip):\n \n from ipaddress import ip_address, ip_network\n\n cidrs = config.get_cloud_config_value(\n \"ignore_cidr\", vm_, __opts__, default=[], search_global=False\n )\n if cidrs and isinstance(cidrs, str):\n cidrs = [cidrs]\n for cidr in cidrs or []:\n if ip_address(ip) in ip_network(cidr):\n log.warning(\"IP %r found within %r; ignoring it.\", ip, cidr)\n return True\n\n return False\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 112, "n_words": 48, "vocab_size": 41, "complexity": 6, "nloc": 12, "token_counts": 83, "n_ast_nodes": 131, "n_identifiers": 17, "random_cut": "def ignore_cidr(vm_, ip):\n \n from ipaddress import ip_address, ip_network\n\n cidrs = config.get_cloud_config_value(\n \"ignore_cidr\", vm_, __opts__, default=[], search_global=False\n )\n if cidrs and 
isinstance(cidrs, str):\n cidrs = [cidrs]\n for cidr in cidrs or []:\n if ip_address(ip) in ip_network(cidr):\n log.warning(\"IP %r found within %r; ignoring it.\", ip, cidr)\n return True\n\n retur", "d_id": 54361, "documentation": { "docstring": "\n Return True if we are to ignore the specified IP.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 104508, "commit_id": "ba4d30c42e0702bd894c36777d7d2c0adf74516c", "repo": "datasets", "path": "src/datasets/features/features.py", "file_name": "features.py", "fun_name": "encode_nested_example", "commit_message": "Module namespace cleanup for v2.0 (#3875)\n\n* Imports cleaning\r\n\r\n* Small change\r\n\r\n* Remove unused methods\r\n\r\n* Small fix\r\n\r\n* Additional fix\r\n\r\n* Final fix\r\n\r\n* Fix benchmark test\r\n\r\n* Fix benchmark test #2", "code": "def encode_nested_example(schema, obj):\n \n # Nested structures: we allow dict, list/tuples, sequences\n if isinstance(schema, dict):\n return {k: encode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}\n elif isinstance(schema, (list, tuple)):\n sub_schema = schema[0]\n if obj is None:\n return None\n else:\n if len(obj) > 0:\n for first_elmt in obj:\n if _check_non_null_non_empty_recursive(first_elmt, sub_schema):\n break\n if encode_nested_example(sub_schema, first_elmt) != first_elmt:\n return [encode_nested_example(sub_schema, o) for o in obj]\n return list(obj)\n elif isinstance(schema, Sequence):\n # We allow to reverse list of dict => dict of list for compatiblity with tfds\n if isinstance(schema.feature, dict):\n # dict of list to fill\n list_dict = {}\n if isinstance(obj, (list, tuple)):\n # obj is a list of dict\n for k, dict_tuples in zip_dict(schema.feature, *obj):\n list_dict[k] = [encode_nested_example(dict_tuples[0], o) for o in dict_tuples[1:]]\n return list_dict\n else:\n # obj is a single dict\n for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj):\n list_dict[k] = [encode_nested_example(sub_schema, o) for o in sub_objs]\n return list_dict\n # schema.feature is not a dict\n if isinstance(obj, str): # don't interpret a string as a list\n raise ValueError(f\"Got a string but expected a list instead: '{obj}'\")\n if obj is None:\n return None\n else:\n if len(obj) > 0:\n for first_elmt in obj:\n if _check_non_null_non_empty_recursive(first_elmt, schema.feature):\n break\n # be careful when comparing tensors here\n if not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt) != first_elmt:\n return [encode_nested_example(schema.feature, o) for o in obj]\n return list(obj)\n # Object with special encoding:\n # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks\n elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):\n return schema.encode_example(obj) if obj is not None else None\n # Other object should be directly convertible to a native Arrow type (like Translation and Translation)\n return obj\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 824, "n_words": 270, "vocab_size": 134, "complexity": 27, "nloc": 41, "token_counts": 356, "n_ast_nodes": 541, "n_identifiers": 29, "random_cut": "def encode_nested_example(schema, obj):\n \n # Nested structures: we allow dict, list/tuples, sequences\n if isinstance(schema, dict):\n return {k: encode_nested_example(sub_schema, 
sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}\n elif isinstance(schema, (list, tuple)):\n sub_schema = schema[0]\n if obj is None:\n return None\n else:\n if len(obj) > 0:\n for first_elmt in obj:\n if _check_non_null_non_empty_recursive(first_elmt, sub_schema):\n break\n if encode_nested_example(sub_schema, first_elmt) != first_elmt:\n return [encode_nested_example(sub_schema, o) for o in obj]\n return list(obj)\n elif isinstance(schema, Sequence):\n # We allow to reverse list of dict => dict of list for compatiblity with tfds\n if isinstance(schema.feature, dict):\n # dict of list to fill\n list_dict = {}\n if isinstance(obj, (list, tuple)):\n # obj is a list of dict\n for k, dict_tuples in zip_dict(schema.feature, *obj):\n list_dict[k] = [encode_nested_example(dict_tuples[0], o) for o in dict_tuples[1:]]\n return list_dict\n else:\n # obj is ", "d_id": 21883, "documentation": { "docstring": "Encode a nested example.\n This is used since some features (in particular ClassLabel) have some logic during encoding.\n\n To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.\n If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.\n ", "n_words": 71, "vocab_size": 55, "n_whitespaces": 83, "language": "en" } }, { "id": 101053, "commit_id": "c8122bc499afba4fcb99030e42e08bfb8d3a75e1", "repo": "faceswap", "path": "scripts/train.py", "file_name": "train.py", "fun_name": "_configure_matplotlib", "commit_message": "bugfix: Stop preview window from stealing focus", "code": "def _configure_matplotlib(cls):\n \n rcParams[\"keymap.fullscreen\"] = [k for k in rcParams[\"keymap.fullscreen\"] if k != \"f\"]\n rcParams[\"keymap.save\"] = [k for k in rcParams[\"keymap.save\"] if k != \"s\"]\n rcParams[\"keymap.home\"] = [k for k in rcParams[\"keymap.home\"] if k != \"r\"]\n rcParams[\"figure.raise_window\"] = False\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 38, "vocab_size": 17, "complexity": 7, "nloc": 5, "token_counts": 69, "n_ast_nodes": 123, "n_identifiers": 4, "random_cut": "def _configure_matplotlib(cls):\n \n rcParams[\"keymap.fullscreen\"] = [k for k in rcParams[\"keymap.fullscreen\"] if k != \"f\"", "d_id": 20490, "documentation": { "docstring": " Remove `F`, 'S' and 'R' from their default bindings and stop Matplotlib from stealing\n focus ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 23, "language": "en" } }, { "id": 82284, "commit_id": "a3110e1ff24085373898c7d2a85f628abeb8518d", "repo": "django-cms", "path": "cms/cache/permissions.py", "file_name": "permissions.py", "fun_name": "set_permission_cache", "commit_message": "Enabled isort workflow (#7200)\n\n* Ran isort\r\n\r\n* Enabled isort workflow\r\n\r\nCo-authored-by: Vinit Kumar ", "code": "def set_permission_cache(user, key, value):\n \n from django.core.cache import cache\n\n # store this key, so we can clean it when required\n cache_key = get_cache_key(user, key)\n cache.set(cache_key, value,\n get_cms_setting('CACHE_DURATIONS')['permissions'],\n version=get_cache_permission_version())\n\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 27, "vocab_size": 26, "complexity": 1, "nloc": 6, "token_counts": 48, 
"n_ast_nodes": 77, "n_identifiers": 13, "random_cut": "def set_permission_cache(user, key, value):\n \n from django.core.cache import cache\n\n # store this key, so we can clean it when required\n cache_ke", "d_id": 17337, "documentation": { "docstring": "\n Helper method for storing values in cache. Stores used keys so\n all of them can be cleaned when clean_permission_cache gets called.\n ", "n_words": 21, "vocab_size": 21, "n_whitespaces": 31, "language": "en" } }, { "id": 251155, "commit_id": "e83ec8390ad6be6a86cfcfc57bce14cb8861bf32", "repo": "mitmproxy", "path": "mitmproxy/http.py", "file_name": "http.py", "fun_name": "cookies", "commit_message": "`pyupgrade --py39-plus **/*.py`", "code": "def cookies(self) -> multidict.MultiDictView[str, tuple[str, multidict.MultiDict[str, Optional[str]]]]:\n \n return multidict.MultiDictView(\n self._get_cookies,\n self._set_cookies\n )\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 55, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 14, "token_counts": 43, "n_ast_nodes": 63, "n_identifiers": 10, "random_cut": "def cookies(self) -> multidict.MultiDictView[str, tuple[str, multidict.MultiDict[str, Optional[str]]]]:\n \n return multidict.MultiDict", "d_id": 73611, "documentation": { "docstring": "\n The response cookies. A possibly empty `MultiDictView`, where the keys are cookie\n name strings, and values are `(cookie value, attributes)` tuples. Within\n attributes, unary attributes (e.g. `HTTPOnly`) are indicated by a `None` value.\n Modifications to the MultiDictView update `Response.headers`, and vice versa.\n\n *Warning:* Changes to `attributes` will not be picked up unless you also reassign\n the `(cookie value, attributes)` tuple directly in the `MultiDictView`.\n ", "n_words": 64, "vocab_size": 54, "n_whitespaces": 114, "language": "en" } }, { "id": 148285, "commit_id": "0e6c042e29cbbe429d81c9c1af3c75c261f00980", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/util.py", "file_name": "util.py", "fun_name": "_normalize_entries", "commit_message": "[Bugfix] fix invalid excluding of Black (#24042)\n\n- We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options\r\n- Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.", "code": "def _normalize_entries(entries, separators=None):\n\t\n\tnorm_files = {}\n\tfor entry in entries:\n\t\tnorm_files[normalize_file(entry.path, separators=separators)] = entry\n\treturn norm_files\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 11, "n_words": 16, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 36, "n_ast_nodes": 57, "n_identifiers": 7, "random_cut": "def _normalize_entries(entries, separators=None):\n\t\n\tnorm_files = {}\n\tfor entry in entries:\n\t\tnorm_files[normalize_file(entry.path, separators=separators)] = entry\n\treturn norm_files\n\n", "d_id": 34221, "documentation": { "docstring": "\n\tNormalizes the entry paths to use the POSIX path separator.\n\n\t*entries* (:class:`~collections.abc.Iterable` of :class:`.TreeEntry`)\n\tcontains the entries to be normalized.\n\n\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; 
or\n\t:data:`None`) optionally contains the path separators to normalize.\n\tSee :func:`normalize_file` for more information.\n\n\tReturns a :class:`dict` mapping the each normalized file path (:class:`str`)\n\tto the entry (:class:`.TreeEntry`)\n\t", "n_words": 52, "vocab_size": 39, "n_whitespaces": 44, "language": "en" } }, { "id": 206460, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/test/testcases.py", "file_name": "testcases.py", "fun_name": "assertXMLNotEqual", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def assertXMLNotEqual(self, xml1, xml2, msg=None):\n \n try:\n result = compare_xml(xml1, xml2)\n except Exception as e:\n standardMsg = \"First or second argument is not valid XML\\n%s\" % e\n self.fail(self._formatMessage(msg, standardMsg))\n else:\n if result:\n standardMsg = \"%s == %s\" % (\n safe_repr(xml1, True),\n safe_repr(xml2, True),\n )\n self.fail(self._formatMessage(msg, standardMsg))\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 200, "n_words": 45, "vocab_size": 38, "complexity": 3, "nloc": 13, "token_counts": 85, "n_ast_nodes": 137, "n_identifiers": 13, "random_cut": "def assertXMLNotEqual(self, xml1, xml2, msg=None):\n \n try:\n result = compare_xml(xml1, xml2)\n except Exception as e:\n standardMsg = \"First or second argument is not valid XML\\n%s\" % ", "d_id": 51528, "documentation": { "docstring": "\n Assert that two XML snippets are not semantically equivalent.\n Whitespace in most cases is ignored and attribute ordering is not\n significant. The arguments must be valid XML.\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 56, "language": "en" } }, { "id": 206187, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/base.py", "file_name": "base.py", "fun_name": "token_kwargs", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def token_kwargs(bits, parser, support_legacy=False):\n \n if not bits:\n return {}\n match = kwarg_re.match(bits[0])\n kwarg_format = match and match[1]\n if not kwarg_format:\n if not support_legacy:\n return {}\n if len(bits) < 3 or bits[1] != \"as\":\n return {}\n\n kwargs = {}\n while bits:\n if kwarg_format:\n match = kwarg_re.match(bits[0])\n if not match or not match[1]:\n return kwargs\n key, value = match.groups()\n del bits[:1]\n else:\n if len(bits) < 3 or bits[1] != \"as\":\n return kwargs\n key, value = bits[2], bits[0]\n del bits[:3]\n kwargs[key] = parser.compile_filter(value)\n if bits and not kwarg_format:\n if bits[0] != \"and\":\n return kwargs\n del bits[:1]\n return kwargs\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 334, "n_words": 95, "vocab_size": 40, "complexity": 16, "nloc": 29, "token_counts": 188, "n_ast_nodes": 303, "n_identifiers": 13, "random_cut": "def token_kwargs(bits, parser, support_legacy=False):\n \n if not bits:\n return {}\n match = kwarg_re.match(bits[0])\n kwarg_format = match and match[1]\n if not kwarg_format:\n if not support_legacy:\n return {}\n if len(bits) < 3 or bits[1] != \"as\":\n return {}\n\n kwargs = {}\n while bits:\n if kwarg_format:\n match = kwarg_re.match(bits[0])\n if not match or not match[1]:\n return kwargs\n key, value = match.groups()\n del bits[:1]\n else:\n if len(bits) < 3 or bits[1] != \"as\":\n return kwargs\n key, value = 
bits[2], bits[0]\n del bits[:3]\n kwargs[key] = parser.compile_filter(value)\n if bits and not kwarg_format:\n if bits[0] != \"and\":\n return kwargs\n del bits[:1]\n return kwargs\n", "d_id": 51399, "documentation": { "docstring": "\n Parse token keyword arguments and return a dictionary of the arguments\n retrieved from the ``bits`` token list.\n\n `bits` is a list containing the remainder of the token (split by spaces)\n that is to be checked for arguments. Valid arguments are removed from this\n list.\n\n `support_legacy` - if True, the legacy format ``1 as foo`` is accepted.\n Otherwise, only the standard ``foo=1`` format is allowed.\n\n There is no requirement for all remaining token ``bits`` to be keyword\n arguments, so return the dictionary as soon as an invalid argument format\n is reached.\n ", "n_words": 90, "vocab_size": 59, "n_whitespaces": 124, "language": "en" } }, { "id": 65547, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py", "file_name": "supplier_scorecard_variable.py", "fun_name": "get_cost_of_delayed_shipments", "commit_message": "style: format code with black", "code": "def get_cost_of_delayed_shipments(scorecard):\n\t\n\treturn get_total_cost_of_shipments(scorecard) - get_cost_of_on_time_shipments(scorecard)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 4, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 29, "n_identifiers": 4, "random_cut": "def get_cost_of_delayed_shipments(scorecard):\n\t\n\treturn get_total_cost_of_shipments(scorecard) - get_cost_of_on_time_shipments(scorec", "d_id": 13924, "documentation": { "docstring": "Gets the total cost of all delayed shipments in the period (based on Purchase Receipts - POs)", "n_words": 17, "vocab_size": 16, "n_whitespaces": 16, "language": "en" } }, { "id": 244126, "commit_id": "3b2e9655631a2edd28bb94c640bd6a74c0bfad55", "repo": "mmdetection", "path": "mmdet/models/losses/utils.py", "file_name": "utils.py", "fun_name": "weight_reduce_loss", "commit_message": "[Fix] Fix reduction=mean in CELoss. 
(#7449)\n\n* [Fix] Fix ignore in CELoss.\r\n\r\n* add ut\r\n\r\n* fix and add comments\r\n\r\n* add avg_non_ignore option\r\n\r\n* bce avg\r\n\r\n* fix lint", "code": "def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):\n \n # if weight is specified, apply element-wise weight\n if weight is not None:\n loss = loss * weight\n\n # if avg_factor is not specified, just reduce the loss\n if avg_factor is None:\n loss = reduce_loss(loss, reduction)\n else:\n # if reduction is mean, then average the loss by avg_factor\n if reduction == 'mean':\n # Avoid causing ZeroDivisionError when avg_factor is 0.0,\n # i.e., all labels of an image belong to ignore index.\n eps = torch.finfo(torch.float32).eps\n loss = loss.sum() / (avg_factor + eps)\n # if reduction is 'none', then do nothing, otherwise raise an error\n elif reduction != 'none':\n raise ValueError('avg_factor can not be used with reduction=\"sum\"')\n return loss\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 230, "n_words": 112, "vocab_size": 69, "complexity": 5, "nloc": 12, "token_counts": 86, "n_ast_nodes": 150, "n_identifiers": 12, "random_cut": "def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):\n \n # if weight is specified, apply element-wise weight\n if weight is not None:\n loss = loss * weight\n\n # if avg_factor is not specified, just reduce the loss\n if avg_factor is None:\n loss = reduce_loss(loss, reduction)\n els", "d_id": 70253, "documentation": { "docstring": "Apply element-wise weight and reduce loss.\n\n Args:\n loss (Tensor): Element-wise loss.\n weight (Tensor): Element-wise weights.\n reduction (str): Same as built-in losses of PyTorch.\n avg_factor (float): Average factor when computing the mean of losses.\n\n Returns:\n Tensor: Processed loss values.\n ", "n_words": 38, "vocab_size": 32, "n_whitespaces": 82, "language": "en" } }, { "id": 241889, "commit_id": "e477bab940324648c6f6e2fb53f0932dff19b11b", "repo": "scipy", "path": "scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py", "file_name": "test_lobpcg.py", "fun_name": "test_failure_to_run_iterations", "commit_message": "Update test_lobpcg.py\n\ncopy/paste from #15280", "code": "def test_failure_to_run_iterations():\n \n rnd = np.random.RandomState(0)\n X = rnd.standard_normal((100, 10))\n A = X @ X.T\n Q = rnd.standard_normal((X.shape[0], 4))\n with pytest.warns(UserWarning, match=\"Exited at iteration\"):\n eigenvalues, _ = lobpcg(A, Q, maxiter=20)\n assert(np.max(eigenvalues) > 0)\n\n\n@pytest.mark.filterwarnings(\"ignore:The problem size\")", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "@pytest.mark.filterwarnings(\"ignore:The problem size\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 62, "n_words": 35, "vocab_size": 30, "complexity": 1, "nloc": 8, "token_counts": 88, "n_ast_nodes": 158, "n_identifiers": 22, "random_cut": "def test_failure_to_run_iterations():\n \n rnd = np.random.RandomState(0)\n X = rnd.standard_normal((100, 10))\n A = X @ X.T\n Q = rnd.standard_normal((X.shape[0], 4))\n with pytest.warns(UserWarning, ", "d_id": 69727, "documentation": { "docstring": "Check that the code exists gracefully without breaking. 
Issue #10974.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 13, "language": "en" } }, { "id": 116154, "commit_id": "02a831997cdffafca7cb160eb1938e72020ee049", "repo": "mindsdb", "path": "tests/unit/test_executor.py", "file_name": "test_executor.py", "fun_name": "test_predictor_tableau_header", "commit_message": "executor tests", "code": "def test_predictor_tableau_header(self, mock_handler):\n df = pd.DataFrame([\n {'a': 1, 'b': 'one'},\n {'a': 2, 'b': 'two'},\n {'a': 1, 'b': 'three'},\n ])\n self.set_handler(mock_handler, name='pg', tables={'tasks': df})\n\n # --- use predictor ---\n predicted_value = 5\n predictor = {\n 'name': 'task_model',\n 'predict': 'p',\n 'dtypes': {\n 'p': dtype.float,\n 'a': dtype.integer,\n 'b': dtype.categorical\n },\n 'predicted_value': predicted_value\n }\n self.set_predictor(predictor)\n ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))\n\n # second column is having last value of 'b'\n # 3: count rows, 4: sum of 'a', 5 max of prediction\n assert ret.data[0] == [3, 4, 5]\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 298, "n_words": 82, "vocab_size": 64, "complexity": 1, "nloc": 32, "token_counts": 143, "n_ast_nodes": 250, "n_identifiers": 22, "random_cut": "def test_predictor_tableau_header(self, mock_handler):\n df = pd.DataFrame([\n {'a': 1, 'b': 'one'},\n {'a': 2, 'b': 'two'},\n {'a': 1, 'b': 'three'},\n ])\n self.set_handler(mock_handler, name='pg', tables={'tasks': df})\n\n # --- use predictor ---\n predicted_value = 5\n predictor = {\n 'name': 'task_model',\n 'predict': 'p',\n 'dtypes': {\n 'p': dtype.float,\n 'a': dtype.integer,\n 'b': dtype.categorical\n },\n 'predicted_value': predicted_value\n }\n self.set_predictor(predictor)\n ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))\n\n # second column is having last value of 'b'\n # 3: count rows, 4: sum of 'a', 5 max of pre", "d_id": 25675, "documentation": { "docstring": "\n SELECT \n SUM(1) AS `cnt__0B4A4E8BD11C48FFB4730D4D2C32191A_ok`,\n sum(`Custom SQL Query`.`a`) AS `sum_height_ok`,\n max(`Custom SQL Query`.`p`) AS `sum_length1_ok`\n FROM (\n SELECT res.a, res.p \n FROM pg.tasks as source\n JOIN mindsdb.task_model as res\n ) `Custom SQL Query`\n HAVING (COUNT(1) > 0)\n ", "n_words": 35, "vocab_size": 28, "n_whitespaces": 176, "language": "en" } }, { "id": 48733, "commit_id": "48a21aa0eb3a95d32456c2a927eff9552a04231e", "repo": "django-rest-framework", "path": "tests/test_routers.py", "file_name": "test_routers.py", "fun_name": "test_conflicting_specified_basename_different_models", "commit_message": "raise ImproperlyConfigured exception if `basename` is not unique (#8438)\n\n* raise ImproperlyConfigured if basename already exists\r\n\r\n* rename already_registered function; return True/False\r\n\r\n* additional basename tests\r\n\r\n* additional basename tests\r\n\r\n* Update rest_framework/routers.py\r\n\r\nCo-authored-by: David Graves \r\nCo-authored-by: Asif Saif Uddin ", "code": "def test_conflicting_specified_basename_different_models(self):\n \n self.router.register(r'notes', NoteViewSet)\n with pytest.raises(ImproperlyConfigured):\n self.router.register(r'notes_basename', BasenameViewSet, basename='routertestmodel')\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 41, "n_words": 9, "vocab_size": 9, "complexity": 1, 
"nloc": 4, "token_counts": 40, "n_ast_nodes": 69, "n_identifiers": 10, "random_cut": "def test_conflicting_specified_basename_different_models(self):\n \n self.router.register(r'notes', NoteViewSet)\n with pytest.raises(ImproperlyConfigured):\n self.router.register(r'notes_basename', BasenameViewSet, basename='routertestmodel')\n", "d_id": 9584, "documentation": { "docstring": "\n Ensure 2 routers with different models, and a conflicting basename specified\n throws an exception\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 89279, "commit_id": "16b946cef6e851e40d552e1f9a9d44d0f7d31509", "repo": "sentry", "path": "src/sentry/dynamic_sampling/latest_release_booster.py", "file_name": "latest_release_booster.py", "fun_name": "_get_boosted_releases", "commit_message": "ref(metrics): Change implementation of latest release [TET-555] (#41757)", "code": "def _get_boosted_releases(self) -> BoostedReleases:\n \n boosted_releases = BoostedReleases()\n for boosted_release_cache_key, timestamp in self.redis_client.hgetall(\n self._generate_cache_key_for_boosted_releases_hash()\n ).items():\n extracted_data = self._extract_data_from_cache_key(boosted_release_cache_key)\n if extracted_data:\n release_id, environment = extracted_data\n boosted_releases.add_release(\n cache_key=boosted_release_cache_key,\n id=release_id,\n timestamp=float(timestamp),\n environment=environment,\n )\n\n return boosted_releases\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 220, "n_words": 31, "vocab_size": 27, "complexity": 3, "nloc": 21, "token_counts": 77, "n_ast_nodes": 123, "n_identifiers": 18, "random_cut": "def _get_boosted_releases(self) -> BoostedReleases:\n \n boosted_releases = BoostedReleases()\n for boosted_release_cache_key, timestamp in self.redis_client.hgetall(\n self._generate_cache_key_for_boosted_releases_hash()\n ).items():\n extracted_data = self._extr", "d_id": 18525, "documentation": { "docstring": "\n Returns all the boosted releases and parses them based on key and value data.\n\n This method should not be called directly as the boosted releases are not extended, thus they contain only a\n subset of information.\n ", "n_words": 36, "vocab_size": 31, "n_whitespaces": 65, "language": "en" } }, { "id": 130772, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/internal/internal_api.py", "file_name": "internal_api.py", "fun_name": "store_stats_summary", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def store_stats_summary(reply):\n \n store_summary = \"--- Aggregate object store stats across all nodes ---\\n\"\n # TODO(ekl) it would be nice if we could provide a full memory usage\n # breakdown by type (e.g., pinned by worker, primary, etc.)\n store_summary += (\n \"Plasma memory usage {} MiB, {} objects, {}% full, {}% \"\n \"needed\\n\".format(\n int(reply.store_stats.object_store_bytes_used / (1024 * 1024)),\n reply.store_stats.num_local_objects,\n round(\n 100\n * reply.store_stats.object_store_bytes_used\n / reply.store_stats.object_store_bytes_avail,\n 2,\n ),\n round(\n 100\n * reply.store_stats.object_store_bytes_primary_copy\n / reply.store_stats.object_store_bytes_avail,\n 2,\n ),\n )\n )\n if reply.store_stats.object_store_bytes_fallback > 0:\n store_summary += \"Plasma filesystem mmap usage: {} MiB\\n\".format(\n 
int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024))\n )\n if reply.store_stats.spill_time_total_s > 0:\n store_summary += (\n \"Spilled {} MiB, {} objects, avg write throughput {} MiB/s\\n\".format(\n int(reply.store_stats.spilled_bytes_total / (1024 * 1024)),\n reply.store_stats.spilled_objects_total,\n int(\n reply.store_stats.spilled_bytes_total\n / (1024 * 1024)\n / reply.store_stats.spill_time_total_s\n ),\n )\n )\n if reply.store_stats.restore_time_total_s > 0:\n store_summary += (\n \"Restored {} MiB, {} objects, avg read throughput {} MiB/s\\n\".format(\n int(reply.store_stats.restored_bytes_total / (1024 * 1024)),\n reply.store_stats.restored_objects_total,\n int(\n reply.store_stats.restored_bytes_total\n / (1024 * 1024)\n / reply.store_stats.restore_time_total_s\n ),\n )\n )\n if reply.store_stats.consumed_bytes > 0:\n store_summary += \"Objects consumed by Ray tasks: {} MiB.\\n\".format(\n int(reply.store_stats.consumed_bytes / (1024 * 1024))\n )\n if reply.store_stats.object_pulls_queued:\n store_summary += \"Object fetches queued, waiting for available memory.\"\n\n return store_summary\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 800, "n_words": 194, "vocab_size": 101, "complexity": 6, "nloc": 56, "token_counts": 272, "n_ast_nodes": 438, "n_identifiers": 20, "random_cut": "def store_stats_summary(reply):\n \n store_summary = \"--- Aggregate object store stats across all nodes ---\\n\"\n # TODO(ekl) it would be nice if we could provide a full memory usage\n # breakdown by type (e.g., pinned by worker, primar", "d_id": 29350, "documentation": { "docstring": "Returns formatted string describing object store stats in all nodes.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 250395, "commit_id": "652d1669c5a103b1c20478770c4aaf18849c09a3", "repo": "synapse", "path": "tests/handlers/test_register.py", "file_name": "test_register.py", "fun_name": "test_spam_checker_deny", "commit_message": "Add missing type hints to tests.handlers. 
(#14680)\n\nAnd do not allow untyped defs in tests.handlers.", "code": "def test_spam_checker_deny(self) -> None:\n \n self.get_failure(self.handler.register_user(localpart=\"user\"), SynapseError)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 44, "n_identifiers": 7, "random_cut": "def test_spam_checker_deny(self) -> None:\n \n self.get_failure(self.handler.register_user(localpart=\"user\"), SynapseError)\n", "d_id": 73416, "documentation": { "docstring": "A spam checker can deny registration, which results in an error.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 101668, "commit_id": "a7d0898f64adc9816427c4923074c7955ce95ac8", "repo": "faceswap", "path": "lib/align/aligned_face.py", "file_name": "aligned_face.py", "fun_name": "_get_pitch_yaw_roll", "commit_message": "sort tool: Add sort by roll", "code": "def _get_pitch_yaw_roll(self) -> None:\n \n proj_matrix = np.zeros((3, 4), dtype=\"float32\")\n proj_matrix[:3, :3] = cv2.Rodrigues(self._rotation)[0]\n euler = cv2.decomposeProjectionMatrix(proj_matrix)[-1]\n self._pitch_yaw_roll = cast(Tuple[float, float, float], tuple(euler.squeeze()))\n logger.trace(\"yaw_pitch: %s\", self._pitch_yaw_roll) # type: ignore\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 71, "n_words": 28, "vocab_size": 25, "complexity": 1, "nloc": 7, "token_counts": 90, "n_ast_nodes": 143, "n_identifiers": 19, "random_cut": "def _get_pitch_yaw_roll(self) -> None:\n \n proj_matrix = np.zeros((3, 4), dtype=\"float32\")\n proj_matrix[:3,", "d_id": 21073, "documentation": { "docstring": " Obtain the yaw, roll and pitch from the :attr:`_rotation` in eular angles. ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 13, "language": "en" } }, { "id": 292922, "commit_id": "dbbb5655e5df0d72ca6b5534af624b54027cbb6d", "repo": "core", "path": "tests/components/dlna_dmr/test_data.py", "file_name": "test_data.py", "fun_name": "aiohttp_notify_servers_mock", "commit_message": "Bump async-upnp-client to 0.25.0 (#66414)\n\nCo-authored-by: J. 
Nick Koston ", "code": "def aiohttp_notify_servers_mock() -> Iterable[Mock]:\n \n with patch(\n \"homeassistant.components.dlna_dmr.data.AiohttpNotifyServer\"\n ) as mock_constructor:\n servers = []\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 36, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 17, "token_counts": 50, "n_ast_nodes": 44, "n_identifiers": 6, "random_cut": "def aiohttp_notify_servers_mock() -> Iterable[Mock]:\n \n with patch(\n \"homeassistant.", "d_id": 91989, "documentation": { "docstring": "Construct mock AiohttpNotifyServer on demand, eliminating network use.\n\n This fixture provides a list of the constructed servers.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 23, "language": "en" } }, { "id": 109916, "commit_id": "df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/art3d.py", "file_name": "art3d.py", "fun_name": "line_collection_2d_to_3d", "commit_message": "Improve mpl_toolkit documentation", "code": "def line_collection_2d_to_3d(col, zs=0, zdir='z'):\n \n segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir)\n col.__class__ = Line3DCollection\n col.set_segments(segments3d)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 25, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 64, "n_identifiers": 10, "random_cut": "def line_collection_2d_to_3d(col, zs=0, zdir='z'):\n \n segments3d = _paths_to_3d_segments(col.get_p", "d_id": 23823, "documentation": { "docstring": "Convert a `.LineCollection` to a `.Line3DCollection` object.", "n_words": 7, "vocab_size": 6, "n_whitespaces": 6, "language": "en" } }, { "id": 64235, "commit_id": "456f27724c975685c2d5a92c20296737b11c084d", "repo": "erpnext", "path": "erpnext/patches/v13_0/convert_to_website_item_in_item_card_group_template.py", "file_name": "convert_to_website_item_in_item_card_group_template.py", "fun_name": "execute", "commit_message": "fix: Convert Item links to Website Item links in Item Card Group template data\n\n- Changed link option to Website Item in Item card group template\n- patch to convert pre-existing data", "code": "def execute():\n \n frappe.reload_doc(\"e_commerce\", \"web_template\", \"item_card_group\")\n\n blocks = frappe.db.get_all(\n \"Web Page Block\",\n filters={\"web_template\": \"Item Card Group\"},\n fields=[\"parent\", \"web_template_values\", \"name\"]\n )\n\n fields = generate_fields_to_edit()\n\n for block in blocks:\n web_template_value = json.loads(block.get('web_template_values'))\n\n for field in fields:\n item = web_template_value.get(field)\n if not item:\n continue\n\n if frappe.db.exists(\"Website Item\", {\"item_code\": item}):\n website_item = frappe.db.get_value(\"Website Item\", {\"item_code\": item})\n else:\n website_item = make_new_website_item(item, web_template_value, field)\n continue\n\n if website_item:\n web_template_value[field] = website_item\n\n frappe.db.set_value(\"Web Page Block\", block.name, \"web_template_values\", json.dumps(web_template_value))\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 259, "n_words": 69, "vocab_size": 51, "complexity": 6, "nloc": 22, "token_counts": 159, "n_ast_nodes": 275, "n_identifiers": 23, "random_cut": "def 
execute():\n \n frappe.reload_doc(\"e_commerce\", \"web_template\", \"item_card_group\")\n\n blocks = frappe.db.get_all(\n \"Web Page Block\",\n filters={\"web_template\": \"Item Card Group\"},\n fields=[\"parent\", \"web_template_values\", \"name\"]\n )\n\n fields = generate_fields_to_edit()\n\n for block in blocks:\n web_template_value = json.loads(block.get('web_template_values'))\n\n for field in fields:\n item = web_template_value.get(field)\n if not item:\n continue\n\n if frappe.db.exists(\"Website Item\", {\"item_code\": item}):\n website_item = frappe.db.get_value(\"Website Item\", {\"item_c", "d_id": 13581, "documentation": { "docstring": "\n Convert all Item links to Website Item link values in\n exisitng 'Item Card Group' Web Page Block data.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 36, "language": "en" } }, { "id": 109136, "commit_id": "91f47d6eff63187f582c395c007d0152980be6b3", "repo": "matplotlib", "path": "lib/matplotlib/lines.py", "file_name": "lines.py", "fun_name": "set_pickradius", "commit_message": "Unify set_pickradius argument", "code": "def set_pickradius(self, pickradius):\n \n if not isinstance(pickradius, Number) or pickradius < 0:\n raise ValueError(\"pick radius should be a distance\")\n self._pickradius = pickradius\n\n pickradius = property(get_pickradius, set_pickradius)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 25, "vocab_size": 22, "complexity": 3, "nloc": 4, "token_counts": 31, "n_ast_nodes": 65, "n_identifiers": 9, "random_cut": "def set_pickradius(self, pickradius):\n \n if not isinstance(pickradius, Number) or pickradius < 0:\n raise ValueError(\"pick radius should be a distance\")\n self._pickradius = pickradius\n\n pickradius = property(ge", "d_id": 23452, "documentation": { "docstring": "\n Set the pick radius used for containment tests.\n\n See `.contains` for more details.\n\n Parameters\n ----------\n pickradius : float\n Pick radius, in points.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 76, "language": "en" } }, { "id": 100509, "commit_id": "71c20252c2e747f692289cdefe80ad0d5a456ea6", "repo": "faceswap", "path": "tools/preview/preview.py", "file_name": "preview.py", "fun_name": "_predict", "commit_message": "bugfix: Preview Tool, ensure all config items are written", "code": "def _predict(self):\n \n with self._lock:\n self._predicted_images = []\n for frame in self._input_images:\n self._predictor.in_queue.put(frame)\n idx = 0\n while idx < self._sample_size:\n logger.debug(\"Predicting face %s of %s\", idx + 1, self._sample_size)\n items = self._predictor.out_queue.get()\n if items == \"EOF\":\n logger.debug(\"Received EOF\")\n break\n for item in items:\n self._predicted_images.append(item)\n logger.debug(\"Predicted face %s of %s\", idx + 1, self._sample_size)\n idx += 1\n logger.debug(\"Predicted faces\")\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 292, "n_words": 57, "vocab_size": 40, "complexity": 5, "nloc": 17, "token_counts": 117, "n_ast_nodes": 198, "n_identifiers": 18, "random_cut": "def _predict(self):\n \n with self._lock:\n self._predicted_images = []\n for frame in self._input_images:\n sel", "d_id": 19977, "documentation": { "docstring": " Predict from the loaded frames.\n\n With a threading lock (to prevent stacking), run the selected faces through the Faceswap\n model 
predict function and add the output to :attr:`predicted`\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 50, "language": "en" } }, { "id": 198396, "commit_id": "7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c", "repo": "sympy", "path": "sympy/polys/polytools.py", "file_name": "polytools.py", "fun_name": "exclude", "commit_message": "Cleanup loops and ranges", "code": "def exclude(f):\n \n J, new = f.rep.exclude()\n gens = [gen for j, gen in enumerate(f.gens) if j not in J]\n\n return f.per(new, gens=gens)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 50, "n_words": 22, "vocab_size": 20, "complexity": 3, "nloc": 4, "token_counts": 49, "n_ast_nodes": 78, "n_identifiers": 10, "random_cut": "def exclude(f):\n \n J, new = f.rep.exclude()\n gens = [gen for j, gen in enumer", "d_id": 48907, "documentation": { "docstring": "\n Remove unnecessary generators from ``f``.\n\n Examples\n ========\n\n >>> from sympy import Poly\n >>> from sympy.abc import a, b, c, d, x\n\n >>> Poly(a + x, a, b, c, d, x).exclude()\n Poly(a + x, a, x, domain='ZZ')\n\n ", "n_words": 36, "vocab_size": 22, "n_whitespaces": 93, "language": "en" } }, { "id": 107424, "commit_id": "695bc25c7a9b198e00c54496a8eed56a8a908cbf", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "set_ticks", "commit_message": "Expanded documentation of Axis.set_ticks as per discussion in issue #22262 (#22270)\n\n* Expanded documentation of Axis.set_ticks()\r\n\r\n* Fix flake8 W293 (blank line contains whitespace) warnings\r\n\r\n* Expanded the documentation even more based on discussion in issue #22262\r\n\r\n* Update lib/matplotlib/axis.py - @jklymak rewording\r\n\r\nCo-authored-by: Jody Klymak \r\n\r\n* Reduced verbosity of doc by @jklymak 's suggestion.\r\n\r\n* On second thought, the previous wording could be seen as very ambiguous.\r\n\r\n* Update set_ticks docstring by @timhoffm compromise suggestion\r\n\r\nCo-authored-by: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com>\r\n\r\n* Removed extra sentence as per @timhoffm review\r\n\r\n* Blank line whitespace issue crept up again\r\n\r\n* Update lib/matplotlib/axis.py as per correction by @timhoffm\r\n\r\nCo-authored-by: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com>\r\n\r\nCo-authored-by: unknown <>\r\nCo-authored-by: Jody Klymak \r\nCo-authored-by: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com>", "code": "def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs):\n \n result = self._set_tick_locations(ticks, minor=minor)\n if labels is not None:\n self.set_ticklabels(labels, minor=minor, **kwargs)\n return result\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 21, "vocab_size": 20, "complexity": 2, "nloc": 5, "token_counts": 54, "n_ast_nodes": 81, "n_identifiers": 9, "random_cut": "def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs):\n \n result = self._set_tick_locations(ticks, minor=minor)\n if labels is not None:\n self.set_ticklabels(label", "d_id": 22741, "documentation": { "docstring": "\n Set this Axis' tick locations and optionally labels.\n\n If necessary, the view limits of the Axis are expanded so that all\n given ticks are visible.\n\n Parameters\n ----------\n ticks : list of floats\n List of tick locations. 
The axis `.Locator` is replaced by a\n `~.ticker.FixedLocator`.\n\n Some tick formatters will not label arbitrary tick positions;\n e.g. log formatters only label decade ticks by default. In\n such a case you can set a formatter explicitly on the axis\n using `.Axis.set_major_formatter` or provide formatted\n *labels* yourself.\n labels : list of str, optional\n List of tick labels. If not set, the labels are generated with\n the axis tick `.Formatter`.\n minor : bool, default: False\n If ``False``, set the major ticks; if ``True``, the minor ticks.\n **kwargs\n `.Text` properties for the labels. These take effect only if you\n pass *labels*. In other cases, please use `~.Axes.tick_params`.\n\n Notes\n -----\n The mandatory expansion of the view limits is an intentional design\n choice to prevent the surprise of a non-visible tick. If you need\n other limits, you should set the limits explicitly after setting the\n ticks.\n ", "n_words": 177, "vocab_size": 115, "n_whitespaces": 423, "language": "en" } }, { "id": 67567, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/startup/leaderboard.py", "file_name": "leaderboard.py", "fun_name": "get_all_customers", "commit_message": "style: format code with black", "code": "def get_all_customers(date_range, company, field, limit=None):\n\tif field == \"outstanding_amount\":\n\t\tfilters = [[\"docstatus\", \"=\", \"1\"], [\"company\", \"=\", company]]\n\t\tif date_range:\n\t\t\tdate_range = frappe.parse_json(date_range)\n\t\t\tfilters.append([\"posting_date\", \">=\", \"between\", [date_range[0], date_range[1]]])\n\t\treturn frappe.db.get_all(\n\t\t\t\"Sales Invoice\",\n\t\t\tfields=[\"customer as name\", \"sum(outstanding_amount) as value\"],\n\t\t\tfilters=filters,\n\t\t\tgroup_by=\"customer\",\n\t\t\torder_by=\"value desc\",\n\t\t\tlimit=limit,\n\t\t)\n\telse:\n\t\tif field == \"total_sales_amount\":\n\t\t\tselect_field = \"sum(so_item.base_net_amount)\"\n\t\telif field == \"total_qty_sold\":\n\t\t\tselect_field = \"sum(so_item.stock_qty)\"\n\n\t\tdate_condition = get_date_condition(date_range, \"so.transaction_date\")\n\n\t\treturn frappe.db.sql(\n\t\t\t.format(\n\t\t\t\tselect_field, date_condition\n\t\t\t),\n\t\t\t(company, cint(limit)),\n\t\t\tas_dict=1,\n\t\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 45, "n_words": 73, "vocab_size": 57, "complexity": 5, "nloc": 35, "token_counts": 162, "n_ast_nodes": 280, "n_identifiers": 22, "random_cut": "def get_all_customers(date_range, company, field, limit=None):\n\tif field == \"outstanding_amount\":\n\t\tfilters = [[\"docstatus\", \"=\", \"1\"], [\"company\", \"=\", company]]\n\t\tif date_range:\n\t\t\tdate_range = frappe.parse_json(date_range)\n\t\t\tfilters.append([\"posting_date\", \">=\", \"between\", [date_range[0], date_range[1]]])\n\t\treturn frappe.db.get_all(\n\t\t\t\"Sales Invoice\",\n\t\t\tfields=[\"customer as name\", \"sum(outstanding_amount) as value\"],\n\t\t\tfilters=filters,\n\t\t\tgroup_by=\"customer\",\n\t\t\torder_by=\"value desc\",\n\t\t\tlimit=limit,\n\t\t)\n\telse:\n\t\tif field == \"total_sales_amount\":\n\t\t\tselect_field = \"sum(so_item.base_net_amount)\"\n\t\telif field == \"total_qty_sold\":\n\t\t\tselect_field = \"sum(so_item.stock_qty)\"\n\n\t\tdate_condition = get_date_condition(date_range, \"so.transaction_date\")\n\n\t\treturn frappe.db.sql(\n\t\t\t.format(\n\t\t\t\tselect_field, 
dat", "d_id": 14558, "documentation": { "docstring": "\n\t\t\tselect so.customer as name, {0} as value\n\t\t\tFROM `tabSales Order` as so JOIN `tabSales Order Item` as so_item\n\t\t\t\tON so.name = so_item.parent\n\t\t\twhere so.docstatus = 1 {1} and so.company = %s\n\t\t\tgroup by so.customer\n\t\t\torder by value DESC\n\t\t\tlimit %s\n\t\t", "n_words": 40, "vocab_size": 30, "n_whitespaces": 33, "language": "en" } }, { "id": 305079, "commit_id": "f78b39bdbfbe151e8bab72610b6fe03afc8c0747", "repo": "core", "path": "homeassistant/components/zha/config_flow.py", "file_name": "config_flow.py", "fun_name": "_async_create_radio_entity", "commit_message": "ZHA backup/restore config flow (#77044)", "code": "async def _async_create_radio_entity(self) -> FlowResult:\n \n assert self._title is not None\n assert self._radio_type is not None\n assert self._device_path is not None\n assert self._device_settings is not None\n\n device_settings = self._device_settings.copy()\n device_settings[CONF_DEVICE_PATH] = await self.hass.async_add_executor_job(\n usb.get_serial_by_id, self._device_path\n )\n\n return self.async_create_entry(\n title=self._title,\n data={\n CONF_DEVICE: device_settings,\n CONF_RADIO_TYPE: self._radio_type.name,\n },\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 189, "n_words": 45, "vocab_size": 30, "complexity": 1, "nloc": 17, "token_counts": 94, "n_ast_nodes": 143, "n_identifiers": 20, "random_cut": "async def _async_create_radio_entity(self) -> FlowResult:\n \n assert self._title is not None\n assert self._radio_type is not None\n assert self._device_path is not None\n assert self._device_settings is not None\n\n device_settings = self._device_settings.copy()\n device_settings[CONF_DEVICE_PATH] = await self.hass.async_add_executor_job(\n usb.get_serial_by_id, self._device_path\n )\n\n return self.async_create_entry(\n title=self._title,\n data={\n CONF_DEVICE: device_settings,\n CONF_RADIO_TYPE: self._radio_type.name,\n },\n )\n", "d_id": 103871, "documentation": { "docstring": "Create a config entity with the current flow state.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 215279, "commit_id": "d4e6111086ff713eb6609dc6c98cec98aded2564", "repo": "salt", "path": "salt/transport/zeromq.py", "file_name": "zeromq.py", "fun_name": "publish_daemon", "commit_message": "Refactor into transports and channels", "code": "def publish_daemon(self, publish_payload, *args, **kwargs):\n \n context = zmq.Context(1)\n ioloop = salt.ext.tornado.ioloop.IOLoop()\n ioloop.make_current()\n # Set up the context", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 52, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 9, "token_counts": 68, "n_ast_nodes": 67, "n_identifiers": 14, "random_cut": "def publish_daemon(self, publish_payload, *args, **kwargs):\n \n context = zmq.Context(1)\n ioloop = salt.ext.tornado.ioloop.IOLoo", "d_id": 53905, "documentation": { "docstring": "\n Bind to the interface specified in the configuration file\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 196417, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/printing/str.py", "file_name": "str.py", "fun_name": "_print_Pow", "commit_message": "Moved imports to higher level", "code": "def _print_Pow(self, expr, 
rational=False):\n \n PREC = precedence(expr)\n\n if expr.exp is S.Half and not rational:\n return \"sqrt(%s)\" % self._print(expr.base)\n\n if expr.is_commutative:\n if -expr.exp is S.Half and not rational:\n # Note: Don't test \"expr.exp == -S.Half\" here, because that will\n # match -0.5, which we don't want.\n return \"%s/sqrt(%s)\" % tuple(map(lambda arg: self._print(arg), (S.One, expr.base)))\n if expr.exp is -S.One:\n # Similarly to the S.Half case, don't test with \"==\" here.\n return '%s/%s' % (self._print(S.One),\n self.parenthesize(expr.base, PREC, strict=False))\n\n e = self.parenthesize(expr.exp, PREC, strict=False)\n if self.printmethod == '_sympyrepr' and expr.exp.is_Rational and expr.exp.q != 1:\n # the parenthesized exp should be '(Rational(a, b))' so strip parens,\n # but just check to be sure.\n if e.startswith('(Rational'):\n return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e[1:-1])\n return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 362, "n_words": 124, "vocab_size": 84, "complexity": 11, "nloc": 15, "token_counts": 218, "n_ast_nodes": 347, "n_identifiers": 23, "random_cut": "def _print_Pow(self, expr, rational=False):\n \n PREC = precedence(expr)\n\n if expr.exp is S.Half and not rational:\n return \"sqrt(%s)\" % self._print(expr.base)\n\n if expr.is_commutative:\n if -expr.exp is S.Half and ", "d_id": 47917, "documentation": { "docstring": "Printing helper function for ``Pow``\n\n Parameters\n ==========\n\n rational : bool, optional\n If ``True``, it will not attempt printing ``sqrt(x)`` or\n ``x**S.Half`` as ``sqrt``, and will use ``x**(1/2)``\n instead.\n\n See examples for additional details\n\n Examples\n ========\n\n >>> from sympy import sqrt, StrPrinter\n >>> from sympy.abc import x\n\n How ``rational`` keyword works with ``sqrt``:\n\n >>> printer = StrPrinter()\n >>> printer._print_Pow(sqrt(x), rational=True)\n 'x**(1/2)'\n >>> printer._print_Pow(sqrt(x), rational=False)\n 'sqrt(x)'\n >>> printer._print_Pow(1/sqrt(x), rational=True)\n 'x**(-1/2)'\n >>> printer._print_Pow(1/sqrt(x), rational=False)\n '1/sqrt(x)'\n\n Notes\n =====\n\n ``sqrt(x)`` is canonicalized as ``Pow(x, S.Half)`` in SymPy,\n so there is no need of defining a separate printer for ``sqrt``.\n Instead, it should be handled here as well.\n ", "n_words": 102, "vocab_size": 81, "n_whitespaces": 307, "language": "en" } }, { "id": 320838, "commit_id": "4026854f45b63ec71bdbef42d71831beb5f10714", "repo": "qutebrowser", "path": "qutebrowser/misc/sessions.py", "file_name": "sessions.py", "fun_name": "_save_tab", "commit_message": "Add --minimal option to session-save command\n\nCurrently the session-save commande make a dump of all tabs history and stores\nthem in the session file. 
--minimal flag adds the option to store only the last\nitem of the history.\n\nSigned-off-by: shirenn ", "code": "def _save_tab(self, tab, active, minimal=False):\n \n data: _JsonType = {'history': []}\n if active:\n data['active'] = True\n\n if minimal:\n history = [tab.history.current_item()]\n else:\n history = tab.history\n\n for idx, item in enumerate(history):\n qtutils.ensure_valid(item)\n item_data = self._save_tab_item(tab, idx, item)\n if item.url().scheme() == 'qute' and item.url().host() == 'back':\n # don't add qute://back to the session file\n if item_data.get('active', False) and data['history']:\n # mark entry before qute://back as active\n data['history'][-1]['active'] = True\n else:\n data['history'].append(item_data)\n return data\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 279, "n_words": 70, "vocab_size": 54, "complexity": 8, "nloc": 17, "token_counts": 148, "n_ast_nodes": 253, "n_identifiers": 21, "random_cut": "def _save_tab(self, tab, active, minimal=False):\n \n data: _JsonType = {'history': []}\n if active:\n data['active'] = True\n\n if minimal:\n history = [tab.history.current_item()]\n else:\n history = tab.history\n\n for idx, item in enumerate(history):\n qtutils.ensure_valid(item)\n item_data = self._save_tab_item(tab, idx, item)\n if item.url().scheme() == 'qute' and item.url().host() == 'back':\n # don't add qute://back to the session file\n if item_data.get('active', False) and data['history']:\n # mark entry before qute://back as active\n data['history'][-1]", "d_id": 117386, "documentation": { "docstring": "Get a dict with data for a single tab.\n\n Args:\n tab: The WebView to save.\n active: Whether the tab is currently active.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 58, "language": "en" } }, { "id": 132415, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/tests/test_checkpoint_manager.py", "file_name": "test_checkpoint_manager.py", "fun_name": "testBestCheckpoints", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def testBestCheckpoints(self):\n \n keep_checkpoints_num = 4\n checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num)\n checkpoints = [\n Checkpoint(Checkpoint.PERSISTENT, i, self.mock_result(i)) for i in range(16)\n ]\n random.shuffle(checkpoints)\n\n for checkpoint in checkpoints:\n checkpoint_manager.on_checkpoint(checkpoint)\n\n best_checkpoints = checkpoint_manager.best_checkpoints()\n self.assertEqual(len(best_checkpoints), keep_checkpoints_num)\n for i in range(len(best_checkpoints)):\n self.assertEqual(best_checkpoints[i].value, i + 12)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 141, "n_words": 38, "vocab_size": 29, "complexity": 4, "nloc": 13, "token_counts": 104, "n_ast_nodes": 164, "n_identifiers": 18, "random_cut": "def testBestCheckpoints(self):\n \n keep_checkpoints_num = 4\n checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num)\n checkpoints = [\n Checkpoint(Checkpoint.PERSISTENT, i, self.mock_result(i)) for i in range(16)\n ]\n random.shuffle(checkpoints)\n\n for checkpoint in checkpoints:\n checkpoint_manager.on_checkpoint(checkpoint)\n\n best_checkpoints = checkpoint_manager.best_checkpoints()\n self.assertEqual(len(best_checkpoints), 
keep_checkpoints_num)\n for i in range(len(best_checkpoints)):\n self.assertEqual(best_checkpoints[i].val", "d_id": 29751, "documentation": { "docstring": "\n Tests that the best checkpoints are tracked and ordered correctly.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 173342, "commit_id": "4ea80e9810a14ca3617f08a4ae5cfa6b50482e9a", "repo": "calibre-web", "path": "cps/config_sql.py", "file_name": "config_sql.py", "fun_name": "save", "commit_message": "Code cosmetics", "code": "def save(self):\n \n s = self._read_from_storage() # type: _Settings\n\n for k, v in self.__dict__.items():\n if k[0] == '_':\n continue\n if hasattr(s, k):\n setattr(s, k, v)\n\n log.debug(\"_ConfigSQL updating storage\")\n self._session.merge(s)\n try:\n self._session.commit()\n except OperationalError as e:\n log.error('Database error: %s', e)\n self._session.rollback()\n self.load()\n", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 182, "n_words": 40, "vocab_size": 38, "complexity": 5, "nloc": 15, "token_counts": 99, "n_ast_nodes": 170, "n_identifiers": 20, "random_cut": "def save(self):\n \n s = self._read_from_storage() # type: _Settings\n\n for k, v in self.__dict__.items():\n if k[0] == '_':\n continue\n if hasattr(s, k):\n setattr(s, k, v)\n\n log.debug(\"_ConfigSQL updating storage\")\n self._session.merge(s)\n try:\n self._sessi", "d_id": 40839, "documentation": { "docstring": "Apply all configuration values to the underlying storage.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 89408, "commit_id": "583a7ec15744b2ca8a9c56df484516111dbf783d", "repo": "sentry", "path": "tests/sentry/rules/history/test_preview.py", "file_name": "test_preview.py", "fun_name": "test_transactions", "commit_message": "feat(alert-preview): last triggered (#42098)\n\nAttaches `last_triggered` to group info. 
`preview` now returns a mapping\r\nof group_ids to triggers, updated tests to reflect that.", "code": "def test_transactions(self):\n prev_hour = timezone.now() - timedelta(hours=1)\n event = self.transaction_data.copy()\n event.update(\n {\n \"start_timestamp\": iso_format(prev_hour - timedelta(minutes=1)),\n \"timestamp\": iso_format(prev_hour),\n \"tags\": {\"foo\": \"bar\"},\n \"transaction\": \"this is where a transaction's 'message' is stored\",\n }\n )\n transaction = self.store_event(project_id=self.project.id, data=event)\n\n perf_issue = transaction.groups[0]\n perf_issue.update(first_seen=prev_hour)\n Activity.objects.create(\n project=self.project,\n group=perf_issue,\n type=ActivityType.SET_REGRESSION.value,\n datetime=prev_hour,\n data={\"event_id\": transaction.event_id},\n )\n conditions = [{\"id\": \"sentry.rules.conditions.regression_event.RegressionEventCondition\"}]\n filters = [\n {\n \"id\": \"sentry.rules.filters.tagged_event.TaggedEventFilter\",\n \"key\": \"foo\",\n \"match\": \"eq\",\n \"value\": \"bar\",\n }\n ]\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n assert perf_issue.id in result\n\n filters[0][\"value\"] = \"baz\"\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n assert perf_issue.id not in result\n\n filters = [\n {\n \"id\": \"sentry.rules.filters.event_attribute.EventAttributeFilter\",\n \"attribute\": \"message\",\n \"match\": \"eq\",\n \"value\": \"this is where a transaction's 'message' is stored\",\n }\n ]\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n assert perf_issue.id in result\n\n filters[0][\"value\"] = \"wrong message\"\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n assert perf_issue.id not in result\n # this can be tested when SNS-1891 is fixed\n \n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 636, "n_words": 153, "vocab_size": 81, "complexity": 1, "nloc": 59, "token_counts": 311, "n_ast_nodes": 524, "n_identifiers": 36, "random_cut": "def test_transactions(self):\n prev_hour = timezone.now() - timedelta(hours=1)\n event = self.transaction_data.copy()\n event.update(\n {\n \"start_timestamp\": iso_format(prev_hour - timedelta(minutes=1)),\n \"timestamp\": iso_format(prev_hour),\n \"tags\": {\"foo\": \"bar\"},\n \"transaction\": \"this is where a transaction's 'message' is stored\",\n }\n )\n transaction = self.store_event(project_id=self.project.id, data=event)\n\n perf_issue = transaction.groups[0]\n perf_issue.update(first_seen=prev_hour)\n Activity.objects.create(\n project=self.project,\n group=perf_issue,\n type=ActivityType.SET_REGRESSION.value,\n datetime=prev_hour,\n data={\"event_id\": transaction.event_id},\n )\n conditions = [{\"id\": \"sentry.rules.conditions.regression_event.RegressionEventCondition\"}]\n filters = [\n {\n \"id\": \"sentry.rules.filters.tagged_event.TaggedEventFilter\",\n \"key\": \"foo\",\n \"match\": \"eq\",\n \"value\": \"bar\",\n }\n ]\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n assert perf_issue.id in result\n\n filters[0][\"value\"] = \"baz\"\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n assert perf_issue.id not in result\n\n filters = [\n {\n \"id\": \"sentry.rules.filters.event_attribute.EventAttributeFilter\",\n \"attribute\": \"message\",\n \"match\": \"eq\",\n \"value\": \"this is where a transaction's 'message' is 
stored\",\n }\n ]\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n assert perf_issue.id in result\n\n filters[0][\"value\"] = \"wrong message\"\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n", "d_id": 18538, "documentation": { "docstring": "\n conditions = [{\"id\": \"sentry.rules.conditions.first_seen_event.FirstSeenEventCondition\"}]\n filters = [{\n \"id\": \"sentry.rules.filters.tagged_event.TaggedEventFilter\",\n \"key\": \"foo\",\n \"match\": \"eq\",\n \"value\": \"bar\",\n }]\n result = preview(self.project, conditions, filters, \"all\", \"all\", 0)\n assert perf_issue.id in result\n ", "n_words": 28, "vocab_size": 24, "n_whitespaces": 115, "language": "en" } }, { "id": 96067, "commit_id": "8e70206e59a81fba2f9a833aed8aa762848c335c", "repo": "sentry", "path": "tests/sentry/models/test_release.py", "file_name": "test_release.py", "fun_name": "test_follows_semver_all_releases_semver_and_missing_package_semver_release_version", "commit_message": "fix(semver): Fixes semver check bug (#31567)\n\nFixes bug that considers a release\r\nto be following semver even if the release\r\ndoes not have a package", "code": "def test_follows_semver_all_releases_semver_and_missing_package_semver_release_version(self):\n \n assert (\n follows_semver_versioning_scheme(\n org_id=self.org.id, project_id=self.proj_1.id, release_version=\"2.0.0\"\n )\n is False\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 81, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 9, "random_cut": "def test_follows_semver_all_releases_semver_and_missing_package_semver_release_version(self):\n \n assert (\n follows_semver_versioning_scheme(\n org_id=self.org.id, project_id=self.proj_1.id, release_version=\"2.0.0\"\n )\n is False\n )\n", "d_id": 19271, "documentation": { "docstring": "\n Test that ensures that even if a project is following semver, then if the release_version\n supplied lacks a package, then for that specific release we opt the project out of being\n considered a semver project\n ", "n_words": 35, "vocab_size": 26, "n_whitespaces": 64, "language": "en" } }, { "id": 197534, "commit_id": "7fe8e027ae1d7f683243c0229b961671a6cbb4c5", "repo": "sympy", "path": "sympy/stats/joint_rv_types.py", "file_name": "joint_rv_types.py", "fun_name": "MultivariateT", "commit_message": "Improved some documentation in the stats module", "code": "def MultivariateT(syms, mu, sigma, v):\n \n return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v)\n\n\n#-------------------------------------------------------------------------------\n# Multivariate Normal Gamma distribution ---------------------------------------\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def MultivariateT(syms, mu, sigma, v):\n \n return multivariate_rv(Mu", "d_id": 48612, "documentation": { "docstring": "\n Creates a joint random variable with multivariate T-distribution.\n\n Parameters\n ==========\n\n syms : A symbol/str\n For identifying the random variable.\n mu : A list/matrix\n Representing the location vector\n sigma : The shape matrix for the distribution\n\n Examples\n ========\n\n >>> from 
sympy.stats import density, MultivariateT\n >>> from sympy import Symbol\n\n >>> x = Symbol(\"x\")\n >>> X = MultivariateT(\"x\", [1, 1], [[1, 0], [0, 1]], 2)\n\n >>> density(X)(1, 2)\n 2/(9*pi)\n\n Returns\n =======\n\n RandomSymbol\n\n ", "n_words": 70, "vocab_size": 56, "n_whitespaces": 139, "language": "en" } }, { "id": 165559, "commit_id": "3aec1d5756f363e25062914dbb82bd8b25b399ce", "repo": "pandas", "path": "pandas/core/indexes/base.py", "file_name": "base.py", "fun_name": "_can_hold_identifiers_and_holds_name", "commit_message": "BUG: DataFrame.getattribute raising if columns have dtype string (#46301)", "code": "def _can_hold_identifiers_and_holds_name(self, name) -> bool:\n \n if self.is_object() or is_string_dtype(self.dtype) or self.is_categorical():\n return name in self\n return False\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 49, "n_words": 17, "vocab_size": 15, "complexity": 4, "nloc": 12, "token_counts": 36, "n_ast_nodes": 60, "n_identifiers": 8, "random_cut": "def _can_hold_identifiers_and_holds_name(self, name) -> bool:\n \n if self.is_object() or is_string_dtype(self.dtype) or self.is_categorical():\n return name in self\n return False\n", "d_id": 39678, "documentation": { "docstring": "\n Faster check for ``name in self`` when we know `name` is a Python\n identifier (e.g. in NDFrame.__getattr__, which hits this to support\n . key lookup). For indexes that can't hold identifiers (everything\n but object & categorical) we just return False.\n\n https://github.com/pandas-dev/pandas/issues/19764\n ", "n_words": 41, "vocab_size": 39, "n_whitespaces": 84, "language": "en" } }, { "id": 247605, "commit_id": "5dd949bee6158a8b651db9f2ae417a62c8184bfd", "repo": "synapse", "path": "tests/handlers/test_directory.py", "file_name": "test_directory.py", "fun_name": "test_remove_other_alias", "commit_message": "Add type hints to some tests/handlers files. 
(#12224)", "code": "def test_remove_other_alias(self) -> None:\n \n # Create a second alias.\n other_test_alias = \"#test2:test\"\n other_room_alias = self._add_alias(other_test_alias)\n\n # Set the alias as the canonical alias for this room.\n self._set_canonical_alias(\n {\n \"alias\": self.test_alias,\n \"alt_aliases\": [self.test_alias, other_test_alias],\n }\n )\n\n data = self._get_canonical_alias()\n self.assertEqual(data[\"content\"][\"alias\"], self.test_alias)\n self.assertEqual(\n data[\"content\"][\"alt_aliases\"], [self.test_alias, other_test_alias]\n )\n\n # Delete the second alias.\n self.get_success(\n self.handler.delete_association(\n create_requester(self.admin_user), other_room_alias\n )\n )\n\n data = self._get_canonical_alias()\n self.assertEqual(data[\"content\"][\"alias\"], self.test_alias)\n self.assertEqual(data[\"content\"][\"alt_aliases\"], [self.test_alias])\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 282, "n_words": 63, "vocab_size": 44, "complexity": 1, "nloc": 23, "token_counts": 146, "n_ast_nodes": 247, "n_identifiers": 15, "random_cut": "def test_remove_other_alias(self) -> None:\n \n # Create a second alias.\n other_test_alias = \"#test2:test\"\n other_room_alias = self._add_alias(other_test_alias)\n\n # Set the alias as the canonical alias for this room.\n self._set_canonical_alias(\n {\n \"alias\": self.test_alias,\n \"alt_aliases\": [self.test_alias, other_test_alias],\n }\n )\n\n data = self._get_canonical_alias()\n self.assertEqual(data[\"content\"][\"alias\"], self.test_alias)\n self.assertEqual(\n data[\"content\"][\"alt_aliases\"], [self.test_alias, other_test_alias]\n )\n\n # Delete the second alia", "d_id": 71773, "documentation": { "docstring": "Removing an alias listed as in alt_aliases should remove it there too.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 319960, "commit_id": "6d5d308d6c7b7e359ba72964a300634e1065ace9", "repo": "paperless-ngx", "path": "src/documents/tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_get_comments_no_doc", "commit_message": "Starts on implementing tests for the new API", "code": "def test_get_comments_no_doc(self):\n \n response = self.client.get(\n \"/api/documents/500/comments/\",\n format=\"json\",\n )\n self.assertEqual(response.status_code, 404)\n\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 31, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def test_get_comments_no_doc(self):\n \n response = self.client.get(\n \"/api/documents/500/comments/\",\n format=\"js", "d_id": 117026, "documentation": { "docstring": "\n GIVEN:\n - A request to get comments from a non-existent document\n WHEN:\n - API request for document comments is made\n THEN:\n - HTTP 404 is returned\n ", "n_words": 26, "vocab_size": 20, "n_whitespaces": 88, "language": "en" } }, { "id": 321351, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "tests/unit/keyinput/test_basekeyparser.py", "file_name": "test_basekeyparser.py", "fun_name": "test_mapping_keypad", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def test_mapping_keypad(self, config_stub, keyparser):\n \n config_stub.val.bindings.commands = {'normal': {'a': 'nop'}}\n 
config_stub.val.bindings.key_mappings = {'1': 'a'}\n\n info = keyutils.KeyInfo(Qt.Key.Key_1, Qt.KeyboardModifier.KeypadModifier)\n keyparser.handle(info.to_event())\n keyparser.execute.assert_called_once_with('nop', None)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 62, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 78, "n_ast_nodes": 134, "n_identifiers": 20, "random_cut": "def test_mapping_keypad(self, config_stub, keyparser):\n \n config_stub.val.bindings.commands = {'normal': {'a': 'nop'}}\n config_stub.val.bindings.key_", "d_id": 117678, "documentation": { "docstring": "Make sure falling back to non-numpad keys works with mappings.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 5799, "commit_id": "2a157d452611d37cf50ccb7d56ff1a06e9790ecb", "repo": "InstaPy", "path": "instapy/like_util.py", "file_name": "like_util.py", "fun_name": "verify_liked_image", "commit_message": "PR - Fix `extract_text_from_element()`and `find_element*()` to `find_element()` (#6438)\n\n* Updated getUserData() and find_element*\r\nSigned-off-by: elulcao \r\n\r\nThanks @breuerfelix for reviewing, 🚀 \r\nPeople in this thread please let me know if something is not OK, IG changed a lot these days. 🤗 @her", "code": "def verify_liked_image(browser, logger):\n \n\n browser.refresh()\n unlike_xpath = read_xpath(like_image.__name__, \"unlike\")\n like_elem = browser.find_elements(By.XPATH, unlike_xpath)\n\n if len(like_elem) == 1:\n return True\n else:\n logger.warning(\"--> Image was NOT liked! You have a BLOCK on likes!\")\n return False\n\n", "url": "https://github.com/InstaPy/InstaPy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 71, "n_words": 32, "vocab_size": 30, "complexity": 2, "nloc": 9, "token_counts": 55, "n_ast_nodes": 94, "n_identifiers": 14, "random_cut": "def verify_liked_image(browser, logger):\n \n\n browser.refresh()\n unlike_xpath = read_xpath(like_image.__name__, \"un", "d_id": 826, "documentation": { "docstring": "Check for a ban on likes using the last liked image", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 245505, "commit_id": "af063a6f25ddae4de90646f86b2db824f3d00138", "repo": "mmdetection", "path": "mmdet/structures/mask/structures.py", "file_name": "structures.py", "fun_name": "get_bboxes", "commit_message": "[Refactor] Refactor pipelines with boxlist. 
(#8562)\n\n* Refactor pipelines and data_preprocesser by boxlist\r\n\r\n* Refactor browse_dataset.py\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* update\r\n\r\n* Update\r\n\r\n* Change with_box_wrapped to with_boxlist\r\n\r\n* Fix comments\r\n\r\n* Fix commits\r\n\r\n* Update UT", "code": "def get_bboxes(self, dst_type='hbb'):\n \n from ..bbox import get_box_type\n _, box_type_cls = get_box_type(dst_type)\n return box_type_cls.from_instance_masks(self)\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 41, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 8, "random_cut": "def get_bboxes(self, dst_type='hbb'):\n \n from ..bbox import get_box_type\n ", "d_id": 70811, "documentation": { "docstring": "Get the certain type boxes from masks.\n\n Please refer to ``mmdet.structures.bbox.box_type`` for more details of\n the box type.\n\n Args:\n dst_type: Destination box type.\n\n Returns:\n :obj:`BaseBoxes`: Certain type boxes.\n ", "n_words": 28, "vocab_size": 24, "n_whitespaces": 85, "language": "en" } }, { "id": 197116, "commit_id": "cba899d4137b0b65f6850120ee42cd4fcd4f9dbf", "repo": "sympy", "path": "sympy/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "deprecate_call", "commit_message": "Update the various tensor deprecations", "code": "def deprecate_call():\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.5\",\n active_deprecations_target=\"deprecated-tensor-fun-eval\",\n stacklevel=4,\n )\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 10, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def deprecate_call():\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.5\",\n active_deprecations_target=\"deprecated-tensor-", "d_id": 48349, "documentation": { "docstring": "\n Calling a tensor like Tensor(*indices) is deprecated. 
Use\n Tensor.substitute_indices() instead.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 32, "language": "en" } }, { "id": 60271, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/net_spec.py", "file_name": "net_spec.py", "fun_name": "assign_proto", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def assign_proto(proto, name, val):\n \n\n is_repeated_field = hasattr(getattr(proto, name), 'extend')\n if is_repeated_field and not isinstance(val, list):\n val = [val]\n if isinstance(val, list):\n if isinstance(val[0], dict):\n for item in val:\n proto_item = getattr(proto, name).add()\n for k, v in six.iteritems(item):\n assign_proto(proto_item, k, v)\n else:\n getattr(proto, name).extend(val)\n elif isinstance(val, dict):\n for k, v in six.iteritems(val):\n assign_proto(getattr(proto, name), k, v)\n else:\n setattr(proto, name, val)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 194, "n_words": 59, "vocab_size": 37, "complexity": 9, "nloc": 17, "token_counts": 151, "n_ast_nodes": 230, "n_identifiers": 19, "random_cut": "def assign_proto(proto, name, val):\n \n\n is_repeated_field = hasattr(getattr(proto, name), 'extend')\n if is_repeated_field and not isinstance(val, list):\n val = [val]\n if isinstance(val, list):\n if isinstance(val[0], dict):\n for item in val:\n proto_item = getattr(proto, name).add()\n for k, v in six.iteritems(item):\n ", "d_id": 12059, "documentation": { "docstring": "Assign a Python object to a protobuf message, based on the Python\n type (in recursive fashion). Lists become repeated fields/messages, dicts\n become messages, and other types are assigned directly. For convenience,\n repeated fields whose values are not lists are converted to single-element\n lists; e.g., `my_repeated_int_field=3` is converted to\n `my_repeated_int_field=[3]`.", "n_words": 49, "vocab_size": 40, "n_whitespaces": 63, "language": "en" } }, { "id": 196759, "commit_id": "ad766d1c02943e86f50559abfd0c72e582c9ca6a", "repo": "sympy", "path": "sympy/assumptions/handlers/common.py", "file_name": "common.py", "fun_name": "__new__", "commit_message": "Update the AskHandler deprecation warnings\n\nn.b., the issue number in the original warning message was wrong. It should\nhave been #20837.", "code": "def __new__(cls, *args, **kwargs):\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.8\",\n active_deprecations_target='deprecated-askhandler',\n )\n return super().__new__(cls, *args, **kwargs)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 66, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 10, "token_counts": 39, "n_ast_nodes": 64, "n_identifiers": 8, "random_cut": "def __new__(cls, *args, **kwargs):\n sympy_deprecation_warning(\n ,\n depr", "d_id": 48155, "documentation": { "docstring": "\n The AskHandler system is deprecated. 
The AskHandler class should\n be replaced with the multipledispatch handler of Predicate\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 51, "language": "en" } }, { "id": 260996, "commit_id": "02e36b4d866d7c7b14397ab291cb3e97d1105d5c", "repo": "scikit-learn", "path": "sklearn/utils/sparsefuncs.py", "file_name": "sparsefuncs.py", "fun_name": "incr_mean_variance_axis", "commit_message": "DOC Ensures that incr_mean_variance_axis passes numpydoc validation (#24477)", "code": "def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None):\n \n _raise_error_wrong_axis(axis)\n\n if not isinstance(X, (sp.csr_matrix, sp.csc_matrix)):\n _raise_typeerror(X)\n\n if np.size(last_n) == 1:\n last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype)\n\n if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)):\n raise ValueError(\"last_mean, last_var, last_n do not have the same shapes.\")\n\n if axis == 1:\n if np.size(last_mean) != X.shape[0]:\n raise ValueError(\n \"If axis=1, then last_mean, last_n, last_var should be of \"\n f\"size n_samples {X.shape[0]} (Got {np.size(last_mean)}).\"\n )\n else: # axis == 0\n if np.size(last_mean) != X.shape[1]:\n raise ValueError(\n \"If axis=0, then last_mean, last_n, last_var should be of \"\n f\"size n_features {X.shape[1]} (Got {np.size(last_mean)}).\"\n )\n\n X = X.T if axis == 1 else X\n\n if weights is not None:\n weights = _check_sample_weight(weights, X, dtype=X.dtype)\n\n return _incr_mean_var_axis0(\n X, last_mean=last_mean, last_var=last_var, last_n=last_n, weights=weights\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 308, "n_words": 121, "vocab_size": 74, "complexity": 9, "nloc": 26, "token_counts": 206, "n_ast_nodes": 366, "n_identifiers": 22, "random_cut": "def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None):\n \n _raise_error_wrong_axis(axis)\n\n if not isinstance(X, (sp.csr_matrix, sp.csc_matrix)):\n _raise_typeerror(X)\n\n if np.size(last_n) == 1:\n last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype)\n\n if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)):\n raise ValueError(\"last_mean, last_var, last_n do not have the same shapes.\")\n\n", "d_id": 76613, "documentation": { "docstring": "Compute incremental mean and variance along an axis on a CSR or CSC matrix.\n\n last_mean, last_var are the statistics computed at the last step by this\n function. Both must be initialized to 0-arrays of the proper size, i.e.\n the number of features in X. last_n is the number of samples encountered\n until now.\n\n Parameters\n ----------\n X : CSR or CSC sparse matrix of shape (n_samples, n_features)\n Input data.\n\n axis : {0, 1}\n Axis along which the axis should be computed.\n\n last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating\n Array of means to update with the new data X.\n Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.\n\n last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating\n Array of variances to update with the new data X.\n Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.\n\n last_n : float or ndarray of shape (n_features,) or (n_samples,), \\\n dtype=floating\n Sum of the weights seen so far, excluding the current weights\n If not float, it should be of shape (n_features,) if\n axis=0 or (n_samples,) if axis=1. 
If float it corresponds to\n having same weights for all samples (or features).\n\n weights : ndarray of shape (n_samples,) or (n_features,), default=None\n If axis is set to 0 shape is (n_samples,) or\n if axis is set to 1 shape is (n_features,).\n If it is set to None, then samples are equally weighted.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n means : ndarray of shape (n_features,) or (n_samples,), dtype=floating\n Updated feature-wise means if axis = 0 or\n sample-wise means if axis = 1.\n\n variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating\n Updated feature-wise variances if axis = 0 or\n sample-wise variances if axis = 1.\n\n n : ndarray of shape (n_features,) or (n_samples,), dtype=integral\n Updated number of seen samples per feature if axis=0\n or number of seen features per sample if axis=1.\n\n If weights is not None, n is a sum of the weights of the seen\n samples or features instead of the actual number of seen\n samples or features.\n\n Notes\n -----\n NaNs are ignored in the algorithm.\n ", "n_words": 344, "vocab_size": 134, "n_whitespaces": 579, "language": "en" } }, { "id": 308105, "commit_id": "f453726b1862d1d247f6aefdd5f23455b87c11cf", "repo": "core", "path": "tests/components/homekit/test_type_thermostats.py", "file_name": "test_type_thermostats.py", "fun_name": "test_thermostat_with_no_off_after_recheck", "commit_message": "Cleanup HVACAction and HVACMode in tests (#78813)", "code": "async def test_thermostat_with_no_off_after_recheck(hass, hk_driver, events):\n \n entity_id = \"climate.test\"\n\n # support_auto = True\n hass.states.async_set(\n entity_id,\n HVACMode.COOL,\n {\n ATTR_SUPPORTED_FEATURES: SUPPORT_TARGET_TEMPERATURE\n | SUPPORT_TARGET_TEMPERATURE_RANGE,\n ATTR_HVAC_MODES: [],\n },\n )\n await hass.async_block_till_done()\n acc = Thermostat(hass, hk_driver, \"Climate\", entity_id, 1, None)\n hk_driver.add_accessory(acc)\n\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.char_cooling_thresh_temp.value == 23.0\n assert acc.char_heating_thresh_temp.value == 19.0\n\n assert acc.char_cooling_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP\n assert acc.char_cooling_thresh_temp.properties[PROP_MIN_VALUE] == 7.0\n assert acc.char_cooling_thresh_temp.properties[PROP_MIN_STEP] == 0.1\n assert acc.char_heating_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP\n assert acc.char_heating_thresh_temp.properties[PROP_MIN_VALUE] == 7.0\n assert acc.char_heating_thresh_temp.properties[PROP_MIN_STEP] == 0.1\n\n assert acc.char_target_heat_cool.value == 2\n\n hass.states.async_set(\n entity_id,\n HVACMode.HEAT_COOL,\n {\n ATTR_TARGET_TEMP_HIGH: 22.0,\n ATTR_TARGET_TEMP_LOW: 20.0,\n ATTR_CURRENT_TEMPERATURE: 18.0,\n ATTR_HVAC_ACTION: HVACAction.HEATING,\n ATTR_HVAC_MODES: [HVACMode.HEAT_COOL, HVACMode.AUTO],\n },\n )\n await hass.async_block_till_done()\n assert acc.char_heating_thresh_temp.value == 20.0\n assert acc.char_cooling_thresh_temp.value == 22.0\n assert acc.char_current_heat_cool.value == 1\n assert acc.char_target_heat_cool.value == 3\n assert acc.char_current_temp.value == 18.0\n assert acc.char_display_units.value == 0\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 346, "n_words": 118, "vocab_size": 69, "complexity": 1, "nloc": 43, "token_counts": 294, "n_ast_nodes": 406, "n_identifiers": 38, "random_cut": "async def test_thermostat_with_no_off_after_recheck(hass, hk_driver, events):\n 
\n entity_id = \"climate.test\"\n\n # support_auto = True\n hass.states.async_set(\n entity_id,\n HVACMode.COOL,\n {\n ATTR_SUPPORTED_FEATURES: SUPPORT_TARGET_TEMPERATURE\n | SUPPORT_TARGET_TEMPERATURE_RANGE,\n ATTR_HVAC_MODES: [],\n },\n )\n await hass.async_block_till_done()\n acc = Thermostat(hass, hk_driver, \"Climate\", entity_id, 1, None)\n hk_driver.add_accessory(acc)\n\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.char_cooling_thresh_temp.value == 23.0\n assert acc.char_heating_thresh_temp.value == 19.0\n\n assert acc.char_cooling_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP\n assert acc.char_cooling_thresh_temp.properties[PROP_MIN_VALUE] == 7.0\n assert acc.char_cooling_thresh_temp.properties[PROP_MIN_STEP] == 0.1\n assert acc.char_heating_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP\n assert acc.char_heating_thresh_temp.properties[PROP_MIN_VALUE] == 7.0\n assert acc.char_heating_thresh_temp.properties[PROP_MIN_STEP] == 0.1\n\n assert acc.char_target_heat_cool.value == 2\n", "d_id": 106866, "documentation": { "docstring": "Test if a thermostat that is not ready when we first see it that actually does not have off.", "n_words": 19, "vocab_size": 17, "n_whitespaces": 18, "language": "en" } }, { "id": 76592, "commit_id": "10f8e8d21640f019eeb22e91ba3ee1c5284c4574", "repo": "wagtail", "path": "wagtail/contrib/forms/models.py", "file_name": "models.py", "fun_name": "save", "commit_message": "AbstractFormField.save - add to the docstring about clean name", "code": "def save(self, *args, **kwargs):\n \n\n is_new = self.pk is None\n if is_new:\n clean_name = get_field_clean_name(self.label)\n self.clean_name = clean_name\n\n super().save(*args, **kwargs)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 47, "n_ast_nodes": 78, "n_identifiers": 10, "random_cut": "def save(self, *args, **kwargs):\n \n\n is_new = self.pk is None\n ", "d_id": 16548, "documentation": { "docstring": "\n When new fields are created, generate a template safe ascii name to use as the\n JSON storage reference for this field. 
Previously created fields will be updated\n to use the legacy unidecode method via checks & _migrate_legacy_clean_name.\n We do not want to update the clean name on any subsequent changes to the label\n as this would invalidate any previously submitted data.\n ", "n_words": 61, "vocab_size": 49, "n_whitespaces": 104, "language": "en" } }, { "id": 133135, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/dask/scheduler.py", "file_name": "scheduler.py", "fun_name": "dask_task_wrapper", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs, *args):\n \n if ray_pretask_cbs is not None:\n pre_states = [\n cb(key, args) if cb is not None else None for cb in ray_pretask_cbs\n ]\n repacked_args, repacked_deps = repack(args)\n # Recursively execute Dask-inlined tasks.\n actual_args = [_execute_task(a, repacked_deps) for a in repacked_args]\n # Execute the actual underlying Dask task.\n result = func(*actual_args)\n\n if ray_posttask_cbs is not None:\n for cb, pre_state in zip(ray_posttask_cbs, pre_states):\n if cb is not None:\n cb(key, result, pre_state)\n\n return result\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 162, "n_words": 77, "vocab_size": 52, "complexity": 8, "nloc": 13, "token_counts": 107, "n_ast_nodes": 159, "n_identifiers": 17, "random_cut": "def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs, *args):\n \n if ray_pretask_cbs is not None:\n pre_states = [\n cb(key, args) if cb is not None else None for cb in ray_pretask_cbs\n ]\n repacked_args, repacked_deps = repack(args)\n # Recursively execute Dask-inlined tasks.\n actual_args = [_execute_task(a, repacked_deps) for a in repacked_args]\n # Execute the actual underlying Dask task.\n result = func(*actual_args)\n\n if ray_posttask_cbs is not None:\n for cb, pre_state in zip(ray_posttask_cbs, pre_states):\n if cb is not None:\n cb(key, result, pre_state)\n\n return result\n\n", "d_id": 29934, "documentation": { "docstring": "\n A Ray remote function acting as a Dask task wrapper. This function will\n repackage the given flat `args` into its original data structures using\n `repack`, execute any Dask subtasks within the repackaged arguments\n (inlined by Dask's optimization pass), and then pass the concrete task\n arguments to the provide Dask task function, `func`.\n\n Args:\n func (callable): The Dask task function to execute.\n repack (callable): A function that repackages the provided args into\n the original (possibly nested) Python objects.\n key (str): The Dask key for this task.\n ray_pretask_cbs (callable): Pre-task execution callbacks.\n ray_posttask_cbs (callable): Post-task execution callback.\n *args (ObjectRef): Ray object references representing the Dask task's\n arguments.\n\n Returns:\n The output of the Dask task. 
In the context of Ray, a\n dask_task_wrapper.remote() invocation will return a Ray object\n reference representing the Ray task's result.\n ", "n_words": 131, "vocab_size": 87, "n_whitespaces": 241, "language": "en" } }, { "id": 206716, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/lorem_ipsum.py", "file_name": "lorem_ipsum.py", "fun_name": "words", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def words(count, common=True):\n \n word_list = list(COMMON_WORDS) if common else []\n c = len(word_list)\n if count > c:\n count -= c\n while count > 0:\n c = min(count, len(WORDS))\n count -= c\n word_list += random.sample(WORDS, c)\n else:\n word_list = word_list[:count]\n return \" \".join(word_list)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 114, "n_words": 42, "vocab_size": 28, "complexity": 4, "nloc": 12, "token_counts": 80, "n_ast_nodes": 131, "n_identifiers": 13, "random_cut": "def words(count, common=True):\n \n word_list = list(COMMON_WORDS) if common else []\n c = len(word_list)\n if count > c:\n count -= c\n while count > 0:\n c = min(count, len(WORDS))\n ", "d_id": 51651, "documentation": { "docstring": "\n Return a string of `count` lorem ipsum words separated by a single space.\n\n If `common` is True, then the first 19 words will be the standard\n 'lorem ipsum' words. Otherwise, all words will be selected randomly.\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 49, "language": "en" } }, { "id": 62402, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/_inputstream.py", "file_name": "_inputstream.py", "fun_name": "jumpTo", "commit_message": "upd; format", "code": "def jumpTo(self, bytes):\n \n try:\n self._position = self.index(bytes, self.position) + len(bytes) - 1\n except ValueError:\n raise StopIteration\n return True\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 68, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 38, "n_ast_nodes": 62, "n_identifiers": 9, "random_cut": "def jumpTo(self, bytes):\n", "d_id": 12969, "documentation": { "docstring": "Look for the next sequence of bytes matching a given sequence. 
If\n a match is found advance the position to the last byte of the match", "n_words": 26, "vocab_size": 20, "n_whitespaces": 32, "language": "en" } }, { "id": 290182, "commit_id": "b4ad03784f1d02995da39f3094c80adb4a60492b", "repo": "core", "path": "homeassistant/components/mqtt/binary_sensor.py", "file_name": "binary_sensor.py", "fun_name": "available", "commit_message": "Improve MQTT type hints part 1 (#80523)\n\n* Improve typing alarm_control_panel\r\n\r\n* Improve typing binary_sensor\r\n\r\n* Improve typing button\r\n\r\n* Add misssed annotation\r\n\r\n* Move CONF_EXPIRE_AFTER to _setup_from_config\r\n\r\n* Use CALL_BACK type\r\n\r\n* Remove assert, improve code style", "code": "def available(self) -> bool:\n \n expire_after: int | None = self._config.get(CONF_EXPIRE_AFTER)\n # mypy doesn't know about fget: https://github.com/python/mypy/issues/6185\n return MqttAvailability.available.fget(self) and ( # type: ignore[attr-defined]\n expire_after is None or not self._expired\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 78, "n_words": 31, "vocab_size": 29, "complexity": 3, "nloc": 6, "token_counts": 42, "n_ast_nodes": 71, "n_identifiers": 11, "random_cut": "def available(self) -> bool:\n \n expire_after: int | None = self._config.get(CONF_EXPIRE_AFTER)\n ", "d_id": 89301, "documentation": { "docstring": "Return true if the device is available and value has not expired.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 196691, "commit_id": "9ad8ab9fe58051cf11626ba6654852fcfec60147", "repo": "sympy", "path": "sympy/stats/crv_types.py", "file_name": "crv_types.py", "fun_name": "Logistic", "commit_message": "Documentation cleanup 5", "code": "def Logistic(name, mu, s):\n r\n\n return rv(name, LogisticDistribution, (mu, s))\n\n#-------------------------------------------------------------------------------\n# Log-logistic distribution --------------------------------------------------------\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 49, "token_counts": 24, "n_ast_nodes": 36, "n_identifiers": 6, "random_cut": "def Logistic(name, mu, s):\n r\n\n return rv(name, LogisticDistribution, (mu, s))\n\n#-----------", "d_id": 48109, "documentation": { "docstring": "\n Create a continuous random variable with a logistic distribution.\n\n Explanation\n ===========\n\n The density of the logistic distribution is given by\n\n .. math::\n f(x) := \\frac{e^{-(x-\\mu)/s}} {s\\left(1+e^{-(x-\\mu)/s}\\right)^2}\n\n Parameters\n ==========\n\n mu : Real number, the location (mean)\n s : Real number, `s > 0`, a scale\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import Logistic, density, cdf\n >>> from sympy import Symbol\n\n >>> mu = Symbol(\"mu\", real=True)\n >>> s = Symbol(\"s\", positive=True)\n >>> z = Symbol(\"z\")\n\n >>> X = Logistic(\"x\", mu, s)\n\n >>> density(X)(z)\n exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2)\n\n >>> cdf(X)(z)\n 1/(exp((mu - z)/s) + 1)\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Logistic_distribution\n .. 
[2] http://mathworld.wolfram.com/LogisticDistribution.html\n\n ", "n_words": 105, "vocab_size": 77, "n_whitespaces": 200, "language": "en" } }, { "id": 25205, "commit_id": "1f9400dd7374ce9cc47981372e324ff412e53ba3", "repo": "PaddleOCR", "path": "ppocr/modeling/heads/local_graph.py", "file_name": "local_graph.py", "fun_name": "__call__", "commit_message": "add drrg", "code": "def __call__(self, feat_maps, comp_attribs):\n \n\n assert isinstance(feat_maps, paddle.Tensor)\n assert comp_attribs.ndim == 3\n assert comp_attribs.shape[2] == 8\n\n sorted_dist_inds_batch = []\n local_graph_batch = []\n knn_batch = []\n node_feat_batch = []\n node_label_batch = []\n\n for batch_ind in range(comp_attribs.shape[0]):\n num_comps = int(comp_attribs[batch_ind, 0, 0])\n comp_geo_attribs = comp_attribs[batch_ind, :num_comps, 1:7]\n node_labels = comp_attribs[batch_ind, :num_comps, 7].astype(\n np.int32)\n\n comp_centers = comp_geo_attribs[:, 0:2]\n distance_matrix = euclidean_distance_matrix(comp_centers,\n comp_centers)\n\n batch_id = np.zeros(\n (comp_geo_attribs.shape[0], 1), dtype=np.float32) * batch_ind\n comp_geo_attribs[:, -2] = np.clip(comp_geo_attribs[:, -2], -1, 1)\n angle = np.arccos(comp_geo_attribs[:, -2]) * np.sign(\n comp_geo_attribs[:, -1])\n angle = angle.reshape((-1, 1))\n rotated_rois = np.hstack(\n [batch_id, comp_geo_attribs[:, :-2], angle])\n rois = paddle.to_tensor(rotated_rois)\n content_feats = self.pooling(feat_maps[batch_ind].unsqueeze(0),\n rois)\n\n content_feats = content_feats.reshape([content_feats.shape[0], -1])\n geo_feats = feature_embedding(comp_geo_attribs,\n self.node_geo_feat_dim)\n geo_feats = paddle.to_tensor(geo_feats)\n node_feats = paddle.concat([content_feats, geo_feats], axis=-1)\n\n sorted_dist_inds = np.argsort(distance_matrix, axis=1)\n pivot_local_graphs, pivot_knns = self.generate_local_graphs(\n sorted_dist_inds, node_labels)\n\n node_feat_batch.append(node_feats)\n node_label_batch.append(node_labels)\n local_graph_batch.append(pivot_local_graphs)\n knn_batch.append(pivot_knns)\n sorted_dist_inds_batch.append(sorted_dist_inds)\n\n (node_feats, adjacent_matrices, knn_inds, gt_linkage) = \\\n self.generate_gcn_input(node_feat_batch,\n node_label_batch,\n local_graph_batch,\n knn_batch,\n sorted_dist_inds_batch)\n\n return node_feats, adjacent_matrices, knn_inds, gt_linkage\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 845, "n_words": 146, "vocab_size": 103, "complexity": 2, "nloc": 48, "token_counts": 406, "n_ast_nodes": 607, "n_identifiers": 58, "random_cut": "def __call__(self, feat_maps, comp_attribs):\n \n\n assert isinstance(feat_maps, paddle.Tensor)\n assert comp_attribs.ndim == 3\n assert comp_attribs.shape[2] == 8\n\n sorted_dist_inds_batch = []\n local_graph_batch = []\n knn_batch = []\n node_feat_batch = []\n node_label_batch = []\n\n for batch_ind in range(comp_attribs.shape[0]):\n num_comps = int(comp_attribs[batch_ind, 0, 0])\n comp_geo_attribs = comp_attribs[batch_ind, :num_comps, 1:7]\n node_labels = comp_attribs[batch_ind, :num_comps, 7].astype(\n np.int32)\n\n comp_centers = comp_geo_attribs[:, 0:2]\n distance_matrix = euclidean_distance_matrix(comp_centers,\n comp_centers)\n\n ", "d_id": 4867, "documentation": { "docstring": "Generate local graphs as GCN input.\n\n Args:\n feat_maps (Tensor): The feature maps to extract the content\n features of text components.\n comp_attribs (ndarray): The text component attributes.\n\n Returns:\n 
local_graphs_node_feat (Tensor): The node features of graph.\n adjacent_matrices (Tensor): The adjacent matrices of local graphs.\n pivots_knn_inds (Tensor): The k-nearest neighbor indices in local\n graph.\n gt_linkage (Tensor): The surpervision signal of GCN for linkage\n prediction.\n ", "n_words": 61, "vocab_size": 43, "n_whitespaces": 193, "language": "en" } }, { "id": 153659, "commit_id": "0c1a2129df64cf45bf1ff49c8ed92c510fdb1c82", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/omnisci_on_native/exchange/dataframe_protocol/dataframe.py", "file_name": "dataframe.py", "fun_name": "_is_zero_copy_arrow_op", "commit_message": "FEAT-#4244: Implement dataframe exchange protocol for OmniSci (#4269)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Vasily Litvinov \r\nSigned-off-by: Dmitry Chigarev ", "code": "def _is_zero_copy_arrow_op(cls, op) -> bool:\n \n is_zero_copy_op = False\n if isinstance(op, (FrameNode, TransformNode, UnionNode)):\n # - FrameNode: already materialized PyArrow table\n # - TransformNode: select certain columns of the table, implemented zero-copy (``df._arrow_select``)\n # - UnionNode: concatenate PyArrow tables, implemented zero-copy (``df._arrow_concat``)\n is_zero_copy_op = True\n elif isinstance(op, MaskNode) and (\n isinstance(op.row_positions, slice) or is_range_like(op.row_positions)\n ):\n # Can select rows zero-copy if indexer is a slice-like (``df._arrow_row_slice``)\n is_zero_copy_op = True\n return is_zero_copy_op and all(\n # Walk the computation tree\n cls._is_zero_copy_arrow_op(_op)\n for _op in getattr(op, \"inputs\", [])\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 244, "n_words": 85, "vocab_size": 64, "complexity": 7, "nloc": 23, "token_counts": 83, "n_ast_nodes": 133, "n_identifiers": 16, "random_cut": "def _is_zero_copy_arrow_op(cls, op) -> bool:\n \n is_zero_copy_op = False\n if isinstance(op, (FrameNode, TransformNode, UnionNode)):\n # - FrameNode: already materialized PyArrow table\n # - TransformNode: select certain columns of the table, implemented zero-copy (``df._arrow_select``)\n # - UnionNode: concatenate PyArrow tables, implemented zero-copy (``df._arrow_concat``)\n is_zero_copy_op = True\n elif isinstance(op, Mas", "d_id": 35525, "documentation": { "docstring": "\n Check whether the passed node of the delayed computation tree could be executed zero-copy via PyArrow execution.\n\n Parameters\n ----------\n op : DFAlgNode\n\n Returns\n -------\n bool\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 82, "language": "en" } }, { "id": 211688, "commit_id": "41d8be66e84d066d98cfabbe13d4c7a5877cb009", "repo": "PaddleDetection", "path": "ppdet/modeling/assigners/uniform_assigner.py", "file_name": "uniform_assigner.py", "fun_name": "batch_p_dist", "commit_message": "support YOLOF (#7336)", "code": "def batch_p_dist(x, y, p=2):\n \n x = x.unsqueeze(1)\n diff = x - y\n return paddle.norm(diff, p=p, axis=list(range(2, diff.dim())))\n\n\n@register", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "@register", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 29, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 52, "n_ast_nodes": 85, "n_identifiers": 13, "random_cut": "def batch_p_dist(x, y, p=2):\n \n x = x.unsqueeze(1)\n diff = x - y\n return paddle", "d_id": 53129, "documentation": { "docstring": "\n calculate 
pairwise p_dist, the first index of x and y are batch\n return [x.shape[0], y.shape[0]]\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 68200, "commit_id": "625a9f69f592be8c50c9b1bd1a16e0b7b9157988", "repo": "erpnext", "path": "erpnext/hr/doctype/shift_assignment/shift_assignment.py", "file_name": "shift_assignment.py", "fun_name": "get_employee_shift", "commit_message": "refactor: consider timeslots in `get_employee_shift`", "code": "def get_employee_shift(employee, for_timestamp=None, consider_default_shift=False, next_shift_direction=None):\n\t\n\tif for_timestamp is None:\n\t\tfor_timestamp = now_datetime()\n\n\tshift_details = get_shift_for_timestamp(employee, for_timestamp)\n\n\t# if shift assignment is not found, consider default shift\n\tdefault_shift = frappe.db.get_value('Employee', employee, 'default_shift')\n\tif not shift_details and consider_default_shift:\n\t\tshift_details = get_shift_details(default_shift, for_timestamp.date())\n\n\t# if its a holiday, reset\n\tif shift_details and is_holiday_date(employee, shift_details):\n\t\tshift_details = None\n\n\t# if no shift is found, find next or prev shift based on direction\n\tif not shift_details and next_shift_direction:\n\t\tshift_details = get_prev_or_next_shift(employee, for_timestamp, consider_default_shift, default_shift, next_shift_direction)\n\n\treturn shift_details\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 67, "n_words": 82, "vocab_size": 51, "complexity": 8, "nloc": 12, "token_counts": 103, "n_ast_nodes": 164, "n_identifiers": 16, "random_cut": "def get_employee_shift(employee, for_timestamp=None, consider_default_shift=False, next_shift_direction=None):\n\t\n\tif for_timestamp is None:\n\t\tfor_timestamp = now_datetime()\n\n\tshift_details = get_shift_for_timestamp(employee, for_timestamp)\n\n\t# if shift assignment is not found, consider default shift\n\tdefault_shift = frappe.db.get_value('Employee', employee, 'default_shift')\n\tif not shift_details and consider_default_shift:\n\t\tshift_details = get_shift_details(default_shift, for_timestamp.date())\n\n\t# if its a holiday, reset\n\tif ", "d_id": 14737, "documentation": { "docstring": "Returns a Shift Type for the given employee on the given date. (excluding the holidays)\n\n\t:param employee: Employee for which shift is required.\n\t:param for_timestamp: DateTime on which shift is required\n\t:param consider_default_shift: If set to true, default shift is taken when no shift assignment is found.\n\t:param next_shift_direction: One of: None, 'forward', 'reverse'. Direction to look for next shift if shift not found on given date.\n\t", "n_words": 67, "vocab_size": 45, "n_whitespaces": 62, "language": "en" } }, { "id": 70602, "commit_id": "fb48f9863d8ba1856e0697552fb454de162281b8", "repo": "wagtail", "path": "wagtail/admin/views/workflows.py", "file_name": "workflows.py", "fun_name": "get_create_form_class", "commit_message": "Split out data retrieval methods from BaseTaskChooserView.dispatch\n\nThis ensures that we don't do redundant setup for sub-views that don't need it, e.g. 
setting up creation forms for the results-only view.", "code": "def get_create_form_class(self):\n \n self.create_model = self.get_create_model()\n if self.create_model:\n return get_task_form_class(self.create_model)\n else:\n return None\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 62, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 54, "n_identifiers": 5, "random_cut": "def get_create_form_class(self):\n \n self.create_model = self.get_create_model()\n if self.create_model:\n return ge", "d_id": 15530, "documentation": { "docstring": "\n To be called after dispatch(); returns the form class for creating a new task\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 27995, "commit_id": "5d1a36b9aaf408016957db04f86397b2e53c2500", "repo": "saleor", "path": "saleor/thumbnail/utils.py", "file_name": "utils.py", "fun_name": "retrieve_image", "commit_message": "Better media thumbnails including WebP support (#9988)\n\n* Add thumbnail app\r\n\r\n* Update get_thumbnail_size method and add tests\r\n\r\n* Add logic for creating thumbnails\r\n\r\n* Update logic for getting thumbnail\r\n\r\n* Allow defining format for tumbnail generation\r\n\r\n* Clear handle_thumbnail views\r\n\r\n* Add prepare_image_proxy_url method\r\n\r\n* Use ImageField for user avatar\r\n\r\n* Allow defining thumbnail format when querying user avatar\r\n\r\n* Use ImageField for category backgound_image\r\n\r\n* Use ImageField for Collection backgound_image\r\n\r\n* Use ImageField for ProductMedia image\r\n\r\n* Ensure that thumbnails are deleted when category background_image is changed or deleted\r\n\r\n* Ensure that thumbnails are deleted when collection background_image is changed or deleted\r\n\r\n* Update product media deleteion task and failing tests\r\n\r\n* Delete thumbnail from storage when thumbnail objects is deleted\r\n\r\n* Fix import in product test_bulk_delete\r\n\r\n* Drop create_thumbnails command\r\n\r\n* Update Product.thumbnail resolver\r\n\r\n* Update OrderLine thumbnail resolver\r\n\r\n* Add missing ADDED_IN_35 and PREVIEW_FEATURE labels\r\n\r\n* Update account and product signals - ensure the image is deleted from storage\r\n\r\n* Refactor product_images methods\r\n\r\n* Add signal for product media image delete\r\n\r\n* Drop create_thumbnails method and not longer valid settings fields\r\n\r\n* Clean the ProcessedImage class\r\n\r\n* Drop versatileimagefield from INSTALLED_APPS\r\n\r\n* Update changelog\r\n\r\n* Drop comments from ThumbnailFormat\r\n\r\n* Add get_image_or_proxy_url method\r\n\r\n* Apply reiew suggestions - add ThumbnailField and use get_image_or_proxy_ur when it's possible\r\n\r\n* Update changelog\r\n\r\n* Replace ADDED_IN_35 with ADDED_IN_36 label\r\n\r\n* Update changelog\r\n\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", "code": "def retrieve_image(self):\n \n image = self.storage.open(self.image_path, \"rb\")\n image_format = self.get_image_metadata_from_file(image)\n return (Image.open(image), image_format)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 9, "random_cut": "def retrieve_image(self):\n \n image = self.s", "d_id": 5151, 
"documentation": { "docstring": "Return a PIL Image instance stored at `image_path`.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 159724, "commit_id": "58dbe260a2e41c31f1ab03e1abdb1f01da4c1edc", "repo": "numpy", "path": "numpy/polynomial/chebyshev.py", "file_name": "chebyshev.py", "fun_name": "chebval", "commit_message": "MAINT, DOC: discard repeated words", "code": "def chebval(x, c, tensor=True):\n \n c = np.array(c, ndmin=1, copy=True)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n if isinstance(x, (tuple, list)):\n x = np.asarray(x)\n if isinstance(x, np.ndarray) and tensor:\n c = c.reshape(c.shape + (1,)*x.ndim)\n\n if len(c) == 1:\n c0 = c[0]\n c1 = 0\n elif len(c) == 2:\n c0 = c[0]\n c1 = c[1]\n else:\n x2 = 2*x\n c0 = c[-2]\n c1 = c[-1]\n for i in range(3, len(c) + 1):\n tmp = c0\n c0 = c[-i] - c1\n c1 = tmp + c1*x2\n return c0 + c1*x\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 224, "n_words": 87, "vocab_size": 50, "complexity": 8, "nloc": 23, "token_counts": 196, "n_ast_nodes": 305, "n_identifiers": 27, "random_cut": "def chebval(x, c, tensor=True):\n \n c = np.array(c, ndmin=1, copy=True)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n if isinstance(x, (tuple, list)):\n x = np.asarray(x)\n if isinstance(x, np.ndarray) and tensor:\n c = c.reshape(c.shape + (1,)*x.ndim)\n\n if len(c) == 1:\n c0 = c[0]\n c1 = 0\n elif len(c) == 2:\n c0 = c[0]\n c1 = c[1]\n else:\n ", "d_id": 38408, "documentation": { "docstring": "\n Evaluate a Chebyshev series at points x.\n\n If `c` is of length `n + 1`, this function returns the value:\n\n .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)\n\n The parameter `x` is converted to an array only if it is a tuple or a\n list, otherwise it is treated as a scalar. In either case, either `x`\n or its elements must support multiplication and addition both with\n themselves and with the elements of `c`.\n\n If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If\n `c` is multidimensional, then the shape of the result depends on the\n value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +\n x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that\n scalars have shape (,).\n\n Trailing zeros in the coefficients will be used in the evaluation, so\n they should be avoided if efficiency is a concern.\n\n Parameters\n ----------\n x : array_like, compatible object\n If `x` is a list or tuple, it is converted to an ndarray, otherwise\n it is left unchanged and treated as a scalar. In either case, `x`\n or its elements must support addition and multiplication with\n themselves and with the elements of `c`.\n c : array_like\n Array of coefficients ordered so that the coefficients for terms of\n degree n are contained in c[n]. If `c` is multidimensional the\n remaining indices enumerate multiple polynomials. In the two\n dimensional case the coefficients may be thought of as stored in\n the columns of `c`.\n tensor : boolean, optional\n If True, the shape of the coefficient array is extended with ones\n on the right, one for each dimension of `x`. Scalars have dimension 0\n for this action. The result is that every column of coefficients in\n `c` is evaluated for every element of `x`. If False, `x` is broadcast\n over the columns of `c` for the evaluation. This keyword is useful\n when `c` is multidimensional. 
The default value is True.\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n values : ndarray, algebra_like\n The shape of the return value is described above.\n\n See Also\n --------\n chebval2d, chebgrid2d, chebval3d, chebgrid3d\n\n Notes\n -----\n The evaluation uses Clenshaw recursion, aka synthetic division.\n\n ", "n_words": 369, "vocab_size": 191, "n_whitespaces": 578, "language": "en" } }, { "id": 46033, "commit_id": "afd3c135c7d1815c56578d020625a33dc27fe640", "repo": "airflow", "path": "airflow/www/views.py", "file_name": "views.py", "fun_name": "dagrun_queued", "commit_message": "Add queue button to click-on-DagRun interface. (#21555)\n\n* Initial implementation of adding Queue button to DagRun interface\r\n\r\n* Implement the test cases\r\n\r\n* FIX Add all required MyPy ignores\r\n\r\n* FIX import\r\n\r\n* Update airflow/www/views.py\r\n\r\nFIX Documentation\r\n\r\nCo-authored-by: Brent Bovenzi \r\n\r\n* update modal UI\r\n\r\nCo-authored-by: Brent Bovenzi ", "code": "def dagrun_queued(self):\n \n dag_id = request.form.get('dag_id')\n dag_run_id = request.form.get('dag_run_id')\n confirmed = request.form.get('confirmed') == 'true'\n origin = get_safe_url(request.form.get('origin'))\n return self._mark_dagrun_state_as_queued(dag_id, dag_run_id, confirmed, origin)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 64, "n_ast_nodes": 112, "n_identifiers": 11, "random_cut": "def dagrun_queued(self):\n \n dag_i", "d_id": 8767, "documentation": { "docstring": "Queue DagRun so tasks that haven't run yet can be started.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 81752, "commit_id": "33c0fb79d66f56374d7c042ba79887faa85e2885", "repo": "awx", "path": "awx/main/tests/functional/api/test_job_runtime_params.py", "file_name": "test_job_runtime_params.py", "fun_name": "data_to_internal", "commit_message": "JT param everything (#12646)\n\n* Making almost all fields promptable on job templates and config models\r\n* Adding EE, IG and label access checks\r\n* Changing jobs preferred instance group function to handle the new IG cache field\r\n* Adding new ask fields to job template modules\r\n* Address unit/functional tests\r\n* Adding migration file", "code": "def data_to_internal(data):\n \n internal = data.copy()\n if 'extra_vars' in data:\n internal['extra_vars'] = json.loads(data['extra_vars'])\n if 'credentials' in data:\n internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials'])\n if 'inventory' in data:\n internal['inventory'] = Inventory.objects.get(pk=data['inventory'])\n if 'execution_environment' in data:\n internal['execution_environment'] = ExecutionEnvironment.objects.get(pk=data['execution_environment'])\n if 'labels' in data:\n internal['labels'] = [Label.objects.get(pk=_id) for _id in data['labels']]\n if 'instance_groups' in data:\n internal['instance_groups'] = [InstanceGroup.objects.get(pk=_id) for _id in data['instance_groups']]\n return internal\n\n\n# End of setup, tests start here\n@pytest.mark.django_db\n@pytest.mark.job_runtime_vars", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "@pytest.mark.django_db\n@pytest.mark.job_runtime_vars", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 136, "n_words": 70, "vocab_size": 41, "complexity": 10, "nloc": 15, "token_counts": 168, 
"n_ast_nodes": 314, "n_identifiers": 20, "random_cut": "def data_to_internal(data):\n \n internal = data.copy()\n if 'extra_vars' in data:\n internal['extra_vars'] = json.loads(data['extra_vars'])\n if 'credentials' in data:\n internal['credentials'] = set(Cr", "d_id": 17255, "documentation": { "docstring": "\n returns internal representation, model objects, dictionaries, etc\n as opposed to integer primary keys and JSON strings\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 26, "language": "en" } }, { "id": 127299, "commit_id": "ffe12a5f103b9f06d728429fc0d930b76523726f", "repo": "ray", "path": "python/ray/tune/progress_reporter.py", "file_name": "progress_reporter.py", "fun_name": "_generate_sys_info_str", "commit_message": "[Tune] Add rich output for ray tune progress updates in notebooks (#26263)\n\nThese changes are part of a series intended to improve integration with notebooks. This PR modifies the tune progress status shown to the user if tuning is run from a notebook.\r\n\r\nPreviously, part of the trial progress was reported in an HTML table before; now, all progress is displayed in an organized HTML template.\r\n\r\nSigned-off-by: pdmurray ", "code": "def _generate_sys_info_str(*sys_info) -> str:\n \n if sys_info:\n return \"
    \".join(sys_info).replace(\"\\n\", \"
    \")\n return \"\"\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 27, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 10, "token_counts": 28, "n_ast_nodes": 56, "n_identifiers": 5, "random_cut": "def _generate_sys_info_str(*sys_info) -> str:\n \n if sys_info:\n return \"
    \".join(sys_info).replace(\"\\n\", \"
    \")\n return \"\"\n\n", "d_id": 28411, "documentation": { "docstring": "Format system info into a string.\n *sys_info: System info strings to be included.\n\n Returns:\n Formatted string containing system information.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 278926, "commit_id": "3613c3defc39c236fb1592c4f7ba1a9cc887343a", "repo": "keras", "path": "keras/saving/saved_model/json_utils.py", "file_name": "json_utils.py", "fun_name": "default", "commit_message": "Remove pylint comments.\n\nPiperOrigin-RevId: 452353044", "code": "def default(self, obj):\n \n if isinstance(obj, tf.TensorShape):\n items = obj.as_list() if obj.rank is not None else None\n return {\"class_name\": \"TensorShape\", \"items\": items}\n return get_json_type(obj)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 23, "vocab_size": 20, "complexity": 3, "nloc": 5, "token_counts": 49, "n_ast_nodes": 82, "n_identifiers": 10, "random_cut": "def default(self, obj):\n \n if isinstance(obj, tf.TensorShape):\n items = obj.as_list() i", "d_id": 82758, "documentation": { "docstring": "Encodes objects for types that aren't handled by the default\n encoder.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 17, "language": "en" } }, { "id": 307669, "commit_id": "26251895295d74fcd2c73e37804c23675c433247", "repo": "core", "path": "homeassistant/components/forked_daapd/media_player.py", "file_name": "media_player.py", "fun_name": "_pause_and_wait_for_callback", "commit_message": "Use async_timeout in forked_daapd (#78451)", "code": "async def _pause_and_wait_for_callback(self):\n \n self._pause_requested = True\n await self.async_media_pause()\n try:", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "async def _pause_and_wait_for_callback(self):\n \"\"\"Send pause and wait for the pause callback to be received.\"\"\"\n self._pause_requested = True\n await self.async_media_pause()\n try:", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 37, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 9, "token_counts": 53, "n_ast_nodes": 34, "n_identifiers": 4, "random_cut": "async def _pause_and_wait_for_callback(self):\n \n self._pause_requested = True\n await self.async_media_pause()\n try:", "d_id": 106437, "documentation": { "docstring": "Send pause and wait for the pause callback to be received.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 132825, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/trainable.py", "file_name": "trainable.py", "fun_name": "delete_checkpoint", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def delete_checkpoint(self, checkpoint_path):\n \n # Ensure TrialCheckpoints are converted\n if isinstance(checkpoint_path, TrialCheckpoint):\n checkpoint_path = checkpoint_path.local_path\n\n try:\n checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)\n except FileNotFoundError:\n # The checkpoint won't exist locally if the\n # trial was rescheduled to another worker.\n logger.debug(\n f\"Local checkpoint not found during garbage collection: \"\n f\"{self.trial_id} - {checkpoint_path}\"\n )\n return\n else:\n if self.uses_cloud_checkpointing:\n self.storage_client.delete(self._storage_path(checkpoint_dir))\n\n if 
os.path.exists(checkpoint_dir):\n shutil.rmtree(checkpoint_dir)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 249, "n_words": 56, "vocab_size": 49, "complexity": 5, "nloc": 16, "token_counts": 80, "n_ast_nodes": 148, "n_identifiers": 22, "random_cut": "def delete_checkpoint(self, checkpoint_path):\n \n # Ensure TrialCheckpoints are converted\n if isinstance(checkpoint_path, TrialCheckpoint):\n checkpoint_path = checkpoint_path.local_path\n\n try:\n checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)\n except FileNotFoundError:\n # The checkpoint won't exist locally if the\n # trial was rescheduled to another worker.\n logger.debug(\n ", "d_id": 29817, "documentation": { "docstring": "Deletes local copy of checkpoint.\n\n Args:\n checkpoint_path (str): Path to checkpoint.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 36, "language": "en" } }, { "id": 133356, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/torch/torch_trainer.py", "file_name": "torch_trainer.py", "fun_name": "_resize_worker_group", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _resize_worker_group(self, state_dict, max_retries=10):\n \n old_workers = self.worker_group.num_workers\n self.worker_group.reset()\n\n time.sleep(1)\n for i in range(max_retries):\n new_workers = self.worker_group.new_workers_size()\n if new_workers:\n self._last_resize = time.time()\n startup_success = self._start_workers(int(new_workers))\n if not startup_success:\n logger.info(\n f\"Worker startup failed. Retrying \"\n f\"{max_retries-i-1} more times.\"\n )\n self.worker_group.reset()\n continue\n self.load_state_dict(state_dict, blocking=True)\n if self.use_local and new_workers == 1 and old_workers > 1:\n # Major hack. If we go from LocalDistributedRunner to a\n # standard TorchRunner we have to manually reset the\n # dummy actor handle global vars.\n # TODO(amog): Refactor LocalDistributedTorchRunner to\n # not use global variables for resource reservation.\n ray.util.sgd.torch.distributed_torch_runner._dummy_cuda_actor = None\n ray.util.sgd.torch.distributed_torch_runner._dummy_cpu_actor = None\n return\n else:\n delay = 2 ** i\n logger.warning(\"No new workers found. Retrying in %d sec.\" % delay)\n time.sleep(delay)\n raise RuntimeError(\"Exceeded max_retries for relaunching workers.\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 585, "n_words": 119, "vocab_size": 92, "complexity": 7, "nloc": 26, "token_counts": 169, "n_ast_nodes": 294, "n_identifiers": 33, "random_cut": "def _resize_worker_group(self, state_dict, max_retries=10):\n \n old_workers = self.worker_group.num_workers\n self.worker_group.reset()\n\n time.sleep(1)\n for i in range(max_retries):\n new_workers = self.worker_group.new_workers_size()\n if new_workers:\n self._last_resize = time.time()\n startup_success = self._start_workers(int(new_workers))\n if not startup_success:\n logger.info(\n f\"Worker startup failed. Retrying \"\n f\"{max_retries-i-1} more times.\"\n )\n self.worker_group.reset()\n continue\n self.load_state_dict(state_dict, blocking=True)\n if self.use_local and new_workers == 1 and old_workers > 1:\n # Major hack. 
If we go from LocalDistributedRunner to a\n # standard TorchRunner we have to manually reset the\n # dummy actor handle global vars.\n # TODO(amog): Refactor LocalDistributedTorchRunner to\n # not use global variables for resource reservation.\n ray.util.sgd.torch.distributed_torch", "d_id": 29987, "documentation": { "docstring": "Resizes the number of remote workers based on available resources.\n Total number of workers will never exceed `num_workers` amount.\n\n Args:\n state_dict (dict): The state dict to load to all workers.\n max_retries (int): How many times to attempt to resize workers\n before failing.\n ", "n_words": 42, "vocab_size": 35, "n_whitespaces": 100, "language": "en" } }, { "id": 244941, "commit_id": "6146a83cb898110ba0170f956903b74741a6ac37", "repo": "mmdetection", "path": "tests/test_datasets/test_pipelines/utils.py", "file_name": "utils.py", "fun_name": "check_result_same", "commit_message": "Refactor Autoaugment", "code": "def check_result_same(results, pipeline_results, check_keys):\n \n for key in check_keys:\n if results.get(key, None) is None:\n continue\n if isinstance(results[key], (BitmapMasks, PolygonMasks)):\n assert_allclose(pipeline_results[key].to_ndarray(),\n results[key].to_ndarray())\n else:\n assert_allclose(pipeline_results[key], results[key])\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 110, "n_words": 23, "vocab_size": 22, "complexity": 4, "nloc": 9, "token_counts": 77, "n_ast_nodes": 117, "n_identifiers": 11, "random_cut": "def check_result_same(results, pipeline_results, check_keys):\n \n for key in check_keys:\n if results.get(key, None) is None:\n continue\n if isinstance(results[key], (BitmapMasks, PolygonMasks)):\n assert_allclose(pipeline_results[key].to_ndarray(),\n results[key].to_ndarray())\n else:\n assert_allclose(pipeline_", "d_id": 70593, "documentation": { "docstring": "Check whether the ``pipeline_results`` is the same with the predefined\n ``results``.\n\n Args:\n results (dict): Predefined results which should be the standard\n output of the transform pipeline.\n pipeline_results (dict): Results processed by the transform\n pipeline.\n check_keys (tuple): Keys that need to be checked between\n results and pipeline_results.\n ", "n_words": 46, "vocab_size": 35, "n_whitespaces": 109, "language": "en" } }, { "id": 159308, "commit_id": "40d5139b3ec136b82e28cdc80d99076b9e6b1e6a", "repo": "rasa", "path": "scripts/release.py", "file_name": "release.py", "fun_name": "get_rasa_sdk_version", "commit_message": "add changelog for 3.0.6 release (#10771)\n\n* add changelog\r\n\r\n* update poetry.lock", "code": "def get_rasa_sdk_version() -> Text:\n \n\n dependencies_filename = \"pyproject.toml\"\n toml_data = toml.load(project_root() / dependencies_filename)\n try:\n sdk_version = toml_data[\"tool\"][\"poetry\"][\"dependencies\"][\"rasa-sdk\"]\n return sdk_version[1:].strip()\n except AttributeError:\n raise Exception(f\"Failed to find Rasa SDK version in {dependencies_filename}\")\n\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 65, "n_words": 29, "vocab_size": 27, "complexity": 2, "nloc": 9, "token_counts": 58, "n_ast_nodes": 110, "n_identifiers": 11, "random_cut": "def get_rasa_sdk_version() -> Text:\n \n\n dependencies_filename = \"pyproject.toml\"\n toml_data = toml.load(project_root() / dependencies_filename)\n try:\n ", "d_id": 38186, 
"documentation": { "docstring": "Find out what the referenced version of the Rasa SDK is.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 250287, "commit_id": "652d1669c5a103b1c20478770c4aaf18849c09a3", "repo": "synapse", "path": "tests/handlers/test_e2e_room_keys.py", "file_name": "test_e2e_room_keys.py", "fun_name": "test_update_omitted_version", "commit_message": "Add missing type hints to tests.handlers. (#14680)\n\nAnd do not allow untyped defs in tests.handlers.", "code": "def test_update_omitted_version(self) -> None:\n \n version = self.get_success(\n self.handler.create_version(\n self.local_user,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"first_version_auth_data\",\n },\n )\n )\n self.assertEqual(version, \"1\")\n\n self.get_success(\n self.handler.update_version(\n self.local_user,\n version,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"revised_first_version_auth_data\",\n },\n )\n )\n\n # check we can retrieve it as the current version\n res = self.get_success(self.handler.get_version_info(self.local_user))\n del res[\"etag\"] # etag is opaque, so don't test its contents\n self.assertDictEqual(\n res,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"revised_first_version_auth_data\",\n \"version\": version,\n \"count\": 0,\n },\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 464, "n_words": 68, "vocab_size": 47, "complexity": 1, "nloc": 33, "token_counts": 122, "n_ast_nodes": 214, "n_identifiers": 12, "random_cut": "def test_update_omitted_version(self) -> None:\n \n version = self.get_success(\n self.handler.create_version(\n self.local_user,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"first_version_auth_data\",\n },\n )\n )\n self.assertEqual(version, \"1\")\n\n self.get_success(\n self.handler.update_version(\n self.local_user,\n version,\n {\n \"algorithm\": \"m.megolm_backup.v1\",\n \"auth_data\": \"revised_first_version_auth_data\",\n },\n )\n )\n\n # check we can retrieve it as the current version\n res = self.get_success(self.handler.get_version_info(self.local_user))\n del res[\"etag\"] # etag is opaque, so don't test i", "d_id": 73365, "documentation": { "docstring": "Check that the update succeeds if the version is missing from the body", "n_words": 13, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 196273, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/plane.py", "file_name": "plane.py", "fun_name": "parameter_value", "commit_message": "Updated import locations", "code": "def parameter_value(self, other, u, v=None):\n \n from sympy.geometry.point import Point\n if not isinstance(other, GeometryEntity):\n other = Point(other, dim=self.ambient_dimension)\n if not isinstance(other, Point):\n raise ValueError(\"other must be a point\")\n if other == self.p1:\n return other\n if isinstance(u, Symbol) and v is None:\n delta = self.arbitrary_point(u) - self.p1\n eq = delta - (other - self.p1).unit\n sol = solve(eq, u, dict=True)\n elif isinstance(u, Symbol) and isinstance(v, Symbol):\n pt = self.arbitrary_point(u, v)\n sol = solve(pt - other, (u, v), dict=True)\n else:\n raise ValueError('expecting 1 or 2 symbols')\n if not sol:\n raise ValueError(\"Given point is not on %s\" % func_name(self))\n return sol[0] # {t: tval} or {u: uval, v: vval}\n", "url": 
"https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 285, "n_words": 104, "vocab_size": 73, "complexity": 9, "nloc": 20, "token_counts": 184, "n_ast_nodes": 286, "n_identifiers": 25, "random_cut": "def parameter_value(self, other, u, v=None):\n \n from sympy.geometry.point import Point\n if not isinstance(other, GeometryEntity):\n other = Point(other, dim=self.ambient_dimension)\n if not isinstance(other, Point):\n raise ValueError(\"o", "d_id": 47773, "documentation": { "docstring": "Return the parameter(s) corresponding to the given point.\n\n Examples\n ========\n\n >>> from sympy import pi, Plane\n >>> from sympy.abc import t, u, v\n >>> p = Plane((2, 0, 0), (0, 0, 1), (0, 1, 0))\n\n By default, the parameter value returned defines a point\n that is a distance of 1 from the Plane's p1 value and\n in line with the given point:\n\n >>> on_circle = p.arbitrary_point(t).subs(t, pi/4)\n >>> on_circle.distance(p.p1)\n 1\n >>> p.parameter_value(on_circle, t)\n {t: pi/4}\n\n Moving the point twice as far from p1 does not change\n the parameter value:\n\n >>> off_circle = p.p1 + (on_circle - p.p1)*2\n >>> off_circle.distance(p.p1)\n 2\n >>> p.parameter_value(off_circle, t)\n {t: pi/4}\n\n If the 2-value parameter is desired, supply the two\n parameter symbols and a replacement dictionary will\n be returned:\n\n >>> p.parameter_value(on_circle, u, v)\n {u: sqrt(10)/10, v: sqrt(10)/30}\n >>> p.parameter_value(off_circle, u, v)\n {u: sqrt(10)/5, v: sqrt(10)/15}\n ", "n_words": 139, "vocab_size": 91, "n_whitespaces": 335, "language": "en" } }, { "id": 211415, "commit_id": "d4e34fe165c09db65fd00113708be1b711ac957c", "repo": "PaddleDetection", "path": "ppdet/modeling/architectures/pose3d_metro.py", "file_name": "pose3d_metro.py", "fun_name": "orthographic_projection", "commit_message": "pose3d metro modeling (#6612)\n\n* pose3d metro modeling\r\n\r\n* delete extra comments", "code": "def orthographic_projection(X, camera):\n \n camera = camera.reshape((-1, 1, 3))\n X_trans = X[:, :, :2] + camera[:, :, 1:]\n shape = paddle.shape(X_trans)\n X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape)\n return X_2d\n\n\n@register", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "@register", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 48, "n_words": 31, "vocab_size": 25, "complexity": 1, "nloc": 6, "token_counts": 86, "n_ast_nodes": 137, "n_identifiers": 9, "random_cut": "def orthographic_projection(X, camera):\n \n camera = camera.reshape((-1, 1, 3))\n X_trans = X[:, :, :2] + camera[:, :, 1:]\n shape = paddle.shape(X_trans)\n X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape)\n return X_2d\n", "d_id": 53089, "documentation": { "docstring": "Perform orthographic projection of 3D points X using the camera parameters\n Args:\n X: size = [B, N, 3]\n camera: size = [B, 3]\n Returns:\n Projected 2D points -- size = [B, N, 2]\n ", "n_words": 33, "vocab_size": 24, "n_whitespaces": 63, "language": "en" } }, { "id": 133390, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/torch/worker_group.py", "file_name": "worker_group.py", "fun_name": "_load_state_id", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _load_state_id(self, state_id):\n \n remote_calls = [\n 
worker.load_state_stream.remote(state_id) for worker in self.remote_workers\n ]\n return remote_calls\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 45, "n_identifiers": 8, "random_cut": "def _load_state_id(self, state_id):\n \n remote_calls = [\n worker.load_state_stream.remote(state_id) for worker in self.remote_workers\n ]\n ", "d_id": 30003, "documentation": { "docstring": "Loads the object with id `state_id` to all workers.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 198278, "commit_id": "2a1afca9477eb781f16d5d6b63fa37abed7740a3", "repo": "sympy", "path": "sympy/geometry/line.py", "file_name": "line.py", "fun_name": "__new__", "commit_message": "Use sympify less", "code": "def __new__(cls, p1, pt=None, angle=None, **kwargs):\n p1 = Point(p1, dim=2)\n if pt is not None and angle is None:\n try:\n p2 = Point(pt, dim=2)\n except (NotImplementedError, TypeError, ValueError):\n raise ValueError(filldedent())\n if p1 == p2:\n raise ValueError('A Ray requires two distinct points.')\n elif angle is not None and pt is None:\n # we need to know if the angle is an odd multiple of pi/2\n angle = sympify(angle)\n c = _pi_coeff(angle)\n p2 = None\n if c is not None:\n if c.is_Rational:\n if c.q == 2:\n if c.p == 1:\n p2 = p1 + Point(0, 1)\n elif c.p == 3:\n p2 = p1 + Point(0, -1)\n elif c.q == 1:\n if c.p == 0:\n p2 = p1 + Point(1, 0)\n elif c.p == 1:\n p2 = p1 + Point(-1, 0)\n if p2 is None:\n c *= S.Pi\n else:\n c = angle % (2*S.Pi)\n if not p2:\n m = 2*c/S.Pi\n left = And(1 < m, m < 3) # is it in quadrant 2 or 3?\n x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True))\n y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), (tan(c), True)), True))\n p2 = p1 + Point(x, y)\n else:\n raise ValueError('A 2nd point or keyword \"angle\" must be used.')\n\n return LinearEntity2D.__new__(cls, p1, p2, **kwargs)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 788, "n_words": 210, "vocab_size": 113, "complexity": 17, "nloc": 41, "token_counts": 367, "n_ast_nodes": 557, "n_identifiers": 30, "random_cut": "def __new__(cls, p1, pt=None, angle=None, **kwargs):\n p1 = Point(p1, dim=2)\n if pt is not None and angle is None:\n try:\n p2 = Point(pt, dim=2)\n except (NotImplementedError, TypeError, ValueError):\n raise ValueError(filldedent())\n if p1 == p2:\n raise ValueError('A Ray requires two distinct points.')\n elif angle is not None and pt is None:\n # we need to know if the angle is an odd multiple of pi/2\n angle = sympify(angle)\n c = _pi_coeff(angle)\n p2 = None\n if c is not None:\n if c.is_Rational:\n if c.q == 2:\n if c.p == 1:\n p2 = p1 + Point(0, 1)\n elif c.p == 3:\n p2 = p1 + Point(0, -1)\n elif c.q == 1:\n if c.p == 0:\n p2 = p1 + Point(1, 0)\n elif c.p == 1:\n p2 = p1 + Point(-1, 0)\n if p2 is None:\n c *= S.Pi\n else:\n c = angle % (2*S.Pi)\n if not p2:\n m = 2*c/S.Pi\n left = And(1 < m, m < 3) # is it in quadrant 2 or 3?\n x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True))\n y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), ", "d_id": 48851, "documentation": { "docstring": "\n The 2nd argument was not a valid Point; if\n it was meant to be an angle it should be\n given with keyword 
\"angle\".", "n_words": 23, "vocab_size": 20, "n_whitespaces": 80, "language": "en" } }, { "id": 266772, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/delegation.py", "file_name": "delegation.py", "fun_name": "delegate_command", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def delegate_command(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None\n \n con = host_state.controller_profile.get_origin_controller_connection()\n working_directory = host_state.controller_profile.get_working_directory()\n host_delegation = not isinstance(args.controller, OriginConfig)\n\n if host_delegation:\n if data_context().content.collection:\n content_root = os.path.join(working_directory, data_context().content.collection.directory)\n else:\n content_root = os.path.join(working_directory, 'ansible')\n\n ansible_bin_path = os.path.join(working_directory, 'ansible', 'bin')\n\n with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as payload_file:\n create_payload(args, payload_file.name)\n con.extract_archive(chdir=working_directory, src=payload_file)\n else:\n content_root = working_directory\n ansible_bin_path = ANSIBLE_BIN_PATH\n\n command = generate_command(args, host_state.controller_profile.python, ansible_bin_path, content_root, exclude, require)\n\n if isinstance(con, SshConnection):\n ssh = con.settings\n else:\n ssh = None\n\n options = []\n\n if isinstance(args, IntegrationConfig) and args.controller.is_managed and all(target.is_managed for target in args.targets):\n if not args.allow_destructive:\n options.append('--allow-destructive')\n\n with support_container_context(args, ssh) as containers: # type: t.Optional[ContainerDatabase]\n if containers:\n options.extend(['--containers', json.dumps(containers.to_dict())])\n\n # Run unit tests unprivileged to prevent stray writes to the source tree.\n # Also disconnect from the network once requirements have been installed.\n if isinstance(args, UnitsConfig) and isinstance(con, DockerConnection):\n pytest_user = 'pytest'\n\n writable_dirs = [\n os.path.join(content_root, ResultType.JUNIT.relative_path),\n os.path.join(content_root, ResultType.COVERAGE.relative_path),\n ]\n\n con.run(['mkdir', '-p'] + writable_dirs)\n con.run(['chmod', '777'] + writable_dirs)\n con.run(['chmod', '755', working_directory])\n con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)])\n con.run(['useradd', pytest_user, '--create-home'])\n con.run(insert_options(command, options + ['--requirements-mode', 'only']))\n\n container = con.inspect()\n networks = container.get_network_names()\n\n if networks is not None:\n for network in networks:\n 
con.disconnect_network(network)\n else:\n display.warning('Network disconnection is not supported (this is normal under podman). '\n 'Tests will not be isolated from the network. Network-related tests may misbehave.')\n\n options.extend(['--requirements-mode', 'skip'])\n\n con.user = pytest_user\n\n success = False\n\n try:\n con.run(insert_options(command, options))\n success = True\n finally:\n if host_delegation:\n download_results(args, con, content_root, success)\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 770, "n_words": 231, "vocab_size": 154, "complexity": 16, "nloc": 57, "token_counts": 487, "n_ast_nodes": 803, "n_identifiers": 76, "random_cut": "def delegate_command(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None\n \n con = host_state.controller_profile.get_origin_controller_connection()\n working_directory = host_state.controller_profile.get_working_directory()\n host_delegation = not isinstance(args.controller, OriginConfig)\n\n if host_delegation:\n if data_context().content.collection:\n content_root = os.path.join(working_directory, data_context().content.collection.directory)\n else:\n content_root = os.path.join(working_directory, 'ansible')\n\n ansible_bin_path = os.path.join(working_directory, 'ansible', 'bin')\n\n with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as payl", "d_id": 78575, "documentation": { "docstring": "Delegate execution based on the provided host state.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 156292, "commit_id": "9000abdd43772a82dcbf7999c5126b571d698d8a", "repo": "dask", "path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "fun_name": "test_in_predicate_requires_an_iterable", "commit_message": "Check that values for the `in` predicate in `read_parquet` are correct (#8846)\n\nAs reported in #8720, the _flatten_filters function required the value\r\nto be hashable. This implicitly required the value to be a tuple,\r\nalthough lists and sets would also be appropriate since they support the\r\n'in' operation. _flatten_filters was only used to determine which\r\ncolumns should be filtered, so it can be refactored and removed.\r\n\r\nThe 'in' predicate also requires that the value be iterable.\r\nNon-iterable values are now caught and an appropriate message is raised\r\nindicating what the user needs to change. 
This must be done in two\r\nplaces to support both fastparquet and pyarrow.", "code": "def test_in_predicate_requires_an_iterable(tmp_path, engine, filter_value):\n \n path = tmp_path / \"gh_8720_pandas.parquet\"\n df = pd.DataFrame(\n {\"A\": [1, 2, 3, 4], \"B\": [1, 1, 2, 2]},\n )\n df.to_parquet(path, engine=engine)\n with pytest.raises(TypeError, match=\"Value of 'in' filter\"):\n dd.read_parquet(path, engine=engine, filters=filter_value)\n\n # pandas to_parquet outputs a single file, dask outputs a folder with global\n # metadata that changes the filtering code path\n ddf = dd.from_pandas(df, npartitions=2)\n path = tmp_path / \"gh_8720_dask.parquet\"\n ddf.to_parquet(path, engine=engine)\n with pytest.raises(TypeError, match=\"Value of 'in' filter\"):\n dd.read_parquet(path, engine=engine, filters=filter_value)\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 132, "n_words": 75, "vocab_size": 52, "complexity": 1, "nloc": 13, "token_counts": 137, "n_ast_nodes": 218, "n_identifiers": 19, "random_cut": "def test_in_predicate_requires_an_iterable(tmp_path, engine, filter_value):\n \n path = tmp_path / \"gh_8720_pandas.parquet\"\n df = pd.DataFrame(\n {\"A\": [1, 2, 3, 4], \"B\": [1, 1, 2, 2]},\n )\n df.to_parquet(path, engine=engine)\n with pytest.raises(TypeError, match=\"Value of 'in' filter\"):\n dd.read_parquet(path, engine=engine, filters=filter_value)\n\n # pandas to_parquet outputs a single file, dask outputs a folder with global\n # metadata that changes the filtering code path\n ddf = dd.from_pandas(df, npartitions=2)\n path = tmp_path / \"gh_8720_dask.parquet\"\n ddf.to_parquet(path, engine=engine)\n with pytest.raises(TypeError, match=", "d_id": 36629, "documentation": { "docstring": "Regression test for https://github.com/dask/dask/issues/8720", "n_words": 4, "vocab_size": 4, "n_whitespaces": 3, "language": "en" } }, { "id": 266768, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/config.py", "file_name": "config.py", "fun_name": "only_targets", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig]\n \n if not self.targets:\n raise Exception('There must be one or more targets.')\n\n assert type_guard(self.targets, target_type)\n\n return t.cast(t.List[THostConfig], self.targets)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 25, "vocab_size": 25, "complexity": 2, "nloc": 5, "token_counts": 44, "n_ast_nodes": 72, "n_identifiers": 10, "random_cut": "def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig]\n \n if not self.targets:\n raise Exception('There must be one or more targets.')\n\n assert type_guard(self.targets, targ", "d_id": 78571, "documentation": { "docstring": "\n Return a list of target host configurations.\n Requires that there are one or more targets, all the specified type.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 107070, "commit_id": "334cc617b8ed3b6b4ec6cb64ff16a040ef454149", "repo": "matplotlib", "path": "lib/matplotlib/widgets.py", "file_name": "widgets.py", "fun_name": "_get_animated_artists", "commit_message": "Fix z_order", "code": "def _get_animated_artists(self):\n \n return tuple([a for ax_ in self.ax.get_figure().get_axes()\n for a in ax_.get_children()\n if a.get_animated() and a not in self.artists])\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 75, "n_words": 19, "vocab_size": 15, "complexity": 5, "nloc": 4, "token_counts": 48, "n_ast_nodes": 78, "n_identifiers": 11, "random_cut": "def _get_animated_artists(self):\n \n return tuple([a for ax_ in self.ax.get_figure().get_axes()\n for a in ax_.g", "d_id": 22584, "documentation": { "docstring": "\n Convenience method to get all animated artists of a figure, except\n those already present in self.artists. 
'z_order' is ignored.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 109376, "commit_id": "438d30b227b1fef7e8733578f851e76a8e360f24", "repo": "matplotlib", "path": "lib/mpl_toolkits/axisartist/axislines.py", "file_name": "axislines.py", "fun_name": "new_gridlines", "commit_message": "Get rcParams from mpl", "code": "def new_gridlines(self, ax):\n \n gridlines = GridlinesCollection(\n None, transform=ax.transData, colors=mpl.rcParams['grid.color'],\n linestyles=mpl.rcParams['grid.linestyle'],\n linewidths=mpl.rcParams['grid.linewidth'])\n ax._set_artist_props(gridlines)\n gridlines.set_grid_helper(self)\n\n ax.axes._set_artist_props(gridlines)\n # gridlines.set_clip_path(self.axes.patch)\n # set_clip_path need to be deferred after Axes.cla is completed.\n # It is done inside the cla.\n\n return gridlines\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 131, "n_words": 35, "vocab_size": 31, "complexity": 1, "nloc": 9, "token_counts": 69, "n_ast_nodes": 114, "n_identifiers": 15, "random_cut": "def new_gridlines(self, ax):\n \n g", "d_id": 23558, "documentation": { "docstring": "\n Create and return a new GridlineCollection instance.\n\n *which* : \"major\" or \"minor\"\n *axis* : \"both\", \"x\" or \"y\"\n\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 47, "language": "en" } }, { "id": 20522, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/util.py", "file_name": "util.py", "fun_name": "duplicates_removed", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def duplicates_removed(it, already_seen=()):\n \n lst = []\n seen = set()\n for i in it:\n if i in seen or i in already_seen:\n continue\n lst.append(i)\n seen.add(i)\n return lst\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 73, "n_words": 26, "vocab_size": 19, "complexity": 4, "nloc": 14, "token_counts": 49, "n_ast_nodes": 82, "n_identifiers": 9, "random_cut": "def duplicates_removed(it, already_seen=()):\n \n lst = []\n seen = set()\n for i in it:\n if i in seen or i in already_seen:\n continue\n lst.append(i)\n seen.", "d_id": 3409, "documentation": { "docstring": "\n Returns a list with duplicates removed from the iterable `it`.\n\n Order is preserved.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 23, "language": "en" } }, { "id": 160110, "commit_id": "119bf865b15747bea815ec3ced10e2bbc1ba8de1", "repo": "numpy", "path": "numpy/ma/extras.py", "file_name": "extras.py", "fun_name": "masked_all", "commit_message": "DOC: fix data type of parameter shape (#21251)\n\n`np.ma.masked_all` uses `np.empty` under the hood, so the parameter\r\ndescription for shape in `masked_all` should be updated to match\r\nthat of `np.empty`.\r\n\r\nRelevant issue: 
#21203", "code": "def masked_all(shape, dtype=float):\n \n a = masked_array(np.empty(shape, dtype),\n mask=np.ones(shape, make_mask_descr(dtype)))\n return a\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 40, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 61, "n_identifiers": 11, "random_cut": "def masked_all(shape, dtype=float):\n \n a = masked_array(np.empty(shape, dtype),\n ", "d_id": 38490, "documentation": { "docstring": "\n Empty masked array with all elements masked.\n\n Return an empty masked array of the given shape and dtype, where all the\n data are masked.\n\n Parameters\n ----------\n shape : int or tuple of ints\n Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``.\n dtype : dtype, optional\n Data type of the output.\n\n Returns\n -------\n a : MaskedArray\n A masked array with all data masked.\n\n See Also\n --------\n masked_all_like : Empty masked array modelled on an existing array.\n\n Examples\n --------\n >>> import numpy.ma as ma\n >>> ma.masked_all((3, 3))\n masked_array(\n data=[[--, --, --],\n [--, --, --],\n [--, --, --]],\n mask=[[ True, True, True],\n [ True, True, True],\n [ True, True, True]],\n fill_value=1e+20,\n dtype=float64)\n\n The `dtype` parameter defines the underlying data type.\n\n >>> a = ma.masked_all((3, 3))\n >>> a.dtype\n dtype('float64')\n >>> a = ma.masked_all((3, 3), dtype=np.int32)\n >>> a.dtype\n dtype('int32')\n\n ", "n_words": 136, "vocab_size": 84, "n_whitespaces": 306, "language": "en" } }, { "id": 176676, "commit_id": "58b63cb57cd1747c23611ee0b46991a5be2db751", "repo": "networkx", "path": "networkx/algorithms/centrality/closeness.py", "file_name": "closeness.py", "fun_name": "closeness_centrality", "commit_message": "added example to closeness.py (#5645)\n\n* added example on closeness\r\n\r\n* docstring improvement", "code": "def closeness_centrality(G, u=None, distance=None, wf_improved=True):\n r\n if G.is_directed():\n G = G.reverse() # create a reversed graph view\n\n if distance is not None:\n # use Dijkstra's algorithm with specified attribute as edge weight\n path_length = functools.partial(\n nx.single_source_dijkstra_path_length, weight=distance\n )\n else:\n path_length = nx.single_source_shortest_path_length\n\n if u is None:\n nodes = G.nodes\n else:\n nodes = [u]\n closeness_centrality = {}\n for n in nodes:\n sp = path_length(G, n)\n totsp = sum(sp.values())\n len_G = len(G)\n _closeness_centrality = 0.0\n if totsp > 0.0 and len_G > 1:\n _closeness_centrality = (len(sp) - 1.0) / totsp\n # normalize to number of nodes-1 in connected part\n if wf_improved:\n s = (len(sp) - 1.0) / (len_G - 1)\n _closeness_centrality *= s\n closeness_centrality[n] = _closeness_centrality\n if u is not None:\n return closeness_centrality[u]\n else:\n return closeness_centrality\n\n\n@not_implemented_for(\"directed\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"directed\")", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 333, "n_words": 125, "vocab_size": 79, "complexity": 9, "nloc": 118, "token_counts": 186, "n_ast_nodes": 294, "n_identifiers": 25, "random_cut": "def closeness_centrality(G, u=None, distance=None, wf_improved=True):\n r\n if G.is_directed():\n G = G.reverse() # create a reversed graph view\n\n if distance is not None:\n # use Dijkstra's algorithm with specified attribute as edge weight\n 
path_length = functools.partial(\n nx.single_source_dijkstra_path_length, weight=distance\n )\n else:\n path_length = nx.single_source_shortest_path_length\n\n if u is None:\n nodes = G.nodes\n ", "d_id": 42034, "documentation": { "docstring": "Compute closeness centrality for nodes.\n\n Closeness centrality [1]_ of a node `u` is the reciprocal of the\n average shortest path distance to `u` over all `n-1` reachable nodes.\n\n .. math::\n\n C(u) = \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},\n\n where `d(v, u)` is the shortest-path distance between `v` and `u`,\n and `n-1` is the number of nodes reachable from `u`. Notice that the\n closeness distance function computes the incoming distance to `u`\n for directed graphs. To use outward distance, act on `G.reverse()`.\n\n Notice that higher values of closeness indicate higher centrality.\n\n Wasserman and Faust propose an improved formula for graphs with\n more than one connected component. The result is \"a ratio of the\n fraction of actors in the group who are reachable, to the average\n distance\" from the reachable actors [2]_. You might think this\n scale factor is inverted but it is not. As is, nodes from small\n components receive a smaller closeness value. Letting `N` denote\n the number of nodes in the graph,\n\n .. math::\n\n C_{WF}(u) = \\frac{n-1}{N-1} \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n u : node, optional\n Return only the value for node u\n\n distance : edge attribute key, optional (default=None)\n Use the specified edge attribute as the edge distance in shortest\n path calculations\n\n wf_improved : bool, optional (default=True)\n If True, scale by the fraction of nodes reachable. This gives the\n Wasserman and Faust improved formula. For single component graphs\n it is the same as the original formula.\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with closeness centrality as the value.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])\n >>> nx.closeness_centrality(G)\n {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75}\n\n See Also\n --------\n betweenness_centrality, load_centrality, eigenvector_centrality,\n degree_centrality, incremental_closeness_centrality\n\n Notes\n -----\n The closeness centrality is normalized to `(n-1)/(|G|-1)` where\n `n` is the number of nodes in the connected part of graph\n containing the node. If the graph is not completely connected,\n this algorithm computes the closeness centrality for each\n connected part separately scaled by that parts size.\n\n If the 'distance' keyword is set to an edge attribute key then the\n shortest-path length will be computed using Dijkstra's algorithm with\n that edge attribute as the edge weight.\n\n The closeness centrality uses *inward* distance to a node, not outward.\n If you want to use outword distances apply the function to `G.reverse()`\n\n In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the\n outward distance rather than the inward distance. If you use a 'distance'\n keyword and a DiGraph, your results will change between v2.2 and v2.3.\n\n References\n ----------\n .. [1] Linton C. Freeman: Centrality in networks: I.\n Conceptual clarification. Social Networks 1:215-239, 1979.\n https://doi.org/10.1016/0378-8733(78)90021-7\n .. [2] pg. 201 of Wasserman, S. 
and Faust, K.,\n Social Network Analysis: Methods and Applications, 1994,\n Cambridge University Press.\n ", "n_words": 467, "vocab_size": 258, "n_whitespaces": 708, "language": "en" } }, { "id": 249367, "commit_id": "46bd7f4ed9020bbed459c03a11c26d7f7c3093b0", "repo": "synapse", "path": "synapse/storage/databases/main/event_push_actions.py", "file_name": "event_push_actions.py", "fun_name": "_remove_old_push_actions_that_have_rotated", "commit_message": "Clarifications for event push action processing. (#13485)\n\n* Clarifies comments.\r\n* Fixes an erroneous comment (about return type) added in #13455\r\n (ec24813220f9d54108924dc04aecd24555277b99).\r\n* Clarifies the name of a variable.\r\n* Simplifies logic of pulling out the latest join for the requesting user.", "code": "async def _remove_old_push_actions_that_have_rotated(self) -> None:\n \n\n # We want to clear out anything that is older than a day that *has* already\n # been rotated.\n rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol(\n table=\"event_push_summary_stream_ordering\",\n keyvalues={},\n retcol=\"stream_ordering\",\n )\n\n max_stream_ordering_to_delete = min(\n rotated_upto_stream_ordering, self.stream_ordering_day_ago\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 131, "n_words": 38, "vocab_size": 34, "complexity": 3, "nloc": 18, "token_counts": 64, "n_ast_nodes": 72, "n_identifiers": 11, "random_cut": "async def _remove_old_push_actions_that_have_rotated(self) -> None:\n \n\n # We want to clear out anything that is older than a day that *has* already\n # been rotated.\n rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol(\n table=\"event_push_summary_stream_ordering\",\n keyvalues={},\n retcol=\"stream_ordering\",\n ", "d_id": 72870, "documentation": { "docstring": "Clear out old push actions that have been summarised.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 181606, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/export_tests.py", "file_name": "export_tests.py", "fun_name": "test_indent", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_indent():\n \n multiline_string = \n\n indented_multiline_string = \n\n assert indented_multiline_string == _indent(multiline_string, 4)\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 9, "complexity": 1, "nloc": 10, "token_counts": 20, "n_ast_nodes": 39, "n_identifiers": 4, "random_cut": "def test_indent():\n \n multiline_string = \n\n indented_multiline_string = \n\n", "d_id": 43394, "documentation": { "docstring": "Assert that indenting a multiline string by 4 spaces prepends 4 spaces before each new line.test\ntest1\ntest2\ntest3 test\n test1\n test2\n test3", "n_words": 23, "vocab_size": 18, "n_whitespaces": 31, "language": "en" } }, { "id": 254598, "commit_id": "2e70f6769ca9b9d0e859fbbd6854f3abc478897b", "repo": "onnx", "path": "onnx/tools/update_model_dims.py", "file_name": "update_model_dims.py", "fun_name": "update_inputs_outputs_dims", "commit_message": "six: remove all references (#3926)\n\n* six: remove all references\r\n\r\nONNX has dropped python 2 support and does not need this 
anymore.\r\n\r\nSigned-off-by: Chris Hua \r\n\r\n* six: use bytes for binary_type\r\n\r\nI misread the changes; the correct migration is binary_type -> bytes in python3.\r\nSigned-off-by: Christopher Hua \r\n\r\n* remove additional checks for Python version\r\n\r\nno more need to check for Python 3\r\n\r\nSigned-off-by: Christopher Hua \r\n\r\n* remove unused import\r\n\r\nSigned-off-by: Christopher Hua \r\n\r\nCo-authored-by: Ashwini Khade \r\nCo-authored-by: Chun-Wei Chen ", "code": "def update_inputs_outputs_dims(model, input_dims, output_dims): # type: (ModelProto, Dict[Text, List[Any]], Dict[Text, List[Any]]) -> ModelProto\n \n dim_param_set = set() # type: Set[Text]\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 19, "vocab_size": 16, "complexity": 5, "nloc": 19, "token_counts": 139, "n_ast_nodes": 28, "n_identifiers": 6, "random_cut": "def update_inputs_outputs_dims(model, input_dims, output_dims): # type: (ModelProto, Dict[Text, List[Any]], Dict[Text, List[Any]]) -> ModelProto\n \n dim_param_set = set() # type: Set[Text]\n", "d_id": 74702, "documentation": { "docstring": "\n This function updates the dimension sizes of the model's inputs and outputs to the values\n provided in input_dims and output_dims. if the dim value provided is negative, a unique dim_param\n will be set for that dimension.\n\n Example. if we have the following shape for inputs and outputs:\n shape(input_1) = ('b', 3, 'w', 'h')\n shape(input_2) = ('b', 4)\n and shape(output) = ('b', 'd', 5)\n\n The parameters can be provided as:\n input_dims = {\n \"input_1\": ['b', 3, 'w', 'h'],\n \"input_2\": ['b', 4],\n }\n output_dims = {\n \"output\": ['b', -1, 5]\n }\n\n Putting it together:\n model = onnx.load('model.onnx')\n updated_model = update_inputs_outputs_dims(model, input_dims, output_dims)\n onnx.save(updated_model, 'model.onnx')\n ", "n_words": 102, "vocab_size": 74, "n_whitespaces": 364, "language": "en" } }, { "id": 114801, "commit_id": "0c2fc2e6f9d32e8b6785890cdfd7a2bf320b4273", "repo": "mindsdb", "path": "mindsdb/integrations/lightwood_handler/lightwood_handler/utils.py", "file_name": "utils.py", "fun_name": "get_aliased_columns", "commit_message": "add utils file", "code": "def get_aliased_columns(aliased_columns, model_alias, targets, mode=None):\n \n for col in targets:\n if mode == 'input':\n if str(col.parts[0]) != model_alias and col.alias is not None:\n aliased_columns[aliased_columns.index(col.parts[-1])] = str(col.alias)\n\n if mode == 'output':\n if str(col.parts[0]) == model_alias and col.alias is not None:\n aliased_columns[aliased_columns.index('prediction')] = str(col.alias)\n\n return aliased_columns", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 118, "n_words": 43, "vocab_size": 28, "complexity": 8, "nloc": 9, "token_counts": 109, "n_ast_nodes": 173, "n_identifiers": 10, "random_cut": "def get_aliased_columns(aliased_columns, model_alias, targets, mode=None):\n \n for col in targets:\n if mode == 'input':\n if str(col.parts[0]) != model_alias and col.alias is not None:\n aliased_columns[aliased_columns.index(col.parts[-1])] = str(col.alias)\n\n if mode == 'output':\n if str(col.parts[0]) == model_alias and col.alias is not None:\n aliased_columns[aliased_columns.index('prediction')] = str(col.alias)\n\n return aliased_columns", "d_id": 25277, "documentation": { "docstring": " This method 
assumes mdb_sql will alert if there are two columns with the same alias ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 16, "language": "en" } }, { "id": 265786, "commit_id": "0d7851ed9de2792ea6d9ed223c315c235290ddd7", "repo": "netbox", "path": "netbox/utilities/urls.py", "file_name": "urls.py", "fun_name": "get_model_urls", "commit_message": "#9072: Implement a mechanism for dynamically registering model detail views", "code": "def get_model_urls(app_label, model_name):\n \n paths = []\n\n # Retrieve registered views for this model\n try:\n views = registry['views'][app_label][model_name]\n except KeyError:\n # No views have been registered for this model\n views = []\n\n for view in views:\n # Import the view class or function\n callable = import_string(view['path'])\n if issubclass(callable, View):\n callable = callable.as_view()\n # Create a path to the view\n paths.append(\n path(f\"{view['name']}/\", callable, name=f\"{model_name}_{view['name']}\", kwargs=view['kwargs'])\n )\n\n return paths\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 172, "n_words": 66, "vocab_size": 45, "complexity": 4, "nloc": 14, "token_counts": 88, "n_ast_nodes": 172, "n_identifiers": 17, "random_cut": "def get_model_urls(app_label, model_name):\n \n paths = []\n\n # Retrieve reg", "d_id": 78195, "documentation": { "docstring": "\n Return a list of URL paths for detail views registered to the given model.\n\n Args:\n app_label: App/plugin name\n model_name: Model name\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 45, "language": "en" } }, { "id": 22110, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/sessions.py", "file_name": "sessions.py", "fun_name": "head", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def head(self, url, **kwargs):\n r\n\n kwargs.setdefault(\"allow_redirects\", False)\n return self.request(\"HEAD\", url, **kwargs)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 9, "token_counts": 32, "n_ast_nodes": 52, "n_identifiers": 6, "random_cut": "def head(self, url, **kwargs):\n r\n\n kwargs.setdefault(\"allow_redirects\", False)\n return self.request", "d_id": 4186, "documentation": { "docstring": "Sends a HEAD request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :rtype: requests.Response\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 52, "language": "en" } }, { "id": 53293, "commit_id": "9efee44dbee3a326e3e754139f4ea0d721849561", "repo": "prefect", "path": "src/prefect/agent.py", "file_name": "agent.py", "fun_name": "get_and_submit_flow_runs", "commit_message": "Improve `prefect orion start` output", "code": "async def get_and_submit_flow_runs(self) -> List[FlowRun]:\n \n if not self.started:\n raise RuntimeError(\"Agent is not started. 
Use `async with OrionAgent()...`\")\n\n self.logger.debug(\"Checking for flow runs...\")\n\n submittable_runs = await self.client.read_flow_runs(\n sort=FlowRunSort.NEXT_SCHEDULED_START_TIME_ASC,\n flow_run_filter=self.flow_run_query_filter(),\n )\n\n for flow_run in submittable_runs:\n self.logger.info(f\"Submitting flow run '{flow_run.id}'\")\n self.submitting_flow_run_ids.add(flow_run.id)\n self.task_group.start_soon(\n self.submit_run,\n flow_run,\n )\n return submittable_runs\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 199, "n_words": 43, "vocab_size": 38, "complexity": 3, "nloc": 19, "token_counts": 93, "n_ast_nodes": 160, "n_identifiers": 24, "random_cut": "async def get_and_submit_flow_runs(self) -> List[FlowRun]:\n \n if not self.started:\n raise RuntimeError(\"Agent is not started. Use `async with OrionAgent()...`\")\n\n self.logger.debug(\"Checking for flow runs...\")\n\n submittable_runs = await self.client.read_flow_runs(\n sort=FlowRunSort.NEXT_SCHEDULED_S", "d_id": 10774, "documentation": { "docstring": "\n Queries for scheduled flow runs and submits them for execution in parallel\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 223543, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "get_quoted_string", "commit_message": "add python 3.10.4 for windows", "code": "def get_quoted_string(value):\n \n quoted_string = QuotedString()\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n quoted_string.append(token)\n token, value = get_bare_quoted_string(value)\n quoted_string.append(token)\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n quoted_string.append(token)\n return quoted_string, value\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 84, "n_words": 35, "vocab_size": 17, "complexity": 5, "nloc": 11, "token_counts": 77, "n_ast_nodes": 127, "n_identifiers": 9, "random_cut": "def get_quoted_string(value):\n \n quoted_string = QuotedString()\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n quoted_string.append(token)\n token, value = get_bare_quoted_string(value)\n quoted_string.append(token)\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n quoted_string.append(token)\n return quoted_string, value\n", "d_id": 56962, "documentation": { "docstring": "quoted-string = [CFWS] [CFWS]\n\n 'bare-quoted-string' is an intermediate class defined by this\n parser and not by the RFC grammar. 
It is the quoted string\n without any attached CFWS.\n ", "n_words": 29, "vocab_size": 25, "n_whitespaces": 42, "language": "en" } }, { "id": 3584, "commit_id": "c8ee3f834120aa365acaa90b5eb583ac52c476ca", "repo": "airbyte", "path": "airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/tests/test_core.py", "file_name": "test_core.py", "fun_name": "test_defined_keyword_exist_in_schema", "commit_message": "SAT: check for not allowed keywords `allOf`, `not` in connectors schema (#9851)\n\n* test_defined_keyword_exist_in_schema added\r\n\r\nSigned-off-by: Sergey Chvalyuk ", "code": "def test_defined_keyword_exist_in_schema(self, keyword, discovered_catalog):\n \n schemas_errors = []\n for stream_name, stream in discovered_catalog.items():\n check_result = find_keyword_schema(stream.json_schema, key=keyword)\n if check_result:\n schemas_errors.append(stream_name)\n\n assert not schemas_errors, f\"Found not allowed `{keyword}` keyword for selected streams: {schemas_errors}.\"\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 96, "n_words": 31, "vocab_size": 28, "complexity": 3, "nloc": 7, "token_counts": 52, "n_ast_nodes": 91, "n_identifiers": 13, "random_cut": "def test_defined_keyword_exist_in_schema(self, keyword, discovered_catalog):\n \n schemas_errors = []\n for stream_name, stream in discovered_catalog.items():\n check_result = find_keyword_schema(stream.json_schema, key=keyword)\n if check_result:\n schemas_errors", "d_id": 491, "documentation": { "docstring": "Checking for the presence of not allowed keywords within each json schema", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 204719, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/utils.py", "file_name": "utils.py", "fun_name": "normalize_path_patterns", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def normalize_path_patterns(patterns):\n \n patterns = [os.path.normcase(p) for p in patterns]\n dir_suffixes = {\"%s*\" % path_sep for path_sep in {\"/\", os.sep}}\n norm_patterns = []\n for pattern in patterns:\n for dir_suffix in dir_suffixes:\n if pattern.endswith(dir_suffix):\n norm_patterns.append(pattern[: -len(dir_suffix)])\n break\n else:\n norm_patterns.append(pattern)\n return norm_patterns\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 123, "n_words": 39, "vocab_size": 29, "complexity": 6, "nloc": 12, "token_counts": 86, "n_ast_nodes": 141, "n_identifiers": 15, "random_cut": "def normalize_path_patterns(patterns):\n \n patterns = [os.path.normcase(p) for p in patterns]\n dir_suffixes = {\"%s*\" % path_sep for path_sep in {\"/\", os.sep}}\n norm_patterns = []\n for pattern in patterns:\n for dir_suffix in dir_suffixes:\n if pattern.endswith(dir_suffix):\n norm_patterns.append(pattern[: -len(dir_suffix)])\n break\n else:\n norm_patt", "d_id": 50851, "documentation": { "docstring": "Normalize an iterable of glob style patterns based on OS.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 1383, "commit_id": "14892f3e25065f85fcca953eac681f50880c0c48", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/node_service/success_resp_message.py", "file_name": "success_resp_message.py", "fun_name": "get_protobuf_schema", "commit_message": "ADD new 
Syft ErrorResponseMessage", "code": "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n \n\n return ErrorResponseMessage_PB\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 14, "token_counts": 9, "n_ast_nodes": 18, "n_identifiers": 3, "random_cut": "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n \n\n return ErrorResponseMessage_PB\n", "d_id": 186, "documentation": { "docstring": "Return the type of protobuf object which stores a class of this type\n As a part of serialization and deserialization, we need the ability to\n lookup the protobuf object type directly from the object type. This\n static method allows us to do this.\n Importantly, this method is also used to create the reverse lookup ability within\n the metaclass of Serializable. In the metaclass, it calls this method and then\n it takes whatever type is returned from this method and adds an attribute to it\n with the type of this class attached to it. See the MetaSerializable class for\n details.\n :return: the type of protobuf object which corresponds to this class.\n :rtype: GeneratedProtocolMessageType\n ", "n_words": 112, "vocab_size": 63, "n_whitespaces": 189, "language": "en" } }, { "id": 248467, "commit_id": "2fc787c341ff540e5880932f116498ec0ed7a2c2", "repo": "synapse", "path": "tests/rest/media/test_media_retention.py", "file_name": "test_media_retention.py", "fun_name": "test_remote_media_cache_retention", "commit_message": "Add config options for media retention (#12732)", "code": "def test_remote_media_cache_retention(self) -> None:\n \n # Advance 31 days (in seconds)\n self.reactor.advance(31 * 24 * 60 * 60)\n\n # Check that media has been correctly purged.\n # Local media should be unaffected.\n # Remote media accessed <30 days ago should still exist.\n self._assert_if_mxc_uris_purged(\n purged=[\n (self.remote_server_name, self.remote_not_recently_accessed_media),\n ],\n not_purged=[\n (self.remote_server_name, self.remote_recently_accessed_media),\n (self.hs.config.server.server_name, self.local_recently_accessed_media),\n (\n self.hs.config.server.server_name,\n self.local_not_recently_accessed_media,\n ),\n (self.hs.config.server.server_name, self.local_never_accessed_media),\n ],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 287, "n_words": 59, "vocab_size": 47, "complexity": 1, "nloc": 20, "token_counts": 106, "n_ast_nodes": 158, "n_identifiers": 17, "random_cut": "def test_remote_media_cache_retention(self) -> None:\n \n # Advance 31 days (in seconds)\n self.reactor.advance(31 * 24 * 60 * 60)\n\n # Check that media has been correctly purged.\n # Local media should be unaffected.\n # Remote media accessed <30 days ago should still exist.\n self._assert_if_mxc_uris_purged(\n purged=[\n (self.remote_server_name, self.remote_not_recently_accessed_media),\n ", "d_id": 72295, "documentation": { "docstring": "\n Tests that entries from the remote media cache that have not been accessed\n recently is purged, while local media is unaffected.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 43, "language": "en" } }, { "id": 209791, "commit_id": "a2b7a28faff1db058dd22ce097a268e0ad5d1d33", "repo": "scapy", "path": "scapy/arch/windows/__init__.py", "file_name": "__init__.py", "fun_name": "setmodulation", "commit_message": "[Hinty] 
Core typing: windows (#3684)\n\n* Core typing: windows\r\n\r\nCo-authored-by: Pierre ", "code": "def setmodulation(self, modu):\n # type: (int) -> bool\n \n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11 # noqa: E501\n self._check_npcap_requirement()\n _modus = {\n 0: \"dsss\",\n 1: \"fhss\",\n 2: \"irbaseband\",\n 3: \"ofdm\",\n 4: \"hrdss\",\n 5: \"erp\",\n 6: \"ht\",\n 7: \"vht\",\n 8: \"ihv\",\n 9: \"mimo-ofdm\",\n 10: \"mimo-ofdm\",\n }\n m = _modus.get(modu, \"unknown\") if isinstance(modu, int) else modu\n return self._npcap_set(\"modu\", str(m))\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 232, "n_words": 54, "vocab_size": 50, "complexity": 2, "nloc": 17, "token_counts": 92, "n_ast_nodes": 159, "n_identifiers": 11, "random_cut": "def setmodulation(self, modu):\n # type: (int) -> bool\n \n # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11 # noqa: E501\n self._check_npcap_requirement()\n _modus = {\n 0: \"dsss\",\n 1: \"fhss\",\n 2: \"irbaseband\",\n 3: \"ofdm\",\n 4: \"hrdss\",\n 5: \"erp\",\n ", "d_id": 52781, "documentation": { "docstring": "Set the interface modulation. It can be:\n - 0: dsss\n - 1: fhss\n - 2: irbaseband\n - 3: ofdm\n - 4: hrdss\n - 5: erp\n - 6: ht\n - 7: vht\n - 8: ihv\n - 9: mimo-ofdm\n - 10: mimo-ofdm\n - the value directly\n Only available with Npcap.", "n_words": 48, "vocab_size": 35, "n_whitespaces": 174, "language": "en" } }, { "id": 215415, "commit_id": "ab4803984bce4a4de7cc10910e7310c4babf557e", "repo": "salt", "path": "salt/transport/rabbitmq.py", "file_name": "rabbitmq.py", "fun_name": "_on_connection_error", "commit_message": "Start to add base class defs", "code": "def _on_connection_error(self, connection, exception):\n \n log.error(\"Failed to connect\", exc_info=True)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 7, "random_cut": "def _on_connection_error(self, connection, exception):\n \n log.error(\"Failed to connect\", exc_info=True)\n", "d_id": 53959, "documentation": { "docstring": "\n Invoked by pika when connection on connection error\n :param connection:\n :param exception:\n :return:\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 49, "language": "en" } }, { "id": 168095, "commit_id": "62a69beddbedde349891378992c902c0b9341a9f", "repo": "pandas", "path": "pandas/core/indexes/multi.py", "file_name": "multi.py", "fun_name": "is_monotonic_decreasing", "commit_message": "DOC: Add numpydoc SS06 validation (#47885)", "code": "def is_monotonic_decreasing(self) -> bool:\n \n # monotonic decreasing if and only if reverse is monotonic increasing\n return self[::-1].is_monotonic_increasing\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 38, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 17, "n_ast_nodes": 32, "n_identifiers": 4, "random_cut": "def is_monotonic_decreasing(self) -> bool:\n ", "d_id": 40202, "documentation": { "docstring": "\n Return a boolean if the values are equal or decreasing.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 99591, "commit_id": 
"1730c481f1a8a71446326fa1ff72e10663016385", "repo": "sentry", "path": "tests/sentry/notifications/test_notifications.py", "file_name": "test_notifications.py", "fun_name": "test_sends_assignment_notification", "commit_message": "fix(notifications): Use `metrics_key` (#34572)", "code": "def test_sends_assignment_notification(self):\n \n\n url = f\"/api/0/issues/{self.group.id}/\"\n with self.tasks():\n response = self.client.put(url, format=\"json\", data={\"assignedTo\": self.user.username})\n assert response.status_code == 200, response.content\n\n msg = mail.outbox[0]\n # check the txt version\n assert f\"assigned {self.short_id} to themselves\" in msg.body\n # check the html version\n assert f\"{self.short_id} to themselves
    \" in msg.alternatives[0][0]\n\n attachment, text = get_attachment()\n\n assert text == f\"Issue assigned to {self.name} by themselves\"\n assert attachment[\"title\"] == self.group.title\n assert (\n attachment[\"footer\"]\n == f\"{self.project.slug} | \"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 201, "n_words": 70, "vocab_size": 50, "complexity": 1, "nloc": 15, "token_counts": 114, "n_ast_nodes": 230, "n_identifiers": 28, "random_cut": "def test_sends_assignment_notification(self):\n \n\n url = f\"/api/0/issues/{self.group.id}/\"\n with self.tasks():\n response = self.client.put(url, format=\"json\", data={\"assignedTo\": self.user.username})\n assert response.status_code == 200, response.content\n\n msg = mail.outbox[0]\n # check the txt version\n assert f\"assigned {self.s", "d_id": 19669, "documentation": { "docstring": "\n Test that an email AND Slack notification are sent with\n the expected values when an issue is assigned.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 202903, "commit_id": "0ab58c120939093fea90822f376e1866fc714d1f", "repo": "django", "path": "django/db/migrations/questioner.py", "file_name": "questioner.py", "fun_name": "_ask_default", "commit_message": "Refs #29026 -- Allowed customizing InteractiveMigrationQuestioner's prompt destination.\n\nPreviously, the questioner did not obey the value of stdout provided\nto the command.", "code": "def _ask_default(self, default=''):\n \n self.prompt_output.write('Please enter the default value as valid Python.')\n if default:\n self.prompt_output.write(\n f\"Accept the default '{default}' by pressing 'Enter' or \"\n f\"provide another value.\"\n )\n self.prompt_output.write(\n 'The datetime and django.utils.timezone modules are available, so '\n 'it is possible to provide e.g. timezone.now as a value.'\n )\n self.prompt_output.write(\"Type 'exit' to exit this prompt\")\n while True:\n if default:\n prompt = \"[default: {}] >>> \".format(default)\n else:\n prompt = \">>> \"\n self.prompt_output.write(prompt, ending='')\n code = input()\n if not code and default:\n code = default\n if not code:\n self.prompt_output.write(\"Please enter some code, or 'exit' (without quotes) to exit.\")\n elif code == \"exit\":\n sys.exit(1)\n else:\n try:\n return eval(code, {}, {'datetime': datetime, 'timezone': timezone})\n except (SyntaxError, NameError) as e:\n self.prompt_output.write('Invalid input: %s' % e)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 473, "n_words": 119, "vocab_size": 91, "complexity": 9, "nloc": 30, "token_counts": 158, "n_ast_nodes": 288, "n_identifiers": 18, "random_cut": "def _ask_default(self, default=''):\n \n self.prompt_output.write('Please enter the default value as valid Python.')\n if default:\n self.prompt_output.write(\n f\"Accept the default '{default}' by pressing 'Enter' or \"\n f\"provide another value.\"\n )\n self.prompt_output.write(\n 'The datetime and django.utils.timezone modules are available, so '\n 'it is possible to provide e.g. 
timezone.now as a valu", "d_id": 50164, "documentation": { "docstring": "\n Prompt for a default value.\n\n The ``default`` argument allows providing a custom default value (as a\n string) which will be shown to the user and used as the return value\n if the user doesn't provide any other input.\n ", "n_words": 38, "vocab_size": 31, "n_whitespaces": 74, "language": "en" } }, { "id": 186689, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/parser.py", "file_name": "parser.py", "fun_name": "reset_modules", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def reset_modules(self) -> None:\n \n self.modules = {}\n self.update_modules()\n self.parse_modules()\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 37, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 24, "n_ast_nodes": 44, "n_identifiers": 5, "random_cut": "def reset_modules(self) -> None:\n \n self.modules = {}\n self.update_modules", "d_id": 45596, "documentation": { "docstring": "Reset the loaded modules list. This is called from cleanup to clear\n temporarily loaded modules.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 21, "language": "en" } }, { "id": 133800, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/ppo/tests/test_ddppo.py", "file_name": "test_ddppo.py", "fun_name": "test_ddppo_compilation", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_ddppo_compilation(self):\n \n config = ppo.ddppo.DEFAULT_CONFIG.copy()\n config[\"num_gpus_per_worker\"] = 0\n num_iterations = 2\n\n for _ in framework_iterator(config, frameworks=\"torch\"):\n trainer = ppo.ddppo.DDPPOTrainer(config=config, env=\"CartPole-v0\")\n for i in range(num_iterations):\n results = trainer.train()\n check_train_results(results)\n print(results)\n # Make sure, weights on all workers are the same (including\n # local one).\n weights = trainer.workers.foreach_worker(lambda w: w.get_weights())\n for w in weights[1:]:\n check(w, weights[0])\n\n check_compute_single_action(trainer)\n trainer.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 259, "n_words": 56, "vocab_size": 45, "complexity": 4, "nloc": 15, "token_counts": 121, "n_ast_nodes": 202, "n_identifiers": 28, "random_cut": "def test_ddppo_compilation(self):\n ", "d_id": 30112, "documentation": { "docstring": "Test whether a DDPPOTrainer can be built with both frameworks.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 268862, "commit_id": "89879e2c76e86c685e44c47a6cdb82f7e645c142", "repo": "keras", "path": "keras/losses.py", "file_name": "losses.py", "fun_name": "log_cosh", "commit_message": "Fix keras docstrings\n\nPiperOrigin-RevId: 424275818", "code": "def log_cosh(y_true, y_pred):\n \n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n\n def _logcosh(x):\n return x + tf.math.softplus(-2. 
* x) - tf.cast(\n tf.math.log(2.), x.dtype)\n\n return backend.mean(_logcosh(y_pred - y_true), axis=-1)\n\n\n@keras_export('keras.metrics.categorical_crossentropy',\n 'keras.losses.categorical_crossentropy')\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export('keras.metrics.categorical_crossentropy',\n 'keras.losses.categorical_crossentropy')\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 56, "n_words": 30, "vocab_size": 26, "complexity": 1, "nloc": 5, "token_counts": 47, "n_ast_nodes": 157, "n_identifiers": 19, "random_cut": "def log_cosh(y_true, y_pred):\n \n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n\n def _logcosh(x):\n return x + tf.math.softplus(-", "d_id": 79731, "documentation": { "docstring": "Logarithm of the hyperbolic cosine of the prediction error.\n\n `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and\n to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly\n like the mean squared error, but will not be so strongly affected by the\n occasional wildly incorrect prediction.\n\n Standalone usage:\n\n >>> y_true = np.random.random(size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.logcosh(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> x = y_pred - y_true\n >>> assert np.allclose(\n ... loss.numpy(),\n ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - tf.math.log(2.), axis=-1),\n ... atol=1e-5)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.\n ", "n_words": 131, "vocab_size": 93, "n_whitespaces": 169, "language": "en" } }, { "id": 271597, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "make_predict_function", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def make_predict_function(self, force=False):\n \n if self.predict_function is not None and not force:\n return self.predict_function\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 38, "n_words": 13, "vocab_size": 11, "complexity": 6, "nloc": 17, "token_counts": 83, "n_ast_nodes": 41, "n_identifiers": 4, "random_cut": "def make_predict_function(self, force=False):\n \n if self.predict_function is not None and not force:\n return self.predict_function\n", "d_id": 80821, "documentation": { "docstring": "Creates a function that executes one step of inference.\n\n This method can be overridden to support custom inference logic.\n This method is called by `Model.predict` and `Model.predict_on_batch`.\n\n Typically, this method directly controls `tf.function` and\n `tf.distribute.Strategy` settings, and delegates the actual evaluation\n logic to `Model.predict_step`.\n\n This function is cached the first time `Model.predict` or\n `Model.predict_on_batch` is called. The cache is cleared whenever\n `Model.compile` is called. You can skip the cache and generate again the\n function with `force=True`.\n\n Args:\n force: Whether to regenerate the predict function and skip the cached\n function if available.\n\n Returns:\n Function. 
The function created by this method should accept a\n `tf.data.Iterator`, and return the outputs of the `Model`.\n ", "n_words": 110, "vocab_size": 71, "n_whitespaces": 232, "language": "en" } }, { "id": 260715, "commit_id": "c9d4e1f86e6d8c58441b1aa01d0a79f25cf3a999", "repo": "scikit-learn", "path": "sklearn/feature_selection/_rfe.py", "file_name": "_rfe.py", "fun_name": "fit", "commit_message": "MAINT Add parameter validation to RFE and RFECV. (#24137)\n\nCo-authored-by: Thomas J. Fan ", "code": "def fit(self, X, y, **fit_params):\n \n self._validate_params()\n return self._fit(X, y, **fit_params)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def fit(self, X, y, **fit_params):\n \n", "d_id": 76436, "documentation": { "docstring": "Fit the RFE model and then the underlying estimator on the selected features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The training input samples.\n\n y : array-like of shape (n_samples,)\n The target values.\n\n **fit_params : dict\n Additional parameters passed to the `fit` method of the underlying\n estimator.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ", "n_words": 58, "vocab_size": 45, "n_whitespaces": 176, "language": "en" } }, { "id": 55468, "commit_id": "21b8eed6887646c8c2a752961a84c855dd4fed22", "repo": "prefect", "path": "tests/cli/test_storage.py", "file_name": "test_storage.py", "fun_name": "test_invalid_number_selection_fails", "commit_message": "basic tests for storage cli", "code": "def test_invalid_number_selection_fails():\n \n number_string = \"99999999\"\n result = get_first_menu_and_fail(number_string)\n lines = result.stdout.splitlines()\n # Strange string addition are due to coloring, I believe\n assert lines[-1] == f\"\\x1b[31mInvalid selection {number_string}\\x1b[0m\"\n assert result.exit_code == 1\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 52, "n_words": 31, "vocab_size": 27, "complexity": 1, "nloc": 6, "token_counts": 38, "n_ast_nodes": 74, "n_identifiers": 8, "random_cut": "def test_invalid_number_selection_fails():\n \n number_string = \"99999999\"\n result = get_first_menu_and_fail(number_string)\n lines = result.stdout.splitlines()\n # Strange string addition are due to coloring, I believe\n assert lines[-1] == f\"\\x1b[31mInvalid selection {number_string}\\x1b[0m\"\n assert result.exit_code == 1\n\n", "d_id": 11321, "documentation": { "docstring": "\n We need to make sure that if we give an invalid number that the CLI\n will exit.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 27, "language": "en" } }, { "id": 197470, "commit_id": "2047f4855577845b1b99e0926b887d313725a6e7", "repo": "sympy", "path": "sympy/utilities/misc.py", "file_name": "misc.py", "fun_name": "filldedent", "commit_message": "Pass keyword arguments to filldedent() through to fill()", "code": "def filldedent(s, w=70, **kwargs):\n \n return '\\n' + fill(dedent(str(s)).strip('\\n'), width=w, **kwargs)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 38, 
"n_ast_nodes": 66, "n_identifiers": 9, "random_cut": "def filldedent(s, w=70, **kwargs):\n \n return '\\n' + fill(de", "d_id": 48569, "documentation": { "docstring": "\n Strips leading and trailing empty lines from a copy of ``s``, then dedents,\n fills and returns it.\n\n Empty line stripping serves to deal with docstrings like this one that\n start with a newline after the initial triple quote, inserting an empty\n line at the beginning of the string.\n\n Additional keyword arguments will be passed to ``textwrap.fill()``.\n\n See Also\n ========\n strlines, rawlines\n\n ", "n_words": 61, "vocab_size": 52, "n_whitespaces": 92, "language": "en" } }, { "id": 160788, "commit_id": "4ed458f16d9dd64554ccf49e315c5b8fb577d4cd", "repo": "numpy", "path": "numpy/lib/arraysetops.py", "file_name": "arraysetops.py", "fun_name": "in1d", "commit_message": "MAINT: change kind names for in1d\n\n- Switch dictionary->table, mergesort->sort", "code": "def in1d(ar1, ar2, assume_unique=False, invert=False, kind=None):\n \n # Ravel both arrays, behavior for the first array could be different\n ar1 = np.asarray(ar1).ravel()\n ar2 = np.asarray(ar2).ravel()\n\n # Ensure that iteration through object arrays yields size-1 arrays\n if ar2.dtype == object:\n ar2 = ar2.reshape(-1, 1)\n # Convert booleans to uint8 so we can use the fast integer algorithm\n if ar1.dtype == bool:\n ar1 = ar1.view(np.uint8)\n if ar2.dtype == bool:\n ar2 = ar2.view(np.uint8)\n\n # Check if we can use a fast integer algorithm:\n integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and\n np.issubdtype(ar2.dtype, np.integer))\n\n if kind not in {None, 'sort', 'table'}:\n raise ValueError(\n \"Invalid kind: {0}. \".format(kind)\n + \"Please use None, 'sort' or 'table'.\")\n\n if integer_arrays and kind in {None, 'table'}:\n ar2_min = np.min(ar2)\n ar2_max = np.max(ar2)\n\n ar2_range = int(ar2_max) - int(ar2_min)\n\n # Constraints on whether we can actually use the table method:\n range_safe_from_overflow = ar2_range < np.iinfo(ar2.dtype).max\n below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)\n\n # Optimal performance is for approximately\n # log10(size) > (log10(range) - 2.27) / 0.927.\n # However, here we set the requirement that by default\n # the intermediate array can only be 6x\n # the combined memory allocation of the original\n # arrays. See discussion on \n # https://github.com/numpy/numpy/pull/12065.\n\n if (\n range_safe_from_overflow and \n (below_memory_constraint or kind == 'table')\n ):\n\n if invert:\n outgoing_array = np.ones_like(ar1, dtype=bool)\n else:\n outgoing_array = np.zeros_like(ar1, dtype=bool)\n\n # Make elements 1 where the integer exists in ar2\n if invert:\n isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)\n isin_helper_ar[ar2 - ar2_min] = 0\n else:\n isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)\n isin_helper_ar[ar2 - ar2_min] = 1\n\n # Mask out elements we know won't work\n basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)\n outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -\n ar2_min]\n\n return outgoing_array\n elif kind == 'table': # not range_safe_from_overflow\n raise RuntimeError(\n \"You have specified kind='table', \"\n \"but the range of values in `ar2` exceeds the \"\n \"maximum integer of the datatype. \"\n \"Please set `kind` to None or 'sort'.\"\n )\n elif kind == 'table':\n raise ValueError(\n \"The 'table' method is only \"\n \"supported for boolean or integer arrays. 
\"\n \"Please select 'sort' or None for kind.\"\n )\n\n\n # Check if one of the arrays may contain arbitrary objects\n contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject\n\n # This code is run when\n # a) the first condition is true, making the code significantly faster\n # b) the second condition is true (i.e. `ar1` or `ar2` may contain\n # arbitrary objects), since then sorting is not guaranteed to work\n if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:\n if invert:\n mask = np.ones(len(ar1), dtype=bool)\n for a in ar2:\n mask &= (ar1 != a)\n else:\n mask = np.zeros(len(ar1), dtype=bool)\n for a in ar2:\n mask |= (ar1 == a)\n return mask\n\n # Otherwise use sorting\n if not assume_unique:\n ar1, rev_idx = np.unique(ar1, return_inverse=True)\n ar2 = np.unique(ar2)\n\n ar = np.concatenate((ar1, ar2))\n # We need this to be a stable sort, so always use 'mergesort'\n # here. The values from the first array should always come before\n # the values from the second array.\n order = ar.argsort(kind='mergesort')\n sar = ar[order]\n if invert:\n bool_ar = (sar[1:] != sar[:-1])\n else:\n bool_ar = (sar[1:] == sar[:-1])\n flag = np.concatenate((bool_ar, [invert]))\n ret = np.empty(ar.shape, dtype=bool)\n ret[order] = flag\n\n if assume_unique:\n return ret[:len(ar1)]\n else:\n return ret[rev_idx]\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1357, "n_words": 528, "vocab_size": 272, "complexity": 24, "nloc": 80, "token_counts": 598, "n_ast_nodes": 978, "n_identifiers": 56, "random_cut": "def in1d(ar1, ar2, assume_unique=False, invert=False, kind=None):\n \n # Ravel both arrays, behavior for the first array could be different\n ar1 = np.asarray(ar1).ravel()\n ar2 = np.asarray(ar2).ravel()\n\n # Ensure that iteration through object arrays yields size-1 arrays\n if ar2.dtype == object:\n ar2 = ar2.reshape(-1, 1)\n # Convert booleans to uint8 so we can use the fast integer algorithm\n if ar1.dtype == bool:\n ar1 = ar1.view(np.uint8)\n if ar2.dtype == bool:\n ar2 = ar2.view(np.uint8)\n\n # Check if we can use a fast integer algorithm:\n integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and\n np.issubdtype(ar2.dtype, np.integer))\n\n if kind not in {None, 'sort', 'table'}:\n raise ValueError(\n \"Invalid kind: {0}. \".format(kind)\n + \"Please use None, 'sort' or 'table'.\")\n\n if integer_arrays and kind in {None, 'table'}:\n ar2_min = np.min(ar2)\n ar2_max = np.max(ar2)\n\n ar2_range = int(ar2_max) - int(ar2_min)\n\n # Constraints on whether we can actually use the table method:\n range_safe_from_overflow = ar2_range < np.iinfo(ar2.dtype).max\n below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)\n\n # Optimal performance is for approximately\n # log10(size) > (log10(range) - 2.27) / 0.927.\n # However, here we set the requirement that by default\n # the intermediate array can only be 6x\n # the combined memory allocation of the original\n # arrays. 
See discussion on \n # https://github.com/numpy/numpy/pull/12065.\n\n if (\n range_safe_from_overflow and \n (below_memory_constraint or kind == 'table')\n ):\n\n if invert:\n outgoing_array = np.ones_like(ar1, dtype=bool)\n else:\n outgoing_array = np.zeros_like(ar1, dtype=bool)\n\n # Make elements 1 where the integer exists in ar2\n if invert:\n isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)\n isin_helper_ar[ar2 - ar2_min] = 0\n else:\n isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)\n isin_helper_ar[ar2 - ar2_min] = 1\n\n # Mask out elements we know won't work\n basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)\n outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -\n ar2_min]\n\n return outgoing_array\n elif kind == 'table': # not range_safe_from_overflow\n raise RuntimeError(\n \"You have specified kind='table', \"\n \"but the range of values in `ar2` exceeds the \"\n \"maximum integer of the datatype. \"\n \"Please set `kind` to None or 'sort'.\"\n )\n elif kind == 'table':\n raise ValueError(\n \"The 'table' method is only \"\n \"supported for boolean or integer arrays. \"\n \"Please select 'sort' or None for kind.\"\n )\n\n\n # Check if one of the arrays may contain arbitrary objects\n contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject\n\n # This code is run when\n # a) the first condition is true, making the code significantly faster\n # b) the second condition is true (i.e. `ar1` or `ar2` may contain\n # arbitrary objects), since then sorting is not guaranteed to work\n if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:\n if invert:\n mask = np.ones(len(ar1), dtype=bool)\n for a in ar2:\n mask &= (ar1 != a)\n else:\n mask = np.zeros(len(ar1), dtype=bool)\n for a in ar2:\n mask |= (ar1 == a)\n return mask\n\n # Otherwise use sorting\n if not assume_unique:\n ar1, rev_idx = np.unique(", "d_id": 38732, "documentation": { "docstring": "\n Test whether each element of a 1-D array is also present in a second array.\n\n Returns a boolean array the same length as `ar1` that is True\n where an element of `ar1` is in `ar2` and False otherwise.\n\n We recommend using :func:`isin` instead of `in1d` for new code.\n\n Parameters\n ----------\n ar1 : (M,) array_like\n Input array.\n ar2 : array_like\n The values against which to test each value of `ar1`.\n assume_unique : bool, optional\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n invert : bool, optional\n If True, the values in the returned array are inverted (that is,\n False where an element of `ar1` is in `ar2` and True otherwise).\n Default is False. ``np.in1d(a, b, invert=True)`` is equivalent\n to (but is faster than) ``np.invert(in1d(a, b))``.\n kind : {None, 'sort', 'table'}, optional\n The algorithm to use. This will not affect the final result,\n but will affect the speed. Default will select automatically\n based on memory considerations.\n\n * If 'sort', will use a mergesort-based approach. This will have\n a memory usage of roughly 6 times the sum of the sizes of\n `ar1` and `ar2`, not accounting for size of dtypes.\n * If 'table', will use a key-dictionary approach similar\n to a counting sort. This is only available for boolean and\n integer arrays. This will have a memory usage of the\n size of `ar1` plus the max-min value of `ar2`. 
This tends\n to be the faster method if the following formula is true:\n ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,\n but may use greater memory.\n * If `None`, will automatically choose 'table' if\n the required memory allocation is less than or equal to\n 6 times the sum of the sizes of `ar1` and `ar2`,\n otherwise will use 'sort'. This is done to not use\n a large amount of memory by default, even though\n 'table' may be faster in most cases.\n\n .. versionadded:: 1.8.0\n\n Returns\n -------\n in1d : (M,) ndarray, bool\n The values `ar1[in1d]` are in `ar2`.\n\n See Also\n --------\n isin : Version of this function that preserves the\n shape of ar1.\n numpy.lib.arraysetops : Module with a number of other functions for\n performing set operations on arrays.\n\n Notes\n -----\n `in1d` can be considered as an element-wise function version of the\n python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly\n equivalent to ``np.array([item in b for item in a])``.\n However, this idea fails if `ar2` is a set, or similar (non-sequence)\n container: As ``ar2`` is converted to an array, in those cases\n ``asarray(ar2)`` is an object array rather than the expected array of\n contained values.\n\n .. versionadded:: 1.4.0\n\n Examples\n --------\n >>> test = np.array([0, 1, 2, 5, 0])\n >>> states = [0, 2]\n >>> mask = np.in1d(test, states)\n >>> mask\n array([ True, False, True, False, True])\n >>> test[mask]\n array([0, 2, 0])\n >>> mask = np.in1d(test, states, invert=True)\n >>> mask\n array([False, True, False, True, False])\n >>> test[mask]\n array([1, 5])\n ", "n_words": 485, "vocab_size": 257, "n_whitespaces": 921, "language": "en" } }, { "id": 246202, "commit_id": "901b264c0c88f39cbfb8b2229e0dc57968882658", "repo": "synapse", "path": "tests/rest/admin/test_user.py", "file_name": "test_user.py", "fun_name": "test_devices", "commit_message": "Add type hints to `tests/rest/admin` (#11851)", "code": "def test_devices(self) -> None:\n \n # Login in as the user\n self._get_token()\n\n # Check that we don't see a new device in our devices list\n channel = self.make_request(\n \"GET\", \"devices\", b\"{}\", access_token=self.other_user_tok\n )\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n\n # We should only see the one device (from the login in `prepare`)\n self.assertEqual(len(channel.json_body[\"devices\"]), 1)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 124, "n_words": 50, "vocab_size": 42, "complexity": 1, "nloc": 8, "token_counts": 66, "n_ast_nodes": 111, "n_identifiers": 14, "random_cut": "def test_devices(self) -> None:\n \n # Login in as the user\n self._get_token()\n\n # Check that we don't see a new device in our devices list\n channel = self.make_request(\n \"GET\", \"devices\", b\"{}\", access_token=self.other_user_tok\n )\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n\n # We should only see the one device (from the login in `prepare`)\n self.assertEqual(len(", "d_id": 71096, "documentation": { "docstring": "Tests that logging in as a user doesn't create a new device for them.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 14346, "commit_id": "594effa279668bd955e98f1cd5c036b37d3bbd40", "repo": "pydantic", "path": "pydantic/_internal/_validation_functions.py", "file_name": "_validation_functions.py", "fun_name": "in_ipython", "commit_message": "Switching to 
`pydantic_core` (#4516)\n\n* working on core schema generation\r\n\r\n* adapting main.py\r\n\r\n* getting tests to run\r\n\r\n* fix tests\r\n\r\n* disable pyright, fix mypy\r\n\r\n* moving to class-based model generation\r\n\r\n* working on validators\r\n\r\n* change how models are created\r\n\r\n* start fixing test_main.py\r\n\r\n* fixing mypy\r\n\r\n* SelfType\r\n\r\n* recursive models working, more tests fixed\r\n\r\n* fix tests on <3.10\r\n\r\n* get docs build to pass\r\n\r\n* starting to cleanup types.py\r\n\r\n* starting works on custom types\r\n\r\n* working on using annotated-types\r\n\r\n* using annoated types for constraints\r\n\r\n* lots of cleanup, fixing network tests\r\n\r\n* network tests passing :tada:\r\n\r\n* working on types\r\n\r\n* working on types and cleanup\r\n\r\n* fixing UUID type, restructing again\r\n\r\n* more types and newer pydantic-core\r\n\r\n* working on Iterable\r\n\r\n* more test_types tests\r\n\r\n* support newer pydantic-core, fixing more test_types.py\r\n\r\n* working through more test_types.py\r\n\r\n* test_types.py at last passing locally :tada:\r\n\r\n* fixing more tests in test_types.py\r\n\r\n* fix datetime_parse tests and linting\r\n\r\n* get tests running again, rename to test_datetime.py\r\n\r\n* renaming internal modules\r\n\r\n* working through mypy errors\r\n\r\n* fixing mypy\r\n\r\n* refactoring _generate_schema.py\r\n\r\n* test_main.py passing\r\n\r\n* uprev deps\r\n\r\n* fix conftest and linting?\r\n\r\n* importing Annotated\r\n\r\n* ltining\r\n\r\n* import Annotated from typing_extensions\r\n\r\n* fixing 3.7 compatibility\r\n\r\n* fixing tests on 3.9\r\n\r\n* fix linting\r\n\r\n* fixing SecretField and 3.9 tests\r\n\r\n* customising get_type_hints\r\n\r\n* ignore warnings on 3.11\r\n\r\n* spliting repr out of utils\r\n\r\n* removing unused bits of _repr, fix tests for 3.7\r\n\r\n* more cleanup, removing many type aliases\r\n\r\n* clean up repr\r\n\r\n* support namedtuples and typeddicts\r\n\r\n* test is_union\r\n\r\n* removing errors, uprev pydantic-core\r\n\r\n* fix tests on 3.8\r\n\r\n* fixing private attributes and model_post_init\r\n\r\n* renaming and cleanup\r\n\r\n* remove unnecessary PydanticMetadata inheritance\r\n\r\n* fixing forward refs and mypy tests\r\n\r\n* fix signatures, change how xfail works\r\n\r\n* revert mypy tests to 3.7 syntax\r\n\r\n* correct model title\r\n\r\n* try to fix tests\r\n\r\n* fixing ClassVar forward refs\r\n\r\n* uprev pydantic-core, new error format\r\n\r\n* add \"force\" argument to model_rebuild\r\n\r\n* Apply suggestions from code review\r\n\r\nSuggestions from @tiangolo and @hramezani :pray:\r\n\r\nCo-authored-by: Hasan Ramezani \r\nCo-authored-by: Sebastián Ramírez \r\n\r\n* more suggestions from @tiangolo\r\n\r\n* extra -> json_schema_extra on Field\r\n\r\nCo-authored-by: Hasan Ramezani \r\nCo-authored-by: Sebastián Ramírez ", "code": "def in_ipython() -> bool:\n \n try:\n eval('__IPYTHON__')\n except NameError:\n return False\n else: # pragma: no cover\n return True\n", "url": "https://github.com/pydantic/pydantic.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 10, "token_counts": 22, "n_ast_nodes": 44, "n_identifiers": 4, "random_cut": "def in_ipython() -> bool:\n \n try:\n eval('__IPYTHON__')\n except NameError:\n ", "d_id": 2845, "documentation": { "docstring": "\n Check whether we're in an ipython environment, including jupyter notebooks.\n ", "n_words": 10, "vocab_size": 10, 
"n_whitespaces": 17, "language": "en" } }, { "id": 260097, "commit_id": "122876e9ab1ab494b4bf0ca3360d5a1527caf2e7", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_param_validation.py", "file_name": "test_param_validation.py", "fun_name": "test_stroptions_deprecated_internal_subset", "commit_message": "MNT Param validation: do not expose internal values in error msg (#23459)\n\n* allow to not expose internal valid params in error msg\n\n* ensure deprecated and internal do not overlap\n\n* deprecated and internal must be subsets of options\n\n* black", "code": "def test_stroptions_deprecated_internal_subset():\n \n with pytest.raises(ValueError, match=\"deprecated options must be a subset\"):\n StrOptions({\"a\", \"b\", \"c\"}, deprecated={\"a\", \"d\"})\n\n with pytest.raises(ValueError, match=\"internal options must be a subset\"):\n StrOptions({\"a\", \"b\", \"c\"}, internal={\"a\", \"d\"})\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 51, "n_words": 28, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 65, "n_ast_nodes": 124, "n_identifiers": 8, "random_cut": "def test_stroptions_deprecated_internal_subset():\n ", "d_id": 76072, "documentation": { "docstring": "Check that the deprecated and internal parameters must be subsets of options.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 207805, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_change_view_without_object_change_permission", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_change_view_without_object_change_permission(self):\n \n change_url = reverse(\"admin9:admin_views_article_change\", args=(self.a1.pk,))\n self.client.force_login(self.viewuser)\n response = self.client.get(change_url)\n self.assertEqual(response.context[\"title\"], \"View article\")\n self.assertContains(response, \"View article | Django site admin\")\n self.assertContains(response, \"
    View article
    \")\n self.assertContains(\n response,\n 'Close',\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 114, "n_words": 29, "vocab_size": 27, "complexity": 1, "nloc": 11, "token_counts": 81, "n_ast_nodes": 138, "n_identifiers": 15, "random_cut": "def test_change_view_without_object_change_permission(self):\n \n change_url = reverse(\"admin9:admin_views_article_change\", args=(self.a1.pk,))\n self.client.force_login(self.viewuser)\n response = self.client.get(change_url)\n self.assertEqual(response.context[\"title\"], \"View article\")\n self.assertContains(response, \"View article | Django site admin\")\n self.assertContains(response, \"
    View article
    \")\n self.assertContains(\n response,\n 'Close',\n )\n", "d_id": 52110, "documentation": { "docstring": "\n The object should be read-only if the user has permission to view it\n and change objects of that type but not to change the current object.\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 48, "language": "en" } }, { "id": 163015, "commit_id": "abd74363b3f911cbba79cfd4d78fb17521b39928", "repo": "pandas", "path": "pandas/__init__.py", "file_name": "__init__.py", "fun_name": "__getattr__", "commit_message": "API: hide NumericIndex from public top-level namespace in favor of pd.Index (#44819)", "code": "def __getattr__(name):\n import warnings\n\n if name in __deprecated_num_index_names:\n warnings.warn(\n f\"pandas.{name} is deprecated \"\n \"and will be removed from pandas in a future version. \"\n \"Use pandas.Index with the appropriate dtype instead.\",\n FutureWarning,\n stacklevel=2,\n )\n from pandas.core.api import Float64Index, Int64Index, UInt64Index\n\n return {\n \"Float64Index\": Float64Index,\n \"Int64Index\": Int64Index,\n \"UInt64Index\": UInt64Index,\n }[name]\n elif name == \"datetime\":\n warnings.warn(\n \"The pandas.datetime class is deprecated \"\n \"and will be removed from pandas in a future version. \"\n \"Import from datetime module instead.\",\n FutureWarning,\n stacklevel=2,\n )\n\n from datetime import datetime as dt\n\n return dt\n\n elif name == \"np\":\n\n warnings.warn(\n \"The pandas.np module is deprecated \"\n \"and will be removed from pandas in a future version. \"\n \"Import numpy directly instead.\",\n FutureWarning,\n stacklevel=2,\n )\n import numpy as np\n\n return np\n\n elif name in {\"SparseSeries\", \"SparseDataFrame\"}:\n warnings.warn(\n f\"The {name} class is removed from pandas. Accessing it from \"\n \"the top-level namespace will also be removed in the next version.\",\n FutureWarning,\n stacklevel=2,\n )\n\n return type(name, (), {})\n\n elif name == \"SparseArray\":\n\n warnings.warn(\n \"The pandas.SparseArray class is deprecated \"\n \"and will be removed from pandas in a future version. 
\"\n \"Use pandas.arrays.SparseArray instead.\",\n FutureWarning,\n stacklevel=2,\n )\n from pandas.core.arrays.sparse import SparseArray as _SparseArray\n\n return _SparseArray\n\n raise AttributeError(f\"module 'pandas' has no attribute '{name}'\")\n\n\n# module level doc-string\n__doc__ = \n\n# Use __all__ to let type checkers know what is part of the public API.\n# Pandas is not (yet) a py.typed library: the public API is determined\n# based on the documentation.\n__all__ = [\n \"BooleanDtype\",\n \"Categorical\",\n \"CategoricalDtype\",\n \"CategoricalIndex\",\n \"DataFrame\",\n \"DateOffset\",\n \"DatetimeIndex\",\n \"DatetimeTZDtype\",\n \"ExcelFile\",\n \"ExcelWriter\",\n \"Flags\",\n \"Float32Dtype\",\n \"Float64Dtype\",\n \"Grouper\",\n \"HDFStore\",\n \"Index\",\n \"IndexSlice\",\n \"Int16Dtype\",\n \"Int32Dtype\",\n \"Int64Dtype\",\n \"Int8Dtype\",\n \"Interval\",\n \"IntervalDtype\",\n \"IntervalIndex\",\n \"MultiIndex\",\n \"NA\",\n \"NaT\",\n \"NamedAgg\",\n \"Period\",\n \"PeriodDtype\",\n \"PeriodIndex\",\n \"RangeIndex\",\n \"Series\",\n \"SparseDtype\",\n \"StringDtype\",\n \"Timedelta\",\n \"TimedeltaIndex\",\n \"Timestamp\",\n \"UInt16Dtype\",\n \"UInt32Dtype\",\n \"UInt64Dtype\",\n \"UInt8Dtype\",\n \"api\",\n \"array\",\n \"arrays\",\n \"bdate_range\",\n \"concat\",\n \"crosstab\",\n \"cut\",\n \"date_range\",\n \"describe_option\",\n \"errors\",\n \"eval\",\n \"factorize\",\n \"get_dummies\",\n \"get_option\",\n \"infer_freq\",\n \"interval_range\",\n \"io\",\n \"isna\",\n \"isnull\",\n \"json_normalize\",\n \"lreshape\",\n \"melt\",\n \"merge\",\n \"merge_asof\",\n \"merge_ordered\",\n \"notna\",\n \"notnull\",\n \"offsets\",\n \"option_context\",\n \"options\",\n \"period_range\",\n \"pivot\",\n \"pivot_table\",\n \"plotting\",\n \"qcut\",\n \"read_clipboard\",\n \"read_csv\",\n \"read_excel\",\n \"read_feather\",\n \"read_fwf\",\n \"read_gbq\",\n \"read_hdf\",\n \"read_html\",\n \"read_json\",\n \"read_orc\",\n \"read_parquet\",\n \"read_pickle\",\n \"read_sas\",\n \"read_spss\",\n \"read_sql\",\n \"read_sql_query\",\n \"read_sql_table\",\n \"read_stata\",\n \"read_table\",\n \"read_xml\",\n \"reset_option\",\n \"set_eng_float_format\",\n \"set_option\",\n \"show_versions\",\n \"test\",\n \"testing\",\n \"timedelta_range\",\n \"to_datetime\",\n \"to_numeric\",\n \"to_pickle\",\n \"to_timedelta\",\n \"tseries\",\n \"unique\",\n \"value_counts\",\n \"wide_to_long\",\n]\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 1142, "n_words": 355, "vocab_size": 229, "complexity": 6, "nloc": 55, "token_counts": 187, "n_ast_nodes": 793, "n_identifiers": 25, "random_cut": "def __getattr__(name):\n import warnings\n\n if name in __deprecated_num_index_names:\n warnings.warn(\n f\"pandas.{name} is deprecated \"\n \"and will be removed from pandas in a future version. \"\n \"Use pandas.Index with the appropriate dtype instead.\",\n FutureWarning,\n stacklevel=2,\n )\n from pandas.core.api import Float64Index, Int64Index, UInt64Index\n\n return {\n \"Float64Index\": Float64Index,\n \"Int64Index\": Int64Index,\n \"UInt64Index\": UInt64Index,\n }[name]\n elif name == \"datetime\":\n warnings.warn(\n \"The pandas.datetime class is deprecated \"\n \"and will be removed from pandas in a future version. 
\"\n \"Import from datetime module instead.\",\n FutureWarning,\n stacklevel=2,\n )\n\n from datetime import datetime as dt\n\n return dt\n\n elif name == \"np\":\n\n warnings.warn(\n \"The pandas.np module is deprecated \"\n \"and will be removed from pandas in a future version. \"\n \"Import numpy directly instead.\",\n FutureWarning,\n stacklevel=2,\n )\n import numpy as np\n\n return np\n\n elif name in {\"SparseSeries\", \"SparseDataFrame\"}:\n warnings.warn(\n f\"The {name} class is removed from pandas. Accessing it from \"\n \"the top-level namespace will also be removed in the next version.\",\n FutureWarning,\n stacklevel=2,\n )\n\n retur", "d_id": 39358, "documentation": { "docstring": "\npandas - a powerful data analysis and manipulation library for Python\n=====================================================================\n\n**pandas** is a Python package providing fast, flexible, and expressive data\nstructures designed to make working with \"relational\" or \"labeled\" data both\neasy and intuitive. It aims to be the fundamental high-level building block for\ndoing practical, **real world** data analysis in Python. Additionally, it has\nthe broader goal of becoming **the most powerful and flexible open source data\nanalysis / manipulation tool available in any language**. It is already well on\nits way toward this goal.\n\nMain Features\n-------------\nHere are just a few of the things that pandas does well:\n\n - Easy handling of missing data in floating point as well as non-floating\n point data.\n - Size mutability: columns can be inserted and deleted from DataFrame and\n higher dimensional objects\n - Automatic and explicit data alignment: objects can be explicitly aligned\n to a set of labels, or the user can simply ignore the labels and let\n `Series`, `DataFrame`, etc. 
automatically align the data for you in\n computations.\n - Powerful, flexible group by functionality to perform split-apply-combine\n operations on data sets, for both aggregating and transforming data.\n - Make it easy to convert ragged, differently-indexed data in other Python\n and NumPy data structures into DataFrame objects.\n - Intelligent label-based slicing, fancy indexing, and subsetting of large\n data sets.\n - Intuitive merging and joining data sets.\n - Flexible reshaping and pivoting of data sets.\n - Hierarchical labeling of axes (possible to have multiple labels per tick).\n - Robust IO tools for loading data from flat files (CSV and delimited),\n Excel files, databases, and saving/loading data from the ultrafast HDF5\n format.\n - Time series-specific functionality: date range generation and frequency\n conversion, moving window statistics, date shifting and lagging.\n", "n_words": 289, "vocab_size": 187, "n_whitespaces": 321, "language": "en" } }, { "id": 215777, "commit_id": "6680407756aac9ee0eaf150f3a69622b658f7321", "repo": "salt", "path": "salt/modules/file.py", "file_name": "file.py", "fun_name": "readlink", "commit_message": "Use salt.utils.path.readlink to handle junctions on Windows", "code": "def readlink(path, canonicalize=False):\n \n path = os.path.expanduser(path)\n\n if not os.path.isabs(path):\n raise SaltInvocationError(\"Path to link must be absolute.\")\n\n if not os.path.islink(path):\n raise SaltInvocationError(\"A valid link was not specified.\")\n\n if canonicalize:\n return os.path.realpath(path)\n else:\n return salt.utils.path.readlink(path)\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 79, "n_words": 33, "vocab_size": 26, "complexity": 4, "nloc": 10, "token_counts": 77, "n_ast_nodes": 130, "n_identifiers": 11, "random_cut": "def readlink(path, canonicalize=False):\n \n path = os.path.expanduser(path)\n\n if not os.path.isabs(path):\n raise SaltInvocationError(\"Path to link must be absolute.\")\n\n if not os.path.islink(path):\n raise SaltInvocationError(\"A valid link was not specified", "d_id": 54169, "documentation": { "docstring": "\n .. versionadded:: 2014.1.0\n\n Return the path that a symlink points to\n If canonicalize is set to True, then it return the final target\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' file.readlink /path/to/link\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 58, "language": "en" } }, { "id": 196714, "commit_id": "fdaacf84861404f7857081bc85850a781a78979f", "repo": "sympy", "path": "sympy/testing/pytest.py", "file_name": "pytest.py", "fun_name": "warns_deprecated_sympy", "commit_message": "Note that warns_deprecated_sympy should not be used outside of the test suite", "code": "def warns_deprecated_sympy():\n \n with warns(SymPyDeprecationWarning):\n yield\n\n@contextlib.contextmanager", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "@contextlib.contextmanager", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 18, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 12, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def warns_deprecated_sympy():\n \n with warns(SymPyDeprecationWarning):\n yield\n\n@contextlib.contextmanager", "d_id": 48128, "documentation": { "docstring": "\n Shorthand for ``warns(SymPyDeprecationWarning)``\n\n This is the recommended way to test that ``SymPyDeprecationWarning`` is\n emitted for deprecated features in SymPy. To test for other warnings use\n ``warns``. To suppress warnings without asserting that they are emitted\n use ``ignore_warnings``.\n\n .. note::\n\n ``warns_deprecated_sympy()`` is only intended for internal use in the\n SymPy test suite to test that a deprecation warning triggers properly.\n All other code in the SymPy codebase, including documentation examples,\n should not use deprecated behavior.\n\n If you are a user of SymPy and you want to disable\n SymPyDeprecationWarnings, use ``warnings`` filters (see\n :ref:`silencing-sympy-deprecation-warnings`).\n\n >>> from sympy.testing.pytest import warns_deprecated_sympy\n >>> from sympy.utilities.exceptions import SymPyDeprecationWarning\n >>> with warns_deprecated_sympy():\n ... SymPyDeprecationWarning(\"Don't use\", feature=\"old thing\",\n ... deprecated_since_version=\"1.0\", issue=123).warn()\n\n >>> with warns_deprecated_sympy():\n ... pass\n Traceback (most recent call last):\n ...\n Failed: DID NOT WARN. No warnings of type \\\n SymPyDeprecationWarning was emitted. The list of emitted warnings is: [].\n\n ", "n_words": 143, "vocab_size": 97, "n_whitespaces": 256, "language": "en" } }, { "id": 160832, "commit_id": "cafec60a5e28af98fb8798049edd7942720d2d74", "repo": "numpy", "path": "numpy/testing/_private/utils.py", "file_name": "utils.py", "fun_name": "assert_array_equal", "commit_message": "ENH: Add strict parameter to assert_array_equal. 
(#21595)\n\nFixes #9542\r\n\r\nCo-authored-by: Bas van Beek <43369155+BvB93@users.noreply.github.com>", "code": "def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False):\n \n __tracebackhide__ = True # Hide traceback for py.test\n assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,\n verbose=verbose, header='Arrays are not equal',\n strict=strict)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 83, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 5, "token_counts": 51, "n_ast_nodes": 77, "n_identifiers": 11, "random_cut": "def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False):\n \n __tracebackhide__ =", "d_id": 38751, "documentation": { "docstring": "\n Raises an AssertionError if two array_like objects are not equal.\n\n Given two array_like objects, check that the shape is equal and all\n elements of these objects are equal (but see the Notes for the special\n handling of a scalar). An exception is raised at shape mismatch or\n conflicting values. In contrast to the standard usage in numpy, NaNs\n are compared like numbers, no assertion is raised if both objects have\n NaNs in the same positions.\n\n The usual caution for verifying equality with floating point numbers is\n advised.\n\n Parameters\n ----------\n x : array_like\n The actual object to check.\n y : array_like\n The desired, expected object.\n err_msg : str, optional\n The error message to be printed in case of failure.\n verbose : bool, optional\n If True, the conflicting values are appended to the error message.\n strict : bool, optional\n If True, raise an AssertionError when either the shape or the data\n type of the array_like objects does not match. The special\n handling for scalars mentioned in the Notes section is disabled.\n\n Raises\n ------\n AssertionError\n If actual and desired objects are not equal.\n\n See Also\n --------\n assert_allclose: Compare two array_like objects for equality with desired\n relative and/or absolute precision.\n assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal\n\n Notes\n -----\n When one of `x` and `y` is a scalar and the other is array_like, the\n function checks that each element of the array_like object is equal to\n the scalar. This behaviour can be disabled with the `strict` parameter.\n\n Examples\n --------\n The first assert does not raise an exception:\n\n >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],\n ... [np.exp(0),2.33333, np.nan])\n\n Assert fails with numerical imprecision with floats:\n\n >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],\n ... [1, np.sqrt(np.pi)**2, np.nan])\n Traceback (most recent call last):\n ...\n AssertionError:\n Arrays are not equal\n \n Mismatched elements: 1 / 3 (33.3%)\n Max absolute difference: 4.4408921e-16\n Max relative difference: 1.41357986e-16\n x: array([1. , 3.141593, nan])\n y: array([1. , 3.141593, nan])\n\n Use `assert_allclose` or one of the nulp (number of floating point values)\n functions for these cases instead:\n\n >>> np.testing.assert_allclose([1.0,np.pi,np.nan],\n ... [1, np.sqrt(np.pi)**2, np.nan],\n ... rtol=1e-10, atol=0)\n\n As mentioned in the Notes section, `assert_array_equal` has special\n handling for scalars. 
Here the test checks that each value in `x` is 3:\n\n >>> x = np.full((2, 5), fill_value=3)\n >>> np.testing.assert_array_equal(x, 3)\n\n Use `strict` to raise an AssertionError when comparing a scalar with an\n array:\n\n >>> np.testing.assert_array_equal(x, 3, strict=True)\n Traceback (most recent call last):\n ...\n AssertionError:\n Arrays are not equal\n \n (shapes (2, 5), () mismatch)\n x: array([[3, 3, 3, 3, 3],\n [3, 3, 3, 3, 3]])\n y: array(3)\n\n The `strict` parameter also ensures that the array data types match:\n\n >>> x = np.array([2, 2, 2])\n >>> y = np.array([2., 2., 2.], dtype=np.float32)\n >>> np.testing.assert_array_equal(x, y, strict=True)\n Traceback (most recent call last):\n ...\n AssertionError:\n Arrays are not equal\n \n (dtypes int64, float32 mismatch)\n x: array([2, 2, 2])\n y: array([2., 2., 2.], dtype=float32)\n ", "n_words": 461, "vocab_size": 239, "n_whitespaces": 937, "language": "en" } }, { "id": 110849, "commit_id": "f16da868d016363c4cd734b2abd6535230b094df", "repo": "matplotlib", "path": "lib/matplotlib/transforms.py", "file_name": "transforms.py", "fun_name": "update_from_data_x", "commit_message": "[Doc] Fix ndarray-links for arguments", "code": "def update_from_data_x(self, x, ignore=None):\n \n x = np.ravel(x)\n self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]),\n ignore=ignore, updatey=False)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 64, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 50, "n_ast_nodes": 77, "n_identifiers": 11, "random_cut": "def update_from_data_x(self, x, ignore=None):\n \n x = np.ravel(x)\n self.update", "d_id": 24328, "documentation": { "docstring": "\n Update the x-bounds of the `Bbox` based on the passed in data. 
After\n updating, the bounds will have positive *width*, and *x0* will be the\n minimal value.\n\n Parameters\n ----------\n x : `~numpy.ndarray`\n Array of x-values.\n\n ignore : bool, optional\n - When ``True``, ignore the existing bounds of the `Bbox`.\n - When ``False``, include the existing bounds of the `Bbox`.\n - When ``None``, use the last value passed to :meth:`ignore`.\n ", "n_words": 69, "vocab_size": 45, "n_whitespaces": 167, "language": "en" } }, { "id": 202417, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_https_malformed_referer", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_https_malformed_referer(self):\n \n malformed_referer_msg = \"Referer checking failed - Referer is malformed.\"\n req = self._get_POST_request_with_token()\n req._is_secure_override = True\n req.META[\"HTTP_REFERER\"] = \"http://http://www.example.com/\"\n mw = CsrfViewMiddleware(post_form_view)\n self._check_referer_rejects(mw, req)\n response = mw.process_view(req, post_form_view, (), {})\n self.assertContains(\n response,\n \"Referer checking failed - Referer is insecure while host is secure.\",\n status_code=403,\n )\n # Empty\n req.META[\"HTTP_REFERER\"] = \"\"\n self._check_referer_rejects(mw, req)\n response = mw.process_view(req, post_form_view, (), {})\n self.assertContains(response, malformed_referer_msg, status_code=403)\n # Non-ASCII\n req.META[\"HTTP_REFERER\"] = \"ØBöIß\"\n self._check_referer_rejects(mw, req)\n response = mw.process_view(req, post_form_view, (), {})\n self.assertContains(response, malformed_referer_msg, status_code=403)\n # missing scheme\n # >>> urlparse('//example.com/')\n # ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='')\n req.META[\"HTTP_REFERER\"] = \"//example.com/\"\n self._check_referer_rejects(mw, req)\n response = mw.process_view(req, post_form_view, (), {})\n self.assertContains(response, malformed_referer_msg, status_code=403)\n # missing netloc\n # >>> urlparse('https://')\n # ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='')\n req.META[\"HTTP_REFERER\"] = \"https://\"\n self._check_referer_rejects(mw, req)\n response = mw.process_view(req, post_form_view, (), {})\n self.assertContains(response, malformed_referer_msg, status_code=403)\n # Invalid URL\n # >>> urlparse('https://[')\n # ValueError: Invalid IPv6 URL\n req.META[\"HTTP_REFERER\"] = \"https://[\"\n self._check_referer_rejects(mw, req)\n response = mw.process_view(req, post_form_view, (), {})\n self.assertContains(response, malformed_referer_msg, status_code=403)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 477, "n_words": 157, "vocab_size": 65, "complexity": 1, "nloc": 33, "token_counts": 292, "n_ast_nodes": 472, "n_identifiers": 15, "random_cut": "def test_https_malformed_referer(self):\n \n malformed_referer_msg = \"Referer checking failed - Referer is malformed.\"\n req = self._get_POST_request_with_token()\n req._is_secure_override = True\n req.META[\"HTTP_REFERER\"] = \"http://http://www.example.com/\"\n mw = CsrfViewMiddleware(post_form_view)\n self._check_referer_rejects(mw, req)\n response = mw.process_view(req, post_form_view, (), {})\n self.assertContains(\n response,\n \"Referer checking failed - Referer is insecure while host is secure.\",\n status_code=403,\n )\n # Empty\n 
req.META[\"HTTP_REFERER\"] = \"\"\n self._check_referer_r", "d_id": 50119, "documentation": { "docstring": "\n A POST HTTPS request with a bad referer is rejected.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 102162, "commit_id": "bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d", "repo": "pytorch", "path": "tools/test/test_gen_backend_stubs.py", "file_name": "test_gen_backend_stubs.py", "fun_name": "test_missing_cpp_namespace", "commit_message": "Revert \"Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels\" (#69950)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/69950\n\nThis reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa.\n\nTest Plan: Imported from OSS\n\nReviewed By: albanD\n\nDifferential Revision: D33113545\n\nPulled By: bdhirsh\n\nfbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288", "code": "def test_missing_cpp_namespace(self) -> None:\n yaml_str = \n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, )\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 7, "token_counts": 26, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def test_missing_cpp_namespace(self) -> None:\n y", "d_id": 21477, "documentation": { "docstring": "\\\nbackend: XLA\nsupported:\n- absYou must provide a value for \"cpp_namespace\"", "n_words": 12, "vocab_size": 12, "n_whitespaces": 8, "language": "en" } }, { "id": 177506, "commit_id": "d82815dba6c8ddce19cd49f700298dc82a58f066", "repo": "networkx", "path": "networkx/algorithms/shortest_paths/weighted.py", "file_name": "weighted.py", "fun_name": "single_source_dijkstra_path_length", "commit_message": "Hide edges with a weight of None in A*. 
(#5945)\n\n* Hide edges with a weight of None in A*.\r\n\r\nThis matches the Dijkstra's weight interface.\r\n\r\n* Update Dijkstra's and A* docs for weights of None.\r\n\r\n* Add tests for A* with weight of None.\r\n\r\n* Add another test for A* with a weight function.\r\n\r\n* Document that None indicates a hidden edge.", "code": "def single_source_dijkstra_path_length(G, source, cutoff=None, weight=\"weight\"):\n \n return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 33, "n_ast_nodes": 50, "n_identifiers": 6, "random_cut": "def single_source_dijkstra_path_length(G, source, cutoff=None, weight=\"weight\"):\n \n return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)\n\n", "d_id": 42410, "documentation": { "docstring": "Find shortest weighted path lengths in G from a source node.\n\n Compute the shortest path length between source and all other\n reachable nodes for a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node label\n Starting node for path\n\n cutoff : integer or float, optional\n Length (sum of edge weights) at which the search is stopped.\n If cutoff is provided, only return paths with summed weight <= cutoff.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number or None to indicate a hidden edge.\n\n Returns\n -------\n length : dict\n Dict keyed by node to shortest path length from source.\n\n Raises\n ------\n NodeNotFound\n If `source` is not in `G`.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> length = nx.single_source_dijkstra_path_length(G, 0)\n >>> length[4]\n 4\n >>> for node in [0, 1, 2, 3, 4]:\n ... 
print(f\"{node}: {length[node]}\")\n 0: 0\n 1: 1\n 2: 2\n 3: 3\n 4: 4\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n The weight function can be used to hide edges by returning None.\n So ``weight = lambda u, v, d: 1 if d['color']==\"red\" else None``\n will find the shortest red path.\n\n See Also\n --------\n single_source_dijkstra, single_source_bellman_ford_path_length\n\n ", "n_words": 289, "vocab_size": 174, "n_whitespaces": 512, "language": "en" } }, { "id": 281536, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/behavioural_analysis/ba_controller.py", "file_name": "ba_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n has_ticker_start = \"\" if self.ticker else \"[unvl]\"\n has_ticker_end = \"\" if self.ticker else \"[/unvl]\"\n help_text = f\n console.print(text=help_text, menu=\"Stocks - Behavioural Analysis\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 51, "n_words": 24, "vocab_size": 18, "complexity": 3, "nloc": 40, "token_counts": 39, "n_ast_nodes": 119, "n_identifiers": 11, "random_cut": "def print_help(self):\n has_ticker_start = \"\" if self.ticker else \"[unvl]\"\n has_ticker_end = \"\" if self.ticker else \"[/unvl]\"\n help_text = f\n c", "d_id": 83834, "documentation": { "docstring": "[cmds]\n load load a specific stock ticker for analysis\n\n[param]Ticker: [/param]{self.ticker.upper() or None}\n{has_ticker_start}\n[src][Finbrain][/src]\n headlines sentiment from 15+ major news headlines\n[src][Finnhub][/src]\n stats sentiment stats including comparison with sector{has_ticker_end}\n[src][Reddit][/src]\n wsb show what WSB gang is up to in subreddit wallstreetbets\n watchlist show other users watchlist\n popular show popular tickers\n spac_c show other users spacs announcements from subreddit SPACs community\n spac show other users spacs announcements from other subs{has_ticker_start}\n getdd gets due diligence from another user's post{has_ticker_end}\n[src][Stocktwits][/src]\n trending trending stocks\n stalker stalk stocktwits user's last messages{has_ticker_start}\n bullbear estimate quick sentiment from last 30 messages on board\n messages output up to the 30 last messages on the board\n[src][Twitter][/src]\n infer infer about stock's sentiment from latest tweets\n sentiment in-depth sentiment prediction from tweets over time\n[src][Google][/src]\n mentions interest over time based on stock's mentions\n regions regions that show highest interest in stock\n queries top related queries with this stock\n rise top rising related queries with stock{has_ticker_end}\n[src][SentimentInvestor][/src]\n popularsi show most popular stocks on social media right now\n emerging show stocks that are being talked about more than usual{has_ticker_start}\n metrics core social sentiment metrics for this stock\n social social media figures for stock popularity\n historical plot the past week of data for a selected metric{has_ticker_end}[/cmds]\n ", "n_words": 205, "vocab_size": 124, "n_whitespaces": 425, "language": "en" } }, { "id": 263916, "commit_id": "299ff39d1260cf80241b81ed39bbb89b78709583", "repo": "pyinstaller", "path": "PyInstaller/lib/modulegraph/util.py", "file_name": "util.py", "fun_name": "iterate_instructions", "commit_message": "Make modulegraph EXTENDED_ARG opcode aware (#7055).\n\nFix :class:`AssertionError` during build when analysing a ``.pyc`` file\ncontaining more that 255 variable names followed by an import statement all in\nthe same namespace.\n\nFixes #7055.", "code": "def iterate_instructions(code_object):\n \n # The arg extension the EXTENDED_ARG opcode represents is automatically handled by get_instructions() but the\n # instruction is left in. 
Get rid of it to make subsequent parsing easier/safer.\n yield from (i for i in get_instructions(code_object) if i.opname != \"EXTENDED_ARG\")\n\n yield None\n\n # For each constant in this code object that is itself a code object,\n # parse this constant in the same manner.\n for constant in code_object.co_consts:\n if inspect.iscode(constant):\n yield from iterate_instructions(constant)\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 117, "n_words": 75, "vocab_size": 56, "complexity": 5, "nloc": 6, "token_counts": 47, "n_ast_nodes": 83, "n_identifiers": 9, "random_cut": "def iterate_instructions(code_object):\n \n # The arg extension the EXTENDED_ARG opcode represents is automatically handled by get_instructions() but the\n # instruction is left in. Get rid of it to make subsequent parsing easie", "d_id": 77511, "documentation": { "docstring": "Delivers the byte-code instructions as a continuous stream.\n\n Yields `dis.Instruction`. After each code-block (`co_code`), `None` is\n yielded to mark the end of the block and to interrupt the steam.\n ", "n_words": 29, "vocab_size": 25, "n_whitespaces": 38, "language": "en" } }, { "id": 127486, "commit_id": "5c500f6308dce526c50a5230029fb4909b492a35", "repo": "ray", "path": "python/ray/tune/tuner.py", "file_name": "tuner.py", "fun_name": "fit", "commit_message": "[docs/air] Fix up some minor docstrings (#28361)", "code": "def fit(self) -> ResultGrid:\n \n\n if not self._is_ray_client:\n try:\n return self._local_tuner.fit()\n except Exception as e:\n raise TuneError(\n f\"Tune run failed. \"\n f'Please use tuner = Tuner.restore(\"'\n f'{self._local_tuner.get_experiment_checkpoint_dir()}\") to resume.'\n ) from e\n else:\n experiment_checkpoint_dir = ray.get(\n self._remote_tuner.get_experiment_checkpoint_dir.remote()\n )\n try:\n return ray.get(self._remote_tuner.fit.remote())\n except Exception as e:\n raise TuneError(\n f\"Tune run failed. \"\n f'Please use tuner = Tuner.restore(\"'\n f'{experiment_checkpoint_dir}\") to resume.'\n ) from e\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 367, "n_words": 61, "vocab_size": 37, "complexity": 4, "nloc": 41, "token_counts": 93, "n_ast_nodes": 175, "n_identifiers": 14, "random_cut": "def fit(self) -> ResultGrid:\n \n\n if not self._is_ray_client:\n try:\n return self._local_tuner.fit()\n except Exception as e:\n ", "d_id": 28451, "documentation": { "docstring": "Executes hyperparameter tuning job as configured and returns result.\n\n Failure handling:\n For the kind of exception that happens during the execution of a trial,\n one may inspect it together with stacktrace through the returned result grid.\n See ``ResultGrid`` for reference. 
Each trial may fail up to a certain number.\n This is configured by ``RunConfig.FailureConfig.max_failures``.\n\n Exception that happens beyond trials will be thrown by this method as well.\n In such cases, there will be instruction like the following printed out\n at the end of console output to inform users on how to resume.\n\n Please use tuner = Tuner.restore(\"~/ray_results/tuner_resume\")\n to resume.\n\n Raises:\n RayTaskError: If user-provided trainable raises an exception\n TuneError: General Ray Tune error.\n ", "n_words": 112, "vocab_size": 92, "n_whitespaces": 218, "language": "en" } }, { "id": 311203, "commit_id": "9d404b749a0aa0d0527e145c911559df5ecf2afd", "repo": "core", "path": "homeassistant/components/tradfri/base_class.py", "file_name": "base_class.py", "fun_name": "_handle_coordinator_update", "commit_message": "Implement coordinator class for Tradfri integration (#64166)\n\n* Initial commit coordinator\r\n\r\n* More coordinator implementation\r\n\r\n* More coordinator implementation\r\n\r\n* Allow integration reload\r\n\r\n* Move API calls to try/catch block\r\n\r\n* Move back fixture\r\n\r\n* Remove coordinator test file\r\n\r\n* Ensure unchanged file\r\n\r\n* Ensure unchanged conftest.py file\r\n\r\n* Remove coordinator key check\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Import RequestError\r\n\r\n* Move async_setup_platforms to end of setup_entry\r\n\r\n* Remove centralised handling of device data and device controllers\r\n\r\n* Remove platform_type argument\r\n\r\n* Remove exception\r\n\r\n* Remove the correct exception\r\n\r\n* Refactor coordinator error handling\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Remove platform type from base class\r\n\r\n* Remove timeout context manager\r\n\r\n* Refactor exception callback\r\n\r\n* Simplify starting device observation\r\n\r\n* Update test\r\n\r\n* Move observe start into update method\r\n\r\n* Remove await self.coordinator.async_request_refresh()\r\n\r\n* Refactor cover.py\r\n\r\n* Uncomment const.py\r\n\r\n* Add back extra_state_attributes\r\n\r\n* Update homeassistant/components/tradfri/coordinator.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Refactor switch platform\r\n\r\n* Expose switch state\r\n\r\n* Refactor sensor platform\r\n\r\n* Put back accidentally deleted code\r\n\r\n* Add set_hub_available\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Fix tests for fan platform\r\n\r\n* Update homeassistant/components/tradfri/base_class.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Update homeassistant/components/tradfri/base_class.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Fix non-working tests\r\n\r\n* Refresh sensor state\r\n\r\n* Remove commented line\r\n\r\n* Add group coordinator\r\n\r\n* Add groups during setup\r\n\r\n* Refactor light platform\r\n\r\n* Fix tests\r\n\r\n* Move outside of try...except\r\n\r\n* Remove error handler\r\n\r\n* Remove unneeded methods\r\n\r\n* Update sensor\r\n\r\n* Update .coveragerc\r\n\r\n* Move signal\r\n\r\n* Add signals for groups\r\n\r\n* Fix signal\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "def _handle_coordinator_update(self) -> None:\n \n self._refresh()\n super()._handle_coordinator_update()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 8, 
"token_counts": 20, "n_ast_nodes": 38, "n_identifiers": 4, "random_cut": "def _handle_coordinator_update(self) -> None:\n \n sel", "d_id": 109870, "documentation": { "docstring": "\n Handle updated data from the coordinator.\n\n Tests fails without this method.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 156109, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/dataframe/core.py", "file_name": "core.py", "fun_name": "_skew_1d", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def _skew_1d(self, column, bias=True, nan_policy=\"propagate\"):\n \n # import depends on scipy, not installed by default\n from dask.array import stats as da_stats\n\n if pd.Int64Dtype.is_dtype(column._meta_nonempty):\n column = column.astype(\"f8\")\n\n if not np.issubdtype(column.dtype, np.number):\n column = column.astype(\"f8\")\n\n name = self._token_prefix + \"skew-1d-\" + tokenize(column)\n\n array_skew = da_stats.skew(\n column.values, axis=0, bias=bias, nan_policy=nan_policy\n )\n\n layer = {(name, 0): (methods.wrap_skew_reduction, (array_skew._name,), None)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew])\n\n return new_dd_object(\n graph, name, column._meta_nonempty.skew(), divisions=[None, None]\n )\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 195, "n_words": 67, "vocab_size": 55, "complexity": 3, "nloc": 15, "token_counts": 164, "n_ast_nodes": 252, "n_identifiers": 35, "random_cut": "def _skew_1d(self, column, bias=True, nan_policy=\"propagate\"):\n \n # import depends on scipy, not installed by default\n from dask.array import stats as da_stats\n\n if pd.Int64Dtype.is_dtype(column._meta_nonempty):\n column = column.astype(\"f8\")\n\n if not np.issubdtype(column.dtype, np.number):\n column = column.astype(\"f8\")\n\n name = self._token_prefix + \"skew-1d-\" + tokenize(column)\n\n array_skew", "d_id": 36562, "documentation": { "docstring": "1D version of the skew calculation.\n\n Uses the array version from da.stats in case we are passing in a single series\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 290876, "commit_id": "b6586d5c34bf7ea5c30fbb1b62c438078ea14f39", "repo": "core", "path": "tests/components/sensor/test_init.py", "file_name": "test_init.py", "fun_name": "test_device_classes_aligned", "commit_message": "Align number and sensor device classes (#81909)\n\n* Align number and sensor device classes\r\n\r\n* Add tests\r\n\r\n* Tweak tests", "code": "def test_device_classes_aligned():\n \n\n for device_class in NumberDeviceClass:\n assert hasattr(SensorDeviceClass, device_class.name)\n assert getattr(SensorDeviceClass, device_class.name).value == device_class.value\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 34, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def test_device_classes_aligned():\n \n\n for device_class in NumberDeviceClass:\n assert hasattr(SensorDeviceClass, device_class.name)\n assert getattr(SensorDeviceClass, device_class.name).value ==", "d_id": 89989, "documentation": { "docstring": "Make sure all number device classes are also available in 
SensorDeviceClass.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 177252, "commit_id": "50ff08de69c6e9541cd6c029bede5dabf56cfe73", "repo": "networkx", "path": "networkx/algorithms/operators/all.py", "file_name": "all.py", "fun_name": "intersection_all", "commit_message": "Make all.py generator friendly (#5984)\n\n* Make compose_all generator friendly\r\n\r\n* Make disjoint_union_all and intersection_all generator friendly\r\n\r\n* Refactor disjoint_union_all to yield relabeled graphs\r\n\r\n* Make union_all generator friendly\r\n\r\n* Fix intersection_all\r\n\r\n* Fix union_all signature\r\n\r\n* Allow passing an infinite rename generator to union_all\r\n\r\n* Copy over generalizations to binary.py\r\n\r\n* Clean up rename\r\n\r\n* Simplify first_label in disjoint_union_all\r\n\r\n* Simplify disjoint_union_all\r\n\r\n* Add missing R.graph.update in intersection_all", "code": "def intersection_all(graphs):\n \n R = None\n\n for i, G in enumerate(graphs):\n G_nodes_set = set(G.nodes)\n G_edges_set = set(G.edges(keys=True) if G.is_multigraph() else G.edges())\n if i == 0:\n # create new graph\n R = G.__class__()\n node_intersection = G_nodes_set\n edge_intersection = G_edges_set\n elif G.is_multigraph() != R.is_multigraph():\n raise nx.NetworkXError(\"All graphs must be graphs or multigraphs.\")\n else:\n node_intersection &= G_nodes_set\n edge_intersection &= G_edges_set\n\n R.graph.update(G.graph)\n\n if R is None:\n raise ValueError(\"cannot apply intersection_all to an empty list\")\n\n R.add_nodes_from(node_intersection)\n R.add_edges_from(edge_intersection)\n\n return R\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 220, "n_words": 73, "vocab_size": 53, "complexity": 6, "nloc": 20, "token_counts": 132, "n_ast_nodes": 223, "n_identifiers": 23, "random_cut": "def intersection_all(graphs):\n \n R = None\n\n for i, G in enumerate(graphs):\n G_nodes_set = set(G.nodes)\n G_edges_set = set(G.edges(keys=True) if G.is_multigraph() else G.edges())\n if i == 0:\n # create new graph\n R = G.__class__()\n node_intersection = G_nodes_set\n edge_intersection = G_edges_set", "d_id": 42316, "documentation": { "docstring": "Returns a new graph that contains only the nodes and the edges that exist in\n all graphs.\n\n Parameters\n ----------\n graphs : iterable\n Iterable of NetworkX graphs\n\n Returns\n -------\n R : A new graph with the same type as the first graph in list\n\n Raises\n ------\n ValueError\n If `graphs` is an empty list.\n\n Notes\n -----\n Attributes from the graph, nodes, and edges are not copied to the new\n graph.\n ", "n_words": 68, "vocab_size": 52, "n_whitespaces": 125, "language": "en" } }, { "id": 19466, "commit_id": "7e33fcae4384563b4c927fd44318c29dd524a097", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/locations/__init__.py", "file_name": "__init__.py", "fun_name": "_looks_like_red_hat_scheme", "commit_message": "Vendor in pip 21.2.4 release (from pip 21.2.2 prior). 
(#5009)\n\n* Vendor in pip 21.2.4 release (from pip 21.2.2 prior).\r\n\r\n* Add news fragment for pip 21.2.4 vendor update.\r\n\r\n* Add potentially missing LICENSE files", "code": "def _looks_like_red_hat_scheme() -> bool:\n \n from distutils.command.install import install\n from distutils.dist import Distribution\n\n cmd: Any = install(Distribution())\n cmd.finalize_options()\n return (\n cmd.exec_prefix == f\"{os.path.normpath(sys.exec_prefix)}/local\"\n and cmd.prefix == f\"{os.path.normpath(sys.prefix)}/local\"\n )\n\n\n@functools.lru_cache(maxsize=None)", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "@functools.lru_cache(maxsize=None)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 62, "n_words": 28, "vocab_size": 25, "complexity": 2, "nloc": 16, "token_counts": 52, "n_ast_nodes": 137, "n_identifiers": 19, "random_cut": "def _looks_like_red_hat_scheme() -> bool:", "d_id": 2983, "documentation": { "docstring": "Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``.\n\n Red Hat's ``00251-change-user-install-location.patch`` changes the install\n command's ``prefix`` and ``exec_prefix`` to append ``\"/local\"``. This is\n (fortunately?) done quite unconditionally, so we create a default command\n object without any configuration to detect this.\n ", "n_words": 38, "vocab_size": 35, "n_whitespaces": 53, "language": "en" } }, { "id": 45910, "commit_id": "401419432082d222b823e4f2a66f21e5cc3ab28d", "repo": "airflow", "path": "airflow/providers/databricks/operators/databricks_sql.py", "file_name": "databricks_sql.py", "fun_name": "_create_sql_query", "commit_message": "Add new options to DatabricksCopyIntoOperator (#22076)\n\nThis includes:\r\n* `encryption` - to specify encryption options for a given location\r\n* `credential` - to specify authentication options for a given location\r\n* `validate` - to control validation of schema & data", "code": "def _create_sql_query(self) -> str:\n escaper = ParamEscaper()\n maybe_with = \"\"\n if self._encryption is not None or self._credential is not None:\n maybe_encryption = \"\"\n if self._encryption is not None:\n maybe_encryption = self._generate_options(\"ENCRYPTION\", escaper, self._encryption, False)\n maybe_credential = \"\"\n if self._credential is not None:\n maybe_credential = self._generate_options(\"CREDENTIAL\", escaper, self._credential, False)\n maybe_with = f\" WITH ({maybe_credential} {maybe_encryption})\"\n location = escaper.escape_item(self._file_location) + maybe_with\n if self._expression_list is not None:\n location = f\"(SELECT {self._expression_list} FROM {location})\"\n files_or_pattern = \"\"\n if self._pattern is not None:\n files_or_pattern = f\"PATTERN = {escaper.escape_item(self._pattern)}\\n\"\n elif self._files is not None:\n files_or_pattern = f\"FILES = {escaper.escape_item(self._files)}\\n\"\n format_options = self._generate_options(\"FORMAT_OPTIONS\", escaper, self._format_options) + \"\\n\"\n copy_options = self._generate_options(\"COPY_OPTIONS\", escaper, self._copy_options) + \"\\n\"\n validation = \"\"\n if self._validate is not None:\n if isinstance(self._validate, bool):\n if self._validate:\n validation = \"VALIDATE ALL\\n\"\n elif isinstance(self._validate, int):\n if self._validate < 0:\n raise AirflowException(\n \"Number of rows for validation should be positive, got: \" + str(self._validate)\n )\n validation = f\"VALIDATE {self._validate} ROWS\\n\"\n else:\n raise AirflowException(\"Incorrect data type for validate parameter: \" + type(self._validate))\n # 
TODO: think on how to make sure that table_name and expression_list aren't used for SQL injection\n sql = f\n return sql.strip()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 579, "n_words": 184, "vocab_size": 105, "complexity": 13, "nloc": 40, "token_counts": 257, "n_ast_nodes": 522, "n_identifiers": 33, "random_cut": "def _create_sql_query(self) -> str:\n escaper = ParamEscaper()\n maybe_with = \"\"\n if self._encryption is not None or self._credential is not None:\n maybe_encryption = \"\"\n if self._encryption is not None:\n maybe_encryption = self._generate_options(\"ENCRYPTION\", escaper, self._encryption, False)\n maybe_credential = \"\"\n if self._credential is not None:\n maybe_credential = self._generate_options(\"CREDENTIAL\", escaper, self._credential, False)\n maybe_with = f\" WITH ({maybe_credential} {maybe_encryption})\"\n location = escaper.escape_item(self._file_location) + maybe_with\n if self._expression_list is not None:\n location = f\"(SELECT {self._expression_list} FROM {location})\"\n files_or_pattern = \"\"\n if self._pattern is not None:\n files_or_pattern = f\"PATTERN = {escaper.escape_item(self._pattern)}\\n\"\n elif self._files is not None:\n files_or_pattern = f\"FILES = {escaper.e", "d_id": 8737, "documentation": { "docstring": "COPY INTO {self._table_name}\nFROM {location}\nFILEFORMAT = {self._file_format}\n{validation}{files_or_pattern}{format_options}{copy_options}\n", "n_words": 9, "vocab_size": 9, "n_whitespaces": 5, "language": "en" } }, { "id": 277193, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/wrappers/scikit_learn.py", "file_name": "scikit_learn.py", "fun_name": "predict", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def predict(self, x, **kwargs):\n \n proba = self.model.predict(x, **kwargs)\n if proba.shape[-1] > 1:\n classes = proba.argmax(axis=-1)\n else:\n classes = (proba > 0.5).astype(\"int32\")\n return self.classes_[classes]\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 80, "n_words": 23, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 69, "n_ast_nodes": 109, "n_identifiers": 12, "random_cut": "def predict(self, x, **kwargs):\n ", "d_id": 81887, "documentation": { "docstring": "Returns the class predictions for the given test data.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n **kwargs: dictionary arguments\n Legal arguments are the arguments\n of `Sequential.predict`.\n\n Returns:\n preds: array-like, shape `(n_samples,)`\n Class predictions.\n ", "n_words": 48, "vocab_size": 35, "n_whitespaces": 177, "language": "en" } }, { "id": 49439, "commit_id": "9b3119dfb63c4cbb7acfb9f1f1c09ac24e6d68d2", "repo": "PaddleHub", "path": "modules/image/text_recognition/ppocrv3_det_ch/processor.py", "file_name": "processor.py", "fun_name": "resize_image_type0", "commit_message": "add module", "code": "def resize_image_type0(self, img):\n \n limit_side_len = self.max_side_len\n h, w, _ = img.shape\n\n # limit the max side\n if max(h, w) > limit_side_len:\n if h > w:\n ratio = float(limit_side_len) / h\n else:\n ratio = float(limit_side_len) / w\n else:\n ratio = 1.\n resize_h = int(h * ratio)\n resize_w = int(w * ratio)\n\n 
resize_h = int(round(resize_h / 32) * 32)\n resize_w = int(round(resize_w / 32) * 32)\n\n try:\n if int(resize_w) <= 0 or int(resize_h) <= 0:\n return None, (None, None)\n img = cv2.resize(img, (int(resize_w), int(resize_h)))\n except:\n print(img.shape, resize_w, resize_h)\n sys.exit(0)\n ratio_h = resize_h / float(h)\n ratio_w = resize_w / float(w)\n # return img, np.array([h, w])\n return img, [ratio_h, ratio_w]\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 340, "n_words": 106, "vocab_size": 66, "complexity": 6, "nloc": 24, "token_counts": 190, "n_ast_nodes": 302, "n_identifiers": 23, "random_cut": "def resize_image_type0(self, img):\n \n limit_side_len = self.max_side_len\n h, w, _ = img.shape\n\n # limit the max side\n if max(h, w) > limit_side_len:\n if h > w:\n ratio = float(limit_side_len) / h\n else:\n ratio = float(limit_side_len) / w\n else:\n ratio = 1.\n resize_h = int(h * ratio)\n resize_w = int(w * ratio)\n\n resize_h = int(round(resize_h / 32) * 32)\n resize_w = int(round(resize_w / 32) * 32)\n\n try:\n if int(resize_w) <= 0 or int(resize_h) <= 0:\n return None, (None, None)\n img = cv2.resize(img, (int(resize_w), int(resize_h)))\n except:\n ", "d_id": 9744, "documentation": { "docstring": "\n resize image to a size multiple of 32 which is required by the network\n args:\n img(array): array with shape [h, w, c]\n return(tuple):\n img, (ratio_h, ratio_w)\n ", "n_words": 26, "vocab_size": 26, "n_whitespaces": 77, "language": "en" } }, { "id": 271560, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "metrics", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def metrics(self):\n \n metrics = []\n if self._is_compiled:\n # TODO(omalleyt): Track `LossesContainer` and `MetricsContainer` objects\n # so that attr names are not load-bearing.\n if self.compiled_loss is not None:\n metrics += self.compiled_loss.metrics\n if self.compiled_metrics is not None:\n metrics += self.compiled_metrics.metrics\n\n for l in self._flatten_layers():\n metrics.extend(l._metrics) # pylint: disable=protected-access\n return metrics\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 169, "n_words": 48, "vocab_size": 36, "complexity": 5, "nloc": 10, "token_counts": 64, "n_ast_nodes": 107, "n_identifiers": 9, "random_cut": "def metrics(self):\n \n metrics = []\n if self._is_compiled:\n # TODO(omalleyt): Track `LossesContainer` and `MetricsContainer` objects\n # so that attr names are not l", "d_id": 80796, "documentation": { "docstring": "Returns the model's metrics added using `compile()`, `add_metric()` APIs.\n\n Note: Metrics passed to `compile()` are available only after a `keras.Model`\n has been trained/evaluated on actual data.\n\n Examples:\n\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> outputs = tf.keras.layers.Dense(2)(inputs)\n >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs)\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\"])\n >>> [m.name for m in model.metrics]\n []\n\n >>> x = np.random.random((2, 3))\n >>> y = np.random.randint(0, 2, (2, 2))\n >>> model.fit(x, y)\n >>> [m.name for m in model.metrics]\n ['loss', 'mae']\n\n >>> inputs = tf.keras.layers.Input(shape=(3,))\n >>> d = 
tf.keras.layers.Dense(2, name='out')\n >>> output_1 = d(inputs)\n >>> output_2 = d(inputs)\n >>> model = tf.keras.models.Model(\n ... inputs=inputs, outputs=[output_1, output_2])\n >>> model.add_metric(\n ... tf.reduce_sum(output_2), name='mean', aggregation='mean')\n >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\", \"acc\"])\n >>> model.fit(x, (y, y))\n >>> [m.name for m in model.metrics]\n ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae',\n 'out_1_acc', 'mean']\n\n ", "n_words": 128, "vocab_size": 83, "n_whitespaces": 330, "language": "en" } }, { "id": 204162, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/messages/api.py", "file_name": "api.py", "fun_name": "set_level", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def set_level(request, level):\n \n if not hasattr(request, \"_messages\"):\n return False\n request._messages.level = level\n return True\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 33, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 48, "n_identifiers": 5, "random_cut": "def set_level(request, level):\n \n if not hasattr(request, \"_messages\"):\n retur", "d_id": 50661, "documentation": { "docstring": "\n Set the minimum level of messages to be recorded, and return ``True`` if\n the level was recorded successfully.\n\n If set to ``None``, use the default level (see the get_level() function).\n ", "n_words": 30, "vocab_size": 24, "n_whitespaces": 43, "language": "en" } }, { "id": 177139, "commit_id": "19c1454d3dfa70a893ea67f2d78515658e8c08e5", "repo": "networkx", "path": "networkx/algorithms/lowest_common_ancestors.py", "file_name": "lowest_common_ancestors.py", "fun_name": "all_pairs_lowest_common_ancestor", "commit_message": "Replace LCA with naive implementations (#5883)\n\n* WIP: Replace functions to evaluate tests.\r\n\r\n* Raise prompt exceptions by wrapping generator.\r\n\r\n* Fix erroneous ground-truth self-ancestor in tests.\r\n\r\n* Move pair creation outside of generator and validate.\r\n\r\n* Convert input with fromkeys to preserve order and rm duplicates.\r\n\r\n* Replace LCA implementations & update tests.\r\n\r\n* Test cleanup: move new tests into old class.\r\n\r\nAllows us to get rid of duplication/another test setup.\r\n\r\n* Rm naive fns from refguide.\r\n\r\n* Add release note.\r\n\r\n* Remove unused imports.\r\n\r\n* Remove missed duplicate function (bad rebase).\r\n\r\nCo-authored-by: Dilara Tekinoglu ", "code": "def all_pairs_lowest_common_ancestor(G, pairs=None):\n \n if not nx.is_directed_acyclic_graph(G):\n raise nx.NetworkXError(\"LCA only defined on directed acyclic graphs.\")\n if len(G) == 0:\n raise nx.NetworkXPointlessConcept(\"LCA meaningless on null graphs.\")\n\n if pairs is None:\n pairs = combinations_with_replacement(G, 2)\n else:\n # Convert iterator to iterable, if necessary. 
Trim duplicates.\n pairs = dict.fromkeys(pairs)\n # Verify that each of the nodes in the provided pairs is in G\n nodeset = set(G)\n for pair in pairs:\n if set(pair) - nodeset:\n raise nx.NodeNotFound(\n f\"Node(s) {set(pair) - nodeset} from pair {pair} not in G.\"\n )\n\n # Once input validation is done, construct the generator", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 226, "n_words": 92, "vocab_size": 67, "complexity": 6, "nloc": 17, "token_counts": 100, "n_ast_nodes": 171, "n_identifiers": 15, "random_cut": "def all_pairs_lowest_common_ancestor(G, pairs=None):\n \n if not nx.is_directed_acyclic_graph(G):\n raise nx.NetworkXError", "d_id": 42282, "documentation": { "docstring": "Return the lowest common ancestor of all pairs or the provided pairs\n\n Parameters\n ----------\n G : NetworkX directed graph\n\n pairs : iterable of pairs of nodes, optional (default: all pairs)\n The pairs of nodes of interest.\n If None, will find the LCA of all pairs of nodes.\n\n Yields\n ------\n ((node1, node2), lca) : 2-tuple\n Where lca is least common ancestor of node1 and node2.\n Note that for the default case, the order of the node pair is not considered,\n e.g. you will not get both ``(a, b)`` and ``(b, a)``\n\n Raises\n ------\n NetworkXPointlessConcept\n If `G` is null.\n NetworkXError\n If `G` is not a DAG.\n\n Examples\n --------\n The default behavior is to yield the lowest common ancestor for all\n possible combinations of nodes in `G`, including self-pairings:\n\n >>> G = nx.DiGraph([(0, 1), (0, 3), (1, 2)])\n >>> dict(nx.all_pairs_lowest_common_ancestor(G))\n {(0, 0): 0, (0, 1): 0, (0, 3): 0, (0, 2): 0, (1, 1): 1, (1, 3): 0, (1, 2): 1, (3, 3): 3, (3, 2): 0, (2, 2): 2}\n\n The pairs argument can be used to limit the output to only the\n specified node pairings:\n\n >>> dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (2, 3)]))\n {(1, 2): 1, (2, 3): 0}\n\n Notes\n -----\n Only defined on non-null directed acyclic graphs.\n\n See Also\n --------\n lowest_common_ancestor\n ", "n_words": 208, "vocab_size": 126, "n_whitespaces": 344, "language": "en" } }, { "id": 54884, "commit_id": "7895fb5013aa98955bb31be96953ac13979a5b94", "repo": "prefect", "path": "tests/test_task_runners.py", "file_name": "test_task_runners.py", "fun_name": "test_is_pickleable_after_start", "commit_message": "Introduces testing module and task runner standard test suite (PrefectHQ/orion#1655)", "code": "async def test_is_pickleable_after_start(self, task_runner):\n \n task_runner.client_kwargs[\"set_as_default\"] = True", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 6, "token_counts": 50, "n_ast_nodes": 30, "n_identifiers": 4, "random_cut": "async def test_is_pickleable_after_start(self, task_runner):\n \n task_runner.client_kwargs[\"set_as_default\"] = True", "d_id": 11176, "documentation": { "docstring": "\n The task_runner must be picklable as it is attached to `PrefectFuture` objects\n Reimplemented to set Dask client as default to allow unpickling\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 44, "language": "en" } }, { "id": 220349, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/base_events.py", "file_name": "base_events.py", "fun_name": "_add_callback_signalsafe", 
"commit_message": "add python 3.10.4 for windows", "code": "def _add_callback_signalsafe(self, handle):\n \n self._add_callback(handle)\n self._write_to_self()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 26, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def _add_callback_signalsafe(self, handle):\n \n self._add_callback(handle)\n self._write_to_self()\n", "d_id": 55971, "documentation": { "docstring": "Like _add_callback() but called from a signal handler.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 30115, "commit_id": "fa2ad657482aca9dc628e6d7062b8badf2706bb6", "repo": "spotify-downloader", "path": "spotdl/utils/ffmpeg.py", "file_name": "ffmpeg.py", "fun_name": "get_local_ffmpeg", "commit_message": "v4 init", "code": "def get_local_ffmpeg() -> Optional[Path]:\n \n\n ffmpeg_path = Path(\n get_spotdl_path(), \"ffmpeg\" + \".exe\" if platform.system() == \"Windows\" else \"\"\n )\n\n if ffmpeg_path.is_file():\n return ffmpeg_path\n\n return None\n\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 53, "n_words": 24, "vocab_size": 21, "complexity": 3, "nloc": 10, "token_counts": 43, "n_ast_nodes": 79, "n_identifiers": 8, "random_cut": "def get_local_ffmpeg() -> Optional[Path]:\n \n\n ", "d_id": 5324, "documentation": { "docstring": "\n Get local ffmpeg binary path or None if not found.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 216231, "commit_id": "21d3f4bc9eb7b9fb1118c59073595a9e9ee836bd", "repo": "salt", "path": "salt/modules/cmdmod.py", "file_name": "cmdmod.py", "fun_name": "_render_cmd", "commit_message": "fixes salt bug 61507", "code": "def _render_cmd(cmd, cwd, template, saltenv=None, pillarenv=None, pillar_override=None):\n \n if saltenv is None:\n saltenv = __opts__.get(\"saltenv\", \"base\")\n if not template:\n return (cmd, cwd)\n\n # render the path as a template using path_template_engine as the engine\n if template not in salt.utils.templates.TEMPLATE_REGISTRY:\n raise CommandExecutionError(\n \"Attempted to render file paths with unavailable engine {}\".format(template)\n )\n\n kwargs = {}\n kwargs[\"salt\"] = __salt__\n if pillarenv is not None or pillar_override is not None:\n pillarenv = pillarenv or __opts__[\"pillarenv\"]\n kwargs[\"pillar\"] = _gather_pillar(pillarenv, pillar_override)\n else:\n kwargs[\"pillar\"] = __pillar__\n kwargs[\"grains\"] = __grains__\n kwargs[\"opts\"] = __opts__\n kwargs[\"saltenv\"] = saltenv\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 184, "n_words": 88, "vocab_size": 60, "complexity": 7, "nloc": 23, "token_counts": 155, "n_ast_nodes": 225, "n_identifiers": 20, "random_cut": "def _render_cmd(cmd, cwd, template, saltenv=None, pillarenv=None, pillar_override=None):\n \n if saltenv is None:\n saltenv = __opts__.get(\"saltenv\", \"base\")\n if not template:\n return (cmd, cwd)\n\n # render the path as a template using path_template_engine as the engine\n if template not in salt.utils.templates.TEMPLATE_REGISTRY:\n raise CommandExecutionError(\n \"Attempted to render file paths with unavailable engine {}\".format(template)\n )\n\n kwargs = {}\n kwargs[\"salt\"] = __salt__\n if 
pillarenv is not None or pillar_override is not None:\n pillarenv = pillarenv or __opts__[\"pillarenv\"]\n kwargs[\"pillar\"] = _gather_pillar(pillarenv, pillar_override)", "d_id": 54474, "documentation": { "docstring": "\n If template is a valid template engine, process the cmd and cwd through\n that engine.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 25, "language": "en" } }, { "id": 247373, "commit_id": "7e91107be1a4287873266e588a3c5b415279f4c8", "repo": "synapse", "path": "tests/rest/media/v1/test_html_preview.py", "file_name": "test_html_preview.py", "fun_name": "test_windows_1252", "commit_message": "Add type hints to `tests/rest` (#12146)\n\n* Add type hints to `tests/rest`\r\n\r\n* newsfile\r\n\r\n* change import from `SigningKey`", "code": "def test_windows_1252(self) -> None:\n \n html = b\n tree = decode_body(html, \"http://example.com/test.html\")\n og = parse_html_to_open_graph(tree, \"http://example.com/test.html\")\n self.assertEqual(og, {\"og:title\": \"ó\", \"og:description\": \"Some text.\"})\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 13, "token_counts": 44, "n_ast_nodes": 83, "n_identifiers": 8, "random_cut": "def test_windows_1252(self) -> None:\n \n html = b\n tree = decode_body(html, \"http://example.com/test.html\")\n og = parse", "d_id": 71631, "documentation": { "docstring": "A body which uses cp1252, but doesn't declare that.\n \n \\xf3\n \n Some text.\n \n \n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 65, "language": "en" } }, { "id": 300606, "commit_id": "4885331509eeffe50f42d76b234996467b06170f", "repo": "core", "path": "homeassistant/helpers/template.py", "file_name": "template.py", "fun_name": "square_root", "commit_message": "Fail template functions when no default specified (#71687)", "code": "def square_root(value, default=_SENTINEL):\n \n try:\n return math.sqrt(float(value))\n except (ValueError, TypeError):\n if default is _SENTINEL:\n raise_no_default(\"sqrt\", value)\n return default\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 58, "n_words": 17, "vocab_size": 15, "complexity": 3, "nloc": 7, "token_counts": 42, "n_ast_nodes": 70, "n_identifiers": 10, "random_cut": "def square_root(value, default=_SENTINEL):\n \n try:\n return m", "d_id": 99466, "documentation": { "docstring": "Filter and function to get square root of the value.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 291738, "commit_id": "c576a68d336bc91fd82c299d9b3e5dfdc1c14960", "repo": "core", "path": "tests/test_core.py", "file_name": "test_core.py", "fun_name": "test_track_task_functions", "commit_message": "Upgrade pytest-aiohttp (#82475)\n\n* Upgrade pytest-aiohttp\r\n\r\n* Make sure executors, tasks and timers are closed\r\n\r\nSome test will trigger warnings on garbage collect, these warnings\r\nspills over into next test.\r\n\r\nSome test trigger tasks that raise errors on shutdown, these spill\r\nover into next test.\r\n\r\nThis is to mimic older pytest-aiohttp and it's behaviour on test\r\ncleanup.\r\n\r\nDiscussions on similar changes for pytest-aiohttp are here:\r\nhttps://github.com/pytest-dev/pytest-asyncio/pull/309\r\n\r\n* Replace loop with event_loop\r\n\r\n* Make sure time is frozen for tests\r\n\r\n* Make sure the ConditionType is not 
async\r\n\r\n /home-assistant/homeassistant/helpers/template.py:2082: RuntimeWarning: coroutine 'AsyncMockMixin._execute_mock_call' was never awaited\r\n def wrapper(*args, **kwargs):\r\n Enable tracemalloc to get traceback where the object was allocated.\r\n See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.\r\n\r\n* Increase litejet press tests with a factor 10\r\n\r\nThe times are simulated anyway, and we can't stop the normal\r\nevent from occuring.\r\n\r\n* Use async handlers for aiohttp\r\n\r\ntests/components/motioneye/test_camera.py::test_get_still_image_from_camera\r\ntests/components/motioneye/test_camera.py::test_get_still_image_from_camera\r\ntests/components/motioneye/test_camera.py::test_get_stream_from_camera\r\ntests/components/motioneye/test_camera.py::test_get_stream_from_camera\r\ntests/components/motioneye/test_camera.py::test_camera_option_stream_url_template\r\ntests/components/motioneye/test_camera.py::test_camera_option_stream_url_template\r\n /Users/joakim/src/hass/home-assistant/venv/lib/python3.9/site-packages/aiohttp/web_urldispatcher.py:189: DeprecationWarning: Bare functions are deprecated, use async ones\r\n warnings.warn(\r\n\r\n* Switch to freezegun in modbus tests\r\n\r\nThe tests allowed clock to tick in between steps\r\n\r\n* Make sure skybell object are fully mocked\r\n\r\nOld tests would trigger attempts to post to could services:\r\n\r\n```\r\nDEBUG:aioskybell:HTTP post https://cloud.myskybell.com/api/v3/login/ Request with headers: {'content-type': 'application/json', 'accept': '*/*', 'x-skybell-app-id': 'd2b542c7-a7e4-4e1e-b77d-2b76911c7c46', 'x-skybell-client-id': '1f36a3c0-6dee-4997-a6db-4e1c67338e57'}\r\n```\r\n\r\n* Fix sorting that broke after rebase", "code": "async def test_track_task_functions(event_loop):\n \n hass = ha.HomeAssistant()\n try:\n assert hass._track_task\n\n hass.async_stop_track_tasks()\n assert not hass._track_task\n\n hass.async_track_tasks()\n assert hass._track_task\n finally:\n await hass.async_stop()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 73, "n_words": 19, "vocab_size": 15, "complexity": 2, "nloc": 10, "token_counts": 46, "n_ast_nodes": 83, "n_identifiers": 9, "random_cut": "async def test_track_task_functions(event_loop):\n \n hass = ha.HomeAssistant()\n try:\n assert hass._track_task\n\n hass.async_stop_track_tasks()\n assert not hass._track_task\n\n hass.async_track_tasks()\n assert hass._track_task\n finally:\n await hass.async_stop()\n\n", "d_id": 90842, "documentation": { "docstring": "Test function to start/stop track task and initial state.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 293281, "commit_id": "ea82f2e293f43d3e5be103a64b68d088c4b65545", "repo": "core", "path": "homeassistant/components/kaleidescape/media_player.py", "file_name": "media_player.py", "fun_name": "media_position_updated_at", "commit_message": "Add Kaleidescape integration (#67711)", "code": "def media_position_updated_at(self) -> datetime | None:\n \n if self._device.movie.play_status in KALEIDESCAPE_PLAYING_STATES:\n return utcnow()\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 27, "n_ast_nodes": 46, "n_identifiers": 8, "random_cut": "def 
media_position_updated_at(self) -> datetime | None:\n \n ", "d_id": 92344, "documentation": { "docstring": "When was the position of the current playing media valid.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 220720, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/sslproto.py", "file_name": "sslproto.py", "fun_name": "write", "commit_message": "add python 3.10.4 for windows", "code": "def write(self, data):\n \n if not isinstance(data, (bytes, bytearray, memoryview)):\n raise TypeError(f\"data: expecting a bytes-like instance, \"\n f\"got {type(data).__name__}\")\n if not data:\n return\n self._ssl_protocol._write_appdata(data)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 100, "n_words": 23, "vocab_size": 21, "complexity": 3, "nloc": 7, "token_counts": 44, "n_ast_nodes": 84, "n_identifiers": 12, "random_cut": "def write(self, data):\n \n if n", "d_id": 56101, "documentation": { "docstring": "Write some data bytes to the transport.\n\n This does not block; it buffers the data and arranges for it\n to be sent out asynchronously.\n ", "n_words": 24, "vocab_size": 20, "n_whitespaces": 45, "language": "en" } }, { "id": 189484, "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", "repo": "manim", "path": "manim/mobject/svg/text_mobject.py", "file_name": "text_mobject.py", "fun_name": "_extract_color_tags", "commit_message": "Hide more private methods from the docs. (#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", "code": "def _extract_color_tags(self):\n \n tags = re.finditer(\n r'(.+?)',\n self.original_text,\n re.S,\n )\n\n colormap = []\n for tag in tags:\n start = self._count_real_chars(self.original_text[: tag.start(0)])\n end = start + self._count_real_chars(tag.group(4))\n offsets = tag.group(3).split(\",\") if tag.group(3) else [0]\n start_offset = int(offsets[0]) if offsets[0] else 0\n end_offset = int(offsets[1]) if len(offsets) == 2 and offsets[1] else 0\n\n colormap.append(\n {\n \"start\": start,\n \"end\": end,\n \"color\": tag.group(1),\n \"start_offset\": start_offset,\n \"end_offset\": end_offset,\n },\n )\n self.text = re.sub(\"]+>(.+?)\", r\"\\1\", self.text, 0, re.S)\n return colormap\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 357, "n_words": 73, "vocab_size": 58, "complexity": 6, "nloc": 24, "token_counts": 188, "n_ast_nodes": 296, "n_identifiers": 22, "random_cut": "def _extract_color_tags(self):\n \n tags = re.finditer(\n r'(.+?)',\n self.original_text,\n re.S,\n )\n\n colormap = []\n for tag in tags:\n start = self._count_real_chars(self.original_text[: tag.start(0)])\n end = start + self._count_real_chars(tag.group(4))\n offsets = tag.group(3).split(\",\") if tag.group(3) else [0]\n start_offset = 
int(offsets[0]) if offsets[0] else 0\n end_offset = int(offsets[1]) if len(offsets) == 2 and offsets[1] else 0\n\n colormap.append(\n {\n \"start\": start,\n \"end\": end,\n \"color\": tag.group(1),\n \"start_offset\": start_offset,\n \"end_offset\": end_offset,\n ", "d_id": 46085, "documentation": { "docstring": "Used to determine which parts (if any) of the string should be formatted\n with a custom color.\n\n Removes the ```` tag, as it is not part of Pango's markup and would cause an error.\n\n Note: Using the ```` tags is deprecated. As soon as the legacy syntax is gone, this function\n will be removed.\n ", "n_words": 54, "vocab_size": 45, "n_whitespaces": 89, "language": "en" } }, { "id": 162729, "commit_id": "9120cdffe618c6c2ff16fe6a311b6a1367efdbc8", "repo": "AutoEq", "path": "frequency_response.py", "file_name": "frequency_response.py", "fun_name": "write_readme", "commit_message": "Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. Bug fixes.", "code": "def write_readme(self, file_path, parametric_eq_peqs=None, fixed_band_eq_peq=None):\n \n file_path = os.path.abspath(file_path)\n dir_path = os.path.dirname(file_path)\n model = self.name\n\n # Write model\n s = '# {}\\n'.format(model)\n s += 'See [usage instructions](https://github.com/jaakkopasanen/AutoEq#usage) for more options and info.\\n\\n'\n\n # Add parametric EQ settings\n if parametric_eq_peqs is not None:\n s += '### Parametric EQs\\n'\n if len(parametric_eq_peqs) > 1:\n compound = PEQ(self.frequency.copy(), parametric_eq_peqs[0].fs, [])\n n = 0\n filter_ranges = ''\n preamps = ''\n for i, peq in enumerate(parametric_eq_peqs):\n for filt in peq.filters:\n compound.add_filter(filt)\n filter_ranges += f'1-{len(peq.filters) + n}'\n preamps += f'{-compound.max_gain - 0.1:.1f} dB'\n if i < len(parametric_eq_peqs) - 2:\n filter_ranges += ', '\n preamps += ', '\n elif i == len(parametric_eq_peqs) - 2:\n filter_ranges += ' or '\n preamps += ' or '\n n += len(peq.filters)\n s += f'You can use filters {filter_ranges}. 
Apply preamp of {preamps}, respectively.\\n\\n'\n else:\n compound = PEQ(self.frequency.copy(), parametric_eq_peqs[0].fs, [])\n for peq in parametric_eq_peqs:\n for filt in peq.filters:\n compound.add_filter(filt)\n s += f'Apply preamp of -{compound.max_gain + 0.1:.1f} dB when using parametric equalizer.\\n\\n'\n s += compound.markdown_table() + '\\n\\n'\n\n # Add fixed band eq\n if fixed_band_eq_peq is not None:\n s += f'### Fixed Band EQs\\nWhen using fixed band (also called graphic) equalizer, apply preamp of ' \\\n f'**-{fixed_band_eq_peq.max_gain + 0.1:.1f} dB** (if available) and set gains manually with these ' \\\n f'parameters.\\n\\n{fixed_band_eq_peq.markdown_table()}\\n\\n'\n\n # Write image link\n img_path = os.path.join(dir_path, model + '.png')\n if os.path.isfile(img_path):\n img_url = f'./{os.path.split(img_path)[1]}'\n img_url = urllib.parse.quote(img_url, safe=\"%/:=&?~#+!$,;'@()*[]\")\n s += f'### Graphs\\n![]({img_url})\\n'\n\n # Write file\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(s)\n", "url": "https://github.com/jaakkopasanen/AutoEq.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 888, "n_words": 239, "vocab_size": 135, "complexity": 11, "nloc": 44, "token_counts": 312, "n_ast_nodes": 638, "n_identifiers": 44, "random_cut": "def write_readme(self, file_path, parametric_eq_peqs=None, fixed_band_eq_peq=None):\n \n file_path = os.path.abspath(file_path)\n dir_path = os.path.dirname(file_path)\n model = self.name\n\n # Write model\n s = '# {}\\n'.format(model)\n s += 'See [usage instructions](https://github.com/jaakkopasanen/AutoEq#usage) for more options and info.\\n\\n'\n\n # Add parametric EQ settings\n if parametric_eq_peqs is not None:\n s += '### Parametric EQs\\n'\n ", "d_id": 39268, "documentation": { "docstring": "Writes README.md with picture and Equalizer APO settings.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 256950, "commit_id": "ac5617e757e9ace6f30b7291686d9dbbc339f433", "repo": "haystack", "path": "haystack/telemetry.py", "file_name": "telemetry.py", "fun_name": "_get_execution_environment", "commit_message": "Add basic telemetry features (#2314)\n\n* add basic telemetry features\r\n\r\n* change pipeline_config to _component_config\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add super().__init__() calls to error classes\r\n\r\n* make posthog mock work with python 3.7\r\n\r\n* Update Documentation & Code Style\r\n\r\n* update link to docs web page\r\n\r\n* log exceptions, send event for raised HaystackErrors, refactor Path(CONFIG_PATH)\r\n\r\n* add comment on send_event in BaseComponent.init() and fix mypy\r\n\r\n* mock NonPrivateParameters and fix pylint undefined-variable\r\n\r\n* Update Documentation & Code Style\r\n\r\n* check model path contains multiple /\r\n\r\n* add test for writing to file\r\n\r\n* add test for en-/disable telemetry\r\n\r\n* Update Documentation & Code Style\r\n\r\n* merge file deletion methods and ignore pylint global statement\r\n\r\n* Update Documentation & Code Style\r\n\r\n* set env variable in demo to activate telemetry\r\n\r\n* fix mock of HAYSTACK_TELEMETRY_ENABLED\r\n\r\n* fix mypy and linter\r\n\r\n* add CI as env variable to execution contexts\r\n\r\n* remove threading, add test for custom error event\r\n\r\n* Update Documentation & Code Style\r\n\r\n* simplify config/log file deletion\r\n\r\n* add test for final event being sent\r\n\r\n* force writing config file in test\r\n\r\n* make test compatible 
with python 3.7\r\n\r\n* switch to posthog production server\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def _get_execution_environment():\n \n if os.environ.get(\"CI\", \"False\").lower() == \"true\":\n execution_env = \"ci\"\n elif \"google.colab\" in sys.modules:\n execution_env = \"colab\"\n elif \"KUBERNETES_SERVICE_HOST\" in os.environ:\n execution_env = \"kubernetes\"\n elif HAYSTACK_DOCKER_CONTAINER in os.environ:\n execution_env = os.environ.get(HAYSTACK_DOCKER_CONTAINER)\n # check if pytest is imported\n elif \"pytest\" in sys.modules:\n execution_env = \"test\"\n else:\n try:\n execution_env = get_ipython().__class__.__name__ # pylint: disable=undefined-variable\n except NameError:\n execution_env = \"script\"\n return execution_env\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 158, "n_words": 59, "vocab_size": 36, "complexity": 7, "nloc": 17, "token_counts": 94, "n_ast_nodes": 180, "n_identifiers": 13, "random_cut": "def _get_execution_environment():\n \n if os.environ.get(\"CI\", \"False\").lower() == \"true\":\n execution_env ", "d_id": 74967, "documentation": { "docstring": "\n Identifies the execution environment that Haystack is running in.\n Options are: colab notebook, kubernetes, CPU/GPU docker container, test environment, jupyter notebook, python script\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 33, "language": "en" } }, { "id": 60716, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/collector.py", "file_name": "collector.py", "fun_name": "_ensure_html_response", "commit_message": "upd; format", "code": "def _ensure_html_response(url, session):\n # type: (str, PipSession) -> None\n \n scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)\n if scheme not in {'http', 'https'}:\n raise _NotHTTP()\n\n resp = session.head(url, allow_redirects=True)\n raise_for_status(resp)\n\n _ensure_html_header(resp)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 58, "n_words": 30, "vocab_size": 29, "complexity": 2, "nloc": 7, "token_counts": 60, "n_ast_nodes": 98, "n_identifiers": 17, "random_cut": "def _ensure_html_response(url, session):\n # type: (str, PipSession) -> None\n \n scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)\n if scheme not in {'http', 'https'}:\n raise _NotHTTP()\n\n resp = session.head(url, allow_redirects=True)\n raise_for_status(resp)\n\n _ensure_html", "d_id": 12258, "documentation": { "docstring": "Send a HEAD request to the URL, and ensure the response contains HTML.\n\n Raises `_NotHTTP` if the URL is not available for a HEAD request, or\n `_NotHTML` if the content type is not text/html.\n ", "n_words": 34, "vocab_size": 26, "n_whitespaces": 43, "language": "en" } }, { "id": 21808, "commit_id": "8faa74cdc9da20cfdcc69f5ec29b91112c95b4c9", "repo": "pipenv", "path": "pipenv/vendor/tomlkit/parser.py", "file_name": "parser.py", "fun_name": "_parse_item", "commit_message": "Update tomlkit==0.9.2\n\nUsed:\n\n python -m invoke vendoring.update --package=tomlkit", "code": "def _parse_item(self) -> Optional[Tuple[Optional[Key], Item]]:\n \n self.mark()\n with self._state as state:\n while True:\n c = 
self._current\n if c == \"\\n\":\n # Found a newline; Return all whitespace found up to this point.\n self.inc()\n\n return None, Whitespace(self.extract())\n elif c in \" \\t\\r\":\n # Skip whitespace.\n if not self.inc():\n return None, Whitespace(self.extract())\n elif c == \"#\":\n # Found a comment, parse it\n indent = self.extract()\n cws, comment, trail = self._parse_comment_trail()\n\n return None, Comment(Trivia(indent, cws, comment, trail))\n elif c == \"[\":\n # Found a table, delegate to the calling function.\n return\n else:\n # Begining of a KV pair.\n # Return to beginning of whitespace so it gets included\n # as indentation for the KV about to be parsed.\n state.restore = True\n break\n\n return self._parse_key_value(True)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 576, "n_words": 120, "vocab_size": 77, "complexity": 7, "nloc": 25, "token_counts": 144, "n_ast_nodes": 253, "n_identifiers": 23, "random_cut": "def _parse_item(self) -> Optional[Tuple[Optional[Key], Item]]:\n \n self.mark()\n with self._state as state:\n while True:\n c = self._current\n if c", "d_id": 4051, "documentation": { "docstring": "\n Attempts to parse the next item and returns it, along with its key\n if the item is value-like.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 200570, "commit_id": "49222e1e3645ca6948277b8e1a9b526daeb0948d", "repo": "sympy", "path": "sympy/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "_xreplace", "commit_message": "Add TensMul._xreplace to dedupe dummy indices\n\nThis apparently should not be done.\nhttps://github.com/sympy/sympy/pull/24333#issuecomment-1333783127", "code": "def _xreplace(self, rule):\n \n if self in rule:\n return rule[self], True\n elif rule:\n rule = self._dedupe_indices_in_rule(rule)\n args = []\n changed = False\n for a in self.args:\n _xreplace = getattr(a, '_xreplace', None)\n if _xreplace is not None:\n a_xr = _xreplace(rule)\n args.append(a_xr[0])\n changed |= a_xr[1]\n else:\n args.append(a)\n args = tuple(args)\n if changed:\n return self.func(*args), True\n return self, False\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 296, "n_words": 55, "vocab_size": 39, "complexity": 6, "nloc": 19, "token_counts": 113, "n_ast_nodes": 182, "n_identifiers": 12, "random_cut": "def _xreplace(self, rule):\n \n if self in rule:\n return rule[self], True\n elif rule:\n rule = self._dedupe_indices_in_rule(rule)\n args = []\n changed = False\n for a in self.args:\n _xreplace = getattr(a, '_xreplace', None)\n if _xreplace is not None:\n a_xr = _xreplace(rule)\n args.append(a_xr[0])\n changed |= a_xr[1]\n else:\n args.append(a)\n args = tuple(args)\n if changed:\n r", "d_id": 49705, "documentation": { "docstring": "\n Helper for xreplace. 
Tracks whether a replacement actually occurred.\n\n Given that the rule has entries {old:new, ...}, this handles the fact\n that if a dummy index in new is the same as an index in self, the\n dummy index in new must be renamed.\n ", "n_words": 44, "vocab_size": 33, "n_whitespaces": 80, "language": "en" } }, { "id": 305646, "commit_id": "6355e682fa4aeb526570597d919ad1fb76755b9a", "repo": "core", "path": "homeassistant/components/mpd/media_player.py", "file_name": "media_player.py", "fun_name": "async_turn_on", "commit_message": "Improve entity type hints [m] (#77816)", "code": "async def async_turn_on(self) -> None:\n \n await self._client.play()\n await self._update_playlists(no_throttle=True)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 25, "n_ast_nodes": 46, "n_identifiers": 6, "random_cut": "async def async_turn_on(self) -> None:\n \n await self._client.play()\n await self._update_playlists(no_throttle=True)\n", "d_id": 104430, "documentation": { "docstring": "Service to send the MPD the command to start playing.", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 270345, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/distributed_training_utils_v1.py", "file_name": "distributed_training_utils_v1.py", "fun_name": "_build_network_on_replica", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _build_network_on_replica(model, mode, inputs=None, targets=None):\n \n # Need to do imports here since we run into a circular dependency error.\n from keras import models # pylint: disable=g-import-not-at-top\n from keras.engine import sequential # pylint: disable=g-import-not-at-top\n\n # We rely on the internal methods to avoid having share_weights weights in the\n # public API.\n if isinstance(model, sequential.Sequential):\n updated_model = models._clone_sequential_model(\n model, input_tensors=inputs, layer_fn=models.share_weights\n )\n else:\n updated_model = models._clone_functional_model(\n model, input_tensors=inputs, layer_fn=models.share_weights\n )\n # Callable losses added directly to a functional Model need to be added\n # here.\n updated_model._callable_losses = model._callable_losses\n\n # Recast all low precision outputs back to float32 since we only casted\n # the inputs to bfloat16 and not targets. 
This is done so that we can preserve\n # precision when calculating the loss value.", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 228, "n_words": 122, "vocab_size": 88, "complexity": 6, "nloc": 33, "token_counts": 188, "n_ast_nodes": 133, "n_identifiers": 18, "random_cut": "def _build_network_on_replica(model, mode, inputs=None, targets=None):\n \n # Need to do imports here since we run into a circular dependency error.\n from keras import models # pylint: disable=g-import-not-at-top\n from keras.engine import sequential # pylint: disable=g-import-not-at-top\n\n # We rely on the internal methods to avoid having share_weights weights in the\n # public API.\n if isinstance(model, sequential.Sequential):\n updated_model = models._clone_sequential_model(\n model, input_tensors=inputs, layer_fn=models.share_weights\n )\n else:\n updated_model = models._clone_functional_model(\n model, input_tensors=inputs, layer_fn=models.share_weights\n )\n # Callable losses added directly to a functional Model need to be added\n # here.\n updated_model._callable_losses = model._callable_losses\n\n # Recast all low precision outputs back to float32 since we only casted\n # the inputs to bfloat16 and not targets. This is done so that we can preserve\n # precision when calculating the loss value.", "d_id": 80442, "documentation": { "docstring": "Build an updated model on replicas.\n\n We create a new Keras model while sharing the variables from the old graph.\n Building a new sub-graph is required since the original keras model creates\n placeholders for the input and the output that are not accessible till we\n call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.\n\n The sharing of weights and layers between the old and the new model guarantee\n that we're using Strategy variables and any updates on either model are\n reflected correctly in callbacks and loop iterations.\n\n We need to make sure we share the optimizers between the old and the new model\n as well so that optimizer state is not lost if the user is running fit\n multiple times.\n\n Args:\n model: Model to be replicated across Replicas\n mode: Which of fit/eval/predict is building the distributed network\n inputs: Input variables to be passed to the model\n targets: Target tensor to be passed to model.compile\n\n Returns:\n A new model with shared layers with the old model.\n ", "n_words": 163, "vocab_size": 103, "n_whitespaces": 227, "language": "en" } }, { "id": 280800, "commit_id": "1b32391798a952176b733660c940b1589c2fc8a4", "repo": "keras", "path": "keras/utils/tf_utils.py", "file_name": "tf_utils.py", "fun_name": "can_jit_compile", "commit_message": "Set `jit_compile` only when TensorFlow XLA is available for the platform.\n\nFixes issue of using new optimizers on Mac M1 as TF on Mac M1 is not built with XLA.\n\nPiperOrigin-RevId: 497158007", "code": "def can_jit_compile(warn=False):\n \n if platform.system() == \"Darwin\" and \"arm\" in platform.processor().lower():\n if warn:\n logging.warning(\n \"Tensorflow is not compiled with XLA on Mac M1 Arm processors, \"\n \"so cannot set `jit_compile` to True.\"\n )\n return False\n return True\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 111, "n_words": 36, "vocab_size": 34, "complexity": 4, "nloc": 9, "token_counts": 43, "n_ast_nodes": 82, "n_identifiers": 8, "random_cut": 
"def can_jit_compile(warn=False):\n \n if platform.system() == \"Darwin\" and \"arm\" in pla", "d_id": 83438, "documentation": { "docstring": "Returns True if TensorFlow XLA is available for the platform.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 128633, "commit_id": "23b3a599b9df8100558c477e94b0b19b1a38ac27", "repo": "ray", "path": "rllib/algorithms/algorithm.py", "file_name": "algorithm.py", "fun_name": "__setstate__", "commit_message": "[RLlib] Algorithm/Policy checkpoint overhaul and Policy Model export (in native formats). (#28166)", "code": "def __setstate__(self, state) -> None:\n \n # TODO (sven): Validate that our config and the config in state are compatible.\n # For example, the model architectures may differ.\n # Also, what should the behavior be if e.g. some training parameter\n # (e.g. lr) changed?\n\n if hasattr(self, \"workers\") and \"worker\" in state:\n self.workers.local_worker().set_state(state[\"worker\"])\n remote_state = ray.put(state[\"worker\"])\n for r in self.workers.remote_workers():\n r.set_state.remote(remote_state)\n if self.evaluation_workers:\n # If evaluation workers are used, also restore the policies\n # there in case they are used for evaluation purpose.\n for r in self.evaluation_workers.remote_workers():\n r.set_state.remote(remote_state)\n # If necessary, restore replay data as well.\n if self.local_replay_buffer is not None:\n # TODO: Experimental functionality: Restore contents of replay\n # buffer from checkpoint, only if user has configured this.\n if self.config.get(\"store_buffer_in_checkpoints\"):\n if \"local_replay_buffer\" in state:\n self.local_replay_buffer.set_state(state[\"local_replay_buffer\"])\n else:\n logger.warning(\n \"`store_buffer_in_checkpoints` is True, but no replay \"\n \"data found in state!\"\n )\n elif \"local_replay_buffer\" in state and log_once(\n \"no_store_buffer_in_checkpoints_but_data_found\"\n ):\n logger.warning(\n \"`store_buffer_in_checkpoints` is False, but some replay \"\n \"data found in state!\"\n )\n\n if self.train_exec_impl is not None:\n self.train_exec_impl.shared_metrics.get().restore(state[\"train_exec_impl\"])\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 637, "n_words": 165, "vocab_size": 106, "complexity": 12, "nloc": 33, "token_counts": 176, "n_ast_nodes": 318, "n_identifiers": 23, "random_cut": "def __setstate__(self, state) -> None:\n \n # TODO (sven): Validate that our config and the config in state are compatible.\n # For example, the model architectures may differ.\n # Also, what should the behavior be if e.g. some training parameter\n # (e.g. 
lr) changed?\n\n if hasattr(self, \"workers\") and \"worker\" in state:\n self.workers.local_worker().set_state(state[\"worker\"])\n remote_state = ray.put(state[\"worker\"])\n for r in self.workers.remote_workers():\n r.set_state.remote(remote_state)\n if self.evaluation_workers:\n # If evaluation workers are used, also restore the policies\n # there in case they are used for evaluation purpose.\n for r in self.evaluation_workers.remote_workers():\n r.set_state.remote(remote_state)\n # If necessary, restore replay data as well.\n if self.local_replay_buffer is not None:\n # TODO: Experimental functionality: Restore contents of replay\n # buffer from checkpoint, only if user has configured this.\n if self.config.get(\"store_buffer_in_checkpoints\"):\n if \"local_replay_buffer\" in state:\n self.local_replay_buffer.set_state(state[\"local_replay_buffer\"])\n else:\n logger.warning(\n \"`store_buffer_in_checkpoints` is True, but no replay \"\n ", "d_id": 28767, "documentation": { "docstring": "Sets the algorithm to the provided state.\n\n Args:\n state: The state dict to restore this Algorithm instance to. `state` may\n have been returned by a call to an Algorithm's `__getstate__()` method.\n ", "n_words": 31, "vocab_size": 28, "n_whitespaces": 71, "language": "en" } }, { "id": 251891, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/http/test_http2.py", "file_name": "test_http2.py", "fun_name": "test_http2_client_aborts", "commit_message": "make it black!", "code": "def test_http2_client_aborts(tctx, stream, when, how):\n \n server = Placeholder(Server)\n flow = Placeholder(HTTPFlow)\n playbook, cff = start_h2_client(tctx)\n resp = Placeholder(bytes)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 33, "n_words": 18, "vocab_size": 15, "complexity": 12, "nloc": 86, "token_counts": 494, "n_ast_nodes": 62, "n_identifiers": 15, "random_cut": "def test_http2_client_aborts(tctx, stream, when, how):\n \n server = P", "d_id": 73880, "documentation": { "docstring": "\n Test handling of the case where a client aborts during request or response transmission.\n\n If the client aborts the request transmission, we must trigger an error hook,\n if the client disconnects during response transmission, no error hook is triggered.\n ", "n_words": 39, "vocab_size": 28, "n_whitespaces": 52, "language": "en" } }, { "id": 203421, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/options.py", "file_name": "options.py", "fun_name": "get_changelist_instance", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_changelist_instance(self, request):\n \n list_display = self.get_list_display(request)\n list_display_links = self.get_list_display_links(request, list_display)\n # Add the action checkboxes if any actions are available.\n if self.get_actions(request):\n list_display = [\"action_checkbox\", *list_display]\n sortable_by = self.get_sortable_by(request)\n ChangeList = self.get_changelist(request)\n return ChangeList(\n request,\n self.model,\n list_display,\n list_display_links,\n self.get_list_filter(request),\n self.date_hierarchy,\n self.get_search_fields(request),\n self.get_list_select_related(request),\n self.list_per_page,\n self.list_max_show_all,\n self.list_editable,\n self,\n sortable_by,\n self.search_help_text,\n )\n", "url": 
"https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 277, "n_words": 49, "vocab_size": 43, "complexity": 2, "nloc": 23, "token_counts": 117, "n_ast_nodes": 174, "n_identifiers": 21, "random_cut": "def get_changelist_instance(self, request):\n \n list_display = self.get_list_display(request)\n list_display_links = self.get_list_display_links(request, list_display)\n # Add the action checkboxes if any actions are available.\n if self.get_actions(request):\n list_display = [\"action_checkbox\", *list_display]\n sortable_by = self.get_sortable_by(request)\n ChangeList = self.get_changelist(request)\n return ChangeList(\n request,\n self.model,\n list_display,\n list_display_links,\n self.get_list_filter(request),\n self.date_hierarchy,\n self.get_search_fields(request),\n ", "d_id": 50366, "documentation": { "docstring": "\n Return a `ChangeList` instance based on `request`. May raise\n `IncorrectLookupParameters`.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 32, "language": "en" } }, { "id": 120516, "commit_id": "db73670ec3fc72f75e6f832351620ac79e9b0c6f", "repo": "jax", "path": "jax/_src/lax/qdwh.py", "file_name": "qdwh.py", "fun_name": "_use_cholesky", "commit_message": "Add support for padded arrays in QDWH algorithm.\n\nThis change is in preparation for adding a jit-table QDWH-eig implementation.\n\nPiperOrigin-RevId: 448571523", "code": "def _use_cholesky(u, m, n, params):\n \n a, b, c = params\n _, N = u.shape\n x = c * (u.T.conj() @ u) + jnp.eye(N, dtype=jnp.dtype(u))\n # Pads the lower-right corner with the identity matrix to prevent the Cholesky\n # decomposition from failing due to the matrix not being PSD if padded with\n # zeros.\n x = _mask(x, (n, n), jnp.eye(N, dtype=x.dtype))\n\n # `y` is lower triangular.\n y = lax_linalg.cholesky(x, symmetrize_input=False)\n\n z = lax_linalg.triangular_solve(\n y, u.T, left_side=True, lower=True, conjugate_a=True).conj()\n\n z = lax_linalg.triangular_solve(y, z, left_side=True, lower=True,\n transpose_a=True, conjugate_a=True).T.conj()\n\n e = b / c\n u = e * u + (a - e) * z\n return u\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 156, "n_words": 103, "vocab_size": 72, "complexity": 1, "nloc": 13, "token_counts": 174, "n_ast_nodes": 261, "n_identifiers": 29, "random_cut": "def _use_cholesky(u, m, n, params):\n \n a, b, c = params\n _, N = u.shape\n x = c * (u.T.conj() @ u) + jnp.eye(N, dtype=jnp.dtype(u))\n # Pads the lower-right corner with the identity matrix to prevent the Cholesky\n # decomposition from failing due to the matrix not being PSD if padded with\n # zeros.\n x = _mask(x, (n, n), jnp.eye(N, ", "d_id": 26878, "documentation": { "docstring": "QDWH iteration using Cholesky decomposition.\n\n Args:\n u: a matrix, with static (padded) shape M x N\n m, n: the dynamic shape of the matrix, where m <= M and n <= N.\n params: the QDWH parameters.\n ", "n_words": 36, "vocab_size": 29, "n_whitespaces": 41, "language": "en" } }, { "id": 195863, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/functions/elementary/piecewise.py", "file_name": "piecewise.py", "fun_name": "piecewise_integrate", "commit_message": "Improved documentation formatting", "code": "def piecewise_integrate(self, x, **kwargs):\n \n from sympy.integrals import integrate\n return self.func(*[(integrate(e, x, **kwargs), c) for e, c in self.args])\n", 
"url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 39, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 3, "token_counts": 47, "n_ast_nodes": 71, "n_identifiers": 11, "random_cut": "def piecewise_integrate(self, x, **kwargs):\n \n from sympy.integrals import integrate\n return self.func(*[(integrate(e, x, **kwargs), c) for e, c in self.args])\n", "d_id": 47450, "documentation": { "docstring": "Return the Piecewise with each expression being\n replaced with its antiderivative. To obtain a continuous\n antiderivative, use the :func:`~.integrate` function or method.\n\n Examples\n ========\n\n >>> from sympy import Piecewise\n >>> from sympy.abc import x\n >>> p = Piecewise((0, x < 0), (1, x < 1), (2, True))\n >>> p.piecewise_integrate(x)\n Piecewise((0, x < 0), (x, x < 1), (2*x, True))\n\n Note that this does not give a continuous function, e.g.\n at x = 1 the 3rd condition applies and the antiderivative\n there is 2*x so the value of the antiderivative is 2:\n\n >>> anti = _\n >>> anti.subs(x, 1)\n 2\n\n The continuous derivative accounts for the integral *up to*\n the point of interest, however:\n\n >>> p.integrate(x)\n Piecewise((0, x < 0), (x, x < 1), (2*x - 1, True))\n >>> _.subs(x, 1)\n 1\n\n See Also\n ========\n Piecewise._eval_integral\n ", "n_words": 135, "vocab_size": 85, "n_whitespaces": 310, "language": "en" } }, { "id": 173321, "commit_id": "fbac3e38ac116855b930ee60fb3c997337ae17b7", "repo": "calibre-web", "path": "cps/helper.py", "file_name": "helper.py", "fun_name": "check_send_to_ereader", "commit_message": "Eenabled send epubs to E-Reader devices", "code": "def check_send_to_ereader(entry):\n \n formats = list()\n book_formats = list()\n if len(entry.data):\n for ele in iter(entry.data):\n if ele.uncompressed_size < config.mail_size:\n formats.append(ele.format)\n if 'EPUB' in formats:\n book_formats.append({'format': 'Epub',\n 'convert': 0,\n 'text': _('Send %(format)s to E-Reader', format='Epub')})\n if 'MOBI' in formats:\n book_formats.append({'format': 'Mobi',\n 'convert': 0,\n 'text': _('Send %(format)s to E-Reader', format='Mobi')})\n if 'PDF' in formats:\n book_formats.append({'format': 'Pdf',\n 'convert': 0,\n 'text': _('Send %(format)s to E-Reader', format='Pdf')})\n if 'AZW' in formats:\n book_formats.append({'format': 'Azw',\n 'convert': 0,\n 'text': _('Send %(format)s to E-Reader', format='Azw')})\n if config.config_converterpath:\n book_formats.extend(check_send_to_ereader_with_converter(formats))\n return book_formats\n else:\n log.error(u'Cannot find book entry %d', entry.id)\n return None\n\n\n# Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return\n# list with supported formats", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 527, "n_words": 114, "vocab_size": 65, "complexity": 9, "nloc": 29, "token_counts": 202, "n_ast_nodes": 370, "n_identifiers": 21, "random_cut": "def check_send_to_ereader(entry):\n \n formats = list()\n book_formats = list()\n if len(entry.data):\n for ele in iter(entry.data):\n if ele.uncompressed_size < config.mail_size:\n formats.append(ele.format)\n if 'EPUB' in formats:\n book_formats.append({'format': 'Epub',\n 'convert': 0,\n 'text':", "d_id": 40835, "documentation": { "docstring": "\n returns all available book formats for sending to E-Reader\n ", "n_words": 9, "vocab_size": 9, 
"n_whitespaces": 20, "language": "en" } }, { "id": 197528, "commit_id": "7fe8e027ae1d7f683243c0229b961671a6cbb4c5", "repo": "sympy", "path": "sympy/stats/joint_rv_types.py", "file_name": "joint_rv_types.py", "fun_name": "Multinomial", "commit_message": "Improved some documentation in the stats module", "code": "def Multinomial(syms, n, *p):\n \n if not isinstance(p[0], list):\n p = (list(p), )\n return multivariate_rv(MultinomialDistribution, syms, n, p[0])\n\n#-------------------------------------------------------------------------------\n# Negative Multinomial Distribution --------------------------------------------\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 23, "vocab_size": 22, "complexity": 2, "nloc": 4, "token_counts": 46, "n_ast_nodes": 71, "n_identifiers": 8, "random_cut": "def Multinomial(syms, n, *p):\n \n if not isinstance(p[0], list):\n p = (list(p), )\n return multivariate_rv(MultinomialDistribution, syms, n, p[0])\n\n#-------------------------------------------------------------------------------\n# Negative", "d_id": 48606, "documentation": { "docstring": "\n Creates a discrete random variable with Multinomial Distribution.\n\n The density of the said distribution can be found at [1].\n\n Parameters\n ==========\n\n n : Positive integer\n Represents number of trials\n p : List of event probabilites\n Must be in the range of [0, 1]\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import density, Multinomial, marginal_distribution\n >>> from sympy import symbols\n >>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True)\n >>> p1, p2, p3 = symbols('p1, p2, p3', positive=True)\n >>> M = Multinomial('M', 3, p1, p2, p3)\n >>> density(M)(x1, x2, x3)\n Piecewise((6*p1**x1*p2**x2*p3**x3/(factorial(x1)*factorial(x2)*factorial(x3)),\n Eq(x1 + x2 + x3, 3)), (0, True))\n >>> marginal_distribution(M, M[0])(x1).subs(x1, 1)\n 3*p1*p2**2 + 6*p1*p2*p3 + 3*p1*p3**2\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Multinomial_distribution\n .. [2] http://mathworld.wolfram.com/MultinomialDistribution.html\n\n ", "n_words": 117, "vocab_size": 91, "n_whitespaces": 210, "language": "en" } }, { "id": 199984, "commit_id": "a0989bcfd26470833cf03737941bfd80f511c745", "repo": "sympy", "path": "sympy/physics/qho_1d.py", "file_name": "qho_1d.py", "fun_name": "psi_n", "commit_message": "applied backtick correction to the remainder of the project", "code": "def psi_n(n, x, m, omega):\n \n\n # sympify arguments\n n, x, m, omega = map(S, [n, x, m, omega])\n nu = m * omega / hbar\n # normalization coefficient\n C = (nu/pi)**Rational(1, 4) * sqrt(1/(2**n*factorial(n)))\n\n return C * exp(-nu* x**2 /2) * hermite(n, sqrt(nu)*x)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 64, "n_words": 43, "vocab_size": 31, "complexity": 1, "nloc": 5, "token_counts": 97, "n_ast_nodes": 146, "n_identifiers": 16, "random_cut": "def psi_n(n, x, m, omega):\n \n\n # sympify arguments\n n, x, m, omega = map(S, [n, x, m, omega])\n nu = m * omega / hbar\n # ", "d_id": 49473, "documentation": { "docstring": "\n Returns the wavefunction psi_{n} for the One-dimensional harmonic oscillator.\n\n Parameters\n ==========\n\n n :\n the \"nodal\" quantum number. Corresponds to the number of nodes in the\n wavefunction. 
``n >= 0``\n x :\n x coordinate.\n m :\n Mass of the particle.\n omega :\n Angular frequency of the oscillator.\n\n Examples\n ========\n\n >>> from sympy.physics.qho_1d import psi_n\n >>> from sympy.abc import m, x, omega\n >>> psi_n(0, x, m, omega)\n (m*omega)**(1/4)*exp(-m*omega*x**2/(2*hbar))/(hbar**(1/4)*pi**(1/4))\n\n ", "n_words": 66, "vocab_size": 46, "n_whitespaces": 146, "language": "en" } }, { "id": 218459, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getsourcefile", "commit_message": "add python 3.10.4 for windows", "code": "def getsourcefile(object):\n \n filename = getfile(object)\n all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]\n all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]\n if any(filename.endswith(s) for s in all_bytecode_suffixes):\n filename = (os.path.splitext(filename)[0] +\n importlib.machinery.SOURCE_SUFFIXES[0])\n elif any(filename.endswith(s) for s in\n importlib.machinery.EXTENSION_SUFFIXES):\n return None\n if os.path.exists(filename):\n return filename\n # only return a non-existent filename if the module has a PEP 302 loader\n module = getmodule(object, filename)\n if getattr(module, '__loader__', None) is not None:\n return filename\n elif getattr(getattr(module, \"__spec__\", None), \"loader\", None) is not None:\n return filename\n # or it is in the linecache\n elif filename in linecache.cache:\n return filename\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 201, "n_words": 85, "vocab_size": 49, "complexity": 9, "nloc": 19, "token_counts": 158, "n_ast_nodes": 250, "n_identifiers": 23, "random_cut": "def getsourcefile(object):\n \n filename = getfile(object)\n all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]\n all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]\n if any(filename.endswith(s) for s in all_bytecode_suffixes):\n filename = (os.path.splitext(filename)[0] +\n importlib.machinery.SOURCE_SUFFIXES[0])\n elif any(filename.endswith(s) for s in\n ", "d_id": 55321, "documentation": { "docstring": "Return the filename that can be used to locate an object's source.\n Return None if no way can be identified to get the source.\n ", "n_words": 24, "vocab_size": 18, "n_whitespaces": 30, "language": "en" } }, { "id": 218024, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/_abc.py", "file_name": "_abc.py", "fun_name": "module_repr", "commit_message": "add python 3.10.4 for windows", "code": "def module_repr(self, module):\n \n warnings.warn(\"importlib.abc.Loader.module_repr() is deprecated and \"\n \"slated for removal in Python 3.12\", DeprecationWarning)\n # The exception will cause ModuleType.__repr__ to ignore this method.\n raise NotImplementedError\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 76, "n_words": 27, "vocab_size": 27, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def module_repr(self, module):\n \n warnings.warn(\"importlib.abc.Loader.module_repr() is deprecated and \"\n \"slated for removal in Python 3.12\", DeprecationWarning", "d_id": 55086, "documentation": { "docstring": "Return a module's repr.\n\n Used by the module type 
when the method does not raise\n NotImplementedError.\n\n This method is deprecated.\n\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 48, "language": "en" } }, { "id": 249509, "commit_id": "8ae42ab8fa3c6b52d74c24daa7ca75a478fa4fbb", "repo": "synapse", "path": "tests/push/test_email.py", "file_name": "test_email.py", "fun_name": "test_need_validated_email", "commit_message": "Support enabling/disabling pushers (from MSC3881) (#13799)\n\nPartial implementation of MSC3881", "code": "def test_need_validated_email(self):\n \n with self.assertRaises(SynapseError) as cm:\n self.get_success_or_raise(\n self.hs.get_pusherpool().add_or_update_pusher(\n user_id=self.user_id,\n access_token=self.token_id,\n kind=\"email\",\n app_id=\"m.email\",\n app_display_name=\"Email Notifications\",\n device_display_name=\"b@example.com\",\n pushkey=\"b@example.com\",\n lang=None,\n data={},\n )\n )\n\n self.assertEqual(400, cm.exception.code)\n self.assertEqual(Codes.THREEPID_NOT_FOUND, cm.exception.errcode)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 275, "n_words": 24, "vocab_size": 23, "complexity": 1, "nloc": 17, "token_counts": 99, "n_ast_nodes": 161, "n_identifiers": 25, "random_cut": "def test_need_validated_email(self):\n \n with", "d_id": 72960, "documentation": { "docstring": "Test that we can only add an email pusher if the user has validated\n their email.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 30, "language": "en" } }, { "id": 319962, "commit_id": "6d5d308d6c7b7e359ba72964a300634e1065ace9", "repo": "paperless-ngx", "path": "src/documents/tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_get_existing_comments", "commit_message": "Starts on implementing tests for the new API", "code": "def test_get_existing_comments(self):\n \n doc = Document.objects.create(\n title=\"test\",\n mime_type=\"application/pdf\",\n content=\"this is a document which will have comments!\",\n )\n comment = Comment.objects.create(\n comment=\"This is a comment.\",\n document=doc,\n user=self.user,\n )\n\n response = self.client.get(\n f\"/api/documents/{doc.pk}/comments/\",\n format=\"json\",\n )\n\n self.assertEqual(response.status_code, 200)\n\n resp_data = response.json()\n\n self.assertEqual(len(resp_data), 1)\n\n resp_data = resp_data[0]\n del resp_data[\"created\"]\n\n self.assertDictEqual(\n resp_data,\n {\n \"id\": comment.id,\n \"comment\": comment.comment,\n \"user\": {\n \"id\": comment.user.id,\n \"username\": comment.user.username,\n \"firstname\": comment.user.first_name,\n \"lastname\": comment.user.last_name,\n },\n },\n )\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 419, "n_words": 64, "vocab_size": 51, "complexity": 1, "nloc": 33, "token_counts": 164, "n_ast_nodes": 275, "n_identifiers": 28, "random_cut": "def test_get_existing_comments(self):\n \n doc = Document.objects.create(\n title=\"test\",\n mime_type=\"application/pdf\",\n content=\"this is a document which will have comments!\",\n )\n comment = Comment.objects.create(\n comment=\"This is a comment.\",\n document=doc,\n user=self.user,\n )\n\n response = self.client.get(\n f\"/api/documents/{doc.pk}/comments/\",\n format=\"json\",\n )\n\n self.assertEqual(response.status_code, 200)\n\n resp_data = response.json()\n\n self.assertEqual(len(resp_data), 1)\n\n resp_data = resp_data[0]\n del 
resp_data[\"created\"]\n\n self.assertDictEqual(\n resp_data,\n ", "d_id": 117028, "documentation": { "docstring": "\n GIVEN:\n - A document with a single comment\n WHEN:\n - API reuqest for document comments is made\n THEN:\n - The associated comment is returned\n ", "n_words": 24, "vocab_size": 19, "n_whitespaces": 86, "language": "en" } }, { "id": 262565, "commit_id": "2c9f00a808e0aa76a82af2e8b325abb71f50d1df", "repo": "TTS", "path": "TTS/vocoder/datasets/wavegrad_dataset.py", "file_name": "wavegrad_dataset.py", "fun_name": "collate_full_clips", "commit_message": "Fix tune wavegrad (#1844)\n\n* fix imports in tune_wavegrad\r\n\r\n* load_config returns Coqpit object instead None\r\n\r\n* set action (store true) for flag \"--use_cuda\"; start to tune if module is running as the main program\r\n\r\n* fix var order in the result of batch collating\r\n\r\n* make style\r\n\r\n* make style with black and isort", "code": "def collate_full_clips(batch):\n \n max_mel_length = max([b[0].shape[1] for b in batch]) if len(batch) > 1 else batch[0][0].shape[1]\n max_audio_length = max([b[1].shape[0] for b in batch]) if len(batch) > 1 else batch[0][1].shape[0]\n\n mels = torch.zeros([len(batch), batch[0][0].shape[0], max_mel_length])\n audios = torch.zeros([len(batch), max_audio_length])\n\n for idx, b in enumerate(batch):\n mel = b[0]\n audio = b[1]\n mels[idx, :, : mel.shape[1]] = mel\n audios[idx, : audio.shape[0]] = audio\n\n return mels, audios\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 155, "n_words": 62, "vocab_size": 38, "complexity": 6, "nloc": 11, "token_counts": 185, "n_ast_nodes": 272, "n_identifiers": 16, "random_cut": "def collate_full_clips(batch):\n \n max_mel_length = max([b[0].sh", "d_id": 77276, "documentation": { "docstring": "This is used in tune_wavegrad.py.\n It pads sequences to the max length.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 18, "language": "en" } }, { "id": 160178, "commit_id": "f404e9e92e87a3990712d723d5c562a89300ac01", "repo": "numpy", "path": "numpy/distutils/ccompiler_opt.py", "file_name": "ccompiler_opt.py", "fun_name": "feature_test", "commit_message": "Add space after argument name", "code": "def feature_test(self, name, force_flags=None, macros=[]):\n \n if force_flags is None:\n force_flags = self.feature_flags(name)\n\n self.dist_log(\n \"testing feature '%s' with flags (%s)\" % (\n name, ' '.join(force_flags)\n ))\n # Each CPU feature must have C source code contains at\n # least one intrinsic or instruction related to this feature.\n test_path = os.path.join(\n self.conf_check_path, \"cpu_%s.c\" % name.lower()\n )\n if not os.path.exists(test_path):\n self.dist_fatal(\"feature test file is not exist\", test_path)\n\n test = self.dist_test(\n test_path, force_flags + self.cc_flags[\"werror\"], macros=macros\n )\n if not test:\n self.dist_log(\"testing failed\", stderr=True)\n return test\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 249, "n_words": 81, "vocab_size": 65, "complexity": 4, "nloc": 18, "token_counts": 123, "n_ast_nodes": 204, "n_identifiers": 19, "random_cut": "def feature_test(self, name, force_flags=None, macros=[]):\n \n if force_flags is None:\n force_flags = self.feature_flags(name)\n\n self.dist_log(\n \"testing feature '%s' with flags (%s)\" % (\n name, ' '.join(force_flags)\n ))\n # Each CPU feature must have C source code contains 
at\n # ", "d_id": 38550, "documentation": { "docstring": "\n Test a certain CPU feature against the compiler through its own\n check file.\n\n Parameters\n ----------\n name : str\n Supported CPU feature name.\n\n force_flags : list or None, optional\n If None(default), the returned flags from `feature_flags()`\n will be used.\n\n macros : list of tuples, optional\n A list of C macro definitions.\n ", "n_words": 50, "vocab_size": 41, "n_whitespaces": 151, "language": "en" } }, { "id": 189987, "commit_id": "309c9d41eb734ca85a7aea5533f88a6d4ee7c944", "repo": "manim", "path": "manim/mobject/svg/svg_mobject.py", "file_name": "svg_mobject.py", "fun_name": "get_file_path", "commit_message": "Ported improved implementation of :class:`.SVGMobject` from 3b1b/manim (#2898)\n\n* port SVGMobject from 3b1b/manim\r\n\r\n* added svgelements as dependency\r\n\r\n* revert change of default values\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* set default stroke_width of svg elements to 0 if not set\r\n\r\n* fix handling of circles with different rx/ry\r\n\r\n* turn more methods into staticmethods\r\n\r\n* removed duplicated method\r\n\r\n* set/adapt stroke-width of some test SVGs\r\n\r\n* updated control data\r\n\r\n* forgot some control data\r\n\r\n* fixed init_colors in tex_mobject and text_mobject\r\n\r\n* minor changes, added docstrings\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* module docstring, removed import\r\n\r\n* vector_to_coords changed again\r\n\r\n* nail sphinx version to below 5.1 to fix rtd (?)\r\n\r\n* update test_text control data for science\r\n\r\n* changed Brace to use VMobjectFromSVGPath\r\n\r\n* remove unused classes and methods depending on old SVG path implementation\r\n\r\n* remove style_utils and svg_path modules\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* change test_text to use monospace font\r\n\r\n* restore geometry.polygram\r\n\r\n* added get_mobject_type_class auxiliary method; changed polyline implementation to ad-hoc approach\r\n\r\n* restore test_text to previous version\r\n\r\n* skip Use tags as svgelements already populates them\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def get_file_path(self) -> str:\n \n if self.file_name is None:\n raise ValueError(\"Must specify file for SVGMobject\")\n return get_full_vector_image_path(self.file_name)\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 5, "token_counts": 27, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "def get_file_path(self) -> str:\n \n if self.file_name is None:\n raise ValueError(\"Must specify file for SVGMobject\")\n return get_full_vector_image_path(self.f", "d_id": 46282, "documentation": { "docstring": "Search for an existing file based on the specified file name.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 248340, "commit_id": "177b884ad7cc1ecdd92ff74188732734df203150", "repo": "synapse", "path": "tests/replication/_base.py", "file_name": "_base.py", "fun_name": "default_config", "commit_message": "Lay some foundation work to allow workers to only subscribe to some kinds of messages, 
reducing replication traffic. (#12672)", "code": "def default_config(self) -> Dict[str, Any]:\n \n base = super().default_config()\n base[\"redis\"] = {\"enabled\": True}\n return base\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 42, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 10, "token_counts": 34, "n_ast_nodes": 60, "n_identifiers": 7, "random_cut": "def default_config(self) -> Dict[str, Any]:\n \n base = super().default_config()\n base[\"redis\"] = {\"enabled\": True}\n ", "d_id": 72223, "documentation": { "docstring": "\n Overrides the default config to enable Redis.\n Even if the test only uses make_worker_hs, the main process needs Redis\n enabled otherwise it won't create a Fake Redis server to listen on the\n Redis port and accept fake TCP connections.\n ", "n_words": 39, "vocab_size": 33, "n_whitespaces": 75, "language": "en" } }, { "id": 177853, "commit_id": "50583d9ed6bfd2a837d0168e1690529a31efa2f7", "repo": "label-studio", "path": "label_studio/tasks/models.py", "file_name": "models.py", "fun_name": "delete_project_summary_annotations_before_updating_annotation", "commit_message": "fix: DEV-2406: Fix counters for skipped annotations (#2364)\n\n* fix: DEV-2406: Fix counters for skipped annotations\r\n\r\n* Fixes\r\n\r\nCo-authored-by: makseq-ubnt ", "code": "def delete_project_summary_annotations_before_updating_annotation(sender, instance, **kwargs):\n \n try:\n old_annotation = sender.objects.get(id=instance.id)\n except Annotation.DoesNotExist:\n # annotation just created - do nothing\n return\n old_annotation.decrease_project_summary_counters()\n\n # update task counters if annotation changes it's was_cancelled status\n task = instance.task\n if old_annotation.was_cancelled != instance.was_cancelled:\n if instance.was_cancelled:\n task.cancelled_annotations = task.cancelled_annotations + 1\n task.total_annotations = task.total_annotations - 1\n else:\n task.cancelled_annotations = task.cancelled_annotations - 1\n task.total_annotations = task.total_annotations + 1\n task.update_is_labeled()\n\n Task.objects.filter(id=instance.task.id).update(\n is_labeled=task.is_labeled,\n total_annotations=task.total_annotations,\n cancelled_annotations=task.cancelled_annotations\n )\n\n\n@receiver(post_save, sender=Annotation)", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "@receiver(post_save, sender=Annotation)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 220, "n_words": 67, "vocab_size": 44, "complexity": 4, "nloc": 20, "token_counts": 135, "n_ast_nodes": 231, "n_identifiers": 22, "random_cut": "def delete_project_summary_annotations_before_updating_annotation(sender, instance, **kwargs):\n \n try:\n old_annotation = sender.objects.get(id=instance.id)\n except Annotation.DoesNotExist:\n # annotation just created - do nothing\n return\n old_annotation.decrease_project_summary_counters()\n\n # update task counters if annotation changes it's was_cancelled status\n task = instance.task\n if old_annotation.was_cancelled != instance.was_cancelled:\n if instance.was_cancelled:\n task.cancelled_annotations = task.cancelled_annotations + 1\n task.total_annotations = task.total_annotations - 1\n else:\n task.cancelled_annotations = task.cancelled_annotations - 1\n task.total_annotations = task.total_annotations + 1\n task.update_is_labeled()\n\n Task.objects.filter(id=instance.task.id).update(\n 
is_labeled=task.is_labeled,\n total_annot", "d_id": 42524, "documentation": { "docstring": "Before updating annotation fields - ensure previous info removed from project.summary", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 50991, "commit_id": "6b42963d62833925ffed1cdb73400e7d528a5353", "repo": "PaddleHub", "path": "modules/image/keypoint_detection/hand_pose_localization/model.py", "file_name": "model.py", "fun_name": "load_config", "commit_message": "update hand_pose_localization (#1967)\n\n* update hand_pose_localization\r\n\r\n* add clean func", "code": "def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads):\r\n \r\n # 对运行位置进行配置\r\n if use_gpu:\r\n try:\r\n int(os.environ.get('CUDA_VISIBLE_DEVICES'))\r\n except Exception:\r\n print(\r\n )\r\n use_gpu = False\r\n\r\n if os.path.isdir(modelpath):\r\n if os.path.exists(os.path.join(modelpath, \"__params__\")):\r\n # __model__ + __params__\r\n model = os.path.join(modelpath, \"__model__\")\r\n params = os.path.join(modelpath, \"__params__\")\r\n config = Config(model, params)\r\n elif os.path.exists(os.path.join(modelpath, \"params\")):\r\n # model + params\r\n model = os.path.join(modelpath, \"model\")\r\n params = os.path.join(modelpath, \"params\")\r\n config = Config(model, params)\r\n elif os.path.exists(os.path.join(modelpath, \"__model__\")):\r\n # __model__ + others\r\n config = Config(modelpath)\r\n else:\r\n raise Exception(\r\n \"Error! Can\\'t find the model in: %s. Please check your model path.\" % os.path.abspath(modelpath))\r\n elif os.path.exists(modelpath + \".pdmodel\"):\r\n # *.pdmodel + *.pdiparams\r\n model = modelpath + \".pdmodel\"\r\n params = modelpath + \".pdiparams\"\r\n config = Config(model, params)\r\n elif isinstance(modelpath, Config):\r\n config = modelpath\r\n else:\r\n raise Exception(\r\n \"Error! Can\\'t find the model in: %s. Please check your model path.\" % os.path.abspath(modelpath))\r\n\r\n # 设置参数\r\n if use_gpu:\r\n config.enable_use_gpu(100, gpu_id)\r\n else:\r\n config.disable_gpu()\r\n config.set_cpu_math_library_num_threads(cpu_threads)\r\n if use_mkldnn:\r\n config.enable_mkldnn()\r\n\r\n config.disable_glog_info()\r\n\r\n # 返回配置\r\n return config\r\n\r\n # 预测器创建函数\r", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 699, "n_words": 151, "vocab_size": 76, "complexity": 11, "nloc": 40, "token_counts": 291, "n_ast_nodes": 496, "n_identifiers": 28, "random_cut": "def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads):\r\n \r\n # 对运行位置进行配置\r\n if use_gpu:\r\n ", "d_id": 10250, "documentation": { "docstring": "\r\n load the model config\r\n modelpath: inference model path\r\n use_gpu: use gpu or not\r\n use_mkldnn: use mkldnn or not\r\n Error! Unable to use GPU. Please set the environment variables \"CUDA_VISIBLE_DEVICES=GPU_id\" to use GPU. 
Now switch to CPU to continue...", "n_words": 38, "vocab_size": 27, "n_whitespaces": 73, "language": "en" } }, { "id": 88553, "commit_id": "67c8215ba3f246937fd7e1fbb02f33050a1c0456", "repo": "sentry", "path": "src/sentry/lang/javascript/processor_smcache.py", "file_name": "processor_smcache.py", "fun_name": "trim_line", "commit_message": "feat(processor): Use JavaScriptSmCacheStacktraceProcessor by default for internal projects (#41390)\n\nThis PR builds on top of https://github.com/getsentry/sentry/pull/40951/\r\nand prepares us for gradual rollout.", "code": "def trim_line(line, column=0):\n \n line = line.strip(\"\\n\")\n ll = len(line)\n if ll <= 150:\n return line\n if column > ll:\n column = ll\n start = max(column - 60, 0)\n # Round down if it brings us close to the edge\n if start < 5:\n start = 0\n end = min(start + 140, ll)\n # Round up to the end if it's close\n if end > ll - 5:\n end = ll\n # If we are bumped all the way to the end,\n # make sure we still get a full 140 characters in the line\n if end == ll:\n start = max(end - 140, 0)\n line = line[start:end]\n if end < ll:\n # we've snipped from the end\n line += \" {snip}\"\n if start > 0:\n # we've snipped from the beginning\n line = \"{snip} \" + line\n return line\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 256, "n_words": 139, "vocab_size": 68, "complexity": 8, "nloc": 21, "token_counts": 120, "n_ast_nodes": 204, "n_identifiers": 10, "random_cut": "def trim_line(line, column=0):\n \n line = line.strip(\"\\n\")\n ll = len(line)\n if ll <= 150:\n return line\n if column > ll:\n column = ll\n start = max(column - 60, 0)\n # Round down if it brings us close to the edge\n if start < 5:\n start = 0\n end = min(start + 140, ll)\n # Round up to the end if it's close\n if end > ll - 5:\n end = ll\n # If we are bumped all the way to the end,\n # make sure we still get a full 140 characters in the line\n if end == ll:\n start = max(end - 140, 0)\n line = line[start:end]\n if end < ll:\n # we've snipped from the end\n line += \" {snip}\"\n if start > 0:", "d_id": 18399, "documentation": { "docstring": "\n Trims a line down to a goal of 140 characters, with a little\n wiggle room to be sensible and tries to trim around the given\n `column`. 
So it tries to extract 60 characters before and after\n the provided `column` and yield a better context.\n ", "n_words": 44, "vocab_size": 34, "n_whitespaces": 60, "language": "en" } }, { "id": 247569, "commit_id": "ef3619e61d84493d98470eb2a69131d15eb1166b", "repo": "synapse", "path": "tests/storage/test_background_update.py", "file_name": "test_background_update.py", "fun_name": "test_background_update_default_batch_set_by_config", "commit_message": "Add config settings for background update parameters (#11980)", "code": "def test_background_update_default_batch_set_by_config(self):\n \n\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n values={\"update_name\": \"test_update\", \"progress_json\": '{\"my_key\": 1}'},\n )\n )\n\n self.update_handler.side_effect = self.update\n self.update_handler.reset_mock()\n res = self.get_success(\n self.updates.do_next_background_update(False),\n by=0.01,\n )\n self.assertFalse(res)\n\n # on the first call, we should get run with the default background update size specified in the config\n self.update_handler.assert_called_once_with({\"my_key\": 1}, 20)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 189, "n_words": 45, "vocab_size": 39, "complexity": 1, "nloc": 15, "token_counts": 92, "n_ast_nodes": 154, "n_identifiers": 17, "random_cut": "def test_background_update_default_batch_set_by_config(self):\n \n\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n values={\"update_name\": \"test_update\", \"progress_json\": '{\"my_key\": 1}'},\n )\n )\n\n self.update_handler.side_effect = self.update\n self.update_handler.reset_mock()\n res = self.get_success(\n self.updates.do_next_background_update(False),\n by=0.", "d_id": 71747, "documentation": { "docstring": "\n Test that the background update is run with the default_batch_size set by the config\n ", "n_words": 14, "vocab_size": 12, "n_whitespaces": 29, "language": "en" } }, { "id": 204304, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/sessions/backends/file.py", "file_name": "file.py", "fun_name": "_expiry_date", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _expiry_date(self, session_data):\n \n return session_data.get(\"_session_expiry\") or (\n self._last_modification()\n + datetime.timedelta(seconds=self.get_session_cookie_age())\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 54, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 36, "n_ast_nodes": 62, "n_identifiers": 9, "random_cut": "def _expiry_date(self, session_data):\n \n return session_data.get(\"_session_expiry\") or (\n self._last_modification()\n + datetime.timedelta(seconds=self.get_session_cookie_ag", "d_id": 50689, "documentation": { "docstring": "\n Return the expiry time of the file storing the session's content.\n ", "n_words": 11, "vocab_size": 9, "n_whitespaces": 26, "language": "en" } }, { "id": 32709, "commit_id": "0b8c1b6994082950044452a670e8417a5ebc2db0", "repo": "transformers", "path": "src/transformers/models/trocr/processing_trocr.py", "file_name": "processing_trocr.py", "fun_name": "__call__", "commit_message": "Change audio kwarg to images in TROCR processor (#18421)\n\nCo-authored-by: ydshieh ", "code": "def __call__(self, *args, **kwargs):\n \n 
# For backward compatibility\n if self._in_target_context_manager:\n return self.current_processor(*args, **kwargs)\n\n images = kwargs.pop(\"images\", None)\n text = kwargs.pop(\"text\", None)\n if len(args) > 0:\n images = args[0]\n args = args[1:]\n\n if images is None and text is None:\n raise ValueError(\"You need to specify either an `images` or `text` input to process.\")\n\n if images is not None:\n inputs = self.feature_extractor(images, *args, **kwargs)\n if text is not None:\n encodings = self.tokenizer(text, **kwargs)\n\n if text is None:\n return inputs\n elif images is None:\n return encodings\n else:\n inputs[\"labels\"] = encodings[\"input_ids\"]\n return inputs\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 283, "n_words": 89, "vocab_size": 50, "complexity": 9, "nloc": 21, "token_counts": 147, "n_ast_nodes": 240, "n_identifiers": 15, "random_cut": "def __call__(self, *args, **kwargs):\n \n # For backward compatibility\n if self._in_target_context_manager:\n return self.current_processor(*args, **kwargs)\n\n images = kwargs.pop(\"images\", None)\n text = kwargs.pop(\"text\", None)\n if len(args) > 0:\n images = args[0]\n args = args[1:]\n\n if images is None and text is None:\n raise ValueError(\"Yo", "d_id": 5973, "documentation": { "docstring": "\n When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's\n [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context\n [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's\n [`~TrOCRTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information.\n ", "n_words": 46, "vocab_size": 33, "n_whitespaces": 82, "language": "en" } }, { "id": 265638, "commit_id": "356ff457be08d5527920c617eb598f24a6edbc3d", "repo": "netbox", "path": "netbox/extras/reports.py", "file_name": "reports.py", "fun_name": "get_report", "commit_message": "Allow reports to be nested in submodules", "code": "def get_report(module_name, report_name):\n \n reports = get_reports()\n module = reports.get(module_name)\n\n if module is None:\n return None\n\n report = module.get(report_name)\n\n if report is None:\n return None\n\n return report\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 61, "n_words": 26, "vocab_size": 15, "complexity": 3, "nloc": 9, "token_counts": 45, "n_ast_nodes": 75, "n_identifiers": 8, "random_cut": "def get_report(module_name, report_name):\n \n reports = get_reports()\n module = reports.get(module_name)\n\n if module is None:\n return None\n\n report = module.get(report_name)\n\n if report is None:\n return None\n\n return report\n\n", "d_id": 78158, "documentation": { "docstring": "\n Return a specific report from within a module.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 15, "language": "en" } }, { "id": 248221, "commit_id": "699192fc1a1055a4bec2345bc80f120f28470c73", "repo": "synapse", "path": "tests/config/test_workers.py", "file_name": "test_workers.py", "fun_name": "test_worker_duty_configs", "commit_message": "Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. 
(#12654)\n\nCo-authored-by: Shay ", "code": "def test_worker_duty_configs(self) -> None:\n \n\n worker1_config = self._make_worker_config(\n worker_app=\"synapse.app.generic_worker\",\n worker_name=\"worker1\",\n extras={\n \"notify_appservices_from_worker\": \"worker2\",\n \"update_user_directory_from_worker\": \"worker1\",\n },\n )\n self.assertFalse(worker1_config.should_notify_appservices)\n self.assertTrue(worker1_config.should_update_user_directory)\n\n worker2_config = self._make_worker_config(\n worker_app=\"synapse.app.generic_worker\",\n worker_name=\"worker2\",\n extras={\n \"notify_appservices_from_worker\": \"worker2\",\n \"update_user_directory_from_worker\": \"worker1\",\n },\n )\n self.assertTrue(worker2_config.should_notify_appservices)\n self.assertFalse(worker2_config.should_update_user_directory)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 243, "n_words": 32, "vocab_size": 22, "complexity": 1, "nloc": 24, "token_counts": 96, "n_ast_nodes": 170, "n_identifiers": 12, "random_cut": "def test_worker_duty_configs(self) -> None:\n \n\n worker1_config = self._make_worker_config(\n worker_app=\"synapse.app.generic_worker\",\n worker_name=\"worker1\",\n extras={\n \"notify_appservice", "d_id": 72159, "documentation": { "docstring": "\n Additional tests for the worker duties\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 130818, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/runtime_context.py", "file_name": "runtime_context.py", "fun_name": "actor_id", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def actor_id(self):\n \n # only worker mode has actor_id\n assert (\n self.worker.mode == ray.worker.WORKER_MODE\n ), f\"This method is only available when the process is a\\\n worker. Current mode: {self.worker.mode}\"\n actor_id = self.worker.actor_id\n return actor_id if not actor_id.is_nil() else None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 107, "n_words": 38, "vocab_size": 34, "complexity": 2, "nloc": 7, "token_counts": 41, "n_ast_nodes": 78, "n_identifiers": 7, "random_cut": "def actor_id(self):\n \n # only worker mode has actor_id\n assert (\n self.worker.mode == ray.worker.", "d_id": 29383, "documentation": { "docstring": "Get the current actor ID in this worker.\n\n ID of the actor of the current process.\n This shouldn't be used in a driver process.\n\n Returns:\n The current actor id in this worker. 
None if there's no actor id.\n ", "n_words": 38, "vocab_size": 24, "n_whitespaces": 77, "language": "en" } }, { "id": 73502, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/tests/test_admin.py", "file_name": "test_admin.py", "fun_name": "test_redirect_to_current", "commit_message": "Reformat with black", "code": "def test_redirect_to_current(self):\n \n start_url = reverse(\"wagtailsettings:edit\", args=[\"tests\", \"testsetting\"])\n dest_url = reverse(\n \"wagtailsettings:edit\", args=[\"tests\", \"testsetting\", self.other_site.pk]\n )\n response = self.client.get(\n start_url, follow=True, HTTP_HOST=self.other_site.hostname\n )\n self.assertRedirects(\n response, dest_url, status_code=302, fetch_redirect_response=False\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 117, "n_words": 28, "vocab_size": 23, "complexity": 1, "nloc": 11, "token_counts": 78, "n_ast_nodes": 127, "n_identifiers": 17, "random_cut": "def test_redirect_to_current(self):\n \n start_url = reverse(\"wagtailsettings", "d_id": 16032, "documentation": { "docstring": "\n Should redirect to the setting for the current site taken from the URL,\n by default\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 37, "language": "en" } }, { "id": 151576, "commit_id": "217add70bd010cae584db5aa13a7d5e76011e2bd", "repo": "freqtrade", "path": "freqtrade/freqai/base_models/FreqaiMultiOutputClassifier.py", "file_name": "FreqaiMultiOutputClassifier.py", "fun_name": "fit", "commit_message": "add strat and config for testing on PR", "code": "def fit(self, X, y, sample_weight=None, fit_params=None):\n \n\n if not hasattr(self.estimator, \"fit\"):\n raise ValueError(\"The base estimator should implement a fit method\")\n\n y = self._validate_data(X=\"no_validation\", y=y, multi_output=True)\n\n if is_classifier(self):\n check_classification_targets(y)\n\n if y.ndim == 1:\n raise ValueError(\n \"y must have at least two dimensions for \"\n \"multi-output regression but has only one.\"\n )\n\n if sample_weight is not None and not has_fit_parameter(\n self.estimator, \"sample_weight\"\n ):\n raise ValueError(\"Underlying estimator does not support sample weights.\")\n\n if not fit_params:\n fit_params = [None] * y.shape[1]\n\n self.estimators_ = Parallel(n_jobs=self.n_jobs)(\n delayed(_fit_estimator)(\n self.estimator, X, y[:, i], sample_weight, **fit_params[i]\n )\n for i in range(y.shape[1])\n )\n\n self.classes_ = []\n for estimator in self.estimators_:\n self.classes_.extend(estimator.classes_)\n\n if hasattr(self.estimators_[0], \"n_features_in_\"):\n self.n_features_in_ = self.estimators_[0].n_features_in_\n if hasattr(self.estimators_[0], \"feature_names_in_\"):\n self.feature_names_in_ = self.estimators_[0].feature_names_in_\n\n return self\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 407, "n_words": 114, "vocab_size": 87, "complexity": 11, "nloc": 31, "token_counts": 239, "n_ast_nodes": 379, "n_identifiers": 27, "random_cut": "def fit(self, X, y, sample_weight=None, fit_params=None):\n \n\n if not hasattr(self.estimator, \"fit\"):\n raise ValueError(\"The base estimator should implement a fit method\")\n\n y = self._validate_data(X=\"no_validation\", y=y, multi_output=True)\n\n if is_classifier(self):\n check_classification_targets(y)\n\n if y.ndim == 1:\n raise 
ValueError(\n \"y must have at least two dimensions for \"\n \"multi-output regression but has only one.\"\n )\n\n if sample_weight is not None and not has_fit_parameter(\n self.estimator, \"sample_weight\"\n ):\n raise ValueError(\"Underlying estimator does not support sample weights.\")\n\n if not fit_params:\n fit_params = [None] * y.shape[1]\n\n self.estimators_ = Parallel(n_jobs=self.n_jobs)(\n delayed(_fit_estimator)(\n self.estimator, X, y[:, i], sample_weight, **fit_params[i]\n )\n ", "d_id": 35052, "documentation": { "docstring": "Fit the model to data, separately for each output variable.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n y : {array-like, sparse matrix} of shape (n_samples, n_outputs)\n Multi-output targets. An indicator matrix turns on multilabel\n estimation.\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If `None`, then samples are equally weighted.\n Only supported if the underlying classifier supports sample\n weights.\n fit_params : A list of dicts for the fit_params\n Parameters passed to the ``estimator.fit`` method of each step.\n Each dict may contain same or different values (e.g. different\n eval_sets or init_models)\n .. versionadded:: 0.23\n Returns\n -------\n self : object\n Returns a fitted instance.\n ", "n_words": 110, "vocab_size": 84, "n_whitespaces": 301, "language": "en" } }, { "id": 111721, "commit_id": "a36dc07e8d39ec4438fd660c98f6f4551ff5f4a6", "repo": "nni", "path": "nni/retiarii/nn/pytorch/api.py", "file_name": "api.py", "fun_name": "inner_choices", "commit_message": "Composition of `ValueChoice` (#4435)", "code": "def inner_choices(self) -> Iterable['ValueChoice']:\n \n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX):\n yield from arg.inner_choices()\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 14, "vocab_size": 14, "complexity": 3, "nloc": 9, "token_counts": 33, "n_ast_nodes": 56, "n_identifiers": 7, "random_cut": "def inner_choices(self) -> Iterable['ValueChoice']:\n \n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX):\n yield from arg.inner_choices()\n", "d_id": 24472, "documentation": { "docstring": "\n Return an iterable of all leaf value choices.\n Useful for composition of value choices.\n No deduplication on labels. 
Mutators should take care.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 51, "language": "en" } }, { "id": 200455, "commit_id": "24f1e7730119fe958cc8e28411f790c9a5ec04eb", "repo": "sympy", "path": "sympy/tensor/index_methods.py", "file_name": "index_methods.py", "fun_name": "get_indices", "commit_message": "Fix various typos\n\nFound via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`", "code": "def get_indices(expr):\n \n # We call ourself recursively to determine indices of sub expressions.\n\n # break recursion\n if isinstance(expr, Indexed):\n c = expr.indices\n inds, dummies = _remove_repeated(c)\n return inds, {}\n elif expr is None:\n return set(), {}\n elif isinstance(expr, Idx):\n return {expr}, {}\n elif expr.is_Atom:\n return set(), {}\n\n\n # recurse via specialized functions\n else:\n if expr.is_Mul:\n return _get_indices_Mul(expr)\n elif expr.is_Add:\n return _get_indices_Add(expr)\n elif expr.is_Pow or isinstance(expr, exp):\n return _get_indices_Pow(expr)\n\n elif isinstance(expr, Piecewise):\n # FIXME: No support for Piecewise yet\n return set(), {}\n elif isinstance(expr, Function):\n # Support ufunc like behaviour by returning indices from arguments.\n # Functions do not interpret repeated indices across arguments\n # as summation\n ind0 = set()\n for arg in expr.args:\n ind, sym = get_indices(arg)\n ind0 |= ind\n return ind0, sym\n\n # this test is expensive, so it should be at the end\n elif not expr.has(Indexed):\n return set(), {}\n raise NotImplementedError(\n \"FIXME: No specialized handling of type %s\" % type(expr))\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 451, "n_words": 152, "vocab_size": 102, "complexity": 13, "nloc": 30, "token_counts": 186, "n_ast_nodes": 311, "n_identifiers": 29, "random_cut": "def get_indices(expr):\n \n # We call ourself recursively to d", "d_id": 49661, "documentation": { "docstring": "Determine the outer indices of expression ``expr``\n\n By *outer* we mean indices that are not summation indices. Returns a set\n and a dict. The set contains outer indices and the dict contains\n information about index symmetries.\n\n Examples\n ========\n\n >>> from sympy.tensor.index_methods import get_indices\n >>> from sympy import symbols\n >>> from sympy.tensor import IndexedBase\n >>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])\n >>> i, j, a, z = symbols('i j a z', integer=True)\n\n The indices of the total expression is determined, Repeated indices imply a\n summation, for instance the trace of a matrix A:\n\n >>> get_indices(A[i, i])\n (set(), {})\n\n In the case of many terms, the terms are required to have identical\n outer indices. Else an IndexConformanceException is raised.\n\n >>> get_indices(x[i] + A[i, j]*y[j])\n ({i}, {})\n\n :Exceptions:\n\n An IndexConformanceException means that the terms ar not compatible, e.g.\n\n >>> get_indices(x[i] + y[j]) #doctest: +SKIP\n (...)\n IndexConformanceException: Indices are not consistent: x(i) + y(j)\n\n .. warning::\n The concept of *outer* indices applies recursively, starting on the deepest\n level. 
This implies that dummies inside parenthesis are assumed to be\n summed first, so that the following expression is handled gracefully:\n\n >>> get_indices((x[i] + A[i, j]*y[j])*x[j])\n ({i, j}, {})\n\n This is correct and may appear convenient, but you need to be careful\n with this as SymPy will happily .expand() the product, if requested. The\n resulting expression would mix the outer ``j`` with the dummies inside\n the parenthesis, which makes it a different expression. To be on the\n safe side, it is best to avoid such ambiguities by using unique indices\n for all contractions that should be held separate.\n\n ", "n_words": 263, "vocab_size": 172, "n_whitespaces": 433, "language": "en" } }, { "id": 61265, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "dist_location", "commit_message": "upd; format", "code": "def dist_location(dist):\n # type: (Distribution) -> str\n \n egg_link = egg_link_path(dist)\n if egg_link:\n return normalize_path(egg_link)\n return normalize_path(dist.location)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 38, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 27, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "def dist_location(dist):\n # type: (Distribution) -> str\n \n egg_link = egg_link_path(dist)\n if egg_link:\n return normalize_path(egg_lin", "d_id": 12478, "documentation": { "docstring": "\n Get the site-packages location of this distribution. Generally\n this is dist.location, except in the case of develop-installed\n packages, where dist.location is the source code location, and we\n want to know where the egg-link file is.\n\n The returned location is normalized (in particular, with symlinks removed).\n ", "n_words": 45, "vocab_size": 36, "n_whitespaces": 64, "language": "en" } }, { "id": 164908, "commit_id": "4bc68b39511fdf1dffe91bd315ffee9565b90d1a", "repo": "pandas", "path": "pandas/_testing/_io.py", "file_name": "_io.py", "fun_name": "can_connect", "commit_message": "TST: Check network URL statuses in tests (#45949)", "code": "def can_connect(url, error_classes=None):\n \n if error_classes is None:\n error_classes = _get_default_network_errors()\n\n try:\n with urlopen(url, timeout=20) as response:\n # Timeout just in case rate-limiting is applied\n if response.status != 200:\n return False\n except error_classes:\n return False\n else:\n return True\n\n\n# ------------------------------------------------------------------\n# File-IO\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 119, "n_words": 41, "vocab_size": 33, "complexity": 4, "nloc": 11, "token_counts": 52, "n_ast_nodes": 92, "n_identifiers": 8, "random_cut": "def can_connect(url, error_classes=None):\n \n if error_classes is None:\n error_classes = _get_default_network_errors()\n\n try:\n with urlopen(url, time", "d_id": 39618, "documentation": { "docstring": "\n Try to connect to the given url. 
True if succeeds, False if OSError\n raised\n\n Parameters\n ----------\n url : basestring\n The URL to try to connect to\n\n Returns\n -------\n connectable : bool\n Return True if no OSError (unable to connect) or URLError (bad url) was\n raised\n ", "n_words": 45, "vocab_size": 33, "n_whitespaces": 94, "language": "en" } }, { "id": 95842, "commit_id": "efb962b72c649c18c466afae41722384d111824b", "repo": "sentry", "path": "tests/sentry/incidents/endpoints/test_serializers.py", "file_name": "test_serializers.py", "fun_name": "test_valid_slack_channel_id", "commit_message": "ref(serializers): Split up large file (#31359)", "code": "def test_valid_slack_channel_id(self):\n \n integration = Integration.objects.create(\n external_id=\"1\",\n provider=\"slack\",\n metadata={\"access_token\": \"xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx\"},\n )\n integration.add_organization(self.organization, self.user)\n\n base_params = self.valid_params.copy()\n base_params.update(\n {\n \"type\": AlertRuleTriggerAction.get_registered_type(\n AlertRuleTriggerAction.Type.SLACK\n ).slug,\n \"targetType\": ACTION_TARGET_TYPE_TO_STRING[\n AlertRuleTriggerAction.TargetType.SPECIFIC\n ],\n \"targetIdentifier\": \"merp\",\n \"integration\": str(integration.id),\n }\n )\n context = self.context.copy()\n context.update({\"input_channel_id\": \"CSVK0921\"})\n responses.add(\n method=responses.GET,\n url=\"https://slack.com/api/conversations.info\",\n status=200,\n content_type=\"application/json\",\n body=json.dumps({\"ok\": \"true\", \"channel\": {\"name\": \"merp\", \"id\": \"CSVK0921\"}}),\n )\n serializer = AlertRuleTriggerActionSerializer(context=context, data=base_params)\n assert serializer.is_valid()\n\n serializer.save()\n\n # # Make sure the action was created.\n alert_rule_trigger_actions = list(\n AlertRuleTriggerAction.objects.filter(integration=integration)\n )\n assert len(alert_rule_trigger_actions) == 1\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 448, "n_words": 73, "vocab_size": 63, "complexity": 1, "nloc": 36, "token_counts": 210, "n_ast_nodes": 360, "n_identifiers": 46, "random_cut": "def test_valid_slack_channel_id(self):\n \n integration = Integration.objects.create(\n external_id=\"1\",\n provider=\"slack\",\n metadata={\"access_token\": \"xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx\"},\n )\n integration.add_organization(self.organization, self.user)\n\n base_params = self.valid_params.copy()\n base_params.update(\n {\n \"type\": AlertRuleTriggerAction.get_registered_type(\n AlertRuleTriggerAction.Type.SLACK\n ).slug,\n \"targetType\": ACTION_TARGET_TYPE_TO_STRING[\n AlertRuleTriggerAction.TargetType.SPECIFIC\n ],\n \"targetIdentifier\": \"merp\",\n \"integration\": str(integration.id),\n }\n )\n context = self.context.copy()\n context.update({\"input_channel_id\": \"CSVK0921\"})\n responses.add(\n method=responses.GET,\n url=\"https://", "d_id": 19247, "documentation": { "docstring": "\n Test that when a valid Slack channel ID is provided, we look up the channel name and validate it against the targetIdentifier.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 37, "language": "en" } }, { "id": 221804, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ctypes/_aix.py", "file_name": "_aix.py", "fun_name": "get_legacy", "commit_message": "add python 3.10.4 for windows", "code": "def get_legacy(members):\n \n if AIX_ABI == 64:\n # AIX 64-bit member is 
one of shr64.o, shr_64.o, or shr4_64.o\n expr = r'shr4?_?64\\.o'\n member = get_one_match(expr, members)\n if member:\n return member\n else:\n # 32-bit legacy names - both shr.o and shr4.o exist.\n # shr.o is the preferred name so we look for shr.o first\n # i.e., shr4.o is returned only when shr.o does not exist\n for name in ['shr.o', 'shr4.o']:\n member = get_one_match(re.escape(name), members)\n if member:\n return member\n return None\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 194, "n_words": 77, "vocab_size": 54, "complexity": 5, "nloc": 12, "token_counts": 59, "n_ast_nodes": 103, "n_identifiers": 9, "random_cut": "def get_legacy(members):\n \n if AIX_ABI == 64:\n # AIX 64-bit member is ", "d_id": 56520, "documentation": { "docstring": "\n This routine provides historical aka legacy naming schemes started\n in AIX4 shared library support for library members names.\n e.g., in /usr/lib/libc.a the member name shr.o for 32-bit binary and\n shr_64.o for 64-bit binary.\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 49, "language": "en" } }, { "id": 216136, "commit_id": "497ebde11325333cf8e1c0e4eeec185a55fb6ace", "repo": "salt", "path": "salt/states/iptables.py", "file_name": "iptables.py", "fun_name": "set_policy", "commit_message": "salt.states.iptables: Document the save parameter\n\nThe examples mention this, but the reference documentation did not,\nand it isn't obvious from the example that minimal installations of\nsome operating systems (in particular Debian) don't have all the\nnecessary packages to make it effective, even if iptables itself is\ninstalled.\n\nSigned-off-by: Simon McVittie ", "code": "def set_policy(name, table=\"filter\", family=\"ipv4\", **kwargs):\n \n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n for ignore in _STATE_INTERNAL_KEYWORDS:\n if ignore in kwargs:\n del kwargs[ignore]\n\n if (\n __salt__[\"iptables.get_policy\"](table, kwargs[\"chain\"], family)\n == kwargs[\"policy\"]\n ):\n ret[\"result\"] = True\n ret[\n \"comment\"\n ] = \"iptables default policy for chain {} on table {} for {} already set to {}\".format(\n kwargs[\"chain\"], table, family, kwargs[\"policy\"]\n )\n return ret\n if __opts__[\"test\"]:\n ret[\"comment\"] = (\n \"iptables default policy for chain {} on table {} for {} needs to be set\"\n \" to {}\".format(kwargs[\"chain\"], table, family, kwargs[\"policy\"])\n )\n return ret\n if not __salt__[\"iptables.set_policy\"](\n table, kwargs[\"chain\"], kwargs[\"policy\"], family\n ):\n ret[\"changes\"] = {\"locale\": name}\n ret[\"result\"] = True\n ret[\"comment\"] = \"Set default policy for {} to {} family {}\".format(\n kwargs[\"chain\"], kwargs[\"policy\"], family\n )\n if \"save\" in kwargs and kwargs[\"save\"]:\n if kwargs[\"save\"] is not True:\n filename = kwargs[\"save\"]\n else:\n filename = None\n __salt__[\"iptables.save\"](filename=filename, family=family)\n ret[\n \"comment\"\n ] = \"Set and saved default policy for {} to {} family {}\".format(\n kwargs[\"chain\"], kwargs[\"policy\"], family\n )\n return ret\n else:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Failed to set iptables default policy\"\n return ret\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 542, "n_words": 176, "vocab_size": 83, "complexity": 9, "nloc": 46, "token_counts": 281, "n_ast_nodes": 491, 
"n_identifiers": 12, "random_cut": "def set_policy(name, table=\"filter\", family=\"ipv4\", **kwargs):\n \n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n for ignore in _STATE_INTERNAL_KEYWORDS:\n if ignore in kwargs:\n del kwargs[ignore]\n\n if (\n __salt__[\"iptables.get_policy\"](table, kwargs[\"chain\"], family)\n == kwargs[\"policy\"]\n ):\n ret[\"result\"] = True\n ret[\n \"comment\"\n ] = \"iptables default policy for chain {} on table {} for {} already set to {}\".format(\n kwargs[\"chain\"], table, family, kwargs[\"policy\"]\n )\n return ret\n if __opts__[\"test\"]:\n ret[\"comment\"] = (\n \"iptables default policy for chain {} on table {} for {} needs to be set\"\n \" to {}\".format(kwargs[\"chain\"], table, family, kwargs[\"policy\"])\n )\n return ret\n if not __salt__[\"iptables.set_policy\"](\n table, kwargs[\"chain\"], kwargs[\"policy\"], family\n ):\n ret[\"changes\"] = {\"locale\": name}\n ret[\"result\"] = True\n ret", "d_id": 54425, "documentation": { "docstring": "\n .. versionadded:: 2014.1.0\n\n Sets the default policy for iptables firewall tables\n\n table\n The table that owns the chain that should be modified\n\n family\n Networking family, either ipv4 or ipv6\n\n policy\n The requested table policy\n\n save\n If set to a true value, the new iptables rules for the given family\n will be saved to a file. See the ``append`` state for more details.\n\n ", "n_words": 62, "vocab_size": 45, "n_whitespaces": 119, "language": "en" } }, { "id": 321839, "commit_id": "496c14bc9e0afb6c6787a0a167a1cb623ce5e2ff", "repo": "qutebrowser", "path": "tests/end2end/fixtures/quteprocess.py", "file_name": "quteprocess.py", "fun_name": "_after_start", "commit_message": "quteprocess: Add --qute-delay-start\n\nAllows for some rudimentary debugging of subprocesses.", "code": "def _after_start(self):\n \n delay = self.request.config.getoption('--qute-delay-start')\n if delay:\n with self.disable_capturing():\n print(f\"- waiting {delay}ms for quteprocess \"\n f\"(PID: {self.proc.processId()})...\")\n time.sleep(delay / 1000)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 99, "n_words": 20, "vocab_size": 20, "complexity": 2, "nloc": 7, "token_counts": 43, "n_ast_nodes": 97, "n_identifiers": 12, "random_cut": "def _after_start(self):\n \n delay = self.request.config.getoption('--qute-delay-start')\n if delay:\n with self.disable_capturing():\n print(f\"- waiting {delay}ms for quteprocess \"\n f\"(PID: {self.proc.processId()})...\")\n time.", "d_id": 117938, "documentation": { "docstring": "Wait before continuing if requested, e.g. 
for debugger attachment.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 216018, "commit_id": "681ea37f212619266424f00173d0affa27002071", "repo": "salt", "path": "salt/modules/vault.py", "file_name": "vault.py", "fun_name": "list_secrets", "commit_message": "Don't pass Error as default value in vault.py", "code": "def list_secrets(path, default=None):\n \n if default is None:\n default = CommandExecutionError\n log.debug(\"Listing vault secret keys for %s in %s\", __grains__[\"id\"], path)\n version2 = __utils__[\"vault.is_v2\"](path)\n if version2[\"v2\"]:\n path = version2[\"metadata\"]\n try:\n url = \"v1/{}\".format(path)\n response = __utils__[\"vault.make_request\"](\"LIST\", url)\n if response.status_code != 200:\n response.raise_for_status()\n return response.json()[\"data\"]\n except Exception as err: # pylint: disable=broad-except\n if default is CommandExecutionError:\n raise CommandExecutionError(\n \"Failed to list secrets! {}: {}\".format(type(err).__name__, err)\n )\n return default\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 192, "n_words": 66, "vocab_size": 54, "complexity": 6, "nloc": 19, "token_counts": 123, "n_ast_nodes": 215, "n_identifiers": 19, "random_cut": "def list_secrets(path, default=None):\n \n if default is None:\n default = CommandExecutionError\n log.debug(\"Listing vault secret keys for %s in %s\", __grains__[\"id\"], path)\n version2 = __utils__[\"vault.is_v2\"](path)\n if version2[\"v2\"]:\n path = version2[\"metadata\"]\n try:\n url = \"v1/{}\".format(path)\n response = __utils__[\"vault.make_request\"](\"LIST\", url)\n if response.status_code != 200:\n response", "d_id": 54326, "documentation": { "docstring": "\n .. versionchanged:: 3001\n The ``default`` argument has been added. When the path or path/key\n combination is not found, an exception will be raised, unless a default\n is provided.\n\n List secret keys at the path in vault. The vault policy used must allow this.\n The path should end with a trailing slash.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' vault.list_secrets \"secret/my/\"\n ", "n_words": 60, "vocab_size": 52, "n_whitespaces": 111, "language": "en" } }, { "id": 241584, "commit_id": "650c710efacd633fa283955145342bb64063c883", "repo": "lightning", "path": "tests/strategies/test_sharded_strategy.py", "file_name": "test_sharded_strategy.py", "fun_name": "test_custom_kwargs_sharded", "commit_message": "Rename training plugin test files & names to strategy (#11303)", "code": "def test_custom_kwargs_sharded(tmpdir, cls):\n \n strategy = cls(reduce_fp16=True)\n strategy.model = Mock(spec=LightningModule)\n strategy.model.trainer = Mock()\n class_name = \"sharded\" if isinstance(strategy, DDPShardedStrategy) else \"sharded_spawn\"\n\n with mock.patch(f\"pytorch_lightning.strategies.{class_name}.ShardedDataParallel\", autospec=True) as mock_sharded:\n strategy.configure_ddp()\n args, kwargs = mock_sharded.call_args\n assert \"reduce_fp16\" in kwargs\n assert kwargs[\"reduce_fp16\"]\n\n\n@RunIf(skip_windows=True, fairscale=True)\n@mock.patch(\"pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers\", autospec=True)\n@pytest.mark.parametrize([\"params\", \"expected_buffer_size\"], [(dict(), 0), (dict(reduce_buffer_size=128), 128)])\n@pytest.mark.parametrize(\"num_nodes\", [1, 2])", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@RunIf(skip_windows=True, fairscale=True)\n@mock.patch(\"pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers\", autospec=True)\n@pytest.mark.parametrize([\"params\", \"expected_buffer_size\"], [(dict(), 0), (dict(reduce_buffer_size=128), 128)])\n@pytest.mark.parametrize(\"num_nodes\", [1, 2])", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 79, "n_words": 49, "vocab_size": 42, "complexity": 2, "nloc": 10, "token_counts": 83, "n_ast_nodes": 257, "n_identifiers": 29, "random_cut": "def test_custom_kwargs_sharded(tmpdir, cls):\n \n strategy = cls(reduce_fp16=True)\n strategy.model = Mock(spec=LightningModule)\n strategy.model.trainer = Mock()\n class_name = \"sharded\" if isinstance(strategy, DDPShardedStrategy) else \"sharded_spawn\"\n\n with mock.patch(f\"pytorch_lightning.strategies.{class_name}.ShardedDataParallel\", autosp", "d_id": 69609, "documentation": { "docstring": "Tests to ensure that if custom kwargs are passed, they are set correctly.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 195608, "commit_id": "f0194812568c83585ff09488fe7f67df300938cc", "repo": "rembg", "path": "versioneer.py", "file_name": "versioneer.py", "fun_name": "versions_from_parentdir", "commit_message": "add auto tag", "code": "def versions_from_parentdir(parentdir_prefix, root, verbose):\n \n rootdirs = []\n\n for _ in range(3):\n dirname = os.path.basename(root)\n if dirname.startswith(parentdir_prefix):\n return {\"version\": dirname[len(parentdir_prefix):],\n \"full-revisionid\": None,\n \"dirty\": False, \"error\": None, \"date\": None}\n rootdirs.append(root)\n root = os.path.dirname(root) # up a level\n\n if verbose:\n print(\"Tried directories %s but none started with prefix %s\" %\n (str(rootdirs), parentdir_prefix))\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n\n\nSHORT_VERSION_PY = \n%s\n", "url": "https://github.com/danielgatis/rembg.git", "language": "Python", "ast_errors": "\"\"\"\n# This file was generated by 'versioneer.py' (0.21) from\n# revision-control system data, or from the parent directory name of an\n# unpacked source archive. 
Distribution tarballs contain a pre-generated copy\n# of this file.\n\nimport", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 170, "n_words": 58, "vocab_size": 51, "complexity": 4, "nloc": 14, "token_counts": 106, "n_ast_nodes": 199, "n_identifiers": 21, "random_cut": "def versions_from_parentdir(parentdir_prefix, root, verbose):\n \n rootdirs = []\n\n for _ in range(3):\n dirname = os.path.basename(root)\n if dirname.startswith(parent", "d_id": 47323, "documentation": { "docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory\n \n# This file was generated by 'versioneer.py' (0.21) from\n# revision-control system data, or from the parent directory name of an\n# unpacked source archive. Distribution tarballs contain a pre-generated copy\n# of this file.\n\nimport json\n\nversion_json = # END VERSION_JSON\n\n", "n_words": 84, "vocab_size": 62, "n_whitespaces": 92, "language": "en" } }, { "id": 203405, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/options.py", "file_name": "options.py", "fun_name": "response_add", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def response_add(self, request, obj, post_url_continue=None):\n \n opts = obj._meta\n preserved_filters = self.get_preserved_filters(request)\n obj_url = reverse(\n \"admin:%s_%s_change\" % (opts.app_label, opts.model_name),\n args=(quote(obj.pk),),\n current_app=self.admin_site.name,\n )\n # Add a link to the object's change form if the user can edit the obj.\n if self.has_change_permission(request, obj):\n obj_repr = format_html('{}', urlquote(obj_url), obj)\n else:\n obj_repr = str(obj)\n msg_dict = {\n \"name\": opts.verbose_name,\n \"obj\": obj_repr,\n }\n # Here, we distinguish between different save types by checking for\n # the presence of keys in request.POST.\n\n if IS_POPUP_VAR in request.POST:\n to_field = request.POST.get(TO_FIELD_VAR)\n if to_field:\n attr = str(to_field)\n else:\n attr = obj._meta.pk.attname\n value = obj.serializable_value(attr)\n popup_response_data = json.dumps(\n {\n \"value\": str(value),\n \"obj\": str(obj),\n }\n )\n return TemplateResponse(\n request,\n self.popup_response_template\n or [\n \"admin/%s/%s/popup_response.html\"\n % (opts.app_label, opts.model_name),\n \"admin/%s/popup_response.html\" % opts.app_label,\n \"admin/popup_response.html\",\n ],\n {\n \"popup_response_data\": popup_response_data,\n },\n )\n\n elif \"_continue\" in request.POST or (\n # Redirecting after \"Save as new\".\n \"_saveasnew\" in request.POST\n and self.save_as_continue\n and self.has_change_permission(request, obj)\n ):\n msg = _(\"The {name} “{obj}” was added successfully.\")\n if self.has_change_permission(request, obj):\n msg += \" \" + _(\"You may edit it again below.\")\n self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS)\n if post_url_continue is None:\n post_url_continue = obj_url\n post_url_continue = add_preserved_filters(\n {\"preserved_filters\": preserved_filters, \"opts\": opts},\n post_url_continue,\n )\n return HttpResponseRedirect(post_url_continue)\n\n elif \"_addanother\" in request.POST:\n msg = format_html(\n _(\n \"The {name} “{obj}” was added successfully. 
You may add another {name} below.\"\n ),\n **msg_dict,\n )\n self.message_user(request, msg, messages.SUCCESS)\n redirect_url = request.path\n redirect_url = add_preserved_filters(\n {\"preserved_filters\": preserved_filters, \"opts\": opts}, redirect_url\n )\n return HttpResponseRedirect(redirect_url)\n\n else:\n msg = format_html(\n _(\"The {name} “{obj}” was added successfully.\"), **msg_dict\n )\n self.message_user(request, msg, messages.SUCCESS)\n return self.response_post_save_add(request, obj)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1211, "n_words": 248, "vocab_size": 152, "complexity": 12, "nloc": 77, "token_counts": 410, "n_ast_nodes": 665, "n_identifiers": 51, "random_cut": "def response_add(self, request, obj, post_url_continue=None):\n \n opts = obj._meta\n preserved_filters = self.get_preserved_filters(request)\n obj_url = reverse(\n \"admin:%s_%s_change\" % (opts.app_label, opts.model_name),\n args=(quote(obj.pk),),\n current_app=self.admin_site.name,\n )\n # Add a link to the object's change form if the user can edit the obj.\n if self.has_change_permission(request, obj):\n obj_repr = format_html('{}', urlquote(obj_url), obj)\n else:\n obj_repr = str(obj)\n msg_dict = {\n \"name\": opts.verbose_name,\n \"obj\": obj_repr,\n }\n # Here, we distinguish between different save types by checking for\n # the presence of keys in request.POST.\n\n if IS_POPUP_VAR in request.POST:\n to_field = request.POST.get(TO_FIELD_VAR)\n if to_field:\n attr = str(to_field)\n else:\n attr = obj._meta.pk.attname\n value = obj.serializable_value(attr)\n popup_response_data = json.dumps(\n {\n \"value\": str(value),\n \"obj\": str(obj),\n }\n )\n return TemplateResponse(\n request,\n self.popup_response_template\n or [\n \"admin/%s/%s/popup_response.html\"\n % (opt", "d_id": 50354, "documentation": { "docstring": "\n Determine the HttpResponse for the add_view stage.\n ", "n_words": 7, "vocab_size": 6, "n_whitespaces": 22, "language": "en" } }, { "id": 320280, "commit_id": "4aa318598fd0dc6c5d4e08dd2a13e7bf614511ec", "repo": "paperless-ngx", "path": "src/paperless_mail/tests/test_parsers.py", "file_name": "test_parsers.py", "fun_name": "test_tika_parse_unreachable", "commit_message": "add test comments", "code": "def test_tika_parse_unreachable(self):\n \n html = '

    Some Text

    '\n\n # Check if exception is raised when Tika cannot be reached.\n self.parser.tika_server = \"\"\n self.assertRaises(ParseError, self.parser.tika_parse, html)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 61, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def test_tika_parse_unreachable(self):\n \n html = '\"\n + (\".exe\" if getOS() == \"Windows\" else \".bin\"),\n)\n\noutput_group.add_option(\n \"--output-dir\",\n action=\"store\",\n dest=\"output_dir\",\n metavar=\"DIRECTORY\",\n default=\"\",\n help=,\n)\n\noutput_group.add_option(\n \"--remove-output\",\n action=\"store_true\",\n dest=\"remove_build\",\n default=False,\n help=,\n)\n\noutput_group.add_option(\n \"--no-pyi-file\",\n action=\"store_false\",\n dest=\"pyi_file\",\n default=True,\n help=,\n)\n\nparser.add_option_group(output_group)\n\n\ndebug_group = OptionGroup(parser, \"Debug features\")\n\ndebug_group.add_option(\n \"--debug\",\n action=\"store_true\",\n dest=\"debug\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--unstripped\",\n action=\"store_true\",\n dest=\"unstripped\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--profile\",\n action=\"store_true\",\n dest=\"profile\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--internal-graph\",\n action=\"store_true\",\n dest=\"graph\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--trace-execution\",\n action=\"store_true\",\n dest=\"trace_execution\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--recompile-c-only\",\n action=\"store_true\",\n dest=\"recompile_c_only\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--generate-c-only\",\n action=\"store_true\",\n dest=\"generate_c_only\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--experimental\",\n action=\"append\",\n dest=\"experimental\",\n metavar=\"FLAG\",\n default=[],\n help=,\n)\n\ndebug_group.add_option(\n \"--explain-imports\",\n action=\"store_true\",\n dest=\"explain_imports\",\n default=False,\n help=SUPPRESS_HELP,\n)\n\ndebug_group.add_option(\n \"--low-memory\",\n action=\"store_true\",\n dest=\"low_memory\",\n default=False,\n help=,\n)\n\nif os.name == \"nt\":\n debug_group.add_option(\n \"--disable-dll-dependency-cache\",\n action=\"store_true\",\n dest=\"no_dependency_cache\",\n default=False,\n help=,\n )\n\n debug_group.add_option(\n \"--force-dll-dependency-cache-update\",\n action=\"store_true\",\n dest=\"update_dependency_cache\",\n default=False,\n help=,\n )\n\n# This is for testing framework, \"coverage.py\" hates to loose the process. 
And\n# we can use it to make sure it's not done unknowingly.\nparser.add_option(\n \"--must-not-re-execute\",\n action=\"store_false\",\n dest=\"allow_reexecute\",\n default=True,\n help=SUPPRESS_HELP,\n)\n\n\nparser.add_option_group(debug_group)\n\nc_compiler_group = OptionGroup(parser, \"Backend C compiler choice\")\n\n\nc_compiler_group.add_option(\n \"--clang\",\n action=\"store_true\",\n dest=\"clang\",\n default=False,\n help=,\n)\n\nc_compiler_group.add_option(\n \"--mingw64\",\n action=\"store_true\",\n dest=\"mingw64\",\n default=False,\n help=,\n)\n\nc_compiler_group.add_option(\n \"--msvc\",\n action=\"store\",\n dest=\"msvc_version\",\n default=None,\n help=,\n)\n\nc_compiler_group.add_option(\n \"-j\",\n \"--jobs\",\n action=\"store\",\n dest=\"jobs\",\n metavar=\"N\",\n default=None,\n help=,\n)\n\nc_compiler_group.add_option(\n \"--lto\",\n action=\"store\",\n dest=\"lto\",\n metavar=\"choice\",\n default=\"auto\",\n choices=(\"yes\", \"no\", \"auto\"),\n help=,\n)\n\nc_compiler_group.add_option(\n \"--static-libpython\",\n action=\"store\",\n dest=\"static_libpython\",\n metavar=\"choice\",\n default=\"auto\",\n choices=(\"yes\", \"no\", \"auto\"),\n help=,\n)\n\nc_compiler_group.add_option(\n \"--disable-ccache\",\n action=\"store_true\",\n dest=\"disable_ccache\",\n default=False,\n help=,\n)\n\nparser.add_option_group(c_compiler_group)\n\npgo_group = OptionGroup(parser, \"PGO compilation choices\")\n\npgo_group.add_option(\n \"--pgo\",\n action=\"store_true\",\n dest=\"is_c_pgo\",\n default=False,\n help=,\n)\n\npgo_group.add_option(\n \"--pgo-python\",\n action=\"store_true\",\n dest=\"is_python_pgo\",\n default=False,\n help=SUPPRESS_HELP,\n)\n\npgo_group.add_option(\n \"--pgo-python-input\",\n action=\"store\",\n dest=\"python_pgo_input\",\n default=None,\n help=SUPPRESS_HELP,\n)\n\npgo_group.add_option(\n \"--pgo-python-policy-unused-module\",\n action=\"store\",\n dest=\"python_pgo_policy_unused_module\",\n choices=(\"include\", \"exclude\", \"bytecode\"),\n default=\"include\",\n help=SUPPRESS_HELP,\n)\n\npgo_group.add_option(\n \"--pgo-args\",\n action=\"store\",\n dest=\"pgo_args\",\n default=\"\",\n help=,\n)\n\npgo_group.add_option(\n \"--pgo-executable\",\n action=\"store\",\n dest=\"pgo_executable\",\n default=None,\n help=,\n)\n\n\nparser.add_option_group(pgo_group)\n\ntracing_group = OptionGroup(parser, \"Tracing features\")\n\ntracing_group.add_option(\n \"--quiet\",\n action=\"store_true\",\n dest=\"quiet\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--show-scons\",\n action=\"store_true\",\n dest=\"show_scons\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--show-progress\",\n action=\"store_true\",\n dest=\"show_progress\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--no-progressbar\",\n action=\"store_false\",\n dest=\"progress_bar\",\n default=True,\n help=,\n)\n\n\ntracing_group.add_option(\n \"--show-memory\",\n action=\"store_true\",\n dest=\"show_memory\",\n default=False,\n help=,\n)\n\n\ntracing_group.add_option(\n \"--show-modules\",\n action=\"store_true\",\n dest=\"show_inclusion\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--show-modules-output\",\n action=\"store\",\n dest=\"show_inclusion_output\",\n metavar=\"PATH\",\n default=None,\n help=,\n)\n\ntracing_group.add_option(\n \"--report\",\n action=\"store\",\n dest=\"compilation_report_filename\",\n default=None,\n help=,\n)\n\ntracing_group.add_option(\n \"--verbose\",\n action=\"store_true\",\n 
dest=\"verbose\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--verbose-output\",\n action=\"store\",\n dest=\"verbose_output\",\n metavar=\"PATH\",\n default=None,\n help=,\n)\n\nparser.add_option_group(tracing_group)\n\nwindows_group = OptionGroup(parser, \"Windows specific controls\")\n\nwindows_group.add_option(\n \"--windows-dependency-tool\",\n action=\"store\",\n dest=\"dependency_tool\",\n default=None,\n help=SUPPRESS_HELP,\n)\n\nwindows_group.add_option(\n \"--windows-disable-console\",\n action=\"store_true\",\n dest=\"disable_console\",\n default=False,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-icon-from-ico\",\n action=\"append\",\n dest=\"icon_path\",\n metavar=\"ICON_PATH\",\n default=[],\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-icon-from-exe\",\n action=\"store\",\n dest=\"icon_exe_path\",\n metavar=\"ICON_EXE_PATH\",\n default=None,\n help=\"Copy executable icons from this existing executable (Windows only).\",\n)\n\nwindows_group.add_option(\n \"--onefile-windows-splash-screen-image\",\n action=\"store\",\n dest=\"splash_screen_image\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-uac-admin\",\n action=\"store_true\",\n dest=\"windows_uac_admin\",\n metavar=\"WINDOWS_UAC_ADMIN\",\n default=False,\n help=\"Request Windows User Control, to grant admin rights on execution. (Windows only). Defaults to off.\",\n)\n\nwindows_group.add_option(\n \"--windows-uac-uiaccess\",\n action=\"store_true\",\n dest=\"windows_uac_uiaccess\",\n metavar=\"WINDOWS_UAC_UIACCESS\",\n default=False,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-company-name\",\n action=\"store\",\n dest=\"windows_company_name\",\n metavar=\"WINDOWS_COMPANY_NAME\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-product-name\",\n action=\"store\",\n dest=\"windows_product_name\",\n metavar=\"WINDOWS_PRODUCT_NAME\",\n default=None,\n help=,\n)\n\n\nwindows_group.add_option(\n \"--windows-file-version\",\n action=\"store\",\n dest=\"windows_file_version\",\n metavar=\"WINDOWS_FILE_VERSION\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-product-version\",\n action=\"store\",\n dest=\"windows_product_version\",\n metavar=\"WINDOWS_PRODUCT_VERSION\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-file-description\",\n action=\"store\",\n dest=\"windows_file_description\",\n metavar=\"WINDOWS_FILE_DESCRIPTION\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-onefile-tempdir\",\n \"--onefile-tempdir\",\n action=\"store_true\",\n dest=\"is_onefile_tempdir\",\n metavar=\"ONEFILE_TEMPDIR\",\n default=False,\n help=SUPPRESS_HELP,\n)\n\nwindows_group.add_option(\n \"--windows-onefile-tempdir-spec\",\n \"--onefile-tempdir-spec\",\n action=\"store\",\n dest=\"onefile_tempdir_spec\",\n metavar=\"ONEFILE_TEMPDIR_SPEC\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-force-stdout-spec\",\n action=\"store\",\n dest=\"force_stdout_spec\",\n metavar=\"WINDOWS_FORCE_STDOUT_SPEC\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-force-stderr-spec\",\n action=\"store\",\n dest=\"force_stderr_spec\",\n metavar=\"WINDOWS_FORCE_STDERR_SPEC\",\n default=None,\n help=,\n)\n\n\nparser.add_option_group(windows_group)\n\nmacos_group = OptionGroup(parser, \"macOS specific controls\")\n\nmacos_group.add_option(\n \"--macos-target-arch\",\n action=\"store\",\n dest=\"macos_target_arch\",\n choices=(\"universal\", \"arm64\", 
\"x86_64\"),\n metavar=\"MACOS_TARGET_ARCH\",\n default=None,\n help=,\n)\n\nmacos_group.add_option(\n \"--macos-disable-console\",\n \"--disable-console\",\n action=\"store_true\",\n dest=\"disable_console\",\n default=False,\n help=,\n)\n\nmacos_group.add_option(\n \"--macos-create-app-bundle\",\n action=\"store_true\",\n dest=\"macos_create_bundle\",\n default=False,\n help=,\n)\n\nmacos_group.add_option(\n \"--macos-onefile-icon\",\n action=\"append\",\n dest=\"icon_path\",\n metavar=\"ICON_PATH\",\n default=[],\n help=\"Add executable icon for binary to use. Can be given only one time. Defaults to Python icon if available.\",\n)\n\n\nmacos_group.add_option(\n \"--macos-signed-app-name\",\n action=\"store\",\n dest=\"macos_signed_app_name\",\n metavar=\"MACOS_SIGNED_APP_NAME\",\n default=None,\n help=,\n)\n\nmacos_group.add_option(\n \"--macos-app-name\",\n action=\"store\",\n dest=\"macos_app_name\",\n metavar=\"MACOS_APP_NAME\",\n default=None,\n help=,\n)\n\nmacos_group.add_option(\n \"--macos-app-version\",\n action=\"store\",\n dest=\"macos_app_version\",\n metavar=\"MACOS_APP_VERSION\",\n default=None,\n help=,\n)\n\n\nparser.add_option_group(macos_group)\n\nlinux_group = OptionGroup(parser, \"Linux specific controls\")\n\nlinux_group.add_option(\n \"--linux-onefile-icon\",\n action=\"append\",\n dest=\"icon_path\",\n metavar=\"ICON_PATH\",\n default=[],\n help=\"Add executable icon for onefile binary to use. Can be given only one time. Defaults to Python icon if available.\",\n)\n\nlinux_group.add_option(\n \"--linux-onefile-compression\",\n action=\"store\",\n dest=\"appimage_compression\",\n choices=(\"gzip\", \"xz\"),\n metavar=\"COMPRESSION\",\n default=\"gzip\",\n help=\"Compression method to use for Linux onefile builds. Defaults to gzip for faster decompression\",\n)\n\nparser.add_option_group(linux_group)\n\nplugin_group = OptionGroup(parser, \"Plugin control\")\n\nplugin_group.add_option(\n \"--enable-plugin\",\n \"--plugin-enable\",\n action=\"append\",\n dest=\"plugins_enabled\",\n metavar=\"PLUGIN_NAME\",\n default=[],\n help=,\n)\n\nplugin_group.add_option(\n \"--disable-plugin\",\n \"--plugin-disable\",\n action=\"append\",\n dest=\"plugins_disabled\",\n metavar=\"PLUGIN_NAME\",\n default=[],\n help=,\n)\n\nplugin_group.add_option(\n \"--plugin-no-detection\",\n action=\"store_false\",\n dest=\"detect_missing_plugins\",\n default=True,\n help=,\n)\n\nplugin_group.add_option(\n \"--plugin-list\",\n action=\"store_true\",\n dest=\"list_plugins\",\n default=False,\n help=,\n)\n\n\nparser.add_option_group(plugin_group)\n\nplugin_group.add_option(\n \"--user-plugin\",\n action=\"append\",\n dest=\"user_plugins\",\n metavar=\"PATH\",\n default=[],\n help=\"The file name of user plugin. Can be given multiple times. 
Default empty.\",\n)\n\nplugin_group.add_option(\n \"--persist-source-changes\",\n action=\"store_true\",\n dest=\"persist_source_changes\",\n default=False,\n help=,\n)\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 2090, "n_words": 859, "vocab_size": 399, "complexity": 2, "nloc": 9, "token_counts": 19, "n_ast_nodes": 3971, "n_identifiers": 33, "random_cut": "def _getDataFileTagsOptionHelp():\n return % \", \".join(\n \"'%s' (%s)\" % d for d in data_files_tags\n )\n\n\ndata_file_tags_option = data_group.add_option(\n \"--data-file-tags\",\n action=\"append\",\n dest=\"data_file_tags\",\n metavar=\"DATA_TAGS\",\n default=[],\n)\n\nparser.add_option_group(data_group)\n\n\nexecute_group = OptionGroup(parser, \"Immediate execution after compilation\")\n\nexecute_group.add_option(\n \"--run\",\n action=\"store_true\",\n dest=\"immediate_execution\",\n default=is_nuitka_run,\n help=\n % (\"on\" if is_nuitka_run else \"off\"),\n)\n\nexecute_group.add_option(\n \"--debugger\",\n \"--gdb\",\n action=\"store_true\",\n dest=\"debugger\",\n default=False,\n help=,\n)\n\nexecute_group.add_option(\n \"--execute-with-pythonpath\",\n action=\"store_true\",\n dest=\"keep_pythonpath\",\n default=False,\n help=,\n)\n\nparser.add_option_group(execute_group)\n\ndump_group = OptionGroup(parser, \"Dump options for internal tree\")\n\ndump_group.add_option(\n \"--xml\",\n action=\"store_true\",\n dest=\"dump_xml\",\n default=False,\n help=\"Dump the final result of optimization as XML, then exit.\",\n)\n\n\nparser.add_option_group(dump_group)\n\n\ncodegen_group = OptionGroup(parser, \"Code generation choices\")\n\ncodegen_group.add_option(\n \"--disable-bytecode-cache\",\n action=\"store_true\",\n dest=\"disable_bytecode_cache\",\n default=False,\n help=,\n)\n\ncodegen_group.add_option(\n \"--full-compat\",\n action=\"store_false\",\n dest=\"improved\",\n default=True,\n help=,\n)\n\ncodegen_group.add_option(\n \"--file-reference-choice\",\n action=\"store\",\n dest=\"file_reference_mode\",\n metavar=\"MODE\",\n choices=(\"original\", \"runtime\", \"frozen\"),\n default=None,\n help=,\n)\n\ncodegen_group.add_option(\n \"--module-name-choice\",\n action=\"store\",\n dest=\"module_name_mode\",\n metavar=\"MODE\",\n choices=(\"original\", \"runtime\"),\n default=None,\n help=,\n)\n\n\nparser.add_option_group(codegen_group)\n\noutput_group = OptionGroup(parser, \"Output choices\")\n\noutput_group.add_option(\n \"-o\",\n action=\"store\",\n dest=\"output_filename\",\n metavar=\"FILENAME\",\n default=None,\n help=\n % \"\"\n + (\".exe\" if getOS() == \"Windows\" else \".bin\"),\n)\n\noutput_group.add_option(\n \"--output-dir\",\n action=\"store\",\n dest=\"output_dir\",\n metavar=\"DIRECTORY\",\n default=\"\",\n help=,\n)\n\noutput_group.add_option(\n \"--remove-output\",\n action=\"store_true\",\n dest=\"remove_build\",\n default=False,\n help=,\n)\n\noutput_group.add_option(\n \"--no-pyi-file\",\n action=\"store_false\",\n dest=\"pyi_file\",\n default=True,\n help=,\n)\n\nparser.add_option_group(output_group)\n\n\ndebug_group = OptionGroup(parser, \"Debug features\")\n\ndebug_group.add_option(\n \"--debug\",\n action=\"store_true\",\n dest=\"debug\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--unstripped\",\n action=\"store_true\",\n dest=\"unstripped\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--profile\",\n action=\"store_true\",\n dest=\"profile\",\n default=False,\n 
help=,\n)\n\ndebug_group.add_option(\n \"--internal-graph\",\n action=\"store_true\",\n dest=\"graph\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--trace-execution\",\n action=\"store_true\",\n dest=\"trace_execution\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--recompile-c-only\",\n action=\"store_true\",\n dest=\"recompile_c_only\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--generate-c-only\",\n action=\"store_true\",\n dest=\"generate_c_only\",\n default=False,\n help=,\n)\n\ndebug_group.add_option(\n \"--experimental\",\n action=\"append\",\n dest=\"experimental\",\n metavar=\"FLAG\",\n default=[],\n help=,\n)\n\ndebug_group.add_option(\n \"--explain-imports\",\n action=\"store_true\",\n dest=\"explain_imports\",\n default=False,\n help=SUPPRESS_HELP,\n)\n\ndebug_group.add_option(\n \"--low-memory\",\n action=\"store_true\",\n dest=\"low_memory\",\n default=False,\n help=,\n)\n\nif os.name == \"nt\":\n debug_group.add_option(\n \"--disable-dll-dependency-cache\",\n action=\"store_true\",\n dest=\"no_dependency_cache\",\n default=False,\n help=,\n )\n\n debug_group.add_option(\n \"--force-dll-dependency-cache-update\",\n action=\"store_true\",\n dest=\"update_dependency_cache\",\n default=False,\n help=,\n )\n\n# This is for testing framework, \"coverage.py\" hates to loose the process. And\n# we can use it to make sure it's not done unknowingly.\nparser.add_option(\n \"--must-not-re-execute\",\n action=\"store_false\",\n dest=\"allow_reexecute\",\n default=True,\n help=SUPPRESS_HELP,\n)\n\n\nparser.add_option_group(debug_group)\n\nc_compiler_group = OptionGroup(parser, \"Backend C compiler choice\")\n\n\nc_compiler_group.add_option(\n \"--clang\",\n action=\"store_true\",\n dest=\"clang\",\n default=False,\n help=,\n)\n\nc_compiler_group.add_option(\n \"--mingw64\",\n action=\"store_true\",\n dest=\"mingw64\",\n default=False,\n help=,\n)\n\nc_compiler_group.add_option(\n \"--msvc\",\n action=\"store\",\n dest=\"msvc_version\",\n default=None,\n help=,\n)\n\nc_compiler_group.add_option(\n \"-j\",\n \"--jobs\",\n action=\"store\",\n dest=\"jobs\",\n metavar=\"N\",\n default=None,\n help=,\n)\n\nc_compiler_group.add_option(\n \"--lto\",\n action=\"store\",\n dest=\"lto\",\n metavar=\"choice\",\n default=\"auto\",\n choices=(\"yes\", \"no\", \"auto\"),\n help=,\n)\n\nc_compiler_group.add_option(\n \"--static-libpython\",\n action=\"store\",\n dest=\"static_libpython\",\n metavar=\"choice\",\n default=\"auto\",\n choices=(\"yes\", \"no\", \"auto\"),\n help=,\n)\n\nc_compiler_group.add_option(\n \"--disable-ccache\",\n action=\"store_true\",\n dest=\"disable_ccache\",\n default=False,\n help=,\n)\n\nparser.add_option_group(c_compiler_group)\n\npgo_group = OptionGroup(parser, \"PGO compilation choices\")\n\npgo_group.add_option(\n \"--pgo\",\n action=\"store_true\",\n dest=\"is_c_pgo\",\n default=False,\n help=,\n)\n\npgo_group.add_option(\n \"--pgo-python\",\n action=\"store_true\",\n dest=\"is_python_pgo\",\n default=False,\n help=SUPPRESS_HELP,\n)\n\npgo_group.add_option(\n \"--pgo-python-input\",\n action=\"store\",\n dest=\"python_pgo_input\",\n default=None,\n help=SUPPRESS_HELP,\n)\n\npgo_group.add_option(\n \"--pgo-python-policy-unused-module\",\n action=\"store\",\n dest=\"python_pgo_policy_unused_module\",\n choices=(\"include\", \"exclude\", \"bytecode\"),\n default=\"include\",\n help=SUPPRESS_HELP,\n)\n\npgo_group.add_option(\n \"--pgo-args\",\n action=\"store\",\n dest=\"pgo_args\",\n default=\"\",\n 
help=,\n)\n\npgo_group.add_option(\n \"--pgo-executable\",\n action=\"store\",\n dest=\"pgo_executable\",\n default=None,\n help=,\n)\n\n\nparser.add_option_group(pgo_group)\n\ntracing_group = OptionGroup(parser, \"Tracing features\")\n\ntracing_group.add_option(\n \"--quiet\",\n action=\"store_true\",\n dest=\"quiet\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--show-scons\",\n action=\"store_true\",\n dest=\"show_scons\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--show-progress\",\n action=\"store_true\",\n dest=\"show_progress\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--no-progressbar\",\n action=\"store_false\",\n dest=\"progress_bar\",\n default=True,\n help=,\n)\n\n\ntracing_group.add_option(\n \"--show-memory\",\n action=\"store_true\",\n dest=\"show_memory\",\n default=False,\n help=,\n)\n\n\ntracing_group.add_option(\n \"--show-modules\",\n action=\"store_true\",\n dest=\"show_inclusion\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--show-modules-output\",\n action=\"store\",\n dest=\"show_inclusion_output\",\n metavar=\"PATH\",\n default=None,\n help=,\n)\n\ntracing_group.add_option(\n \"--report\",\n action=\"store\",\n dest=\"compilation_report_filename\",\n default=None,\n help=,\n)\n\ntracing_group.add_option(\n \"--verbose\",\n action=\"store_true\",\n dest=\"verbose\",\n default=False,\n help=,\n)\n\ntracing_group.add_option(\n \"--verbose-output\",\n action=\"store\",\n dest=\"verbose_output\",\n metavar=\"PATH\",\n default=None,\n help=,\n)\n\nparser.add_option_group(tracing_group)\n\nwindows_group = OptionGroup(parser, \"Windows specific controls\")\n\nwindows_group.add_option(\n \"--windows-dependency-tool\",\n action=\"store\",\n dest=\"dependency_tool\",\n default=None,\n help=SUPPRESS_HELP,\n)\n\nwindows_group.add_option(\n \"--windows-disable-console\",\n action=\"store_true\",\n dest=\"disable_console\",\n default=False,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-icon-from-ico\",\n action=\"append\",\n dest=\"icon_path\",\n metavar=\"ICON_PATH\",\n default=[],\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-icon-from-exe\",\n action=\"store\",\n dest=\"icon_exe_path\",\n metavar=\"ICON_EXE_PATH\",\n default=None,\n help=\"Copy executable icons from this existing executable (Windows only).\",\n)\n\nwindows_group.add_option(\n \"--onefile-windows-splash-screen-image\",\n action=\"store\",\n dest=\"splash_screen_image\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-uac-admin\",\n action=\"store_true\",\n dest=\"windows_uac_admin\",\n metavar=\"WINDOWS_UAC_ADMIN\",\n default=False,\n help=\"Request Windows User Control, to grant admin rights on execution. (Windows only). 
Defaults to off.\",\n)\n\nwindows_group.add_option(\n \"--windows-uac-uiaccess\",\n action=\"store_true\",\n dest=\"windows_uac_uiaccess\",\n metavar=\"WINDOWS_UAC_UIACCESS\",\n default=False,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-company-name\",\n action=\"store\",\n dest=\"windows_company_name\",\n metavar=\"WINDOWS_COMPANY_NAME\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-product-name\",\n action=\"store\",\n dest=\"windows_product_name\",\n metavar=\"WINDOWS_PRODUCT_NAME\",\n default=None,\n help=,\n)\n\n\nwindows_group.add_option(\n \"--windows-file-version\",\n action=\"store\",\n dest=\"windows_file_version\",\n metavar=\"WINDOWS_FILE_VERSION\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-product-version\",\n action=\"store\",\n dest=\"windows_product_version\",\n metavar=\"WINDOWS_PRODUCT_VERSION\",\n default=None,\n help=,\n)\n\nwindows_group.add_option(\n \"--windows-file-description\",\n action=\"store\",\n dest=\"windows_", "d_id": 42847, "documentation": { "docstring": "\\\nFor included data files, special handlings can be chosen. With the\ncommercial plugins, e.g. files can be included directly in the\nbinary. The list is completed by some plugins. With the current\nlist of plugins, these are available: %s.\nThe default is empty.\\\nExecute immediately the created binary (or import the compiled module).\nDefaults to %s.\\\nExecute inside a debugger, e.g. \"gdb\" or \"lldb\" to automatically get a stack trace.\nDefaults to off.\\\nWhen immediately executing the created binary (--execute), don't reset\nPYTHONPATH. When all modules are successfully included, you ought to not need\nPYTHONPATH anymore.\\\nDo not reuse dependency analysis results for modules, esp. from standard library,\nthat are included as bytecode.\\\nEnforce absolute compatibility with CPython. Do not even allow minor\ndeviations from CPython behavior, e.g. not having better tracebacks\nor exception messages which are not really incompatible, but only\ndifferent. This is intended for tests only and should not be used\nfor normal use.\\\nSelect what value \"__file__\" is going to be. With \"runtime\" (default for\nstandalone binary mode and module mode), the created binaries and modules,\nuse the location of themselves to deduct the value of \"__file__\". Included\npackages pretend to be in directories below that location. This allows you\nto include data files in deployments. If you merely seek acceleration, it's\nbetter for you to use the \"original\" value, where the source files location\nwill be used. With \"frozen\" a notation \"\" is used. For\ncompatibility reasons, the \"__file__\" value will always have \".py\" suffix\nindependent of what it really is.\\\nSelect what value \"__name__\" and \"__package__\" are going to be. With \"runtime\"\n(default for module mode), the created module, it uses the parent package to\ndeduct the value of \"__package__\", to be fully compatible. This allows for more\noptimization to happen, but normally any package can be loaded into another one,\nbut this will raise an import error when it detects that with \"original\" mode.\\\nSpecify how the executable should be named. For extension modules there is no\nchoice, also not for standalone mode and using it will be an error. This may\ninclude path information that needs to exist though. Defaults to '%s' on this\nplatform.\n\\\nSpecify where intermediate and final output files should be put. 
The DIRECTORY\nwill be populated with C files, object files, etc.\nDefaults to current directory.\n\\\nRemoves the build directory after producing the module or exe file.\nDefaults to off.\\\nDo not create a \".pyi\" file for extension modules created by Nuitka. This is\nused to detect implicit imports.\nDefaults to off.\\\nExecuting all self checks possible to find errors in Nuitka, do not use for\nproduction. Defaults to off.\\\nKeep debug info in the resulting object file for better debugger interaction.\nDefaults to off.\\\nEnable vmprof based profiling of time spent. Not working currently. Defaults to off.\\\nCreate graph of optimization process internals, do not use for whole programs, but only\nfor small test cases. Defaults to off.\\\nTraced execution output, output the line of code before executing it.\nDefaults to off.\\\nThis is not incremental compilation, but for Nuitka development only. Takes\nexisting files and simply compile them as C again. Allows compiling edited\nC files for quick debugging changes to the generated source, e.g. to see if\ncode is passed by, values output, etc, Defaults to off. Depends on compiling\nPython source to determine which files it should look at.\\\nGenerate only C source code, and do not compile it to binary or module. This\nis for debugging and code coverage analysis that doesn't waste CPU. Defaults to\noff. Do not think you can use this directly.\\\nUse features declared as 'experimental'. May have no effect if no experimental\nfeatures are present in the code. Uses secret tags (check source) per\nexperimented feature.\\\nAttempt to use less memory, by forking less C compilation jobs and using\noptions that use less memory. For use on embedded machines. Use this in\ncase of out of memory problems. Defaults to off.\\\nDisable the dependency walker cache. Will result in much longer times to create\nthe distribution folder, but might be used in case the cache is suspect to cause\nerrors.\n\\\nFor an update of the dependency walker cache. Will result in much longer times\nto create the distribution folder, but might be used in case the cache is suspect\nto cause errors or known to need an update.\n\\\nEnforce the use of clang. On Windows this requires a working Visual\nStudio version to piggy back on. Defaults to off.\\\nEnforce the use of MinGW64 on Windows. Defaults to off.\\\nEnforce the use of specific MSVC version on Windows. Allowed values\nare e.g. \"14.3\" (MSVC 2022) and other MSVC version numbers, specify\n\"list\" for a list of installed compilers, or use \"latest\".\n\nDefaults to latest MSVC being used if installed, otherwise MinGW64\nis used.\\\nSpecify the allowed number of parallel C compiler jobs. Defaults to the\nsystem CPU count.\\\nUse link time optimizations (MSVC, gcc, clang). Allowed values are\n\"yes\", \"no\", and \"auto\" (when it's known to work). Defaults to\n\"auto\".\\\nUse static link library of Python. Allowed values are \"yes\", \"no\",\nand \"auto\" (when it's known to work). Defaults to \"auto\".\\\nDo not attempt to use ccache (gcc, clang, etc.) or clcache (MSVC, clangcl).\\\nEnables C level profile guided optimization (PGO), by executing a dedicated build first\nfor a profiling run, and then using the result to feedback into the C compilation.\nNote: This is experimental and not working with standalone modes of Nuitka yet.\nDefaults to off.\\\nArguments to be passed in case of profile guided optimization. These are passed to the special\nbuilt executable during the PGO profiling run. 
Default empty.\\\nCommand to execute when collecting profile information. Use this only, if you need to\nlaunch it through a script that prepares it to run. Default use created program.\\\nDisable all information outputs, but show warnings.\nDefaults to off.\\\nOperate Scons in non-quiet mode, showing the executed commands.\nDefaults to off.Provide progress information and statistics.\nDefaults to off.Disable progress bar outputs (if tqdm is installed).\nDefaults to off.Provide memory information and statistics.\nDefaults to off.\\\nProvide information for included modules and DLLs\nDefaults to off.\\\nWhere to output --show-modules, should be a filename. Default is standard output.\\\nReport module inclusion in an XML output file. Default is off.\\\nOutput details of actions taken, esp. in optimizations. Can become a lot.\nDefaults to off.\\\nWhere to output --verbose, should be a filename. Default is standard output.\\\nWhen compiling for Windows, disable the console window. Defaults to off.\\\nAdd executable icon. Can be given multiple times for different resolutions\nor files with multiple icons inside. In the later case, you may also suffix\nwith # where n is an integer index starting from 1, specifying a specific\nicon to be included, and all others to be ignored.\\\nWhen compiling for Windows and onefile, show this while loading the application. Defaults to off.\\\nRequest Windows User Control, to enforce running from a few folders only, remote\ndesktop access. (Windows only). Defaults to off.\\\nName of the company to use in Windows Version information.\n\nOne of file or product version is required, when a version resource needs to be\nadded, e.g. to specify product name, or company name. Defaults to unused.\\\nName of the product to use in Windows Version information. Defaults to base\nfilename of the binary.\\\nFile version to use in Windows Version information. Must be a sequence of\nup to 4 numbers, e.g. 1.0.0.0, only this format is allowed.\nOne of file or product version is required, when a version resource needs to be\nadded, e.g. to specify product name, or company name. Defaults to unused.\\\nProduct version to use in Windows Version information. Must be a sequence of\nup to 4 numbers, e.g. 1.0.0.0, only this format is allowed.\nOne of file or product version is required, when a version resource needs to be\nadded, e.g. to specify product name, or company name. Defaults to unused.\\\nDescription of the file use in Windows Version information.\n\nOne of file or product version is required, when a version resource needs to be\nadded, e.g. to specify product name, or company name. Defaults to nonsense.\\\nUse this as a temporary folder. Defaults to '%TEMP%\\\\onefile_%PID%_%TIME%', i.e. system temporary directory.\\\nForce standard output of the program to go to this location. Useful for programs with\ndisabled console and programs using the Windows Services Plugin of Nuitka. Defaults\nto not active, use e.g. '%PROGRAM%.out.txt', i.e. file near your program.\\\nForce standard error of the program to go to this location. Useful for programs with\ndisabled console and programs using the Windows Services Plugin of Nuitka. Defaults\nto not active, use e.g. '%PROGRAM%.err.txt', i.e. file near your program.\\\nWhat architectures is this to supposed to run on. Default and limit\nis what the running Python allows for. Default is \"native\" which is\nthe architecture the Python is run with.\\\nWhen compiling for macOS, disable the console window and create a GUI\napplication. 
Defaults to off.\\\nWhen compiling for macOS, create a bundle rather than a plain binary\napplication. Currently experimental and incomplete. Currently this\nis the only way to unlock disabling of console.Defaults to off.\\\nName of the application to use for macOS signing. Follow com.yourcompany.appname naming\nresults for best results, as these have to be globally unique, and will grant protected\nAPI accesses.\\\nName of the product to use in macOS bundle information. Defaults to base\nfilename of the binary.\\\nProduct version to use in macOS bundle information. Defaults to 1.0 if\nnot given.\\\nEnabled plugins. Must be plug-in names. Use --plugin-list to query the\nfull list and exit. Default empty.\\\nDisabled plugins. Must be plug-in names. Use --plugin-list to query the\nfull list and exit. Default empty.\\\nPlugins can detect if they might be used, and the you can disable the warning\nvia \"--disable-plugin=plugin-that-warned\", or you can use this option to disable\nthe mechanism entirely, which also speeds up compilation slightly of course as\nthis detection code is run in vain once you are certain of which plugins to\nuse. Defaults to off.\\\nShow list of all available plugins and exit. Defaults to off.\\\nWrite source changes to original Python files. Use with care. May need\npermissions, best for use in a virtualenv to debug if plugin code\nchanges work with standard Python or to benefit from bloat removal\neven with pure Python. Default False.", "n_words": 1740, "vocab_size": 655, "n_whitespaces": 1563, "language": "en" } }, { "id": 72267, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_workflows.py", "file_name": "test_workflows.py", "fun_name": "test_collect_workflow_action_data_post", "commit_message": "Reformat with black", "code": "def test_collect_workflow_action_data_post(self):\n \n response = self.client.post(\n reverse(\n \"wagtailadmin_pages:collect_workflow_action_data\",\n args=(\n self.page.id,\n \"approve\",\n self.page.current_workflow_task_state.id,\n ),\n ),\n {\"comment\": \"This is my comment\"},\n )\n self.assertEqual(response.status_code, 200)\n response_json = json.loads(response.content)\n self.assertEqual(response_json[\"step\"], \"success\")\n self.assertEqual(\n response_json[\"cleaned_data\"], {\"comment\": \"This is my comment\"}\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 236, "n_words": 34, "vocab_size": 27, "complexity": 1, "nloc": 18, "token_counts": 94, "n_ast_nodes": 159, "n_identifiers": 16, "random_cut": "def test_collect_workflow_action_data_post(self):\n \n response = self.client.post(\n reverse(\n \"wagtailadmin_pages:collect_workflow_action_data\",\n args=(\n self.page", "d_id": 15867, "documentation": { "docstring": "\n This tests that a POST request to the collect_workflow_action_data view (for the approve action) returns a modal response with the validated data\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 37, "language": "en" } }, { "id": 115841, "commit_id": "871793d4fbd99f454c0c1ff14db6ce3c385e656c", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py", "file_name": "test_lightwood_handler.py", "fun_name": "test_02_train_predictor", "commit_message": "add more TS tests", "code": "def test_02_train_predictor(self):\n query = f\n response = self.handler.native_query(query)\n 
self.assertTrue(response.type == RESPONSE_TYPE.OK)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 8, "token_counts": 31, "n_ast_nodes": 69, "n_identifiers": 13, "random_cut": "def test_02_train_predictor(self):\n query = f\n response = self.handler.native_query(query)\n self.assertTrue(response.type == ", "d_id": 25581, "documentation": { "docstring": "\n CREATE PREDICTOR {self.test_model_1}\n FROM {PG_HANDLER_NAME} (SELECT * FROM {self.data_table_1} limit 50)\n PREDICT rental_price\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 54, "language": "en" } }, { "id": 244549, "commit_id": "c71a160c5193b92f6a4f56c113e96b63decf8354", "repo": "mmdetection", "path": "mmdet/datasets/pipelines/loading.py", "file_name": "loading.py", "fun_name": "__call__", "commit_message": "Refacter Visualization", "code": "def __call__(self, results):\n \n\n img = results['img']\n if self.to_float32:\n img = img.astype(np.float32)\n\n results['img_path'] = None\n results['img'] = img\n height, width = img.shape[:2]\n results['height'] = height\n results['width'] = width\n results['ori_height'] = height\n results['ori_width'] = width\n return results\n\n\n@TRANSFORMS.register_module()", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "@TRANSFORMS.register_module()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 123, "n_words": 36, "vocab_size": 22, "complexity": 2, "nloc": 12, "token_counts": 78, "n_ast_nodes": 147, "n_identifiers": 13, "random_cut": "def __call__(self, results):\n \n\n img = results['img']\n if self.to_float32:\n img = img.astype(np.float32)\n\n results['img_path'] = None\n results['img'] = img\n height, width = img.shape[:2]\n results['height'] = height\n results['width'] = width\n results['ori_height'] = height\n", "d_id": 70426, "documentation": { "docstring": "Call functions to add image meta information.\n\n Args:\n results (dict): Result dict with Webcam read image in\n ``results['img']``.\n\n Returns:\n dict: The dict contains loaded image and meta information.\n ", "n_words": 28, "vocab_size": 23, "n_whitespaces": 86, "language": "en" } }, { "id": 131233, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_advanced_4.py", "file_name": "test_advanced_4.py", "fun_name": "test_jemalloc_env_var_propagate", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_jemalloc_env_var_propagate():\n \n gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER\n \n expected = {}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=\"\", jemalloc_conf=\"\", jemalloc_comps=[], process_type=gcs_ptype\n )\n assert actual == expected\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=None,\n jemalloc_conf=\"a,b,c\",\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],\n process_type=gcs_ptype,\n )\n assert actual == expected\n \n library_path = \"/abc\"\n expected = {\"LD_PRELOAD\": library_path}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=library_path,\n jemalloc_conf=\"\",\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],\n process_type=gcs_ptype,\n )\n assert actual == expected\n\n # comps should be a list type.\n with 
pytest.raises(AssertionError):\n ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=library_path,\n jemalloc_conf=\"\",\n jemalloc_comps=\"ray.ray_constants.PROCESS_TYPE_GCS_SERVER,\",\n process_type=gcs_ptype,\n )\n\n # When comps don't match the process_type, it should return an empty dict.\n expected = {}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=library_path,\n jemalloc_conf=\"\",\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_RAYLET],\n process_type=gcs_ptype,\n )\n \n library_path = \"/abc\"\n malloc_conf = \"a,b,c\"\n expected = {\"LD_PRELOAD\": library_path, \"MALLOC_CONF\": malloc_conf}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=library_path,\n jemalloc_conf=malloc_conf,\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],\n process_type=gcs_ptype,\n )\n assert actual == expected\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 381, "n_words": 114, "vocab_size": 52, "complexity": 1, "nloc": 57, "token_counts": 258, "n_ast_nodes": 420, "n_identifiers": 20, "random_cut": "def test_jemalloc_env_var_propagate():\n \n gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER\n \n expected = {}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=\"\", jemalloc_conf=\"\", jemalloc_comps=[], process_type=gcs_ptype\n )\n assert actual == expected\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=None,\n jemalloc_conf=\"a,b,c\",\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],\n process_type=gcs_ptype,\n )\n assert actual == expected\n \n library_path = \"/abc\"\n expected = {\"LD_PRELOAD\": library_path}\n actual = ray._private.services.propagate_jemalloc_env_var(\n jemalloc_path=library_path,\n jemalloc_conf=\"\",\n jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER],\n process_type=gcs_ptype,\n )\n assert actual == expect", "d_id": 29488, "documentation": { "docstring": "Test `propagate_jemalloc_env_var`\n If the shared library path is not specified,\n it should return an empty dict.\n \n When the shared library is specified\n \n When the malloc config is specified\n ", "n_words": 28, "vocab_size": 20, "n_whitespaces": 51, "language": "en" } }, { "id": 244349, "commit_id": "9a3bf7660e6ced54672741095f96df07919f9ba7", "repo": "mmdetection", "path": "mmdet/models/dense_heads/dense_test_mixins.py", "file_name": "dense_test_mixins.py", "fun_name": "aug_test_bboxes", "commit_message": "[Refactor] Refactor dense head outputs to InstanceResults.", "code": "def aug_test_bboxes(self, feats, img_metas, rescale=False):\n \n # check with_nms argument\n gb_sig = signature(self.get_results)\n gb_args = [p.name for p in gb_sig.parameters.values()]\n gbs_sig = signature(self._get_results_single)\n gbs_args = [p.name for p in gbs_sig.parameters.values()]\n assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \\\n f'{self.__class__.__name__}' \\\n ' does not support test-time augmentation'\n\n aug_bboxes = []\n aug_scores = []\n aug_labels = []\n for x, img_meta in zip(feats, img_metas):\n # only one image in the batch\n outs = self.forward(x)\n bbox_outputs = self.get_results(\n *outs,\n img_metas=img_meta,\n cfg=self.test_cfg,\n rescale=False,\n with_nms=False)[0]\n aug_bboxes.append(bbox_outputs.bboxes)\n aug_scores.append(bbox_outputs.scores)\n if len(bbox_outputs) >= 3:\n aug_labels.append(bbox_outputs.labels)\n\n # after merging, 
bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = self.merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas)\n merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None\n\n if merged_bboxes.numel() == 0:\n det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1)\n return [\n (det_bboxes, merged_labels),\n ]\n\n det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,\n merged_labels, self.test_cfg.nms)\n det_bboxes = det_bboxes[:self.test_cfg.max_per_img]\n det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img]\n\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n img_metas[0][0]['scale_factor'])\n\n results = InstanceData()\n results.bboxes = _det_bboxes[:, :4]\n results.scores = _det_bboxes[:, 4]\n results.labels = det_labels\n return [results]\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 674, "n_words": 171, "vocab_size": 122, "complexity": 9, "nloc": 46, "token_counts": 361, "n_ast_nodes": 567, "n_identifiers": 54, "random_cut": "def aug_test_bboxes(self, feats, img_metas, rescale=False):\n \n # check with_nms argument\n gb_sig = signature(self.get_results)\n gb_args = [p.name for p in gb_sig.parameters.values()]\n gbs_sig = signature(self._get_results_single)\n gbs_args = [p.name for p in gbs_sig.parameters.values()]\n assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \\\n f'{self.__class__.__name__}' \\\n ' does not support test-time augmentation'\n\n aug_bboxes = []\n aug_scores = []\n aug_labels = []\n for x, img_meta in zip(feats, img_metas):\n # only one image in the batch\n outs = self.forward(x)\n bbox_outputs = self.get_results(\n *outs,\n img_metas=img_meta,\n cfg=self.test_cfg,\n rescale=False,\n with_nms=False)[0]\n aug_bboxes.append(bbox_outputs.bboxes)\n aug_scores.append(bbox_outputs.scores)\n if len(bbox_outputs) >= 3:\n aug_labels.append(bbox_outputs.labels)\n\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = self.merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas)\n merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None\n\n if merged_bboxes.numel() == 0:\n det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1)\n return [\n (det_bboxes, merged_labels),\n ]\n\n det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,\n merged_labels, self.test_cfg.nms)\n det_bboxes = det_bboxes[:self.test_cfg.max_per_img]\n det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img]\n\n if rescale:\n ", "d_id": 70341, "documentation": { "docstring": "Test det bboxes with test time augmentation, can be applied in\n DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``,\n etc.\n\n Args:\n feats (list[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains features for all images in the batch.\n img_metas (list[list[dict]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) and the inner list indicates\n images in a batch. 
each dict has image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n The first item is ``bboxes`` with shape (n, 5),\n where 5 represent (tl_x, tl_y, br_x, br_y, score).\n The shape of the second tensor in the tuple is ``labels``\n with shape (n,). The length of list should always be 1.\n ", "n_words": 131, "vocab_size": 92, "n_whitespaces": 345, "language": "en" } }, { "id": 246294, "commit_id": "d0e78af35e519ff76bd23e786007f3e7130d90f7", "repo": "synapse", "path": "synapse/replication/tcp/protocol.py", "file_name": "protocol.py", "fun_name": "pauseProducing", "commit_message": "Add missing type hints to synapse.replication. (#11938)", "code": "def pauseProducing(self) -> None:\n \n logger.info(\"[%s] Pause producing\", self.id())\n self.state = ConnectionStates.PAUSED\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 10, "token_counts": 27, "n_ast_nodes": 48, "n_identifiers": 8, "random_cut": "def pauseProducing(self) -> None:\n \n logger.info(\"[%s] Pause producing\", self.id())\n self.state = ConnectionStat", "d_id": 71136, "documentation": { "docstring": "This is called when both the kernel send buffer and the twisted\n tcp connection send buffers have become full.\n\n We don't actually have any control over those sizes, so we buffer some\n commands ourselves before knifing the connection due to the remote\n failing to keep up.\n ", "n_words": 46, "vocab_size": 38, "n_whitespaces": 81, "language": "en" } }, { "id": 219497, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_collections_abc.py", "file_name": "_collections_abc.py", "fun_name": "throw", "commit_message": "add python 3.10.4 for windows", "code": "def throw(self, typ, val=None, tb=None):\n \n if val is None:\n if tb is None:\n raise typ\n val = typ()\n if tb is not None:\n val = val.with_traceback(tb)\n raise val\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 104, "n_words": 28, "vocab_size": 16, "complexity": 4, "nloc": 8, "token_counts": 49, "n_ast_nodes": 78, "n_identifiers": 6, "random_cut": "def throw(self, typ, val=None, tb=None):\n \n if val is None:\n if tb is None:\n raise typ\n val = typ()\n if tb is not None:\n va", "d_id": 55605, "documentation": { "docstring": "Raise an exception in the coroutine.\n Return next yielded value or raise StopIteration.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 27, "language": "en" } }, { "id": 20715, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/rich/console.py", "file_name": "console.py", "fun_name": "_exit_buffer", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it 
affects the outcome of this test now in pip 22.0.4", "code": "def _exit_buffer(self) -> None:\n \n self._buffer_index -= 1\n self._check_buffer()\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 18, "n_ast_nodes": 33, "n_identifiers": 4, "random_cut": "def _exit_buffer(self) -> None:\n \n se", "d_id": 3497, "documentation": { "docstring": "Leave buffer context, and render content if required.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 317626, "commit_id": "148f96351052b0a4ba31e7d15dad16d7639e1ceb", "repo": "core", "path": "homeassistant/components/switchbot/coordinator.py", "file_name": "coordinator.py", "fun_name": "flatten_sensors_data", "commit_message": "Add Switchbot hygrometers (#75325)\n\n* Switchbot add support for hygrometers\r\n\r\n* Update CODEOWNERS\r\n\r\n* Improve debug\r\n\r\n* Remove redundant mention to temp unit\r\n\r\n* Adopt FlowResultType\r\n\r\n* Modify SwitchBot data within coordinator\r\n\r\n* Increase logging for switchbot sensor\r\n\r\n* Revert \"Increase logging for switchbot sensor\"\r\n\r\nThis reverts commit d8b377429c562fc7044a3c98a6e976e4cd71847e.\r\n\r\nCo-authored-by: J. Nick Koston ", "code": "def flatten_sensors_data(sensor):\n \n if \"temp\" in sensor[\"data\"]:\n sensor[\"data\"][\"temperature\"] = sensor[\"data\"][\"temp\"][\"c\"]\n\n return sensor\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 27, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 67, "n_identifiers": 2, "random_cut": "def flatten_sensors_data(sensor):\n \n if \"temp\" in sensor[\"data\"]:\n sensor[\"data\"][\"temp", "d_id": 116193, "documentation": { "docstring": "Deconstruct SwitchBot library temp object C/Fº readings from dictionary.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 278120, "commit_id": "6fafb567af4e4d9f42974d0b6c55b18bc03e17eb", "repo": "keras", "path": "keras/feature_column/sequence_feature_column_test.py", "file_name": "sequence_feature_column_test.py", "fun_name": "test_shared_embedding_column_with_non_sequence_categorical", "commit_message": "resolve line-too-long in feature_column", "code": "def test_shared_embedding_column_with_non_sequence_categorical(self):\n \n with tf.Graph().as_default():\n vocabulary_size = 3\n sparse_input_a = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n sparse_input_b = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n\n categorical_column_a = (\n tf.feature_column.categorical_column_with_identity(\n key=\"aaa\", num_buckets=vocabulary_size\n )\n )\n categorical_column_b = (\n tf.feature_column.categorical_column_with_identity(\n key=\"bbb\", num_buckets=vocabulary_size\n )\n )\n shared_embedding_columns = tf.feature_column.shared_embeddings(\n [categorical_column_a, categorical_column_b], dimension=2\n )\n\n sequence_input_layer = ksfc.SequenceFeatures(\n shared_embedding_columns\n )\n with self.assertRaisesRegex(\n ValueError,\n r\"In embedding_column: aaa_shared_embedding\\. 
\"\n r\"categorical_column must \"\n r\"be of type SequenceCategoricalColumn to use \"\n r\"SequenceFeatures\\.\",\n ):\n _, _ = sequence_input_layer(\n {\"aaa\": sparse_input_a, \"bbb\": sparse_input_b}\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 696, "n_words": 115, "vocab_size": 64, "complexity": 1, "nloc": 39, "token_counts": 218, "n_ast_nodes": 332, "n_identifiers": 29, "random_cut": "def test_shared_embedding_column_with_non_sequence_categorical(self):\n \n with tf.Graph().as_default():\n vocabulary_size = 3\n sparse_input_a = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n sparse_input_b = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n\n categorical_column_a = (\n tf.feature_column.categorical_column_with_identity(\n key=\"aaa\", num_buckets=vocabulary_size\n )\n )\n categorical_column_b = (\n tf.feature_column.categorical_column_with_identity(\n key=\"bbb\", num_buckets=vocabulary_size\n )\n )\n shared_embedding_columns = tf.feature_column.shared_embeddings(\n [categorical_column_a, categorical_column_b], dimension=2\n )\n\n sequence_input_layer = ksfc.SequenceFeatures(\n shared_embedding_columns\n )\n with self.assertRaisesRegex(\n ValueError,\n r\"In embedding_column: aaa_shared_embedding\\. \"\n r\"categorical_column must \"\n r\"be of type SequenceCategoricalColumn to use \"\n r\"SequenceFeatures\\.\",\n ):\n _, _ = sequence_input_layer(\n {\"aaa\": sparse_input_a,", "d_id": 82378, "documentation": { "docstring": "Tests that error is raised for non-sequence shared embedding\n column.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 285879, "commit_id": "7fd72d9ee1e8847717195859bf6d608268a94e2f", "repo": "OpenBBTerminal", "path": "openbb_terminal/helper_funcs.py", "file_name": "helper_funcs.py", "fun_name": "get_next_stock_market_days", "commit_message": "Forecasting Menu [Work in Progress] (#1933)\n\n* Gave forecasting memory\r\n\r\n* Fixed scripts, refactored\r\n\r\n* FIxed poetry lock\r\n\r\n* edge case check for forecast target\r\n\r\n* Improved combine and load functionality\r\n\r\n* Cleaned up translations\r\n\r\n* Fixed issue with covariates\r\n\r\n* Fixed issue checking covariates\r\n\r\n* Another covariates check fix\r\n\r\n* Ignored regr and linregr warnings\r\n\r\n* Fixed covariate issues\r\n\r\n* switched from forecasting to forecast\r\n\r\n* Finished transition to forecast\r\n\r\n* Can add entire dataset with one command\r\n\r\n* Improved combine description\r\n\r\n* Removed naming covariates\r\n\r\n* Created new installation\r\n\r\n* typo\r\n\r\n* Make plot show dates if available\r\n\r\n* Added better handling or users without the menu\r\n\r\n* Removed unused file\r\n\r\n* Fix\r\n\r\n* Better handling for nontraditional datasets\r\n\r\n* Fixed black and pylint\r\n\r\n* Fixed tests\r\n\r\n* Added darts install to main tests\r\n\r\n* Working on darts with CI\r\n\r\n* Added back test file\r\n\r\n* Made large tables print better\r\n\r\n* naive baseline\r\n\r\n* typo\r\n\r\n* Finished naive\r\n\r\n* no dollar on prediction\r\n\r\n* fixed positive MAPE bug\r\n\r\n* quick refactoring\r\n\r\n* Fixed two different args for same thing\r\n\r\n* added extra patience\r\n\r\n* linreg mape fix\r\n\r\n* 
info fix\r\n\r\n* Refactored API, bumped to Darts 0.21.0\r\n\r\n* Added fixes\r\n\r\n* Increased verbosity for wrong column\r\n\r\n* Updated dependencies\r\n\r\n* Hid warnings\r\n\r\n* Fixed importing\r\n\r\n* Fixed tests\r\n\r\n* Fixed ugly seasonal plotting\r\n\r\n* Fixed forecast line color\r\n\r\n* Switched chart output to blue\r\n\r\n* Simplified lambda_price_prediction_color\r\n\r\n* fixed residuals\r\n\r\n* Chnage\r\n\r\n* Removed darts from CI per Chavi\r\n\r\n* Added fixes to tests\r\n\r\n* Added knnfix\r\n\r\n* Fixed issue where n!= o\r\n\r\n* Added changes\r\n\r\n* Added changes\r\n\r\n* Imrpoved forecast dash\r\n\r\n* Added Theo notebook\r\n\r\n* Added enhancements to dash\r\n\r\n* Added notebook\r\n\r\n* Added fix for jupyter lab\r\n\r\n* Added debug stuff\r\n\r\n* Change\r\n\r\n* Updated docs\r\n\r\n* Fixed formatting\r\n\r\n* Fixed formatting\r\n\r\n* Removed prints\r\n\r\n* Filtered some info\r\n\r\n* Added button to run model\r\n\r\n* Improved api\r\n\r\n* Added secret feautr (no peeking Martin)\r\n\r\n* Cleaned code\r\n\r\n* Fixed tests\r\n\r\n* Added test fixes\r\n\r\n* Added fixes\r\n\r\n* Fixes\r\n\r\n* FIxes for pres\r\n\r\n* Remove bad tests\r\n\r\n* Removed knn\r\n\r\n* Fixed issues with removing mc\r\n\r\n* doc for conda\r\n\r\n* Added forecast improvements\r\n\r\n* Added streamlit support\r\n\r\n* Fixed issues\r\n\r\n* fix expo with streamlit due to quantile()\r\n\r\n* fixed performance issues with streamlit for now..\r\n\r\n* clean up historical forecast with new trainer\r\n\r\n* quick fix for regression trainer params\r\n\r\n* Added fixes\r\n\r\n* quick fix for other fix for regression trainer params\r\n\r\n* table formatting for timestamp\r\n\r\n* potential fix for inf in feature engineered datasets\r\n\r\n* Basic working in new format\r\n\r\n* dw\r\n\r\n* Trying\r\n\r\n* Fixed issues\r\n\r\n* Improved graphing\r\n\r\n* fixing trainer for LR and formatting\r\n\r\n* doge and linting\r\n\r\n* page break\r\n\r\n* automatic cleaning of datasets\r\n\r\n* automatic cleaning of datasets- fix\r\n\r\n* Fixed forecast dates\r\n\r\n* Made dashboard prettier\r\n\r\n* Added fixes\r\n\r\n* Added fixes\r\n\r\n* Added options\r\n\r\n* Fixed error\r\n\r\n* remove caching\r\n\r\n* adding in spinner\r\n\r\n* Added vairable n_predict in streamlit\r\n\r\n* Added mypy fix\r\n\r\n* renaming and range change\r\n\r\n* new index for n predict\r\n\r\n* check positive float for window size\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* renaming\r\n\r\n* reorg files\r\n\r\n* Update _index.md\r\n\r\n* hidden which command for versions\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* which: ns parser\r\n\r\n* hugo for: which\r\n\r\n* hugo for: forecasting fix\r\n\r\n* formatting black\r\n\r\n* update stock controller test\r\n\r\n* Lay groundwork for better residual plotting\r\n\r\n* improved delete to allow for periods in title\r\n\r\n* improved automatic cleaning of inf\r\n\r\n* Added new API\r\n\r\n* Added new API\r\n\r\n* Added new API\r\n\r\n* formatting for black\r\n\r\n* Updated our testing CI\r\n\r\n* Reverted changes\r\n\r\n* Added forecast docs\r\n\r\n* Fixed mypy issues\r\n\r\n* Fixes tests\r\n\r\n* Did some refactoring, added a report\r\n\r\n* new api in streamlit\r\n\r\n* Added integrated tests\r\n\r\n* Update _index.md\r\n\r\n* improved loading in custom dataset\r\n\r\n* 
menu spacing\r\n\r\n* installer fixes\r\n\r\n* Added docs fixes\r\n\r\n* Adding comments to test if commit working\r\n\r\n* Fixed report\r\n\r\n* naming conventions\r\n\r\n* formatting\r\n\r\n* removing unused var\r\n\r\n* Made last report imporvements\r\n\r\n* Update README.md\r\n\r\n* Added fix\r\n\r\n* Switched to warning\r\n\r\n* Added fixes\r\n\r\n* Added fixes\r\n\r\n* Added fixes\r\n\r\n* Added fixes\r\n\r\n* Update economy av view test\r\n\r\n* Remove forgotten print statement\r\n\r\n* Update depencencies\r\n\r\n* Added verbosity to pytest\r\n\r\n* Added fixes\r\n\r\n* Fixed pylint\r\n\r\n* Fixed actions checkout\r\n\r\n* Added fixes\r\n\r\nCo-authored-by: colin99d \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: James Simmons \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: Theodore Aptekarev ", "code": "def get_next_stock_market_days(last_stock_day, n_next_days) -> list:\n \n n_days = 0\n l_pred_days = []\n years: list = []\n holidays: list = []\n if isinstance(last_stock_day, datetime):\n while n_days < n_next_days:\n last_stock_day += timedelta(hours=24)\n year = last_stock_day.date().year\n if year not in years:\n years.append(year)\n holidays += us_market_holidays(year)\n # Check if it is a weekend\n if last_stock_day.date().weekday() > 4:\n continue\n # Check if it is a holiday\n if last_stock_day.strftime(\"%Y-%m-%d\") in holidays:\n continue\n # Otherwise stock market is open\n n_days += 1\n l_pred_days.append(last_stock_day)\n else:\n while n_days < n_next_days:\n l_pred_days.append(last_stock_day + 1 + n_days)\n n_days += 1\n\n return l_pred_days\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 320, "n_words": 90, "vocab_size": 52, "complexity": 7, "nloc": 24, "token_counts": 133, "n_ast_nodes": 225, "n_identifiers": 18, "random_cut": "def get_next_stock_market_days(last_stock_day, n_next_days) -> list:\n \n n_days = 0\n l_pred_days = []\n years: list = []\n holidays: list = []\n if isinstance(last_stock_day, datetime):\n while n_days < n_next_days:\n last_stock_day += timedelta(ho", "d_id": 85472, "documentation": { "docstring": "Gets the next stock market day. 
Checks against weekends and holidays", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 124708, "commit_id": "365ffe21e592589880e3116302705b5e08a5b81f", "repo": "ray", "path": "dashboard/tests/test_state_head.py", "file_name": "test_state_head.py", "fun_name": "test_max_concurrent_in_progress_functions", "commit_message": "[Core | State Observability] Implement API Server (Dashboard) HTTP Requests Throttling (#26257)\n\nThis is to limit the max number of HTTP requests the dashboard (API server) will accept before rejecting more requests.\r\nThis will make sure the observability requests do not overload the downstream systems (raylet/gcs) when delegating too many concurrent state observability requests to the cluster.", "code": "async def test_max_concurrent_in_progress_functions(extra_req_num):\n \n max_req = 10\n a = A(max_num_call=max_req)\n\n # Run more than allowed concurrent async functions should trigger rate limiting\n res_arr = await asyncio.gather(\n *[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)]\n )\n fail_cnt = 0\n for ok in res_arr:\n fail_cnt += 0 if ok else 1\n\n expected_fail_cnt = max(0, extra_req_num)\n assert fail_cnt == expected_fail_cnt, (\n f\"{expected_fail_cnt} out of {max_req + extra_req_num} \"\n f\"concurrent runs should fail with max={max_req} but {fail_cnt}.\"\n )\n\n assert a.num_call_ == 0, \"All requests should be done\"\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"failures\",\n [\n [True, True, True, True, True],\n [False, False, False, False, False],\n [False, True, False, True, False],\n [False, False, False, True, True],\n [True, True, False, False, False],\n ],\n)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"failures\",\n [\n [True, True, True, True, True],\n [False, False, False, False, False],\n [False, True, False, True, False],\n [False, False, False, True, True],\n [True, True, False, False, False],\n ],\n)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 225, "n_words": 120, "vocab_size": 78, "complexity": 5, "nloc": 15, "token_counts": 96, "n_ast_nodes": 270, "n_identifiers": 21, "random_cut": "async def test_max_concurrent_in_progress_functions(extra_req_num):\n \n max_req = 10\n a = A(max_num_call=max_req)\n\n # Run more than allowed concurrent async functions should trigger rate limiting\n res_arr = await asyncio.gather(\n *[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)]\n )\n fail_cnt = 0\n for ok in res_arr:\n fail_cnt += 0 if ok else 1\n\n expected_fail_cnt = max(0, extra_req_num)\n assert fail_cnt == expected_fail_cnt, (\n f\"{expected_fail_cnt} out of {max_req + extra_req_num} \"\n f\"concurrent runs should fail with max={max_req} but {fail_cnt}.\"\n )\n\n assert a.num_call_ == 0, \"All requests should be done\"\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"failures\",\n [\n [True, True, True, True, True],\n [False, False, False, False, False],\n [False, True, False, True, False],\n [False, False, False, True, True],\n [True, True, False, False, False],\n ],\n)", "d_id": 27665, "documentation": { "docstring": "Test rate limiting for concurrent in-progress requests on StateHead", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 158149, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "transpose_qkv", 
"commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def transpose_qkv(X, num_heads):\n \n # Shape of input `X`:\n # (`batch_size`, no. of queries or key-value pairs, `num_hiddens`).\n # Shape of output `X`:\n # (`batch_size`, no. of queries or key-value pairs, `num_heads`,\n # `num_hiddens` / `num_heads`)\n X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)\n\n # Shape of output `X`:\n # (`batch_size`, `num_heads`, no. of queries or key-value pairs,\n # `num_hiddens` / `num_heads`)\n X = X.transpose(0, 2, 1, 3)\n\n # Shape of `output`:\n # (`batch_size` * `num_heads`, no. 
of queries or key-value pairs,\n # `num_hiddens` / `num_heads`)\n return X.reshape(-1, X.shape[2], X.shape[3])\n\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 132, "n_words": 87, "vocab_size": 37, "complexity": 1, "nloc": 4, "token_counts": 69, "n_ast_nodes": 111, "n_identifiers": 6, "random_cut": "def transpose_qkv(X, num_heads):\n \n # Shape of input `X`:\n # (`batch_size`, no. ", "d_id": 37330, "documentation": { "docstring": "Transposition for parallel computation of multiple attention heads.\n\n Defined in :numref:`sec_multihead-attention`", "n_words": 11, "vocab_size": 11, "n_whitespaces": 13, "language": "en" } }, { "id": 46849, "commit_id": "4eaf9bcddfb370222b4386b02975974bb253f614", "repo": "airflow", "path": "airflow/models/taskinstance.py", "file_name": "taskinstance.py", "fun_name": "current_state", "commit_message": "No need to load whole ti in current_state (#22764)\n\nCo-authored-by: Jed Cunningham <66968678+jedcunningham@users.noreply.github.com>\r\nCo-authored-by: Tzu-ping Chung ", "code": "def current_state(self, session=NEW_SESSION) -> str:\n \n return (\n session.query(TaskInstance.state)\n .filter(\n TaskInstance.dag_id == self.dag_id,\n TaskInstance.task_id == self.task_id,\n TaskInstance.run_id == self.run_id,\n )\n .scalar()\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 131, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 17, "token_counts": 55, "n_ast_nodes": 85, "n_identifiers": 13, "random_cut": "def current_state(self, session=NEW_SESSION) -> str:\n \n return (\n session.query(TaskInstance.state)\n .filter(\n TaskInstance.dag_id == self.dag_id,\n TaskInstance.task_id == self.ta", "d_id": 9018, "documentation": { "docstring": "\n Get the very latest state from the database, if a session is passed,\n we use and looking up the state becomes part of the session, otherwise\n a new session is used.\n\n :param session: SQLAlchemy ORM Session\n ", "n_words": 36, "vocab_size": 29, "n_whitespaces": 72, "language": "en" } }, { "id": 176154, "commit_id": "dec723f072eb997a497a159dbe8674cd39999ee9", "repo": "networkx", "path": "networkx/generators/small.py", "file_name": "small.py", "fun_name": "house_graph", "commit_message": "Docstrings for the small.py module (#5240)\n\n* added description for the first 5 small graphs\r\n\r\n* modified descriptions based on comment and added description for two more functions\r\n\r\n* added doctrings to all the functions\r\n\r\n* Minor touchups.\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def house_graph(create_using=None):\n \n description = [\n \"adjacencylist\",\n \"House Graph\",\n 5,\n [[2, 3], [1, 4], [1, 4, 5], [2, 3, 5], [3, 4]],\n ]\n G = make_small_undirected_graph(description, create_using)\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 71, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 64, "n_ast_nodes": 90, "n_identifiers": 5, "random_cut": "def house_graph(create_using=None):\n \n description = [\n \"adjacencylist\",\n \"House Graph\",\n 5,\n [[2, 3],", "d_id": 41724, "documentation": { "docstring": "\n Returns the House graph (square with triangle on top)\n\n The house graph is a simple undirected graph with\n 5 nodes and 6 edges [1]_.\n\n Parameters\n ----------\n 
create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n House graph in the form of a square with a triangle on top\n\n References\n ----------\n .. [1] https://mathworld.wolfram.com/HouseGraph.html\n ", "n_words": 68, "vocab_size": 51, "n_whitespaces": 121, "language": "en" } }, { "id": 22136, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "check_header_validity", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def check_header_validity(header):\n \n name, value = header\n\n for part in header:\n if type(part) not in HEADER_VALIDATORS:\n raise InvalidHeader(\n f\"Header part ({part!r}) from {{{name!r}: {value!r}}} must be \"\n f\"of type str or bytes, not {type(part)}\"\n )\n\n _validate_header_part(name, \"name\", HEADER_VALIDATORS[type(name)][0])\n _validate_header_part(value, \"value\", HEADER_VALIDATORS[type(value)][1])\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 114, "n_words": 40, "vocab_size": 37, "complexity": 3, "nloc": 10, "token_counts": 67, "n_ast_nodes": 134, "n_identifiers": 9, "random_cut": "def check_header_validity(header):\n \n name, value = header\n\n for part in header:\n if type(part) not in HEADER_VALIDATORS:\n raise InvalidHeader(\n f\"Header part ({part!r}) ", "d_id": 4208, "documentation": { "docstring": "Verifies that header parts don't contain leading whitespace\n reserved characters, or return characters.\n\n :param header: tuple, in the format (name, value).\n ", "n_words": 21, "vocab_size": 21, "n_whitespaces": 30, "language": "en" } }, { "id": 27800, "commit_id": "a68553e1a55e3a1bd32826cdce294d27f74175e9", "repo": "saleor", "path": "saleor/graphql/order/tests/test_order.py", "file_name": "test_order.py", "fun_name": "test_orderline_query", "commit_message": "Metadata added to checkout and order lines (#10040)\n\n* Metadata added to checkout and order lines\r\n\r\n* CHANGELOG.md update\r\n\r\n* Missing tests added", "code": "def test_orderline_query(staff_api_client, permission_manage_orders, fulfilled_order):\n order = fulfilled_order\n query = \n line = order.lines.first()\n\n metadata_key = \"md key\"\n metadata_value = \"md value\"\n\n line.store_value_in_private_metadata({metadata_key: metadata_value})\n line.store_value_in_metadata({metadata_key: metadata_value})\n line.save()\n\n staff_api_client.user.user_permissions.add(permission_manage_orders)\n response = staff_api_client.post_graphql(query)\n content = get_graphql_content(response)\n order_data = content[\"data\"][\"orders\"][\"edges\"][0][\"node\"]\n first_order_data_line = order_data[\"lines\"][0]\n variant_id = graphene.Node.to_global_id(\"ProductVariant\", line.variant.pk)\n\n assert first_order_data_line[\"thumbnail\"] is None\n assert first_order_data_line[\"variant\"][\"id\"] == variant_id\n assert first_order_data_line[\"quantity\"] == line.quantity\n assert first_order_data_line[\"unitPrice\"][\"currency\"] == line.unit_price.currency\n assert first_order_data_line[\"metadata\"] == [\n {\"key\": metadata_key, \"value\": metadata_value}\n ]\n assert first_order_data_line[\"privateMetadata\"] == [\n {\"key\": metadata_key, \"value\": metadata_value}\n ]\n expected_unit_price = 
Money(\n amount=str(first_order_data_line[\"unitPrice\"][\"gross\"][\"amount\"]),\n currency=\"USD\",\n )\n assert first_order_data_line[\"totalPrice\"][\"currency\"] == line.unit_price.currency\n assert expected_unit_price == line.unit_price.gross\n\n expected_total_price = Money(\n amount=str(first_order_data_line[\"totalPrice\"][\"gross\"][\"amount\"]),\n currency=\"USD\",\n )\n assert expected_total_price == line.unit_price.gross * line.quantity\n\n allocation = line.allocations.first()\n allocation_id = graphene.Node.to_global_id(\"Allocation\", allocation.pk)\n warehouse_id = graphene.Node.to_global_id(\n \"Warehouse\", allocation.stock.warehouse.pk\n )\n assert first_order_data_line[\"allocations\"] == [\n {\n \"id\": allocation_id,\n \"quantity\": allocation.quantity_allocated,\n \"warehouse\": {\"id\": warehouse_id},\n }\n ]\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 330, "n_words": 129, "vocab_size": 78, "complexity": 1, "nloc": 93, "token_counts": 349, "n_ast_nodes": 595, "n_identifiers": 45, "random_cut": "def test_orderline_query(staff_api_client, permission_manage_orders, fulfilled_order):\n order = fulfilled_order\n query = \n line = order.lines.first()\n\n metadata_key = \"md key\"\n metadata_value = \"md value\"\n\n line.store_value_in_private_metadata({metadata_key: metadata_value})\n line.store_value_in_metadata({metadata_key: metadata_value})\n line.save()\n\n staff_api_client.user.user_permissions.add(permission_manage_orders)\n response = staff_api_client.post_graphql(query)\n content = get_graphql_content(response)\n order_data = content[\"data\"][\"orders\"][\"edges\"][0][\"node\"]\n first_order_data_line = order_data[\"lines\"][0]\n variant_id = graphene.Node.to_global_id(\"ProductVariant\", line.variant.pk)\n\n assert first_order_data_line[\"thumbnail\"] is None\n assert first_order_data_line[\"variant\"][\"id\"] == variant_id\n assert first_order_data_line[\"quantity\"] == line.quantity\n assert first_order_data_line[\"unitPrice\"][\"currency\"] == line.unit_price.currency\n assert first_order_data_line[\"metadata\"] == [\n {\"key\": metadata_key, \"value\": metadata_value}\n ]\n assert first_order_data_line[\"privateMetadata\"] == [\n {\"key\": metadata_key, \"value\": metadata_value}\n ]\n expected_unit_price = Money(\n amount=str(first_order_data_line[\"unitPrice\"", "d_id": 5130, "documentation": { "docstring": "\n query OrdersQuery {\n orders(first: 1) {\n edges {\n node {\n lines {\n thumbnail(size: 540) {\n url\n }\n variant {\n id\n }\n quantity\n allocations {\n id\n quantity\n warehouse {\n id\n }\n }\n unitPrice {\n currency\n gross {\n amount\n }\n }\n totalPrice {\n currency\n gross {\n amount\n }\n }\n metadata {\n key\n value\n }\n privateMetadata {\n key\n value\n }\n }\n }\n }\n }\n }\n ", "n_words": 62, "vocab_size": 26, "n_whitespaces": 1222, "language": "en" } }, { "id": 209122, "commit_id": "20ac1d00389d0735e6d8cd1347f0a53f478144ba", "repo": "scapy", "path": "scapy/layers/inet.py", "file_name": "inet.py", "fun_name": "in4_pseudoheader", "commit_message": "Support TCP-MD5 and TCP-AO (#3358)\n\nSupport TCP-MD5 and TCP-AO", "code": "def in4_pseudoheader(proto, u, plen):\n # type: (int, IP, int) -> bytes\n \n if u.len is not None:\n if u.ihl is None:\n olen = sum(len(x) for x in u.options)\n ihl = 5 + olen // 4 + (1 if olen % 4 else 0)\n else:\n ihl = u.ihl\n ln = max(u.len - 4 * ihl, 0)\n else:\n ln = plen\n\n # Filter out IPOption_LSRR and 
IPOption_SSRR\n sr_options = [opt for opt in u.options if isinstance(opt, IPOption_LSRR) or\n isinstance(opt, IPOption_SSRR)]\n len_sr_options = len(sr_options)\n if len_sr_options == 1 and len(sr_options[0].routers):\n # The checksum must be computed using the final\n # destination address\n u.dst = sr_options[0].routers[-1]\n elif len_sr_options > 1:\n message = \"Found %d Source Routing Options! \"\n message += \"Falling back to IP.dst for checksum computation.\"\n warning(message, len_sr_options)\n\n return struct.pack(\"!4s4sHH\",\n inet_pton(socket.AF_INET, u.src),\n inet_pton(socket.AF_INET, u.dst),\n proto,\n ln)\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 370, "n_words": 132, "vocab_size": 95, "complexity": 10, "nloc": 24, "token_counts": 182, "n_ast_nodes": 302, "n_identifiers": 28, "random_cut": "def in4_pseudoheader(proto, u, plen):\n # type: (int, IP, int) -> bytes\n \n if u.len is not None:\n if u.ihl is None:\n olen = sum(len(x) for x in u.options)\n ihl = 5 + olen // 4 + (1 if olen % 4 else 0)\n else:\n ihl = u.ihl\n ln = max(u.len - 4 * ihl, 0)\n else:\n ln = plen\n\n # Filter out IPOption_LSRR and IPOption_SSRR\n sr_options = [opt for opt in u.options if isinstance(opt, IPOption_LSRR) or\n isinstance(opt, IPOption_SSRR)]\n len_sr_options = len(sr_options)\n if len_sr_options == 1 and len(sr_options[0].routers):\n # The checksum must be computed using the final\n # destination address\n u.dst = sr_options[0].routers[-1]\n elif len_sr_options > 1:\n message = \"Found %d Source Routing Options! \"\n message += \"Falling back to IP.dst for checksum computation.\"\n warning(message, len_sr_options)\n\n return struct.pack(\"!4s4sHH\",\n inet_pton(socket.AF_IN", "d_id": 52612, "documentation": { "docstring": "IPv4 Pseudo Header as defined in RFC793 as bytes\n\n :param proto: value of upper layer protocol\n :param u: IP layer instance\n :param plen: the length of the upper layer and payload\n ", "n_words": 31, "vocab_size": 23, "n_whitespaces": 43, "language": "en" } }, { "id": 153405, "commit_id": "be9d382e35a9b87565499c029056afe1ddce6f37", "repo": "modin", "path": "modin/core/storage_formats/base/doc_utils.py", "file_name": "doc_utils.py", "fun_name": "doc_resample_fillna", "commit_message": "REFACTOR-#4093: Refactor base to be smaller (#4220)\n\nSigned-off-by: jeffreykennethli ", "code": "def doc_resample_fillna(method, refer_to, params=None, overwrite_template_params=False):\n \n action = f\"fill missing values in each group independently using {method} method\"\n params_substitution = \"limit : int\\n\"\n\n if params:\n params_substitution = (\n params\n if overwrite_template_params\n else format_string(\n \"{params}\\n{params_substitution}\",\n params=params,\n params_substitution=params_substitution,\n )\n )\n\n build_rules = \"- QueryCompiler contains unsampled data with missing values filled.\"\n\n return doc_resample(\n action=action,\n extra_params=params_substitution,\n build_rules=build_rules,\n refer_to=refer_to,\n )\n\n\ndoc_dt = partial(\n doc_qc_method,\n template=,\n one_column_method=True,\n refer_to_module_name=\"Series.dt\",\n)\n\ndoc_dt_timestamp = partial(doc_dt, dt_type=\"datetime\")\ndoc_dt_interval = partial(doc_dt, dt_type=\"interval\")\ndoc_dt_period = partial(doc_dt, dt_type=\"period\")\n\ndoc_dt_round = partial(\n doc_qc_method,\n template=,\n one_column_method=True,\n refer_to_module_name=\"Series.dt\",\n)\n\ndoc_str_method = partial(\n 
doc_qc_method,\n template=,\n one_column_method=True,\n refer_to_module_name=\"Series.str\",\n)\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 270, "n_words": 91, "vocab_size": 62, "complexity": 3, "nloc": 20, "token_counts": 70, "n_ast_nodes": 256, "n_identifiers": 23, "random_cut": "def doc_resample_fillna(method, refer_to, params=None, overwrite_template_params=False):\n \n action = f\"fill missing values in each group in", "d_id": 35399, "documentation": { "docstring": "\n Build decorator which adds docstring for the resample fillna query compiler method.\n\n Parameters\n ----------\n method : str\n Fillna method name.\n refer_to : str\n Method name in ``modin.pandas.resample.Resampler`` module to refer to for\n more information about parameters and output format.\n params : str, optional\n Method parameters in the NumPy docstyle format to substitute\n to the docstring template.\n overwrite_template_params : bool, default: False\n If `params` is specified indicates whether to overwrite method parameters in\n the docstring template or append then at the end.\n\n Returns\n -------\n callable\n \n Get {prop} for each {dt_type} value.\n {params}\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with the same shape as `self`, where each element is\n {prop} for the corresponding {dt_type} value.\n \n Perform {refer_to} operation on the underlying time-series data to the specified `freq`.\n\n Parameters\n ----------\n freq : str\n ambiguous : {{\"raise\", \"infer\", \"NaT\"}} or bool mask, default: \"raise\"\n nonexistent : {{\"raise\", \"shift_forward\", \"shift_backward\", \"NaT\"}} or timedelta, default: \"raise\"\n\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler with performed {refer_to} operation on every element.\n \n Apply \"{refer_to}\" function to each string value in QueryCompiler.\n {params}\n Returns\n -------\n BaseQueryCompiler\n New QueryCompiler containing the result of execution of the \"{refer_to}\" function\n against each string element.\n ", "n_words": 189, "vocab_size": 113, "n_whitespaces": 376, "language": "en" } }, { "id": 68154, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/utilities/transaction_base.py", "file_name": "transaction_base.py", "fun_name": "delete_events", "commit_message": "style: format code with black", "code": "def delete_events(ref_type, ref_name):\n\tevents = (\n\t\tfrappe.db.sql_list(\n\t\t\t,\n\t\t\t(ref_type, ref_name),\n\t\t)\n\t\tor []\n\t)\n\n\tif events:\n\t\tfrappe.delete_doc(\"Event\", events, for_reload=True)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 9, "n_words": 19, "vocab_size": 18, "complexity": 3, "nloc": 18, "token_counts": 44, "n_ast_nodes": 69, "n_identifiers": 9, "random_cut": "def delete_events(ref_type, ref_name):\n\tevents = (\n\t\tfrappe.db.sql_list(\n\t\t\t,\n\t\t\t(ref_type, ref_name),\n\t\t)\n\t\tor []\n\t)\n\n\tif events:\n\t\tfrappe.delete_doc(\"Event\", events, for_reload=True)\n\n", "d_id": 14730, "documentation": { "docstring": " SELECT\n\t\t\tdistinct `tabEvent`.name\n\t\tfrom\n\t\t\t`tabEvent`, `tabEvent Participants`\n\t\twhere\n\t\t\t`tabEvent`.name = `tabEvent Participants`.parent\n\t\t\tand `tabEvent Participants`.reference_doctype = %s\n\t\t\tand `tabEvent Participants`.reference_docname = %s\n\t\t", "n_words": 22, "vocab_size": 14, 
"n_whitespaces": 15, "language": "en" } }, { "id": 140519, "commit_id": "905258dbc19753c81039f993477e7ab027960729", "repo": "ray", "path": "python/ray/serve/deployment_state.py", "file_name": "deployment_state.py", "fun_name": "check_started", "commit_message": "Clean up docstyle in python modules and add LINT rule (#25272)", "code": "def check_started(self) -> ReplicaStartupStatus:\n \n status, version = self._actor.check_ready()\n\n if status == ReplicaStartupStatus.SUCCEEDED:\n # Re-assign DeploymentVersion if start / update / recover succeeded\n # by reading re-computed version in RayServeReplica\n if version is not None:\n self._version = version\n\n return status\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 115, "n_words": 39, "vocab_size": 30, "complexity": 3, "nloc": 14, "token_counts": 39, "n_ast_nodes": 67, "n_identifiers": 9, "random_cut": "def check_started(self) -> ReplicaStartupStatus:\n \n status, version = self._actor.check_ready()\n\n if status == ReplicaStartupStatus.SUCCEEDED:\n # Re-assign Depl", "d_id": 31990, "documentation": { "docstring": "Check if the replica has started. If so, transition to RUNNING.\n\n Should handle the case where the replica has already stopped.\n\n Returns:\n status: Most recent state of replica by\n querying actor obj ref\n ", "n_words": 33, "vocab_size": 28, "n_whitespaces": 80, "language": "en" } }, { "id": 311537, "commit_id": "58b8c30221a6f6e5acbbe98b7e3298b03fb741f5", "repo": "core", "path": "tests/components/homekit_controller/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "test_battery_low", "commit_message": "Improve homekit_controller tests (#65266)", "code": "async def test_battery_low(hass, utcnow):\n \n helper = await setup_test_component(\n hass, create_battery_level_sensor, suffix=\"battery\"\n )\n\n state = await helper.async_update(\n ServicesTypes.BATTERY_SERVICE,\n {\n CharacteristicsTypes.BATTERY_LEVEL: 1,\n CharacteristicsTypes.STATUS_LO_BATT: 0,\n },\n )\n assert state.attributes[\"icon\"] == \"mdi:battery-10\"\n\n state = await helper.async_update(\n ServicesTypes.BATTERY_SERVICE,\n {\n CharacteristicsTypes.BATTERY_LEVEL: 1,\n CharacteristicsTypes.STATUS_LO_BATT: 1,\n },\n )\n assert state.attributes[\"icon\"] == \"mdi:battery-alert\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 164, "n_words": 44, "vocab_size": 26, "complexity": 1, "nloc": 20, "token_counts": 93, "n_ast_nodes": 149, "n_identifiers": 15, "random_cut": "async def test_battery_low(hass, utcnow):\n \n helper = await setup_test_component(\n hass, create_battery_level_sensor, suffix=\"battery\"\n )\n\n state = await helper.async_update(", "d_id": 110202, "documentation": { "docstring": "Test reading the state of a HomeKit battery's low state.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 101059, "commit_id": "582c2ce40c11ef235dd3f9100f70e1e2832f8dd3", "repo": "faceswap", "path": "lib/model/loss/perceptual_loss_plaid.py", "file_name": "perceptual_loss_plaid.py", "fun_name": "_hyab", "commit_message": "Add Flip Loss Function\n - Add Flip for AMD and TF\n - Split Perceptual Loss functions to own modules\n - Fix allowed input shape for models\n - Allow GUI tooltip to display at higher width", "code": "def _hyab(self, y_true, y_pred):\n \n delta = y_true - y_pred\n root = K.sqrt(K.clip(K.pow(delta[..., 
0:1], 2), self._epsilon, None))\n delta_norm = frobenius_norm(delta[..., 1:3])\n return root + delta_norm\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 59, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 65, "n_ast_nodes": 97, "n_identifiers": 13, "random_cut": "def _hyab(self, y_true, y_pred):\n \n delta = y_true", "d_id": 20496, "documentation": { "docstring": " Compute the HyAB distance between true and predicted images.\n\n Parameters\n ----------\n y_true: :class:`plaidml.tile.Value`\n The ground truth batch of images in standard or Hunt-adjusted L*A*B* color space\n y_pred: :class:`plaidml.tile.Value`\n The predicted batch of images in in standard or Hunt-adjusted L*A*B* color space\n\n Returns\n -------\n :class:`plaidml.tile.Value`\n image tensor containing the per-pixel HyAB distances between true and predicted images\n ", "n_words": 56, "vocab_size": 34, "n_whitespaces": 146, "language": "en" } }, { "id": 43199, "commit_id": "95bd6b71cc9f5da377e272707f7b68000d980939", "repo": "airflow", "path": "tests/cli/commands/test_db_command.py", "file_name": "test_db_command.py", "fun_name": "test_dry_run", "commit_message": "Don't rely on current ORM structure for db clean command (#23574)\n\nFor command DB clean, by not relying on the ORM models, we will be able to use the command even when the metadatabase is not yet upgraded to the version of Airflow you have installed.\r\n\r\nAdditionally we archive all rows before deletion.", "code": "def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected):\n \n args = self.parser.parse_args(\n [\n 'db',\n 'clean',\n '--clean-before-timestamp',\n '2021-01-01',\n *dry_run_arg,\n ]\n )\n db_command.cleanup_tables(args)\n\n run_cleanup_mock.assert_called_once_with(\n table_names=None,\n dry_run=expected,\n clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'),\n verbose=False,\n confirm=True,\n skip_archive=False,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 231, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 19, "token_counts": 74, "n_ast_nodes": 116, "n_identifiers": 19, "random_cut": "def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected):\n \n args = self.parser.parse_args(\n [\n 'db',\n 'clean',\n '--clean-before-timestamp',\n '2021-01-01',\n *dry_run_arg,\n ]\n )\n db_command.cleanup_tables(args)\n\n run_cleanup_mock.assert_called_once_with(\n table_names=None,\n dry_run=expected,\n clean_before_ti", "d_id": 7862, "documentation": { "docstring": "\n When tz included in the string then default timezone should not be used.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 309272, "commit_id": "2eab3c8de1fd80a8f1456fd97389ca687c11ecb7", "repo": "core", "path": "homeassistant/components/homekit/util.py", "file_name": "util.py", "fun_name": "async_dismiss_setup_message", "commit_message": "Import persistent notification (part 3) (#63900)", "code": "def async_dismiss_setup_message(hass, entry_id):\n \n persistent_notification.async_dismiss(hass, entry_id)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 27, "n_identifiers": 5, "random_cut": "def 
async_dismiss_setup_message(hass, entry_id):\n \n", "d_id": 107978, "documentation": { "docstring": "Dismiss persistent notification and remove QR code.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 203309, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/apps/registry.py", "file_name": "registry.py", "fun_name": "get_containing_app_config", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_containing_app_config(self, object_name):\n \n self.check_apps_ready()\n candidates = []\n for app_config in self.app_configs.values():\n if object_name.startswith(app_config.name):\n subpath = object_name[len(app_config.name) :]\n if subpath == \"\" or subpath[0] == \".\":\n candidates.append(app_config)\n if candidates:\n return sorted(candidates, key=lambda ac: -len(ac.name))[0]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 139, "n_words": 33, "vocab_size": 28, "complexity": 6, "nloc": 10, "token_counts": 92, "n_ast_nodes": 152, "n_identifiers": 16, "random_cut": "def get_containing_app_config(self, object_name):\n \n self.check_apps_ready()\n candidates = []\n for app_config in self.app_configs.values():\n if object_name.startswith(app_config.name):\n subpath = object_name[len(app_config.name) :]\n if subpath == \"\" or subpath[0] == \".\":\n candidates.append(app_config)\n if candidates:\n return sorted(candidates, key=lambd", "d_id": 50294, "documentation": { "docstring": "\n Look for an app config containing a given object.\n\n object_name is the dotted Python path to the object.\n\n Return the app config for the inner application in case of nesting.\n Return None if the object isn't in any registered app config.\n ", "n_words": 41, "vocab_size": 30, "n_whitespaces": 77, "language": "en" } }, { "id": 279328, "commit_id": "102ab667f513956d89f55f2f9480b9cdc5372eef", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_track_variables", "commit_message": "Prepare keras for making ResourceVariables as CompositeTensors.\n\nWe are going to let ResourceVariable be a subclass of CompositeTensor. Changes in this CL are necessary to not break existing code.\n\nSpecifically, to track resource variables embedded in composite tensors, we will need to manually expand composite tensors layer by layer instead of replying on tf.nest.\n\nCurrently resource variables are atoms and considered to have the same structure as tensors. So we could have one branch to be a resource variable and the other branch to be a tensor. After making resource variable as composite tensors, resource variables will be tf.nest sequences instead of atoms. 
To avoid the type spec mismatch, we replace resource variables with tf.nest atoms just for the purpose of tf.nest.assert_same_structure.\n\nPiperOrigin-RevId: 464573876", "code": "def _track_variables(self, value):\n \n for val in tf.nest.flatten(value):\n if isinstance(val, tf.Variable):\n self._track_variable(val)\n elif tf_utils.is_extension_type(val):\n # Manually expand extension types to track resource variables.\n nested_vals = tf_utils.type_spec_from_value(val)._to_components(\n val\n )\n self._track_variables(nested_vals)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 158, "n_words": 28, "vocab_size": 27, "complexity": 4, "nloc": 9, "token_counts": 63, "n_ast_nodes": 103, "n_identifiers": 15, "random_cut": "def _track_variables(self, value):\n \n for val in tf.nest.flatten(value):\n if isinstance(val, tf.Variable):\n self._track_variable(val)\n elif tf_utils.is_extension_type(val):\n # Manually expand extension types to track resource variables.\n nested_vals = tf_utils.", "d_id": 82926, "documentation": { "docstring": "Tracks `Variable`s including `Variable`s in `CompositeTensor`s.", "n_words": 6, "vocab_size": 5, "n_whitespaces": 5, "language": "en" } }, { "id": 186573, "commit_id": "16aad35d31a887dab157f9d4f5e0fe9218d06064", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/configurator.py", "file_name": "configurator.py", "fun_name": "_create_vhost_v2", "commit_message": "Fully type certbot-nginx module (#9124)\n\n* Work in progress\r\n\r\n* Fix type\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Oups.\r\n\r\n* Fix typing in UnspacedList\r\n\r\n* Fix logic\r\n\r\n* Finish typing\r\n\r\n* List certbot-nginx as fully typed in tox\r\n\r\n* Fix lint\r\n\r\n* Fix checks\r\n\r\n* Organize imports\r\n\r\n* Fix typing for Python 3.6\r\n\r\n* Fix checks\r\n\r\n* Fix lint\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix signature of deploy_cert regarding the installer interface\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/obj.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix types\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/parser.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Precise type\r\n\r\n* Precise _coerce possible inputs/outputs\r\n\r\n* Fix type\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/http_01.py\r\n\r\nCo-authored-by: ohemorange \r\n\r\n* Fix type\r\n\r\n* Remove an undesirable implementation.\r\n\r\n* Fix type\r\n\r\nCo-authored-by: alexzorin \r\nCo-authored-by: ohemorange ", "code": "def _create_vhost_v2(self, node):\n \n addrs = set()\n for param in node.parameters:\n addr = obj.Addr.fromstring(param)\n if addr:\n addrs.add(addr)\n\n is_ssl = False\n # Exclusion to match the behavior in get_virtual_hosts_v2\n sslengine = node.find_directives(\"SSLEngine\", exclude=False)\n if sslengine:\n for directive in sslengine:\n if directive.parameters[0].lower() == \"on\":\n is_ssl = True\n break\n\n # \"SSLEngine on\" might be set outside of \n # Treat vhosts with port 443 as ssl vhosts\n for addr in addrs:\n if addr.get_port() == \"443\":\n is_ssl = True\n\n enabled = apache_util.included_in_paths(node.filepath, self.parsed_paths)\n\n macro = False\n # Check if the 
VirtualHost is contained in a mod_macro block\n if node.find_ancestors(\"Macro\"):\n macro = True\n vhost = obj.VirtualHost(\n node.filepath, None, addrs, is_ssl, enabled, modmacro=macro, node=node\n )\n self._populate_vhost_names_v2(vhost)\n return vhost\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 386, "n_words": 111, "vocab_size": 76, "complexity": 9, "nloc": 25, "token_counts": 159, "n_ast_nodes": 259, "n_identifiers": 30, "random_cut": "def _create_vhost_v2(self, node):\n \n addrs = set()\n for param in node.parameters:\n addr = obj.Addr.fromstring(param)\n if addr:\n addrs.add(addr)\n\n is_ssl = False\n # Exclusion to match the behavior in get_virtual_hosts_v2\n sslengine = node.find_directives(\"SSLEngine\", exclude=False)\n if sslengine:\n for directive in sslengine:\n if directive.parameters[0].lower() == \"on\":\n is_ssl = True\n break\n\n # \"SSLEngine on\" might be set outside of \n # Treat vhosts with port 443 as ssl vhosts\n for addr in addrs:\n if addr.get_port() == \"443\":\n is_ssl = True\n\n enabled = apache_uti", "d_id": 45489, "documentation": { "docstring": "Used by get_virtual_hosts_v2 to create vhost objects using ParserNode\n interfaces.\n :param interfaces.BlockNode node: The BlockNode object of VirtualHost block\n :returns: newly created vhost\n :rtype: :class:`~certbot_apache.obj.VirtualHost`\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 60, "language": "en" } }, { "id": 63204, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "insert_on", "commit_message": "upd; format", "code": "def insert_on(self, path, loc=None, replace=False):\n \n\n loc = loc or self.location\n if not loc:\n return\n\n nloc = _normalize_cached(loc)\n bdir = os.path.dirname(nloc)\n npath = [(p and _normalize_cached(p) or p) for p in path]\n\n for p, item in enumerate(npath):\n if item == nloc:\n if replace:\n break\n else:\n # don't modify path (even removing duplicates) if\n # found and not replace\n return\n elif item == bdir and self.precedence == EGG_DIST:\n # if it's an .egg, give it precedence over its directory\n # UNLESS it's already been added to sys.path and replace=False\n if (not replace) and nloc in npath[p:]:\n return\n if path is sys.path:\n self.check_version_conflict()\n path.insert(p, loc)\n npath.insert(p, nloc)\n break\n else:\n if path is sys.path:\n self.check_version_conflict()\n if replace:\n path.insert(0, loc)\n else:\n path.append(loc)\n return\n\n # p is the spot where we found or inserted loc; now remove duplicates\n while True:\n try:\n np = npath.index(nloc, p + 1)\n except ValueError:\n break\n else:\n del npath[np], path[np]\n # ha!\n p = np\n\n return\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 710, "n_words": 154, "vocab_size": 96, "complexity": 18, "nloc": 38, "token_counts": 210, "n_ast_nodes": 343, "n_identifiers": 24, "random_cut": "def insert_on(self, path, loc=None, replace=False):\n \n\n loc = loc or self.location\n if not loc:\n return\n\n nloc = _normalize_cached(loc)\n bdir = os.path.dirname(nloc)\n npath = [(p and _normalize_cached(p) or p) for p in path]\n\n fo", "d_id": 13203, "documentation": { "docstring": "Ensure self.location is on path\n\n If replace=False 
(default):\n - If location is already in path anywhere, do nothing.\n - Else:\n - If it's an egg and its parent directory is on path,\n insert just ahead of the parent.\n - Else: add to the end of path.\n If replace=True:\n - If location is already on path anywhere (not eggs)\n or higher priority than its parent (eggs)\n do nothing.\n - Else:\n - If it's an egg and its parent directory is on path,\n insert just ahead of the parent,\n removing any lower-priority entries.\n - Else: add it to the front of path.\n ", "n_words": 100, "vocab_size": 50, "n_whitespaces": 288, "language": "en" } }, { "id": 33107, "commit_id": "c72d7d91bf4899760725793421eff9da640c8527", "repo": "transformers", "path": "src/transformers/models/xglm/modeling_tf_xglm.py", "file_name": "modeling_tf_xglm.py", "fun_name": "serving_output", "commit_message": "Add TF implementation of `XGLMModel` (#16543)\n\n* Add TFXGLM models \r\n\r\n* Add todo: self.supports_xla_generation = False\r\n\r\nCo-authored-by: Daniel Stancl \r\nCo-authored-by: Daniel Stancl \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Daniel \r\nCo-authored-by: Patrick von Platen ", "code": "def serving_output(self, output):\n pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n cross_attns = (\n tf.convert_to_tensor(output.cross_attentions)\n if self.config.output_attentions and self.config.add_cross_attention\n else None\n )\n\n return TFBaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=output.hidden_states,\n past_key_values=pkv,\n hidden_states=hs,\n attentions=attns,\n cross_attentions=cross_attns,\n )\n\n\n@add_start_docstrings(\n ,\n XGLM_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"\"\"\n The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n XGLM_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 187, "n_words": 47, "vocab_size": 32, "complexity": 6, "nloc": 16, "token_counts": 113, "n_ast_nodes": 180, "n_identifiers": 22, "random_cut": "def serving_output(self, output):\n pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n cross_attns = (\n tf.convert_to_tensor(output.cross_attentions)\n if self.config.output_attent", "d_id": 6060, "documentation": { "docstring": "\n The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 30, "language": "en" } }, { "id": 154524, "commit_id": "1dc16415333bf2428ee2b1f4d31ff94e66b9a0a6", "repo": "modin", "path": "modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition_manager.py", "file_name": "partition_manager.py", "fun_name": "_apply_func_to_list_of_partitions", "commit_message": "REFACTOR-#5009: use RayWrapper.materialize instead of ray.get (#5010)\n\nSigned-off-by: Myachev ", "code": "def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs):\n \n preprocessed_map_func = cls.preprocess_func(func)\n 
key_futures = RayWrapper.materialize(\n [\n partition.apply(preprocessed_map_func, **kwargs)\n for partition in partitions\n ]\n )\n gpu_managers = [partition.get_gpu_manager() for partition in partitions]\n return cls._create_partitions(key_futures, gpu_managers)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 124, "n_words": 30, "vocab_size": 25, "complexity": 3, "nloc": 10, "token_counts": 65, "n_ast_nodes": 100, "n_identifiers": 15, "random_cut": "def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs):\n \n preprocessed_map_func = cls.preprocess_func(func)\n key_futures = RayWrapper.materialize(\n [\n partition.apply(preprocessed_map_func, **kwargs)\n for partition in partitions\n ]\n )\n gpu_managers = [partition.get_gpu_manager()", "d_id": 36047, "documentation": { "docstring": "\n Apply `func` to a list of remote partitions from `partitions`.\n\n Parameters\n ----------\n func : callable\n The function to apply.\n partitions : np.ndarray\n NumPy array with partitions.\n **kwargs : dict\n Additional keywords arguments to be passed in `func`.\n\n Returns\n -------\n np.ndarray\n A NumPy array of ``cuDFOnRayDataframePartition`` objects.\n\n Notes\n -----\n This preprocesses the `func` first before applying it to the partitions.\n ", "n_words": 59, "vocab_size": 46, "n_whitespaces": 195, "language": "en" } }, { "id": 140169, "commit_id": "f27e85cd7df5ca2873ef6231200a1530e16ac35d", "repo": "ray", "path": "python/ray/serve/deployment_function_executor_node.py", "file_name": "deployment_function_executor_node.py", "fun_name": "_execute_impl", "commit_message": "[Serve][Deployment Graph][Perf] Add minimal executor DAGNode (#24754)\n\ncloses #24475\r\n\r\nCurrent deployment graph has big perf issues compare with using plain deployment handle, mostly because overhead of DAGNode traversal mechanism. We need this mechanism to empower DAG API, specially deeply nested objects in args where we rely on pickling; But meanwhile the nature of each execution becomes re-creating and replacing every `DAGNode` instances involved upon each execution, that incurs overhead.\r\n\r\nSome overhead is inevitable due to pickling and executing DAGNode python code, but they could be quite minimal. As I profiled earlier, pickling itself is quite fast for our benchmarks at magnitude of microseconds.\r\n\r\nMeanwhile the elephant in the room is DeploymentNode and its relatives are doing too much work in constructor that's beyond necessary, thus slowing everything down. 
So the fix is as simple as \r\n\r\n1) Introduce a new set of executor dag node types that contains absolute minimal information that only preserves the DAG structure with traversal mechanism, and ability to call relevant deployment handles.\r\n2) Add a simple new pass in our build() that generates and replaces nodes with executor dag to produce a final executor dag to run the graph.\r\n\r\nCurrent ray dag -> serve dag mixed a lot of stuff related to deployment generation and init args, in longer term we should remove them but our correctness depends on it so i rather leave it as separate PR.\r\n\r\n### Current 10 node chain with deployment graph `.bind()`\r\n```\r\nchain_length: 10, num_clients: 1\r\nlatency_mean_ms: 41.05, latency_std_ms: 15.18\r\nthroughput_mean_tps: 27.5, throughput_std_tps: 3.2\r\n```\r\n\r\n### Using raw deployment handle without dag overhead\r\n```\r\nchain_length: 10, num_clients: 1\r\nlatency_mean_ms: 20.39, latency_std_ms: 4.57\r\nthroughput_mean_tps: 51.9, throughput_std_tps: 1.04\r\n```\r\n\r\n### After this PR:\r\n```\r\nchain_length: 10, num_clients: 1\r\nlatency_mean_ms: 20.35, latency_std_ms: 0.87\r\nthroughput_mean_tps: 48.4, throughput_std_tps: 1.43\r\n```", "code": "def _execute_impl(self, *args, **kwargs) -> ObjectRef:\n \n return self._deployment_function_handle.remote(\n *self._bound_args, **self._bound_kwargs\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 43, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 10, "token_counts": 31, "n_ast_nodes": 50, "n_identifiers": 9, "random_cut": "def _execute_impl(self, *args, **kwargs) -> ObjectRef:\n \n return self._deployment_function_handle.remote(\n *", "d_id": 31884, "documentation": { "docstring": "Executor of DeploymentNode getting called each time on dag.execute.\n\n The execute implementation is recursive, that is, the method nodes will\n receive whatever this method returns. 
We return a handle here so method\n node can directly call upon.\n ", "n_words": 37, "vocab_size": 35, "n_whitespaces": 65, "language": "en" } }, { "id": 268948, "commit_id": "373ad97c72ed1ac4b6898e85b2cfd7b016e4b469", "repo": "keras", "path": "keras/preprocessing/image.py", "file_name": "image.py", "fun_name": "save_img", "commit_message": "Copy image utils from keras_preprocessing directly into core keras\n\nThis is not new code, we are just moving these utilities directly\ninto keras from keras-preprocessing.\n\nFor the library code, just fixed linting errors.\nFor the test code, had to do more major changes to port from pytest, but\nhopefully any errors have been caught by the tests themselves.\n\nPiperOrigin-RevId: 427274651", "code": "def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):\n \n if data_format is None:\n data_format = backend.image_data_format()\n img = array_to_img(x, data_format=data_format, scale=scale)\n if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):\n warnings.warn('The JPG format does not support '\n 'RGBA images, converting to RGB.')\n img = img.convert('RGB')\n img.save(path, format=file_format, **kwargs)\n\n\n@keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img')", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img')", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 81, "n_words": 51, "vocab_size": 44, "complexity": 5, "nloc": 9, "token_counts": 94, "n_ast_nodes": 171, "n_identifiers": 18, "random_cut": "def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):\n \n if data_format is None:\n data_format = backend.image_data_format()\n img = array_to_img(x, data_format=data_format, scale=scale)\n if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):\n warnings.warn('The JPG format does not support '\n 'RGBA images, converting to RGB.')\n img = img.conve", "d_id": 79779, "documentation": { "docstring": "Saves an image stored as a Numpy array to a path or file object.\n\n Args:\n path: Path or file object.\n x: Numpy array.\n data_format: Image data format, either \"channels_first\" or\n \"channels_last\".\n file_format: Optional file format override. If omitted, the format to use\n is determined from the filename extension. 
If a file object was used\n instead of a filename, this parameter should always be used.\n scale: Whether to rescale image values to be within `[0, 255]`.\n **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.\n ", "n_words": 82, "vocab_size": 63, "n_whitespaces": 135, "language": "en" } }, { "id": 72483, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/pages/edit.py", "file_name": "edit.py", "fun_name": "log_commenting_changes", "commit_message": "Reformat with black", "code": "def log_commenting_changes(self, changes, revision):\n \n for comment in changes[\"new_comments\"]:\n comment.log_create(page_revision=revision, user=self.request.user)\n\n for comment in changes[\"edited_comments\"]:\n comment.log_edit(page_revision=revision, user=self.request.user)\n\n for comment in changes[\"resolved_comments\"]:\n comment.log_resolve(page_revision=revision, user=self.request.user)\n\n for comment in changes[\"deleted_comments\"]:\n comment.log_delete(page_revision=revision, user=self.request.user)\n\n for comment, replies in changes[\"new_replies\"]:\n for reply in replies:\n reply.log_create(page_revision=revision, user=self.request.user)\n\n for comment, replies in changes[\"edited_replies\"]:\n for reply in replies:\n reply.log_edit(page_revision=revision, user=self.request.user)\n\n for comment, replies in changes[\"deleted_replies\"]:\n for reply in replies:\n reply.log_delete(page_revision=revision, user=self.request.user)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 239, "n_words": 61, "vocab_size": 26, "complexity": 11, "nloc": 18, "token_counts": 199, "n_ast_nodes": 306, "n_identifiers": 14, "random_cut": "def log_commenting_changes(self, changes, revision):\n \n for comment in changes[\"new_comments\"]:\n comment.log_create(page_revision=revision, user=self.request.user)\n\n for comment in changes[\"edited_comments\"]:\n comment.log_edit(page_revision=revision, user=self.request.user)\n\n for comment in changes[\"resolved_comments\"]:\n comment.log_resolve(page_revision=revision, user=self.request.user)\n\n for comment in changes[\"deleted_comments\"]:\n comment.log_delete(page_revision=revision, user=self.request.user)\n\n for comment, replies in changes[\"new_replies\"]:\n for reply in replies:\n reply.log_create(page_revision=revision, user=self.request.user)\n\n for comment, replies in changes[\"edited_replies\"]:\n for reply in replies:\n reply.log_edit(page_revision=revision, user=self.request.user)\n\n for comment, replies in changes[\"deleted_replies\"]:\n for reply in replies:\n reply.log_delete(page_revision=revision, user=self.request.user)\n", "d_id": 15900, "documentation": { "docstring": "\n Generates log entries for any changes made to comments or replies.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 108011, "commit_id": "075ff0952896f44d7d0b0b3318f0978ae53f84d7", "repo": "matplotlib", "path": "lib/matplotlib/patches.py", "file_name": "patches.py", "fun_name": "__new__", "commit_message": "Small style fixes.", "code": "def __new__(cls, stylename, **kwargs):\n \n # The \"class\" should have the _style_list attribute, which is a mapping\n # of style names to style classes.\n _list = stylename.replace(\" \", \"\").split(\",\")\n _name = _list[0].lower()\n try:\n _cls = cls._style_list[_name]\n except KeyError as err:\n raise ValueError(f\"Unknown style: 
{stylename}\") from err\n try:\n _args_pair = [cs.split(\"=\") for cs in _list[1:]]\n _args = {k: float(v) for k, v in _args_pair}\n except ValueError as err:\n raise ValueError(f\"Incorrect style argument: {stylename}\") from err\n return _cls(**{**_args, **kwargs})\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 201, "n_words": 76, "vocab_size": 59, "complexity": 5, "nloc": 13, "token_counts": 120, "n_ast_nodes": 208, "n_identifiers": 20, "random_cut": "def __new__(cls, stylename, **kwargs):\n \n # The \"class\" should have the _style_list attribute, which is a mapping\n # of style names to style classes.\n _list = stylename.replace(\" \", \"\").split(\",\")\n _name = _list[0].lower()\n try:\n _cls = cls._style_list[_name]\n except KeyError as err:\n raise ValueError(f\"Unknown style: {stylename}\") from err\n try:\n ", "d_id": 23015, "documentation": { "docstring": "Return the instance of the subclass with the given style name.", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 277252, "commit_id": "fa6d9107a498f7c2403ff28c7b389a1a0c5cc083", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "losses", "commit_message": "reduct too long lines", "code": "def losses(self):\n \n collected_losses = []\n for layer in self._flatten_layers():\n # If any eager losses are present, we assume the model to be part of\n # an eager training loop (either a custom one or the one used when\n # `run_eagerly=True`) and so we always return just the eager losses.\n if layer._eager_losses:\n # Filter placeholder losses that may have been added by revived\n # layers. (see base_layer_utils for details).\n if (\n layer._eager_losses[0]\n is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER\n ):\n collected_losses.extend(layer._eager_losses)\n else:\n collected_losses.extend(layer._losses)\n for regularizer in layer._callable_losses:\n loss_tensor = regularizer()\n if loss_tensor is not None:\n collected_losses.append(loss_tensor)\n return collected_losses\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 369, "n_words": 93, "vocab_size": 71, "complexity": 6, "nloc": 16, "token_counts": 83, "n_ast_nodes": 140, "n_identifiers": 14, "random_cut": "def losses(self):\n \n collected_losses = []\n for layer in self._flatten_layers():\n # If any eager losses are present, we assume the model to be part of\n # an eager training loop (either a custom one or the one used when\n # `run_eagerly=True`) and so we always return just the eager losses.\n if layer._eager_losses:\n # Filter placeholder losses that may have been added by revived\n # layers. (see base_layer_utils for details).\n if (\n layer._eager_losses[0]\n is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER\n ):\n collected_losses.extend(layer._eager_losses)\n else:\n collected_losses.extend(layer._losses)\n for regularizer in layer._callable_losses:\n ", "d_id": 81916, "documentation": { "docstring": "List of losses added using the `add_loss()` API.\n\n Variable regularization tensors are created when this property is\n accessed, so it is eager safe: accessing `losses` under a\n `tf.GradientTape` will propagate gradients back to the corresponding\n variables.\n\n Examples:\n\n >>> class MyLayer(tf.keras.layers.Layer):\n ... def call(self, inputs):\n ... 
self.add_loss(tf.abs(tf.reduce_mean(inputs)))\n ... return inputs\n >>> l = MyLayer()\n >>> l(np.ones((10, 1)))\n >>> l.losses\n [1.0]\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> x = tf.keras.layers.Dense(10)(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Activity regularization.\n >>> len(model.losses)\n 0\n >>> model.add_loss(tf.abs(tf.reduce_mean(x)))\n >>> len(model.losses)\n 1\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones')\n >>> x = d(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Weight regularization.\n >>> model.add_loss(lambda: tf.reduce_mean(d.kernel))\n >>> model.losses\n []\n\n Returns:\n A list of tensors.\n ", "n_words": 128, "vocab_size": 83, "n_whitespaces": 385, "language": "en" } }, { "id": 260724, "commit_id": "6c0e0b2e4723d11e29057635c7061a36bc1a8512", "repo": "scikit-learn", "path": "sklearn/linear_model/_least_angle.py", "file_name": "_least_angle.py", "fun_name": "fit", "commit_message": "MAINT Parameter Validation for Lars, LarsCV, LassoLars, LassoLarsCV and LassoLarsIC (#24033)\n\nCo-authored-by: jeremie du boisberranger ", "code": "def fit(self, X, y, Xy=None):\n \n self._validate_params()\n\n X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)\n\n _normalize = _deprecate_normalize(\n self.normalize, default=True, estimator_name=self.__class__.__name__\n )\n\n alpha = getattr(self, \"alpha\", 0.0)\n if hasattr(self, \"n_nonzero_coefs\"):\n alpha = 0.0 # n_nonzero_coefs parametrization takes priority\n max_iter = self.n_nonzero_coefs\n else:\n max_iter = self.max_iter\n\n if self.jitter is not None:\n rng = check_random_state(self.random_state)\n\n noise = rng.uniform(high=self.jitter, size=len(y))\n y = y + noise\n\n self._fit(\n X,\n y,\n max_iter=max_iter,\n alpha=alpha,\n fit_path=self.fit_path,\n normalize=_normalize,\n Xy=Xy,\n )\n\n return self\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 310, "n_words": 71, "vocab_size": 52, "complexity": 3, "nloc": 26, "token_counts": 169, "n_ast_nodes": 251, "n_identifiers": 32, "random_cut": "def fit(self, X, y, Xy=None):\n \n self._validate_params()\n\n X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)\n\n _normalize = _deprecate_normalize(\n self.normalize, default=True, estimator_name=self.__class__.__name__\n )\n\n alpha = getattr(self, \"alp", "d_id": 76442, "documentation": { "docstring": "Fit the model using X, y as training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \\\n default=None\n Xy = np.dot(X.T, y) that can be precomputed. 
It is useful\n only when the Gram matrix is precomputed.\n\n Returns\n -------\n self : object\n Returns an instance of self.\n ", "n_words": 70, "vocab_size": 49, "n_whitespaces": 203, "language": "en" } }, { "id": 50226, "commit_id": "ffcde21305c61d950a9f93e57e6180c9a9665b87", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/efficientnet.py", "file_name": "efficientnet.py", "fun_name": "_decode_block_string", "commit_message": "add disco_diffusion_ernievil_base", "code": "def _decode_block_string(block_string):\n \n assert isinstance(block_string, str)\n\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # Check stride\n cond_1 = ('s' in options and len(options['s']) == 1)\n cond_2 = ((len(options['s']) == 2) and (options['s'][0] == options['s'][1]))\n assert (cond_1 or cond_2)\n\n return BlockArgs(kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=[int(options['s'][0])])\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 363, "n_words": 73, "vocab_size": 56, "complexity": 7, "nloc": 20, "token_counts": 213, "n_ast_nodes": 348, "n_identifiers": 26, "random_cut": "def _decode_block_string(block_string):\n \n assert isinstance(block_string, str)\n\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # Check stride\n cond_1 = ('s' in options and len(options['s']) == 1)\n cond_2 = ((len(options['s']) == 2) and (options['s'][0] == options['s", "d_id": 10055, "documentation": { "docstring": " Gets a block through a string notation of arguments. ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 10, "language": "en" } }, { "id": 160171, "commit_id": "0307f89d48368a39ed97a252f9faed3c7bf64446", "repo": "numpy", "path": "numpy/lib/function_base.py", "file_name": "function_base.py", "fun_name": "copy", "commit_message": "Improve documentation formatting", "code": "def copy(a, order='K', subok=False):\n \n return array(a, order=order, subok=subok, copy=True)\n\n# Basic operations\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 31, "n_ast_nodes": 49, "n_identifiers": 5, "random_cut": "def copy(a, order='K', subok=False):\n \n return ", "d_id": 38543, "documentation": { "docstring": "\n Return an array copy of the given object.\n\n Parameters\n ----------\n a : array_like\n Input data.\n order : {'C', 'F', 'A', 'K'}, optional\n Controls the memory layout of the copy. 'C' means C-order,\n 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,\n 'C' otherwise. 'K' means match the layout of `a` as closely\n as possible. 
(Note that this function and :meth:`ndarray.copy` are very\n similar, but have different default values for their order=\n arguments.)\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise the\n returned array will be forced to be a base-class array (defaults to False).\n\n .. versionadded:: 1.19.0\n\n Returns\n -------\n arr : ndarray\n Array interpretation of `a`.\n\n See Also\n --------\n ndarray.copy : Preferred method for creating an array copy\n\n Notes\n -----\n This is equivalent to:\n\n >>> np.array(a, copy=True) #doctest: +SKIP\n\n Examples\n --------\n Create an array x, with a reference y and a copy z:\n\n >>> x = np.array([1, 2, 3])\n >>> y = x\n >>> z = np.copy(x)\n\n Note that, when we modify x, y changes, but not z:\n\n >>> x[0] = 10\n >>> x[0] == y[0]\n True\n >>> x[0] == z[0]\n False\n\n Note that, np.copy clears previously set WRITEABLE=False flag.\n\n >>> a = np.array([1, 2, 3])\n >>> a.flags[\"WRITEABLE\"] = False\n >>> b = np.copy(a)\n >>> b.flags[\"WRITEABLE\"]\n True\n >>> b[0] = 3\n >>> b\n array([3, 2, 3])\n\n Note that np.copy is a shallow copy and will not copy object\n elements within arrays. This is mainly important for arrays\n containing Python objects. The new array will contain the\n same object which may lead to surprises if that object can\n be modified (is mutable):\n\n >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)\n >>> b = np.copy(a)\n >>> b[2][0] = 10\n >>> a\n array([1, 'm', list([10, 3, 4])], dtype=object)\n\n To ensure all elements within an ``object`` array are copied,\n use `copy.deepcopy`:\n\n >>> import copy\n >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)\n >>> c = copy.deepcopy(a)\n >>> c[2][0] = 10\n >>> c\n array([1, 'm', list([10, 3, 4])], dtype=object)\n >>> a\n array([1, 'm', list([2, 3, 4])], dtype=object)\n\n ", "n_words": 340, "vocab_size": 188, "n_whitespaces": 593, "language": "en" } }, { "id": 139754, "commit_id": "68d4dd3a8b2defa5549cfa70e59aa26f2d4825a3", "repo": "ray", "path": "python/ray/data/tests/test_context_propagation.py", "file_name": "test_context_propagation.py", "fun_name": "test_context_placement_group", "commit_message": "[Datasets] Add explicit resource allocation option via a top-level scheduling strategy (#24438)\n\nInstead of letting Datasets implicitly use cluster resources in the margins of explicit allocations of other libraries, such as Tune, Datasets should provide an option for explicitly allocating resources for a Datasets workload for users that want to box Datasets in. 
This PR adds such an explicit resource allocation option, via exposing a top-level scheduling strategy on the DatasetContext with which a placement group can be given.", "code": "def test_context_placement_group():\n driver_code = \n proc = run_string_as_driver_nonblocking(driver_code)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 6, "complexity": 1, "nloc": 30, "token_counts": 23, "n_ast_nodes": 27, "n_identifiers": 4, "random_cut": "def test_context_placement_group():\n driver_code = \n proc = run_string_as_driver_no", "d_id": 31769, "documentation": { "docstring": "\nimport ray\nfrom ray.data.context import DatasetContext\nfrom ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy\nfrom ray._private.test_utils import placement_group_assert_no_leak\n\nray.init(num_cpus=1)\n\ncontext = DatasetContext.get_current()\n# This placement group will take up all cores of the local cluster.\nplacement_group = ray.util.placement_group(\n name=\"core_hog\",\n strategy=\"SPREAD\",\n bundles=[\n {\"CPU\": 1},\n ],\n)\nray.get(placement_group.ready())\ncontext.scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group)\npipe = ray.data.range(100, parallelism=2) \\\n .window(blocks_per_window=1) \\\n .map(lambda x: x + 1)\nassert pipe.take_all() == list(range(1, 101))\nplacement_group_assert_no_leak([placement_group])\nray.shutdown()\n ", "n_words": 64, "vocab_size": 55, "n_whitespaces": 78, "language": "en" } }, { "id": 170562, "commit_id": "ab6562a20bd894d02fb28675809698d5be0436f9", "repo": "pandas", "path": "pandas/core/arrays/categorical.py", "file_name": "categorical.py", "fun_name": "reorder_categories", "commit_message": "DEPR: remove inplace arg in Categorical methods (#49321)\n\n* deprecate inplace arg in categorical methods\r\n\r\n* fix tests\r\n\r\n* add back test\r\n\r\n* doc fix\r\n\r\n* doc fixes\r\n\r\n* avoid constructing new objects on every iteration\r\n\r\n* cleanup", "code": "def reorder_categories(self, new_categories, ordered=None):\n \n if set(self.dtype.categories) != set(new_categories):\n raise ValueError(\n \"items in new_categories are not the same as in old categories\"\n )\n return self.set_categories(new_categories, ordered=ordered)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 83, "n_words": 25, "vocab_size": 24, "complexity": 2, "nloc": 6, "token_counts": 43, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "def reorder_categories(self, new_categories, ordered=None):\n \n if set(self.dtype.categories) != set(new_categories):\n raise ValueError(\n \"items in new_categories are not the same as in old categories\"\n )\n return self.set_categories(new_categories, ordered=ordered)\n", "d_id": 40576, "documentation": { "docstring": "\n Reorder categories as specified in new_categories.\n\n `new_categories` need to include all old categories and no new category\n items.\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : bool, optional\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n\n Returns\n -------\n cat : Categorical\n Categorical with reordered categories.\n\n Raises\n ------\n ValueError\n If the new categories do not contain all old category items or any\n new ones\n\n See Also\n 
--------\n rename_categories : Rename categories.\n add_categories : Add new categories.\n remove_categories : Remove the specified categories.\n remove_unused_categories : Remove categories which are not used.\n set_categories : Set the categories to the specified ones.\n ", "n_words": 114, "vocab_size": 71, "n_whitespaces": 325, "language": "en" } }, { "id": 246887, "commit_id": "02d708568b476f2f7716000b35c0adfa4cbd31b3", "repo": "synapse", "path": "tests/rest/client/test_rooms.py", "file_name": "test_rooms.py", "fun_name": "test_get_member_list_no_permission_former_member_with_at_token", "commit_message": "Replace assertEquals and friends with non-deprecated versions. (#12092)", "code": "def test_get_member_list_no_permission_former_member_with_at_token(self):\n \n # create a room, invite the user and the user joins\n room_id = self.helper.create_room_as(\"@alice:red\")\n self.helper.invite(room_id, \"@alice:red\", self.user_id)\n self.helper.join(room_id, self.user_id)\n\n # sync to get an at token\n channel = self.make_request(\"GET\", \"/sync\")\n self.assertEqual(200, channel.code)\n sync_token = channel.json_body[\"next_batch\"]\n\n # check that the user can see the member list to start with\n channel = self.make_request(\n \"GET\", \"/rooms/%s/members?at=%s\" % (room_id, sync_token)\n )\n self.assertEqual(200, channel.code, msg=channel.result[\"body\"])\n\n # ban the user (Note: the user is actually allowed to see this event and\n # state so that they know they're banned!)\n self.helper.change_membership(room_id, \"@alice:red\", self.user_id, \"ban\")\n\n # invite a third user and let them join\n self.helper.invite(room_id, \"@alice:red\", \"@bob:red\")\n self.helper.join(room_id, \"@bob:red\")\n\n # now, with the original user, sync again to get a new at token\n channel = self.make_request(\"GET\", \"/sync\")\n self.assertEqual(200, channel.code)\n sync_token = channel.json_body[\"next_batch\"]\n\n # check the user can no longer see the updated member list\n channel = self.make_request(\n \"GET\", \"/rooms/%s/members?at=%s\" % (room_id, sync_token)\n )\n self.assertEqual(403, channel.code, msg=channel.result[\"body\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 361, "n_words": 150, "vocab_size": 78, "complexity": 1, "nloc": 21, "token_counts": 206, "n_ast_nodes": 351, "n_identifiers": 17, "random_cut": "def test_get_member_list_no_permission_former_member_with_at_token(self):\n \n # create a room, invite the user and the user joins\n room_id = self.helper.create_room_as(\"@alice:red\")\n self.helper.invite(room_id, \"@alice:red\", self.user_id)\n self.helper.join(room_id, self.user_id)\n\n # sync to get an at token\n channel = self.make_request(\"GET\", \"/sync\")\n self.assertEqual(200, channel.code)\n sync_token = channel.json_body[\"next_batch\"]\n\n # check that the user can see the member list to start with\n channel = self.make_request(\n \"GET\", \"/rooms/%s/members?at=%s\" % (room_id, sync_token)\n )\n se", "d_id": 71391, "documentation": { "docstring": "\n Tests that a former member of the room can not get the member list\n (in the case that they use an at token).\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 45, "language": "en" } }, { "id": 222501, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/difflib.py", "file_name": "difflib.py", "fun_name": "_keep_original_ws", "commit_message": "add python 3.10.4 
for windows", "code": "def _keep_original_ws(s, tag_s):\n \n return ''.join(\n c if tag_c == \" \" and c.isspace() else tag_c\n for c, tag_c in zip(s, tag_s)\n )\n\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 22, "vocab_size": 19, "complexity": 4, "nloc": 5, "token_counts": 38, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "def _keep_original_ws(s, tag_s):\n \n return ''.join(\n c if tag_c == \" \" and c.isspace() else tag_c\n for c, tag_c in zip(s, tag_s)\n )\n\n\n", "d_id": 56599, "documentation": { "docstring": "Replace whitespace with the original whitespace characters in `s`", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 108343, "commit_id": "fb902f735995372f345a8333804f5c6052f29770", "repo": "matplotlib", "path": "lib/matplotlib/cm.py", "file_name": "cm.py", "fun_name": "unregister_cmap", "commit_message": "MNT: Remove cmap_d colormap access", "code": "def unregister_cmap(name):\n \n cmap = _colormaps.get(name, None)\n _colormaps.unregister(name)\n return cmap\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 6, "random_cut": "def unregister_cmap(name):\n \n cmap = _colormaps.get(name, None)\n _colormaps.unregister(name)\n return cmap\n\n", "d_id": 23150, "documentation": { "docstring": "\n Remove a colormap recognized by :func:`get_cmap`.\n\n You may not remove built-in colormaps.\n\n If the named colormap is not registered, returns with no error, raises\n if you try to de-register a default colormap.\n\n .. warning::\n\n Colormap names are currently a shared namespace that may be used\n by multiple packages. Use `unregister_cmap` only if you know you\n have registered that name before. 
In particular, do not\n unregister just in case to clean the name before registering a\n new colormap.\n\n Parameters\n ----------\n name : str\n The name of the colormap to be un-registered\n\n Returns\n -------\n ColorMap or None\n If the colormap was registered, return it if not return `None`\n\n Raises\n ------\n ValueError\n If you try to de-register a default built-in colormap.\n ", "n_words": 118, "vocab_size": 80, "n_whitespaces": 209, "language": "en" } }, { "id": 104902, "commit_id": "17fd2ea68cf75b36369a9f018497875e292db26a", "repo": "datasets", "path": "src/datasets/utils/streaming_download_manager.py", "file_name": "streaming_download_manager.py", "fun_name": "_get_extraction_protocol_with_magic_number", "commit_message": "don't check f.loc in _get_extraction_protocol_with_magic_number (#4318)", "code": "def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:\n \n magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)\n f.seek(0)\n for i in range(MAGIC_NUMBER_MAX_LENGTH):\n compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])\n if compression is not None: # TODO(QL): raise an error for .tar.gz files as in _get_extraction_protocol\n return compression\n compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])\n if compression is not None:\n raise NotImplementedError(f\"Compression protocol '{compression}' not implemented.\")\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 116, "n_words": 53, "vocab_size": 36, "complexity": 4, "nloc": 11, "token_counts": 81, "n_ast_nodes": 135, "n_identifiers": 15, "random_cut": "def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:\n \n magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)\n f.seek(0)\n for i in range(MAGIC_NUMBER_MAX_LENGTH):\n compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])\n if compression is not None: # TODO(QL): raise an error for .tar.gz files as ", "d_id": 22017, "documentation": { "docstring": "read the magic number from a file-like object and return the compression protocol", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 215953, "commit_id": "f2a783643de61cac1ff3288b40241e5ce6e1ddc8", "repo": "salt", "path": "salt/modules/lxc.py", "file_name": "lxc.py", "fun_name": "_get_veths", "commit_message": "Update to latest ``pyupgrade`` hook. 
Stop skipping it on CI.\n\nSigned-off-by: Pedro Algarvio ", "code": "def _get_veths(net_data):\n \n if isinstance(net_data, dict):\n net_data = list(net_data.items())\n nics = salt.utils.odict.OrderedDict()\n current_nic = salt.utils.odict.OrderedDict()\n no_names = True\n for item in net_data:\n if item and isinstance(item, dict):\n item = list(item.items())[0]\n # skip LXC configuration comment lines, and play only with tuples conf\n elif isinstance(item, str):\n # deal with reflection of commented lxc configs\n sitem = item.strip()\n if sitem.startswith(\"#\") or not sitem:\n continue\n elif \"=\" in item:\n item = tuple(a.strip() for a in item.split(\"=\", 1))\n if item[0] == \"lxc.network.type\":\n current_nic = salt.utils.odict.OrderedDict()\n if item[0] == \"lxc.network.name\":\n no_names = False\n nics[item[1].strip()] = current_nic\n current_nic[item[0].strip()] = item[1].strip()\n # if not ethernet card name has been collected, assuming we collected\n # data for eth0\n if no_names and current_nic:\n nics[DEFAULT_NIC] = current_nic\n return nics\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 322, "n_words": 118, "vocab_size": 74, "complexity": 14, "nloc": 24, "token_counts": 206, "n_ast_nodes": 342, "n_identifiers": 22, "random_cut": "def _get_veths(net_data):\n \n if isinstance(net_data, dict):\n net_data = list(net_data.items())\n nics = salt.utils.odict.OrderedDict()\n current_nic = salt.utils.odict.OrderedDict()\n no_names = True\n for item in net_data:\n if item and isinstance(item, dict):\n item = list(item.items())[0]\n # skip LXC configuration comment lines, and play only with tuples conf\n elif isinstance(item, str):\n # deal with reflection of commented lxc config", "d_id": 54275, "documentation": { "docstring": "\n Parse the nic setup inside lxc conf tuples back to a dictionary indexed by\n network interface\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 26, "language": "en" } }, { "id": 112957, "commit_id": "4feab0e34b490500b06efd6e7e8a34d686702c2f", "repo": "nni", "path": "nni/runtime/log.py", "file_name": "log.py", "fun_name": "start_stdout_logging", "commit_message": "Logging refactor (step 1) - experiment handlers (#4792)", "code": "def start_stdout_logging() -> None:\n \n if '_stdout_' in _handlers:\n return\n\n handler = StreamHandler(sys.stdout)\n handler.setFormatter(_StdoutFormatter())\n\n _handlers['_stdout_'] = handler\n _root_logger.addHandler(handler)\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 14, "token_counts": 41, "n_ast_nodes": 75, "n_identifiers": 10, "random_cut": "def start_stdout_logging() -> None:\n \n if '_stdout_' in _handlers:\n return\n\n ", "d_id": 24799, "documentation": { "docstring": "\n Register the stdout handler.\n\n This function should be invoked on importing nni.\n\n It is safe to call it multiple times.\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 33, "language": "en" } }, { "id": 66146, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/job_offer/job_offer.py", "file_name": "job_offer.py", "fun_name": "get_staffing_plan_detail", "commit_message": "style: format code with black", "code": "def get_staffing_plan_detail(designation, company, offer_date):\n\tdetail = frappe.db.sql(\n\t\t,\n\t\t(designation, 
company, offer_date),\n\t\tas_dict=1,\n\t)\n\n\treturn frappe._dict(detail[0]) if (detail and detail[0].parent) else None\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 14, "n_words": 22, "vocab_size": 21, "complexity": 3, "nloc": 21, "token_counts": 55, "n_ast_nodes": 90, "n_identifiers": 12, "random_cut": "def get_staffing_plan_detail(designation, company, offer_date):\n\tdetail = frappe.db.sql(\n\t\t,\n\t\t(designation, company, offer_date),\n\t\tas_dict=1,\n\t)\n\n\treturn frappe._dict(detail[0]) if (detail and detail[0].parent) else None\n\n\n@frappe.whitelist()", "d_id": 14109, "documentation": { "docstring": "\n\t\tSELECT DISTINCT spd.parent,\n\t\t\tsp.from_date as from_date,\n\t\t\tsp.to_date as to_date,\n\t\t\tsp.name,\n\t\t\tsum(spd.vacancies) as vacancies,\n\t\t\tspd.designation\n\t\tFROM `tabStaffing Plan Detail` spd, `tabStaffing Plan` sp\n\t\tWHERE\n\t\t\tsp.docstatus=1\n\t\t\tAND spd.designation=%s\n\t\t\tAND sp.company=%s\n\t\t\tAND spd.parent = sp.name\n\t\t\tAND %s between sp.from_date and sp.to_date\n\t", "n_words": 38, "vocab_size": 30, "n_whitespaces": 25, "language": "en" } }, { "id": 247777, "commit_id": "9d21ecf7ceab55bc19c4457b8b07401b0b1623a7", "repo": "synapse", "path": "tests/push/test_push_rule_evaluator.py", "file_name": "test_push_rule_evaluator.py", "fun_name": "test_display_name", "commit_message": "Add type hints to tests files. (#12256)", "code": "def test_display_name(self) -> None:\n \n evaluator = self._get_evaluator({\"body\": \"foo bar baz\"})\n\n condition = {\n \"kind\": \"contains_display_name\",\n }\n\n # Blank names are skipped.\n self.assertFalse(evaluator.matches(condition, \"@user:test\", \"\"))\n\n # Check a display name that doesn't match.\n self.assertFalse(evaluator.matches(condition, \"@user:test\", \"not found\"))\n\n # Check a display name which matches.\n self.assertTrue(evaluator.matches(condition, \"@user:test\", \"foo\"))\n\n # A display name that matches, but not a full word does not result in a match.\n self.assertFalse(evaluator.matches(condition, \"@user:test\", \"ba\"))\n\n # A display name should not be interpreted as a regular expression.\n self.assertFalse(evaluator.matches(condition, \"@user:test\", \"ba[rz]\"))\n\n # A display name with spaces should work fine.\n self.assertTrue(evaluator.matches(condition, \"@user:test\", \"foo bar\"))\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 217, "n_words": 94, "vocab_size": 58, "complexity": 1, "nloc": 12, "token_counts": 118, "n_ast_nodes": 217, "n_identifiers": 8, "random_cut": "def test_display_name(self) -> None:\n \n evaluator = self._get_evaluator({\"body\": \"foo bar baz\"})\n\n condition = {\n \"kind\": \"contains_display_name\",\n }\n\n # Blank names are skipped.\n self.assertFalse(evaluator.matches(condition, \"@user:test\", \"\"))\n\n # Check a display name that doesn't match.\n self.assertFalse(evaluator.matches(condition, \"@user:test\", \"not found\"))\n\n # Check a display name which matches.\n self.assertTrue(evaluator.matches(condition, \"@user:test\", \"foo\"))\n\n # A display name that matches, but not a full word does not result in a match.\n self.assertFalse(evaluator.matches(condition, \"@user:test\", \"ba\"))\n\n # A display name should not be interpreted as a regular expression.\n 
self.assertFalse(evaluator.matches(condition, \"@user:test\",", "d_id": 71912, "documentation": { "docstring": "Check for a matching display name in the body of the event.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 215784, "commit_id": "a35b29b2651bf33c5d5b45e64bc7765ffde4aff4", "repo": "salt", "path": "tests/pytests/functional/modules/file/test_readlink.py", "file_name": "test_readlink.py", "fun_name": "test_readlink_not_a_link", "commit_message": "Add some funtional tests\n\nAdd functional tests for the following:\n- file.readlink\n- file.replace\n- file.symlink\n\nRemove unit tests for file.replace as they are duplicated in the added\nfunctional test", "code": "def test_readlink_not_a_link(file, source):\n \n with pytest.raises(SaltInvocationError) as exc:\n file.readlink(path=source)\n assert \"A valid link was not specified\" in exc.value.message\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 61, "n_identifiers": 11, "random_cut": "def test_readlink_not_a_link(file, source):\n \n with pytest.raises(Salt", "d_id": 54176, "documentation": { "docstring": "\n Test readlink where the path is not a link\n Should throw a SaltInvocationError\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 23, "language": "en" } }, { "id": 67910, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/report/stock_analytics/stock_analytics.py", "file_name": "stock_analytics.py", "fun_name": "get_periodic_data", "commit_message": "style: format code with black", "code": "def get_periodic_data(entry, filters):\n\t\n\tperiodic_data = {}\n\tfor d in entry:\n\t\tperiod = get_period(d.posting_date, filters)\n\t\tbal_qty = 0\n\n\t\t# if period against item does not exist yet, instantiate it\n\t\t# insert existing balance dict against period, and add/subtract to it\n\t\tif periodic_data.get(d.item_code) and not periodic_data.get(d.item_code).get(period):\n\t\t\tprevious_balance = periodic_data[d.item_code][\"balance\"].copy()\n\t\t\tperiodic_data[d.item_code][period] = previous_balance\n\n\t\tif d.voucher_type == \"Stock Reconciliation\":\n\t\t\tif periodic_data.get(d.item_code) and periodic_data.get(d.item_code).get(\"balance\").get(\n\t\t\t\td.warehouse\n\t\t\t):\n\t\t\t\tbal_qty = periodic_data[d.item_code][\"balance\"][d.warehouse]\n\n\t\t\tqty_diff = d.qty_after_transaction - bal_qty\n\t\telse:\n\t\t\tqty_diff = d.actual_qty\n\n\t\tif filters[\"value_quantity\"] == \"Quantity\":\n\t\t\tvalue = qty_diff\n\t\telse:\n\t\t\tvalue = d.stock_value_difference\n\n\t\t# period-warehouse wise balance\n\t\tperiodic_data.setdefault(d.item_code, {}).setdefault(\"balance\", {}).setdefault(d.warehouse, 0.0)\n\t\tperiodic_data.setdefault(d.item_code, {}).setdefault(period, {}).setdefault(d.warehouse, 0.0)\n\n\t\tperiodic_data[d.item_code][\"balance\"][d.warehouse] += value\n\t\tperiodic_data[d.item_code][period][d.warehouse] = periodic_data[d.item_code][\"balance\"][\n\t\t\td.warehouse\n\t\t]\n\n\treturn periodic_data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 76, "n_words": 106, "vocab_size": 67, "complexity": 8, "nloc": 27, "token_counts": 274, "n_ast_nodes": 435, "n_identifiers": 21, "random_cut": "def get_periodic_data(entry, 
filters):\n\t\n\tperiodic_data = {}\n\tfor d in entry:\n\t\tperiod = get_period(d.posting_date, filters)\n\t\tbal_qty = 0\n\n\t\t# if period against item does not exist yet, instantiate it\n\t\t# insert existing balance dict against period, and add/subtract to it\n\t\tif periodic_data.get(d.item_code) and not periodic_data.get(d.item_code).get(period):\n\t\t\tprevious_balance = periodic_data[d.item_code][\"balance\"].copy()\n\t\t\tperiodic_data[d.item_code][period] = previous_balance\n\n\t\tif d.voucher_type == \"Stock Reconciliation\":\n\t\t\tif periodic_data.get(d.item_code) and periodic_data.get(d.item_code).get(\"balance\").get(\n\t\t\t\td.warehouse\n\t\t\t):\n\t\t\t\tbal_qty = periodic_data[d.item_code][\"balance\"][d.warehouse]\n\n\t\t\tqty_diff = d.qty_after_transaction - bal_qty\n\t\telse:\n\t\t\tqty_diff = d.actual_qty\n\n\t\tif filters[\"value_quantity\"] == \"Quantity\":\n\t\t\tvalue = qty_diff\n\t\telse:\n\t\t\tv", "d_id": 14660, "documentation": { "docstring": "Structured as:\n\tItem 1\n\t - Balance (updated and carried forward):\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t - Jun 2021 (sum of warehouse quantities used in report)\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t - Jul 2021 (sum of warehouse quantities used in report)\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\tItem 2\n\t - Balance (updated and carried forward):\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t - Jun 2021 (sum of warehouse quantities used in report)\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t - Jul 2021 (sum of warehouse quantities used in report)\n\t - Warehouse A : bal_qty/value\n\t - Warehouse B : bal_qty/value\n\t", "n_words": 118, "vocab_size": 26, "n_whitespaces": 433, "language": "en" } }, { "id": 260817, "commit_id": "49279c3267c0c54cdba80a571820c46f25fbe883", "repo": "scikit-learn", "path": "sklearn/utils/__init__.py", "file_name": "__init__.py", "fun_name": "shuffle", "commit_message": "DOC ensures sklearn.utils.shuffle passes numpydoc validation (#24367)\n\nCo-authored-by: Guillaume Lemaitre ", "code": "def shuffle(*arrays, random_state=None, n_samples=None):\n \n return resample(\n *arrays, replace=False, n_samples=n_samples, random_state=random_state\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 50, "n_identifiers": 6, "random_cut": "def shuffle(*arrays, random_state=None, n_samples=None):\n \n return resample(\n *arrays, replace=False, n_samples=n_samples, random_state=random_state\n )\n\n", "d_id": 76516, "documentation": { "docstring": "Shuffle arrays or sparse matrices in a consistent way.\n\n This is a convenience alias to ``resample(*arrays, replace=False)`` to do\n random permutations of the collections.\n\n Parameters\n ----------\n *arrays : sequence of indexable data-structures\n Indexable data-structures can be arrays, lists, dataframes or scipy\n sparse matrices with consistent first dimension.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for shuffling\n the data.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary `.\n\n n_samples : int, default=None\n Number of samples to generate. 
If left to None this is\n automatically set to the first dimension of the arrays. It should\n not be larger than the length of arrays.\n\n Returns\n -------\n shuffled_arrays : sequence of indexable data-structures\n Sequence of shuffled copies of the collections. The original arrays\n are not impacted.\n\n See Also\n --------\n resample : Resample arrays or sparse matrices in a consistent way.\n\n Examples\n --------\n It is possible to mix sparse and dense arrays in the same run::\n\n >>> import numpy as np\n >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])\n >>> y = np.array([0, 1, 2])\n\n >>> from scipy.sparse import coo_matrix\n >>> X_sparse = coo_matrix(X)\n\n >>> from sklearn.utils import shuffle\n >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)\n >>> X\n array([[0., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> X_sparse\n <3x2 sparse matrix of type '<... 'numpy.float64'>'\n with 3 stored elements in Compressed Sparse Row format>\n\n >>> X_sparse.toarray()\n array([[0., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> y\n array([2, 1, 0])\n\n >>> shuffle(y, n_samples=2, random_state=0)\n array([0, 1])\n ", "n_words": 248, "vocab_size": 152, "n_whitespaces": 519, "language": "en" } }, { "id": 266763, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py", "file_name": "integration_aliases.py", "fun_name": "check_changes", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def check_changes(self, args, results): # type: (SanityConfig, Results) -> None\n \n integration_targets = list(walk_integration_targets())\n module_targets = list(walk_module_targets())\n\n integration_targets_by_name = dict((target.name, target) for target in integration_targets)\n module_names_by_path = dict((target.path, target.module) for target in module_targets)\n\n disabled_targets = []\n unstable_targets = []\n unsupported_targets = []\n\n for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:\n for target in args.metadata.change_description.focused_command_targets[command]:\n if self.DISABLED in integration_targets_by_name[target].aliases:\n disabled_targets.append(target)\n elif self.UNSTABLE in integration_targets_by_name[target].aliases:\n unstable_targets.append(target)\n elif self.UNSUPPORTED in integration_targets_by_name[target].aliases:\n unsupported_targets.append(target)\n\n untested_modules = []\n\n for path in args.metadata.change_description.no_integration_paths:\n module = module_names_by_path.get(path)\n\n if module:\n untested_modules.append(module)\n\n comments = [\n self.format_comment(self.TEMPLATE_DISABLED, disabled_targets),\n self.format_comment(self.TEMPLATE_UNSTABLE, 
unstable_targets),\n self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets),\n self.format_comment(self.TEMPLATE_UNTESTED, untested_modules),\n ]\n\n comments = [comment for comment in comments if comment]\n\n labels = dict(\n needs_tests=bool(untested_modules),\n disabled_tests=bool(disabled_targets),\n unstable_tests=bool(unstable_targets),\n unsupported_tests=bool(unsupported_targets),\n )\n\n results.comments += comments\n results.labels.update(labels)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 483, "n_words": 118, "vocab_size": 76, "complexity": 14, "nloc": 36, "token_counts": 298, "n_ast_nodes": 456, "n_identifiers": 45, "random_cut": "def check_changes(self, args, results): # type: (SanityConfig, Results) -> None\n \n integration_targets = list(walk_integration_targets())\n module_targets = list(walk_module_targets())\n\n integration_targets_by_name = dict((target.name, target) for target in integration_targets)\n module_names_by_path = dict((target.path, target.module) for target in module_targets)\n\n disabled_targets = []\n unstable_targets = []\n unsupported_targets = []\n\n for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:\n ", "d_id": 78567, "documentation": { "docstring": "Check changes and store results in the provided result dictionary.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 177582, "commit_id": "35125cca12ba1e8703c4284894e4e2db44ce7009", "repo": "label-studio", "path": "label_studio/tests/test_next_task.py", "file_name": "test_next_task.py", "fun_name": "test_overlap_first", "commit_message": "fix: DEV-1348: Fix _rearrange_overlap_cohort filter condition for overlap bulk update with concurrent import (#1844)\n\n* [fix] Rearrange overlap depending in annotations count\r\n\r\n* Fix next task test for not random overlap assignment\r\n\r\n* Delete unused method\r\n\r\n* Rename rearrange method to have back compatibility\r\n\r\n* Refactor to Q_finished_annotations from tasks.models\r\n\r\n* Fix filter for tasks with max annotations\r\n\r\n* Change filter for tasks with max annotations\r\n\r\n* Change project stats recalculation condition\r\n\r\n* Fix rearrange during import from storage\r\n\r\n* Change _rearrange_overlap_cohort filter condition\r\n\r\n* Switching to bulk_update in _rearrange_overlap_cohort\r\n\r\n* Stylize code\r\n\r\n* Add is_labeled on import\r\n\r\n* Fix tests\r\n\r\n* Fix tests\r\n\r\n* Fix tests more\r\n\r\nCo-authored-by: nik \r\nCo-authored-by: Sergei Ivashchenko \r\nCo-authored-by: niklub \r\nCo-authored-by: Max Tkachenko ", "code": "def test_overlap_first(business_client, setup_before_upload, show_overlap_first):\n c = business_client\n config = dict(\n title='test_overlap_first',\n is_published=True,\n maximum_annotations=1,\n show_overlap_first=show_overlap_first,\n sampling=\"Uniform sampling\",\n label_config=\n )\n\n project = make_project(config, business_client.user)\n\n annotation_result = json.dumps([{\n 'from_name': 'text_class',\n 'to_name': 'text',\n 'type': 'choices',\n 'value': {'choices': ['class_A']}\n }])\n\n num_tasks = 1000\n overlap_cohort_percentage = 1\n\n # set up tasks overlap\n setup_after_upload = True\n if setup_before_upload:\n r = c.patch(\n f'/api/projects/{project.id}/',\n data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}),\n 
content_type='application/json'\n )\n assert r.status_code == 200\n setup_after_upload = False\n\n # create tasks\n tasks = []\n for i in range(num_tasks):\n tasks.append({'data': {'text': f'this is {str(i)}'}})\n r = business_client.post(\n f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json')\n assert r.status_code == 201\n\n if setup_after_upload:\n r = c.patch(\n f'/api/projects/{project.id}/',\n data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}),\n content_type='application/json'\n )\n assert r.status_code == 200\n\n expected_tasks_with_overlap = int(overlap_cohort_percentage / 100. * num_tasks)\n\n assert Task.objects.filter(Q(project_id=project.id) & Q(overlap__gt=1)).count() == expected_tasks_with_overlap\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 377, "n_words": 122, "vocab_size": 84, "complexity": 8, "nloc": 63, "token_counts": 396, "n_ast_nodes": 474, "n_identifiers": 42, "random_cut": "def test_overlap_first(business_client, setup_before_upload, show_overlap_first):\n c = business_client\n config = dict(\n title='test_overlap_first',\n ", "d_id": 42449, "documentation": { "docstring": "\n \n \n \n \n \n \n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 104, "language": "en" } }, { "id": 261252, "commit_id": "97057d329da1786aa03206251aab68bf51312390", "repo": "scikit-learn", "path": "sklearn/utils/extmath.py", "file_name": "extmath.py", "fun_name": "svd_flip", "commit_message": "DOC Ensures that svd_flip passes numpydoc validation (#24581)\n\nCo-authored-by: Thomas J. Fan ", "code": "def svd_flip(u, v, u_based_decision=True):\n \n if u_based_decision:\n # columns of u, rows of v\n max_abs_cols = np.argmax(np.abs(u), axis=0)\n signs = np.sign(u[max_abs_cols, range(u.shape[1])])\n u *= signs\n v *= signs[:, np.newaxis]\n else:\n # rows of v, columns of u\n max_abs_rows = np.argmax(np.abs(v), axis=1)\n signs = np.sign(v[range(v.shape[0]), max_abs_rows])\n u *= signs\n v *= signs[:, np.newaxis]\n return u, v\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 136, "n_words": 54, "vocab_size": 30, "complexity": 2, "nloc": 12, "token_counts": 127, "n_ast_nodes": 191, "n_identifiers": 15, "random_cut": "def svd_flip(u, v, u_based_decision=True):\n \n if u_based_decision:\n # columns of u, rows of v\n max_abs_cols = np.argmax(np.abs(u), axis=0)\n signs = np.sign(u[max_abs_cols, range(u.shape[1])])\n u *= signs\n v *= signs[:, np.newaxis]\n else:\n # rows of v, columns of u\n max_abs_rows = np.argmax(np.abs(v), axis=1)\n signs = np.sign(v[r", "d_id": 76710, "documentation": { "docstring": "Sign correction to ensure deterministic output from SVD.\n\n Adjusts the columns of u and the rows of v such that the loadings in the\n columns in u that are largest in absolute value are always positive.\n\n Parameters\n ----------\n u : ndarray\n Parameters u and v are the output of `linalg.svd` or\n :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner\n dimensions so one can compute `np.dot(u * s, v)`.\n\n v : ndarray\n Parameters u and v are the output of `linalg.svd` or\n :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner\n dimensions so one can compute `np.dot(u * s, v)`.\n The input v should really be called vt to be consistent with scipy's\n 
output.\n\n u_based_decision : bool, default=True\n If True, use the columns of u as the basis for sign flipping.\n Otherwise, use the rows of v. The choice of which variable to base the\n decision on is generally algorithm dependent.\n\n Returns\n -------\n u_adjusted : ndarray\n Array u with adjusted columns and the same dimensions as u.\n\n v_adjusted : ndarray\n Array v with adjusted rows and the same dimensions as v.\n ", "n_words": 171, "vocab_size": 86, "n_whitespaces": 298, "language": "en" } }, { "id": 139905, "commit_id": "d5a6d46049d0ea0490c90366a081de79a87d0fac", "repo": "ray", "path": "rllib/policy/tf_policy.py", "file_name": "tf_policy.py", "fun_name": "extra_action_out_fn", "commit_message": "[RLlib] Migrate MAML, MB-MPO, MARWIL, and BC to use Policy sub-classing implementation. (#24914)", "code": "def extra_action_out_fn(self) -> Dict[str, TensorType]:\n \n extra_fetches = {}\n # Action-logp and action-prob.\n if self._sampled_action_logp is not None:\n extra_fetches[SampleBatch.ACTION_PROB] = self._sampled_action_prob\n extra_fetches[SampleBatch.ACTION_LOGP] = self._sampled_action_logp\n # Action-dist inputs.\n if self._dist_inputs is not None:\n extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = self._dist_inputs\n return extra_fetches\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 36, "vocab_size": 25, "complexity": 3, "nloc": 17, "token_counts": 65, "n_ast_nodes": 103, "n_identifiers": 13, "random_cut": "def extra_action_out_fn(self) -> Dict[str, TensorType]:\n \n extra_fetches = {}\n # Action-logp and action-prob.\n if self._sampled_action_logp is not None:\n extra_fetches", "d_id": 31802, "documentation": { "docstring": "Extra values to fetch and return from compute_actions().\n\n By default we return action probability/log-likelihood info\n and action distribution inputs (if present).\n\n Returns:\n Dict[str, TensorType]: An extra fetch-dict to be passed to and\n returned from the compute_actions() call.\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 92, "language": "en" } }, { "id": 45432, "commit_id": "4ad21f5f7c2d416cf813a860564bc2bf3e161d46", "repo": "airflow", "path": "airflow/jobs/triggerer_job.py", "file_name": "triggerer_job.py", "fun_name": "cleanup_finished_triggers", "commit_message": "Log traceback in trigger excs (#21213)", "code": "async def cleanup_finished_triggers(self):\n \n for trigger_id, details in list(self.triggers.items()):\n if details[\"task\"].done():\n # Check to see if it exited for good reasons\n saved_exc = None\n try:\n result = details[\"task\"].result()\n except (asyncio.CancelledError, SystemExit, KeyboardInterrupt):\n # These are \"expected\" exceptions and we stop processing here\n # If we don't, then the system requesting a trigger be removed -\n # which turns into CancelledError - results in a failure.\n del self.triggers[trigger_id]\n continue\n except BaseException as e:\n # This is potentially bad, so log it.\n self.log.exception(\"Trigger %s exited with error %s\", details[\"name\"], e)\n saved_exc = e\n else:\n # See if they foolishly returned a TriggerEvent\n if isinstance(result, TriggerEvent):\n self.log.error(\n \"Trigger %s returned a TriggerEvent rather than yielding it\", details[\"name\"]\n )\n # See if this exited without sending an event, in which case\n # any task instances depending on it need to be failed\n if details[\"events\"] == 0:\n self.log.error(\n \"Trigger %s exited without sending 
an event. Dependent tasks will be failed.\",\n details[\"name\"],\n )\n self.failed_triggers.append((trigger_id, saved_exc))\n del self.triggers[trigger_id]\n await asyncio.sleep(0)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 733, "n_words": 162, "vocab_size": 116, "complexity": 7, "nloc": 25, "token_counts": 160, "n_ast_nodes": 275, "n_identifiers": 24, "random_cut": "async def cleanup_finished_triggers(self):\n \n for trigger_id, details in list(self.triggers.items()):\n if details[\"task\"].done():\n # Check to see if it exited for", "d_id": 8562, "documentation": { "docstring": "\n Go through all trigger tasks (coroutines) and clean up entries for\n ones that have exited, optionally warning users if the exit was\n not normal.\n ", "n_words": 24, "vocab_size": 24, "n_whitespaces": 53, "language": "en" } }, { "id": 133910, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/contrib/sumo/utils.py", "file_name": "utils.py", "fun_name": "get_global_travel_time", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_global_travel_time(self):\n \n gtt = 0\n for entity in self.tripinfo:\n gtt += self.get_duration(entity, default=0.0)\n for entity in self.personinfo:\n gtt += self.get_duration(entity, default=0.0)\n return gtt\n\n ###########################################################################\n # ROUTING\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 89, "n_words": 26, "vocab_size": 17, "complexity": 3, "nloc": 7, "token_counts": 53, "n_ast_nodes": 79, "n_identifiers": 8, "random_cut": "def get_global_travel_time(self):\n \n gtt = 0\n for entity in self.tripinfo:\n gtt += self.get_duration(entity, default=0.0", "d_id": 30145, "documentation": { "docstring": "\n Returns the global travel time computed from SUMO tripinfo data.\n\n The functions process_tripinfo_file() needs to be called in advance\n to initialize the data structures required.\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 54, "language": "en" } }, { "id": 22114, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/sessions.py", "file_name": "sessions.py", "fun_name": "post", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def post(self, url, data=None, json=None, **kwargs):\n r\n\n return self.request(\"POST\", url, data=data, json=json, **kwargs)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 11, "token_counts": 40, "n_ast_nodes": 58, "n_identifiers": 7, "random_cut": "def post(self, url, data=None, json=None, **kwargs):\n r\n\n re", "d_id": 4190, "documentation": { "docstring": "Sends a POST request. 
Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary, list of tuples, bytes, or file-like\n object to send in the body of the :class:`Request`.\n :param json: (optional) json to send in the body of the :class:`Request`.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :rtype: requests.Response\n ", "n_words": 55, "vocab_size": 39, "n_whitespaces": 108, "language": "en" } }, { "id": 209531, "commit_id": "08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf", "repo": "scapy", "path": "scapy/contrib/http2.py", "file_name": "http2.py", "fun_name": "__getitem__", "commit_message": "E275 - Missing whitespace after keyword (#3711)\n\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma \r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky <3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke \r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: plorinquer \r\nCo-authored-by: pvinci \r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez \r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: speakinghedge \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>\r\n\r\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma \r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky 
<3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke \r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: pvinci \r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez \r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>", "code": "def __getitem__(self, idx):\n # type: (int) -> HPackHdrEntry\n \n assert idx >= 0\n if idx > type(self)._static_entries_last_idx:\n idx -= type(self)._static_entries_last_idx + 1\n if idx >= len(self._dynamic_table):\n raise KeyError(\n 'EINVAL: idx: out-of-bound read: {}; maximum index: {}'.format(idx, len(self._dynamic_table)) # noqa: E501\n )\n return self._dynamic_table[idx]\n return type(self)._static_entries[idx]\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 162, "n_words": 44, "vocab_size": 37, "complexity": 3, "nloc": 10, "token_counts": 76, "n_ast_nodes": 124, "n_identifiers": 10, "random_cut": "def __getitem__(self, idx):\n # type: (int) -> HPackHdrEntry\n \n assert idx >= 0\n if idx > type(self)._static_entries_last_idx:\n idx -= type(self)._static_entries_last_idx + 1\n if idx >= len(self._dynamic_table):\n raise KeyError(\n 'EINVAL: idx: out-of-bound read: {}; maximum index: {}'.format(idx, len(self._dynamic_tab", "d_id": 52723, "documentation": { "docstring": "Gets an element from the header tables (static or dynamic indifferently)\n\n :param int idx: the index number of the entry to retrieve. If the index\n value is superior to the last index of the static entry table, then the\n dynamic entry type is requested, following the procedure described in\n RFC 7541 par2.3.3\n :return: HPackHdrEntry: the entry defined at this requested index. 
If the entry does not exist, KeyError is # noqa: E501\n raised\n :raises: KeyError, AssertionError\n ", "n_words": 76, "vocab_size": 55, "n_whitespaces": 135, "language": "en" } }, { "id": 206977, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_changelist/tests.py", "file_name": "tests.py", "fun_name": "test_pagination", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_pagination(self):\n \n parent = Parent.objects.create(name=\"anything\")\n for i in range(1, 31):\n Child.objects.create(name=\"name %s\" % i, parent=parent)\n Child.objects.create(name=\"filtered %s\" % i, parent=parent)\n\n request = self.factory.get(\"/child/\")\n request.user = self.superuser\n\n # Test default queryset\n m = ChildAdmin(Child, custom_site)\n cl = m.get_changelist_instance(request)\n self.assertEqual(cl.queryset.count(), 60)\n self.assertEqual(cl.paginator.count, 60)\n self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])\n\n # Test custom queryset\n m = FilteredChildAdmin(Child, custom_site)\n cl = m.get_changelist_instance(request)\n self.assertEqual(cl.queryset.count(), 30)\n self.assertEqual(cl.paginator.count, 30)\n self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 208, "n_words": 67, "vocab_size": 43, "complexity": 2, "nloc": 17, "token_counts": 209, "n_ast_nodes": 327, "n_identifiers": 27, "random_cut": "def test_pagination(self):\n \n parent = Parent.objects.create(name=\"anything\")\n for i in range(1, 31):\n Child.objects.create(name=\"name %s\" % i, parent=parent)\n Child.objects.create(name=\"filtered %s\" % i, parent=parent)\n\n request = self.factory.get(\"/child/\")\n request.user = self.superuser\n\n # Test default queryset\n m = ChildAdmin(Child, custom_site)\n cl = m.get_changelist_instance(request)\n self.assertEqual(cl.queryset.count(), 60)\n self.assertEqual(cl.paginator.count, 60)\n self.assertEqual(list(", "d_id": 51819, "documentation": { "docstring": "\n Regression tests for #12893: Pagination in admins changelist doesn't\n use queryset set by modeladmin.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 258522, "commit_id": "ab08e4dba5f1f87b8c3395f32469a6ddb5e34f89", "repo": "scikit-learn", "path": "sklearn/discriminant_analysis.py", "file_name": "discriminant_analysis.py", "fun_name": "transform", "commit_message": "DOC Add documentation on output shape of LDA.transform (#22238)", "code": "def transform(self, X):\n \n if self.solver == \"lsqr\":\n raise NotImplementedError(\n \"transform not implemented for 'lsqr' solver (use 'svd' or 'eigen').\"\n )\n check_is_fitted(self)\n\n X = self._validate_data(X, reset=False)\n if self.solver == \"svd\":\n X_new = np.dot(X - self.xbar_, self.scalings_)\n elif self.solver == \"eigen\":\n X_new = np.dot(X, self.scalings_)\n\n return X_new[:, : self._max_components]\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 155, "n_words": 47, "vocab_size": 38, "complexity": 4, "nloc": 12, "token_counts": 88, "n_ast_nodes": 147, "n_identifiers": 14, "random_cut": "def transform(self, X):\n \n if self.solver == \"lsqr\":\n raise NotImplementedError(\n \"transform not implemented for 'lsqr' solver (use 'svd' or 'eigen').\"\n )\n check_is_fitted(self)\n\n X = 
self._validate_data(X, reset=False)\n if self.solver == \"svd\":\n X_new = np.dot(X - self.xbar_, self.scalings_)\n elif self.solver == \"eigen\":\n X_new = np.dot(X, self.scalings_)\n\n return X_new[:, : self._max_components]\n", "d_id": 75274, "documentation": { "docstring": "Project data to maximize class separation.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components) or \\\n (n_samples, min(rank, n_components))\n Transformed data. In the case of the 'svd' solver, the shape\n is (n_samples, min(rank, n_components)).\n ", "n_words": 46, "vocab_size": 34, "n_whitespaces": 139, "language": "en" } }, { "id": 311456, "commit_id": "58b8c30221a6f6e5acbbe98b7e3298b03fb741f5", "repo": "core", "path": "tests/components/homekit_controller/test_climate.py", "file_name": "test_climate.py", "fun_name": "test_heater_cooler_hvac_mode_vs_hvac_action", "commit_message": "Improve homekit_controller tests (#65266)", "code": "async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow):\n \n helper = await setup_test_component(hass, create_heater_cooler_service)\n\n # Simulate that current temperature is above target temp\n # Heating might be on, but hvac_action currently 'off'\n await helper.async_update(\n ServicesTypes.HEATER_COOLER,\n {\n CharacteristicsTypes.TEMPERATURE_CURRENT: 22,\n CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21,\n CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.IDLE,\n CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT,\n CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED,\n },\n )\n\n state = await helper.poll_and_get_state()\n assert state.state == \"heat\"\n assert state.attributes[\"hvac_action\"] == \"idle\"\n\n # Simulate that current temperature is below target temp\n # Heating might be on and hvac_action currently 'heat'\n await helper.async_update(\n ServicesTypes.HEATER_COOLER,\n {\n CharacteristicsTypes.TEMPERATURE_CURRENT: 19,\n CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21,\n CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.HEATING,\n CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT,\n CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED,\n },\n )\n\n state = await helper.poll_and_get_state()\n assert state.state == \"heat\"\n assert state.attributes[\"hvac_action\"] == \"heating\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 301, "n_words": 101, "vocab_size": 56, "complexity": 1, "nloc": 28, "token_counts": 161, "n_ast_nodes": 256, "n_identifiers": 25, "random_cut": "async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow):\n \n helper = await setup_test_component(hass, create_heater_cooler_service)\n\n # Simulate that current temperature is above target temp\n # Heating might be on, but hvac_action currently 'off'\n await helper.async_update(\n ServicesTypes.HEATER_COOLER,\n {\n CharacteristicsTypes.TEMPERATURE_CURRENT: 22,\n CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21,\n CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.IDLE,\n CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT,\n CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED,\n },\n )\n\n state = await helper.poll_and_get_state()\n 
assert state", "d_id": 110121, "documentation": { "docstring": "Check that we haven't conflated hvac_mode and hvac_action.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 253415, "commit_id": "8c2428c9d355ca5fbc3dd90e9820ceb1cc795837", "repo": "mitmproxy", "path": "examples/contrib/webscanner_helper/watchdog.py", "file_name": "watchdog.py", "fun_name": "not_in_timeout", "commit_message": "[autofix.ci] apply automated fixes", "code": "def not_in_timeout(cls, last_triggered, timeout):\n \n return (\n last_triggered is None\n or timeout is None\n or (time.time() - last_triggered > timeout)\n )\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 20, "vocab_size": 16, "complexity": 3, "nloc": 6, "token_counts": 32, "n_ast_nodes": 51, "n_identifiers": 5, "random_cut": "def not_in_timeout(cls, last_triggered, timeout):\n \n return (\n last_triggered is None\n or timeout is None\n or (tim", "d_id": 74072, "documentation": { "docstring": "Checks if current error lies not in timeout after last trigger (potential reset of connection).", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 194733, "commit_id": "daa85bf085c9e275cc65d0b03758d1f70742b57f", "repo": "ParlAI", "path": "parlai/core/torch_generator_agent.py", "file_name": "torch_generator_agent.py", "fun_name": "get_rescored_finished", "commit_message": "Logging token level losses at inference time (#4169)", "code": "def get_rescored_finished(self, n_best=None):\n \n # if we never actually finished, force one\n if not self.finished:\n self.outputs[-1][0] = self.eos\n self.finished.append(\n _HypothesisTail(\n timestep=len(self.outputs) - 1,\n hypid=0,\n score=self.all_scores[-1][0],\n tokenid=self.outputs[-1][0],\n token_score=self.token_scores[0, -1]\n if self.token_scores is not None\n else None,\n token_rank=self.token_ranks[0, -1]\n if self.token_ranks is not None\n else None,\n )\n )\n\n rescored_finished = []\n for finished_item in self.finished:\n current_length = finished_item.timestep + 1\n # these weights are from Google NMT paper\n length_penalty = math.pow((1 + current_length) / 6, self.length_penalty)\n rescored_finished.append(\n _HypothesisTail(\n timestep=finished_item.timestep,\n hypid=finished_item.hypid,\n score=finished_item.score / length_penalty,\n tokenid=finished_item.tokenid,\n token_score=finished_item.token_score,\n token_rank=finished_item.token_rank,\n )\n )\n\n # Note: beam size is almost always pretty small, so sorting is cheap enough\n srted = sorted(rescored_finished, key=attrgetter('score'), reverse=True)\n\n if n_best is not None:\n srted = srted[:n_best]\n\n n_best_list = []\n for hyp in srted:\n hyp_data = self._get_hyp_from_finished(hyp)\n token_ids = self._get_pretty_hypothesis(hyp_data)\n token_metadata = (\n self._get_pretty_token_metadata(hyp_data) if self.verbose else None\n )\n n_best_list.append((token_ids, hyp.score, token_metadata))\n\n # check that there is at least one finished candidate\n # and assert that each of them contains only one EOS\n assert (\n len(n_best_list) >= 1\n ), f'TreeSearch returned {len(n_best_list)} candidates, must be >= 1'\n for (pred, score, _) in n_best_list:\n assert (pred == self.eos).sum() == 1, (\n f'TreeSearch returned a finalized hypo with multiple end tokens '\n f'with score {score.item():.2f}'\n )\n\n return n_best_list\n\n", "url": "https://github.com/facebookresearch/ParlAI.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 909, "n_words": 201, "vocab_size": 140, "complexity": 9, "nloc": 51, "token_counts": 336, "n_ast_nodes": 525, "n_identifiers": 42, "random_cut": "def get_rescored_finished(self, n_best=None):\n \n # if we never actually finished, force one\n if not self.finished:\n self.outputs[-1][0] = self.eos\n self.finished.append(\n _HypothesisTail(\n timestep=len(self.outputs) - 1,\n hypid=0,\n score=self.all_scores[-1][0],\n tokenid=self.outputs[-1][0],\n token_score=self.token_scores[0, -1]\n if self.token_scores is not None\n else None,\n token_rank=self.token_ranks[0, -1]\n if self.token_ranks is not None\n else None,\n )\n )\n\n rescored_finished = []\n for finished_item in self.finished:\n current_length = finished_item.timestep + 1\n # these weights are from Google NMT paper\n length_penalty = math.pow((1 + current_length) / 6, self.length_penalty)\n rescored_finished.append(\n _HypothesisTail(\n timestep=finished_item.timestep,\n h", "d_id": 47049, "documentation": { "docstring": "\n Return finished hypotheses according to adjusted scores.\n\n Score adjustment is done according to the Google NMT paper, which\n penalizes long utterances.\n\n :param n_best:\n number of finalized hypotheses to return\n\n :return:\n list of (tokens, score, token_metadata) 3-tuples, in sorted order, where:\n - tokens is a tensor of token ids\n - score is the adjusted log probability of the entire utterance\n - token_metadata dictionary:\n token_logprobs -> a tensor of conditional log probabilities of tokens\n token_ranks -> a tensor of ranks of tokens in vocabulator, by probability, when sampled\n ", "n_words": 86, "vocab_size": 59, "n_whitespaces": 228, "language": "en" } }, { "id": 166924, "commit_id": "62b6d25551d006758422c20e7f931858e23054a9", "repo": "pandas", "path": "pandas/core/resample.py", "file_name": "resample.py", "fun_name": "quantile", "commit_message": "DEPR: numeric_only default in resampler ops (#47177)", "code": "def quantile(self, q=0.5, **kwargs):\n \n return self._downsample(\"quantile\", q=q, **kwargs)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 29, "n_ast_nodes": 44, "n_identifiers": 5, "random_cut": "def quantile(self, q=0.5, **kwargs):\n \n return self._do", "d_id": 39870, "documentation": { "docstring": "\n Return value at the given quantile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n\n Returns\n -------\n DataFrame or Series\n Quantile of values within each group.\n\n See Also\n --------\n Series.quantile\n Return a series, where the index is q and the values are the quantiles.\n DataFrame.quantile\n Return a DataFrame, where the columns are the columns of self,\n and the values are the quantiles.\n DataFrameGroupBy.quantile\n Return a DataFrame, where the coulmns are groupby columns,\n and the values are its quantiles.\n ", "n_words": 80, "vocab_size": 48, "n_whitespaces": 238, "language": "en" } }, { "id": 188795, "commit_id": "7b9bb6e62424e4b3c960e9e25c45a6946988959c", "repo": "calibre", "path": "src/calibre/gui2/preferences/create_custom_column.py", "file_name": "create_custom_column.py", "fun_name": "current_columns", "commit_message": "Yet another version of CreateNewCustomColumn.\n\nMy apologies for the multiple commits. 
I have been working with @davidfor and we cycled a few times. I hope this is the last, barring bugs.", "code": "def current_columns(self):\n \n return copy.deepcopy(self.custcols) #deepcopy to prevent users from changing it\n", "url": "https://github.com/kovidgoyal/calibre.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 5, "random_cut": "def current_columns(self):\n \n return copy.deepcopy(self.custcols) #de", "d_id": 45933, "documentation": { "docstring": "\n Return the currently defined custom columns\n\n Return the currently defined custom columns including the ones that haven't\n yet been created. It is a dict of dicts defined as follows:\n custcols[lookup_name] = {\n 'label': lookup_name,\n 'name': column_heading,\n 'datatype': datatype,\n 'display': display,\n 'normalized': None,\n 'colnum': an integer used internally,\n 'is_multiple': is_multiple,\n }\n Columns that already exist will have additional attributes that this class\n doesn't use. See calibre.library.field_metadata.add_custom_field() for the\n complete list.\n ", "n_words": 69, "vocab_size": 58, "n_whitespaces": 278, "language": "en" } }, { "id": 75054, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/image_operations.py", "file_name": "image_operations.py", "fun_name": "transform_vector", "commit_message": "Reformat with black", "code": "def transform_vector(self, vector):\n \n return Vector(\n (vector.x + self.offset[0]) * self.scale[0],\n (vector.y + self.offset[1]) * self.scale[1],\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 59, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 52, "n_ast_nodes": 78, "n_identifiers": 8, "random_cut": "def transform_vector(self, vector):\n \n return Vector(\n (vector", "d_id": 16345, "documentation": { "docstring": "\n Transforms the given vector into the coordinate space of the final image.\n\n Use this to find out where a point on the source image would end up in the\n final image after cropping/resizing has been performed.\n\n Returns a new vector.\n ", "n_words": 40, "vocab_size": 33, "n_whitespaces": 76, "language": "en" } }, { "id": 82306, "commit_id": "a3110e1ff24085373898c7d2a85f628abeb8518d", "repo": "django-cms", "path": "cms/utils/conf.py", "file_name": "conf.py", "fun_name": "_load_from_file", "commit_message": "Enabled isort workflow (#7200)\n\n* Ran isort\r\n\r\n* Enabled isort workflow\r\n\r\nCo-authored-by: Vinit Kumar ", "code": "def _load_from_file(module_path):\n \n from imp import PY_SOURCE, load_module\n\n imported = None\n if module_path:\n with open(module_path, 'r') as openfile:\n imported = load_module(\"mod\", openfile, module_path, ('imported', 'r', PY_SOURCE))\n return imported\n\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 60, "n_words": 27, "vocab_size": 24, "complexity": 2, "nloc": 7, "token_counts": 48, "n_ast_nodes": 85, "n_identifiers": 8, "random_cut": "def _load_from_file(module_path):\n \n from imp import PY_SOURCE, load_module\n\n imported = None\n if module_path:\n with open(module_path, 'r') as openfile:\n imported = load_module(\"mod\", openfile, module_path, ('imp", "d_id": 17347, 
"documentation": { "docstring": "\n Load a python module from its absolute filesystem path\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 203858, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/db/backends/postgis/schema.py", "file_name": "schema.py", "fun_name": "_alter_column_type_sql", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _alter_column_type_sql(self, table, old_field, new_field, new_type):\n \n if not hasattr(old_field, \"dim\") or not hasattr(new_field, \"dim\"):\n return super()._alter_column_type_sql(table, old_field, new_field, new_type)\n\n if old_field.dim == 2 and new_field.dim == 3:\n sql_alter = self.sql_alter_column_to_3d\n elif old_field.dim == 3 and new_field.dim == 2:\n sql_alter = self.sql_alter_column_to_2d\n else:\n sql_alter = self.sql_alter_column_type\n return (\n (\n sql_alter\n % {\n \"column\": self.quote_name(new_field.column),\n \"type\": new_type,\n },\n [],\n ),\n [],\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 284, "n_words": 60, "vocab_size": 42, "complexity": 7, "nloc": 20, "token_counts": 121, "n_ast_nodes": 189, "n_identifiers": 15, "random_cut": "def _alter_column_type_sql(self, table, old_field, new_field, new_type):\n \n if not hasattr(old_field, \"dim\") or not hasattr(new_field, \"dim\"):\n return super()._alter_column_type_sql(table, old_field, new_field, new_type)\n\n if old_field.dim == 2 and new_field.dim == 3:\n sql_alter = self.sql_alter_column_to_3d\n elif old_field.dim == 3 and new_field.dim == 2:\n sql_alter = self.sql_alter_co", "d_id": 50562, "documentation": { "docstring": "\n Special case when dimension changed.\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, { "id": 268048, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/test.py", "file_name": "test.py", "fun_name": "format_command", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def format_command(self) -> str:\n \n command = 'ansible-test %s' % self.command\n\n if self.test:\n command += ' --test %s' % self.test\n\n if self.python_version:\n command += ' --python %s' % self.python_version\n\n return command\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 30, "vocab_size": 20, "complexity": 3, "nloc": 8, "token_counts": 41, "n_ast_nodes": 74, "n_identifiers": 6, "random_cut": "def format_command(self) -> str:\n \n command = 'ansible-test %s' % self.command\n\n if self.test", "d_id": 79322, "documentation": { "docstring": "Return a string representing the CLI command associated with the test failure.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 194950, "commit_id": "7e453008fde751aff0cfd752662e19fe2adc7410", "repo": "ParlAI", "path": "projects/seeker/scripts/generate_lm_data.py", "file_name": "generate_lm_data.py", "fun_name": "act", "commit_message": "SeeKeR (#4447)\n\n* seeker\r\n\r\n* todo\r\n\r\n* readme updates; add test\r\n\r\n* small config changes\r\n\r\n* various updates\r\n\r\n* readme fix\r\n\r\n* model card\r\n\r\n* add arxiv link\r\n\r\n* surround spacy with try catch\r\n\r\n* more protected\r\n\r\n* more protection of imports\r\n\r\n* lint", "code": "def act(self):\n \n obs = self.observation\n reply = {'text': INVALID, 'id': self.getID(), 'episode_done': False}\n if obs is None or obs['text'] == DO_NOT_RETRIEVE:\n return Message(reply)\n\n # construct the search query\n labels = obs.get('labels', obs.get('eval_labels', None))\n search_query = self.construct_search_query(labels)\n if (\n self.opt['min_num_search_words'] > 0\n and len(search_query[0].split()) <= self.opt['min_num_search_words']\n ):\n return Message(reply)\n\n # retrieve\n self.search_engine.set_search_queries(search_query)\n retrieved, _ = self.search_engine.retrieve_and_score(self.dummy)\n all_docs = [d.get_tokenization_str() for d in retrieved[0]] # batched\n\n # Find the right doc\n best_f1, best_doc, best_doc_idx = self.get_best_doc(all_docs, labels)\n if best_doc:\n assert best_doc_idx is not None\n reply['knowledge'] = f'{TOKEN_KNOWLEDGE}{best_doc}{TOKEN_END_KNOWLEDGE}'\n reply['f1_overlap'] = best_f1\n reply['text'] = labels[0]\n reply['retrieved_docs'] = all_docs\n reply['gold_doc'] = all_docs[best_doc_idx]\n reply['search_query'] = search_query[0]\n return Message(reply)\n\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 343, "n_words": 102, "vocab_size": 74, "complexity": 7, "nloc": 25, "token_counts": 219, "n_ast_nodes": 379, "n_identifiers": 31, "random_cut": "def act(self):\n \n obs = self.observation\n reply = {'text': INVALID, 'id': self.getID(), 'episode_done': False}\n if 
obs is None or obs['text'] == DO_NOT_RETRIEVE:\n return Message(reply)\n\n # construct the search query\n labels = obs.get('labels', obs.get('eval_labels', None))\n search_query = self.construct_search_query(labels)\n if (\n self.opt['min_num_search_words'] > 0\n and len(search_query[0].split()) <= self.opt['min_num_search_words']\n ):\n return Message(reply)\n\n # retrieve\n self.search_engine.set_search_queries(search_query)\n retrieved, _ = self.search_engine.retrieve_and_score(self.dummy)\n all_docs = [d.get_tokenization_str() for d in retrieved[0]] # batched\n\n # Find the right doc\n best_f1, best_doc, best_doc_idx = self.get_best_doc(a", "d_id": 47131, "documentation": { "docstring": "\n Search for overlap with the observation label.\n\n Return the best fitting document. A document is valid if the f1 is above the\n threshold AND the f1 is less than 1.0 AND the target label is not in the\n document.\n ", "n_words": 39, "vocab_size": 27, "n_whitespaces": 75, "language": "en" } }, { "id": 284002, "commit_id": "5bf4618b398492f0ab2d09b3827467c7089831ec", "repo": "OpenBBTerminal", "path": "openbb_terminal/forex/quantitative_analysis/qa_controller.py", "file_name": "qa_controller.py", "fun_name": "print_help", "commit_message": "Adds QA and Pred to forex (#1652)\n\n* added qa and pred to forex\r\n\r\n* updated test help\r\n\r\n* Add forex/qa notebooks api wrapper\r\n\r\n* Add forex/qa tests\r\n\r\n* Add all menu commands to the integration test script\r\n\r\nCo-authored-by: Theodore Aptekarev ", "code": "def print_help(self):\n \n help_text = f\n console.print(text=help_text, menu=\"Forex - Quantitative Analysis\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 33, "token_counts": 22, "n_ast_nodes": 54, "n_identifiers": 9, "random_cut": "def print_help(self):\n \n he", "d_id": 84618, "documentation": { "docstring": "Print help[cmds]\n pick pick target column for analysis[/cmds]\n\n[param]Pair: [/param]{self.ticker}\n[param]Target Column: [/param]{self.target}\n[cmds]\n[info]Statistics:[/info]\n summary brief summary statistics of loaded pair.\n normality normality statistics and tests\n unitroot unit root test for stationarity (ADF, KPSS)\n[info]Plots:[/info]\n line line plot of selected target\n hist histogram with density plot\n cdf cumulative distribution function\n bw box and whisker plot\n acf (partial) auto-correlation function differentials of prices\n qqplot residuals against standard normal curve\n[info]Rolling Metrics:[/info]\n rolling rolling mean and std deviation of prices\n spread rolling variance and std deviation of prices\n quantile rolling median and quantile of prices\n skew rolling skewness of distribution of prices\n kurtosis rolling kurtosis of distribution of prices\n[info]Risk:[/info]\n var display value at risk\n es display expected shortfall\n[info]Other:[/info]\n raw print raw data\n decompose decomposition in cyclic-trend, season, and residuals of prices\n cusum detects abrupt changes using cumulative sum algorithm of prices[/cmds]\n ", "n_words": 142, "vocab_size": 95, "n_whitespaces": 315, "language": "en" } }, { "id": 205879, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/query.py", "file_name": "query.py", "fun_name": "chain", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def chain(self, klass=None):\n 
\n obj = self.clone()\n if klass and obj.__class__ != klass:\n obj.__class__ = klass\n if not obj.filter_is_sticky:\n obj.used_aliases = set()\n obj.filter_is_sticky = False\n if hasattr(obj, \"_setup_query\"):\n obj._setup_query()\n return obj\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 112, "n_words": 30, "vocab_size": 22, "complexity": 5, "nloc": 10, "token_counts": 64, "n_ast_nodes": 108, "n_identifiers": 11, "random_cut": "def chain(self, klass=None):\n \n obj = self.clone()\n if klass and obj.__class__ != klass:\n obj.__class__ = klass\n if not obj.filter_is_sticky:\n obj.used_a", "d_id": 51259, "documentation": { "docstring": "\n Return a copy of the current Query that's ready for another operation.\n The klass argument changes the type of the Query, e.g. UpdateQuery.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 45, "language": "en" } }, { "id": 7846, "commit_id": "24f6583aa3b384aa6179c3579be600760897f1d8", "repo": "ludwig", "path": "tests/integration_tests/test_gbm.py", "file_name": "test_gbm.py", "fun_name": "run_test_gbm_non_number_inputs", "commit_message": "Bugfix: non-number inputs to GBM (#2418)", "code": "def run_test_gbm_non_number_inputs(tmpdir, backend_config):\n \n input_features = [binary_feature(), category_feature(encoder={\"reduce_output\": \"sum\"})]\n output_feature = binary_feature()\n output_features = [output_feature]\n\n csv_filename = os.path.join(tmpdir, \"training.csv\")\n dataset_filename = generate_data(input_features, output_features, csv_filename, num_examples=100)\n\n config = {\n MODEL_TYPE: \"gbm\",\n \"input_features\": input_features,\n \"output_features\": output_features,\n TRAINER: {\"num_boost_round\": 2},\n }\n\n model = LudwigModel(config, backend=backend_config)\n _, _, output_directory = model.train(\n dataset=dataset_filename,\n output_directory=tmpdir,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n skip_save_log=True,\n )\n model.load(os.path.join(tmpdir, \"api_experiment_run\", \"model\"))\n preds, _ = model.predict(dataset=dataset_filename, output_directory=output_directory)\n\n prob_col = preds[output_feature[\"name\"] + \"_probabilities\"]\n if backend_config[\"type\"] == \"ray\":\n prob_col = prob_col.compute()\n assert len(prob_col.iloc[0]) == 2\n assert prob_col.apply(sum).mean() == pytest.approx(1.0)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 209, "n_words": 81, "vocab_size": 65, "complexity": 2, "nloc": 28, "token_counts": 222, "n_ast_nodes": 354, "n_identifiers": 42, "random_cut": "def run_test_gbm_non_number_inputs(tmpdir, backend_config):\n \n input_features = [binary_feature(), category_feature(encoder={\"reduce_output\": \"sum\"})]\n output_feature = binary_feature()\n output_features = [output_feature]\n\n csv_filename = os.path.join(tmpdir, \"training.csv\")\n dataset_filename = generate_data(input_features, output_features, csv_filename, num_examples=100)\n\n", "d_id": 1280, "documentation": { "docstring": "Test that the GBM model can train and predict with non-number inputs.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 209644, "commit_id": "ca10c5cf00425d0178998ec0b006cbb65ddbfb54", "repo": "scapy", "path": "scapy/contrib/pnio_rpc.py", "file_name": "pnio_rpc.py", "fun_name": "i2len", "commit_message": "[MS-RPCE] and 
[MS-SMB] major update (#3683)\n\n* Various fixes regarding DCE/RPC build\r\n\r\n* DCE/RPC sessions\r\n\r\n* Cleanup unused code\r\n\r\n* Add missing GSS_WRAP algo names\r\n\r\n* Add find_dcerpc_interface\r\n\r\n* Split SMB client and server\r\n\r\n* Missing StrFixedLenFieldUtf16\r\n\r\n* Remove unfinished smbserver feature\r\n\r\n* Friendlier getter for SMB2\r\n\r\n* DceRpcNak\r\n\r\n* Improve NDR parsing (a lot)\r\n\r\n* Minor SMB2 improvements\r\n\r\n* BIG NDR refactor + Dissect pointer deferal\r\n\r\n* Build with pointer deferral\r\n\r\n* Small build bugs\r\n\r\n* SMB2 logoff, fix rawToken in SMB standalone\r\n\r\n* Add security providers from MS-RPCE to DCERPC\r\n\r\n* Cleanup ptr_pack of NDRPacketListField\r\n\r\n* Clearer exception in find_dcerpc_interface\r\n\r\n* Add minor_version attribute\r\n\r\n* Fix computation of auth_pad in sec_trailer\r\n\r\n* Fix a WTF bug\r\n\r\n* Compute length for NDR arrays\r\n\r\n* Pass enum to EnumField\r\n\r\n* Match union attributes from response with request\r\n\r\n* Improve SMB server\r\n\r\n* Small bug in pointer deferal dissection\r\n\r\n* Add user-friendly utils\r\n\r\n* Add a few NDR tests\r\n\r\n* More user-friendly improvements\r\n\r\n* Bug: parent not copied in clone_with\r\n\r\n* Build: propagate NDR64 and bug fix\r\n\r\n* Default close response parameters\r\n\r\n* Fix Python 2.7\r\n\r\n* Fix SMB2_Create_Context offset\r\n\r\n* Fix SMB2 create context\r\n\r\n* SMB2: support chain, improvements\r\n\r\n* Fix ioctl error\r\n\r\n* SMB: check computeNTProofStr\r\n\r\n* Fix UTCField default\r\n\r\n* Improve FileId capabilities\r\n\r\n* SMB2: contexts\r\n\r\n* Typos\r\n\r\n* Minor NDRUnion fixes\r\n\r\n* Py2 fixes", "code": "def i2len(self, pkt, val):\n \n fld_len = self.fld.i2len(pkt, val)\n return fld_len + self.padlen(fld_len, pkt)\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 34, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 51, "n_identifiers": 7, "random_cut": "def i2len(self, pkt, val):\n \n fld_len = self.f", "d_id": 52762, "documentation": { "docstring": "get the length of the field, including the padding length", "n_words": 10, "vocab_size": 7, "n_whitespaces": 9, "language": "en" } }, { "id": 109326, "commit_id": "349f8678f1cf225d6070a236cf41a5e1f044cb18", "repo": "matplotlib", "path": "lib/matplotlib/_mathtext.py", "file_name": "_mathtext.py", "fun_name": "get_kerning", "commit_message": "Replace MathtextBackend mechanism.\n\nThe MathtextBackend (\"MB\") mechanism was previously used to let actual\nbackends customize how they received mathtext results -- either as lists\nof glyphs and rectangles (for vector backends: MathtextBackendPath),\nor a bitmap (for raster backends: MathtextBackendAgg); in both cases,\nmetrics are also provided. MBs also controlled font hinting. Note that\nthe MB mechanism was not publically user-extendable (this would require\ntouching the private MathTextParser._backend_mapping dict), so third\nparties could not meaningfully provide their own backends.\n\nMBs were attached to _mathtext.Fonts objects, which were central to\nthe \"shipping\" stage of the parse (ship(), which converts the nested\nparse tree created by pyparsing into flat calls to render_glyph and\nrender_rect_filled). 
This led to a slightly curious API, where\nthe old MathtextBackendAgg.get_results() (for example) calls\n`_mathtext.ship(0, 0, box)` and this somehow magically mutates self --\nthis is because self is indirectly attached to sub-elements of box.\n\nThis PR changes the implementation to instead detach output logic\nfrom Fonts (which become restricted to providing glyph metrics and\nrelated info), and makes ship() instead return a simple Output object\n(lists of glyphs and rects) which is itself able either to convert to\na VectorParse or a RasterParse -- namedtuples that are backcompatible\nwith the tuples previously returned by MathTextParser.parse(). (While\ntechnically these are \"new\" classes in the API, they are simply there to\n(slightly) better document the return value of MathtextBackend.parse().)\n\nIn summary, this patch\n- removes the non-extensible MB system,\n- detaches output logic from Fonts objects, thus avoiding \"action at\n distance\" where `ship(0, 0, box)` would mutate the calling MB,\n- (weakly) documents the return value of MathtextBackend.parse().\n\nUnrelatedly, also deprecate the unused MathTextWarning.", "code": "def get_kerning(self, next):\n \n advance = self._metrics.advance - self.width\n kern = 0.\n if isinstance(next, Char):\n kern = self.fontset.get_kern(\n self.font, self.font_class, self.c, self.fontsize,\n next.font, next.font_class, next.c, next.fontsize,\n self.dpi)\n return advance + kern\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 121, "n_words": 30, "vocab_size": 25, "complexity": 2, "nloc": 9, "token_counts": 79, "n_ast_nodes": 114, "n_identifiers": 16, "random_cut": "def get_kerning(self, next):\n \n advance = self._metrics.advance - self.width\n kern = 0.\n if isinstance", "d_id": 23525, "documentation": { "docstring": "\n Return the amount of kerning between this and the given character.\n\n This method is called when characters are strung together into `Hlist`\n to create `Kern` nodes.\n ", "n_words": 26, "vocab_size": 25, "n_whitespaces": 55, "language": "en" } }, { "id": 271691, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_generator_v1.py", "file_name": "training_generator_v1.py", "fun_name": "_get_num_samples_or_steps", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _get_num_samples_or_steps(data, steps_per_epoch):\n \n flat_inputs = tf.nest.flatten(data)\n if hasattr(flat_inputs[0], \"shape\"):\n return int(flat_inputs[0].shape[0]), False\n return steps_per_epoch, True\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 34, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 48, "n_ast_nodes": 77, "n_identifiers": 10, "random_cut": "def _get_num_samples_or_steps(data, steps_per_epoch):\n \n flat_inputs = tf.nest.flatten(data)\n if hasattr(flat_inputs[0], \"shape\"):\n return int(flat_inputs[0].shape[0]), False\n return steps_per_epoch, True\n\n", "d_id": 80845, "documentation": { "docstring": "Returns number of samples or steps, and whether to use steps count mode.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 26394, "commit_id": "ab45ebda5a14df6806046fd552e2c6d08f025503", "repo": "saleor", "path": "saleor/graphql/product/tests/test_attributes.py", 
"file_name": "test_attributes.py", "fun_name": "test_retrieve_product_attributes_input_type", "commit_message": "Better permissions (#9363)\n\n* Better permissions\r\n\r\n* Add OWNER permission\r\n\r\n* WIP Add enums to represent function-based permissions\r\n\r\n* Rename OWNER to IS_OWNER\r\n\r\n* Add flag to skip autogenerated permission message\r\n\r\n* Rename InternalPermissions to PermissionFunctions\r\n\r\n* Add permission descriptions for meta mutations\r\n\r\n* Better permissions validation\r\n\r\n* Reuse user checking functions\r\n\r\n* Rename permission functions enums\r\n\r\n* Update schema\r\n\r\n* Rename permission functions enums", "code": "def test_retrieve_product_attributes_input_type(staff_api_client, product, channel_USD):\n query = \n\n variables = {\"channel\": channel_USD.slug}\n found_products = get_graphql_content(\n staff_api_client.post_graphql(query, variables)\n )[\"data\"][\"products\"][\"edges\"]\n assert len(found_products) == 1\n\n for gql_attr in found_products[0][\"node\"][\"attributes\"]:\n assert len(gql_attr[\"values\"]) == 1\n assert gql_attr[\"values\"][0][\"inputType\"] == \"DROPDOWN\"\n\n\nATTRIBUTES_RESORT_QUERY = \n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 73, "n_words": 34, "vocab_size": 26, "complexity": 2, "nloc": 24, "token_counts": 87, "n_ast_nodes": 156, "n_identifiers": 13, "random_cut": "def test_retrieve_product_attributes_input_type(staff_api_client, product, channel_USD):\n query = \n\n variables = {\"channel\": channel_USD.slug}\n found_products = get_graphql_content(\n staff_api_client.post_graphql(query, variables)\n )[\"data\"][\"products\"][\"edges\"]\n assert len(found_products) == 1\n\n for gql_attr in found_products[0][\"node\"][\"attributes\"]:\n assert len(gq", "d_id": 4980, "documentation": { "docstring": "\n query ($channel: String){\n products(first: 10, channel: $channel) {\n edges {\n node {\n attributes {\n values {\n inputType\n }\n }\n }\n }\n }\n }\n \n mutation ProductTypeReorderAttributes(\n $productTypeId: ID!\n $moves: [ReorderInput!]!\n $type: ProductAttributeType!\n ) {\n productTypeReorderAttributes(\n productTypeId: $productTypeId\n moves: $moves\n type: $type\n ) {\n productType {\n id\n variantAttributes {\n id\n slug\n }\n productAttributes {\n id\n }\n }\n\n errors {\n field\n message\n code\n attributes\n }\n }\n }\n", "n_words": 64, "vocab_size": 39, "n_whitespaces": 433, "language": "en" } }, { "id": 205311, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/migration.py", "file_name": "migration.py", "fun_name": "apply", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def apply(self, project_state, schema_editor, collect_sql=False):\n \n for operation in self.operations:\n # If this operation cannot be represented as SQL, place a comment\n # there instead\n if collect_sql:\n schema_editor.collected_sql.append(\"--\")\n if not operation.reduces_to_sql:\n schema_editor.collected_sql.append(\n \"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:\"\n )\n schema_editor.collected_sql.append(\"-- %s\" % operation.describe())\n schema_editor.collected_sql.append(\"--\")\n if not operation.reduces_to_sql:\n continue\n # Save the state before the operation has run\n old_state = project_state.clone()\n operation.state_forwards(self.app_label, project_state)\n # Run the operation\n atomic_operation = operation.atomic 
or (\n self.atomic and operation.atomic is not False\n )\n if not schema_editor.atomic_migration and atomic_operation:\n # Force a transaction on a non-transactional-DDL backend or an\n # atomic operation inside a non-atomic migration.\n with atomic(schema_editor.connection.alias):\n operation.database_forwards(\n self.app_label, schema_editor, old_state, project_state\n )\n else:\n # Normal behaviour\n operation.database_forwards(\n self.app_label, schema_editor, old_state, project_state\n )\n return project_state\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 606, "n_words": 124, "vocab_size": 87, "complexity": 9, "nloc": 27, "token_counts": 160, "n_ast_nodes": 266, "n_identifiers": 21, "random_cut": "def apply(self, project_state, schema_editor, collect_sql=False):\n \n for operation in self.operations:\n # If this operation cannot be represented as SQL, place a comment\n # there instead\n if collect_sql:\n schema_editor.collected_sql.append(\"--\")\n if not operation.reduces_to_sql:\n schema_editor.collected_sql.append(\n \"-- MIGRATION NOW PERFO", "d_id": 51087, "documentation": { "docstring": "\n Take a project_state representing all migrations prior to this one\n and a schema_editor for a live database and apply the migration\n in a forwards order.\n\n Return the resulting project state for efficient reuse by following\n Migrations.\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 79, "language": "en" } }, { "id": 136637, "commit_id": "c976799dfd96806ec9972a287835f7a034ec3d2c", "repo": "ray", "path": "python/ray/autoscaler/_private/kuberay/node_provider.py", "file_name": "node_provider.py", "fun_name": "safe_to_scale", "commit_message": "KubeRay node provider refactor (#30281)\n\nImplements KubeRay node provider as a \"BatchingNodeProvider\".\r\nBuilds on #29933.\r\n\r\nSummary of design\r\nAn autoscaler update now works like this:\r\n\r\nlist pod data from k8s\r\ncheck if it's safe to proceed with update. Abort the update if not.\r\ndo some internal calculation to determine desired scale\r\nsubmit a single patch to the RayCluster CR if a scale change is required\r\nEverything is single-threaded and there are O(1) K8s API calls per autoscaler update.\r\n\r\nSigned-off-by: Dmitri Gekhtman ", "code": "def safe_to_scale(self) -> bool:\n \n # Get the list of nodes.\n node_set = set(self.node_data_dict.keys())\n worker_groups = self._raycluster[\"spec\"].get(\"workerGroupSpecs\", [])\n\n # Accumulates the indices of worker groups with non-empty workersToDelete\n non_empty_worker_group_indices = []\n\n for group_index, worker_group in enumerate(worker_groups):\n workersToDelete = worker_group.get(\"scaleStrategy\", {}).get(\n \"workersToDelete\", []\n )\n if workersToDelete:\n non_empty_worker_group_indices.append(group_index)\n for worker in workersToDelete:\n if worker in node_set:\n # The operator hasn't removed this worker yet. 
Abort\n # the autoscaler update.\n logger.warning(f\"Waiting for operator to remove worker {worker}.\")\n return False\n\n # All required workersToDelete have been removed.\n # Clean up the workersToDelete field.\n patch_payload = []\n for group_index in non_empty_worker_group_indices:\n patch = worker_delete_patch(group_index, workers_to_delete=[])\n patch_payload.append(patch)\n if patch_payload:\n logger.info(\"Cleaning up workers to delete.\")\n logger.info(f\"Submitting patch {patch_payload}.\")\n self._submit_raycluster_patch(patch_payload)\n\n # It's safe to proceed with the autoscaler update.\n return True\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 440, "n_words": 122, "vocab_size": 79, "complexity": 7, "nloc": 40, "token_counts": 147, "n_ast_nodes": 262, "n_identifiers": 25, "random_cut": "def safe_to_scale(self) -> bool:\n \n # Get the list of nodes.\n node_set = set(self.node_data_dict.keys())\n worker_groups = self._raycluster[\"spec\"].get(\"workerGroupSpecs\", [])\n\n # Accumulates the indices of worker groups with non-empty workersToDelete\n non_empty_worker_group_indices = []\n\n for group_index, worker_group in enumerate(worker_groups):\n workersToDelete = worker_group.get(\"scaleStrategy\", {}).get(\n \"workersToDelete\", []\n )\n if workersToDelete:\n non_empty_worker_group_indices.append(group_index)\n for worker in workersToDelete:\n if worker in node_set:\n # The ope", "d_id": 30960, "documentation": { "docstring": "Returns False iff non_terminated_nodes contains any pods in the RayCluster's\n workersToDelete lists.\n\n Explanation:\n If there are any workersToDelete which are non-terminated,\n we should wait for the operator to do its job and delete those\n pods. 
Therefore, we back off the autoscaler update.\n\n If, on the other hand, all of the workersToDelete have already been cleaned up,\n then we patch away the workersToDelete lists and return True.\n In the future, we may consider having the operator clean up workersToDelete\n on it own:\n https://github.com/ray-project/kuberay/issues/733\n\n Note (Dmitri):\n It is stylistically bad that this function has a side effect.\n ", "n_words": 95, "vocab_size": 76, "n_whitespaces": 186, "language": "en" } }, { "id": 203818, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/db/backends/oracle/operations.py", "file_name": "operations.py", "fun_name": "get_distance", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_distance(self, f, value, lookup_type):\n \n if not value:\n return []\n value = value[0]\n if isinstance(value, Distance):\n if f.geodetic(self.connection):\n dist_param = value.m\n else:\n dist_param = getattr(\n value, Distance.unit_attname(f.units_name(self.connection))\n )\n else:\n dist_param = value\n\n # dwithin lookups on Oracle require a special string parameter\n # that starts with \"distance=\".\n if lookup_type == \"dwithin\":\n dist_param = \"distance=%s\" % dist_param\n\n return [dist_param]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 240, "n_words": 58, "vocab_size": 42, "complexity": 5, "nloc": 16, "token_counts": 89, "n_ast_nodes": 148, "n_identifiers": 14, "random_cut": "def get_distance(self, f, value, lookup_type):\n \n if not value:\n return []\n value = value[0]\n if isinstance(value, Distance):\n if f.geodetic(self.connection):\n dist_param = value.m\n else:\n dist_param = getattr(\n value, Distance.unit_attname(f.units_name(self.connection))\n )\n else:\n dist_param = value\n\n # dwithin lookups on Oracle require a special string parameter\n # that starts with \"distance=\".\n if lookup_type == \"", "d_id": 50543, "documentation": { "docstring": "\n Return the distance parameters given the value and the lookup type.\n On Oracle, geometry columns with a geodetic coordinate system behave\n implicitly like a geography column, and thus meters will be used as\n the distance parameter on them.\n ", "n_words": 38, "vocab_size": 32, "n_whitespaces": 74, "language": "en" } }, { "id": 74848, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_standard_get_document_model", "commit_message": "Reformat with black", "code": "def test_standard_get_document_model(self):\n \n del settings.WAGTAILDOCS_DOCUMENT_MODEL\n from wagtail.documents.models import Document\n\n self.assertIs(get_document_model(), Document)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 38, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 28, "n_ast_nodes": 46, "n_identifiers": 10, "random_cut": "def test_standard_get_document_model(self):\n \n del settings.WAGTAILDOCS_DOCUMENT_MODEL\n from wagtail.documents.models import Document\n\n self.assertIs(get_document_model(), Document)\n", "d_id": 16326, "documentation": { "docstring": "Test get_document_model with no WAGTAILDOCS_DOCUMENT_MODEL", "n_words": 5, "vocab_size": 5, "n_whitespaces": 4, "language": "en" } }, { "id": 261639, 
"commit_id": "af16e5934ae269d05fd7df983b97def7c0ef0bd2", "repo": "scikit-learn", "path": "sklearn/utils/__init__.py", "file_name": "__init__.py", "fun_name": "_safe_assign", "commit_message": "MAINT test globally setting output via context manager (#24932)\n\nCo-authored-by: jeremie du boisberranger ", "code": "def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):\n \n row_indexer = slice(None, None, None) if row_indexer is None else row_indexer\n column_indexer = (\n slice(None, None, None) if column_indexer is None else column_indexer\n )\n\n if hasattr(X, \"iloc\"): # pandas dataframe\n X.iloc[row_indexer, column_indexer] = values\n else: # numpy array or sparse matrix\n X[row_indexer, column_indexer] = values\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 92, "n_words": 51, "vocab_size": 33, "complexity": 4, "nloc": 9, "token_counts": 80, "n_ast_nodes": 120, "n_identifiers": 8, "random_cut": "def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):\n \n row_indexer = slice(None, None, None) if row_indexer is None else row_indexer\n column_indexer = (\n slice(None, None, None) if column_indexer is None else column_indexer\n )\n\n if", "d_id": 76908, "documentation": { "docstring": "Safe assignment to a numpy array, sparse matrix, or pandas dataframe.\n\n Parameters\n ----------\n X : {ndarray, sparse-matrix, dataframe}\n Array to be modified. It is expected to be 2-dimensional.\n\n values : ndarray\n The values to be assigned to `X`.\n\n row_indexer : array-like, dtype={int, bool}, default=None\n A 1-dimensional array to select the rows of interest. If `None`, all\n rows are selected.\n\n column_indexer : array-like, dtype={int, bool}, default=None\n A 1-dimensional array to select the columns of interest. 
If `None`, all\n columns are selected.\n ", "n_words": 80, "vocab_size": 50, "n_whitespaces": 143, "language": "en" } }, { "id": 219662, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "copy_sign", "commit_message": "add python 3.10.4 for windows", "code": "def copy_sign(self, a, b):\n \n a = _convert_other(a, raiseit=True)\n return a.copy_sign(b)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def copy_sign(self, a, b):\n \n a = _convert_other(a, raiseit=True)\n return a.copy_sign(b)\n", "d_id": 55690, "documentation": { "docstring": "Copies the second operand's sign to the first one.\n\n In detail, it returns a copy of the first operand with the sign\n equal to the sign of the second operand.\n\n >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))\n Decimal('1.50')\n >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))\n Decimal('1.50')\n >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))\n Decimal('-1.50')\n >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))\n Decimal('-1.50')\n >>> ExtendedContext.copy_sign(1, -2)\n Decimal('-1')\n >>> ExtendedContext.copy_sign(Decimal(1), -2)\n Decimal('-1')\n >>> ExtendedContext.copy_sign(1, Decimal(-2))\n Decimal('-1')\n ", "n_words": 60, "vocab_size": 32, "n_whitespaces": 179, "language": "en" } }, { "id": 70979, "commit_id": "de3fcba9e95818e9634ab7de6bfcb1f4221f2775", "repo": "wagtail", "path": "wagtail/contrib/forms/views.py", "file_name": "views.py", "fun_name": "get_validated_ordering", "commit_message": "Fix warnings from flake8-comprehensions.", "code": "def get_validated_ordering(self):\n \n orderable_fields = self.orderable_fields or ()\n ordering = {}\n if self.is_export:\n # Revert to CSV order_by submit_time ascending for backwards compatibility\n default_ordering = self.ordering_csv or ()\n else:\n default_ordering = self.ordering or ()\n if isinstance(default_ordering, str):\n default_ordering = (default_ordering,)\n ordering_strs = self.request.GET.getlist('order_by') or list(default_ordering)\n for order in ordering_strs:\n try:\n _, prefix, field_name = order.rpartition('-')\n if field_name in orderable_fields:\n ordering[field_name] = (\n prefix, 'descending' if prefix == '-' else 'ascending'\n )\n except (IndexError, ValueError):\n continue # invalid ordering specified, skip it\n return ordering\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 319, "n_words": 82, "vocab_size": 58, "complexity": 11, "nloc": 20, "token_counts": 122, "n_ast_nodes": 205, "n_identifiers": 21, "random_cut": "def get_validated_ordering(self):\n \n orderable_fields = self.orderable_fields or ()\n ", "d_id": 15591, "documentation": { "docstring": " Return a dict of field names with ordering labels if ordering is valid ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 14, "language": "en" } }, { "id": 177309, "commit_id": "06dc63c62822a56d3a8ed36c65630298d8954cff", "repo": "networkx", "path": "networkx/generators/tests/test_expanders.py", "file_name": "test_expanders.py", "fun_name": "test_chordal_cycle_graph", "commit_message": "Minor updates to expanders 
generator tests (#6027)\n\n* Split MGG test into two based on dependencies.\r\n\r\n* Parametrize tests on prime numbers.\r\n\r\n* Use fns from nx namespace, rm explicit imports.\r\n\r\n* Parametrize exception test and check message.", "code": "def test_chordal_cycle_graph(p):\n \n G = nx.chordal_cycle_graph(p)\n assert len(G) == p\n # TODO The second largest eigenvalue should be smaller than a constant,\n # independent of the number of nodes in the graph:\n #\n # eigs = sorted(sp.linalg.eigvalsh(nx.adjacency_matrix(G).toarray()))\n # assert_less(eigs[-2], ...)\n #\n\n\n@pytest.mark.parametrize(\"p\", (3, 5, 7, 11, 13)) # Primes", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"p\", (3, 5, 7, 11, 13))", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 83, "n_words": 48, "vocab_size": 39, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def test_chordal_cycle_graph(p):\n \n G = nx.chordal_cycle_graph(p)\n assert len(G) == p\n # TODO The second largest eigenvalue should be smaller than a constant,\n # independent of the number of nodes in the graph:\n #\n # eigs = sorted(sp.linalg.eigvalsh(nx.adjacency_matrix(G).toarray()))\n # assert_less(", "d_id": 42333, "documentation": { "docstring": "Test for the :func:`networkx.chordal_cycle_graph` function.", "n_words": 5, "vocab_size": 5, "n_whitespaces": 4, "language": "en" } }, { "id": 269171, "commit_id": "a127de7007fe49413bd9167e179f5df12b6c100e", "repo": "keras", "path": "keras/utils/dataset_utils.py", "file_name": "dataset_utils.py", "fun_name": "convert_dataset_split_sizes", "commit_message": "fixes dataset slicing errors", "code": "def convert_dataset_split_sizes(left_size,right_size,total_size):\n \n\n left_size_type = type(left_size) \n right_size_type = type(right_size)\n\n\n if left_size is not None and left_size_type not in [int,float]:\n raise ValueError(f'Invalid `left_size` type Got {left_size_type}'\n 'It should be one of float,int or None') \n if right_size is not None and right_size_type not in [int,float]: \n raise ValueError(f'Invalid `right_size` type Got {right_size_type}'\n 'It should be one of float,int or None') \n \n \n if (left_size_type == int \n and (left_size <= 0 or left_size>= total_size)\n or left_size_type == float \n and (left_size <= 0 or left_size>= 1) ):\n raise ValueError('`left_size` should be either a positive integer'\n f'and smaller than {total_size} or a float '\n 'within the range `[0, 1]`') \n \n if (right_size_type == int \n and (right_size <= 0 or right_size>= total_size) \n or right_size_type == float \n and (right_size <= 0 or right_size>= 1)):\n raise ValueError('`right_size` should be either a positive integer '\n f'and smaller than {total_size} or'\n 'a float within the range `[0, 1]`') \n \n if right_size_type == left_size_type == float and right_size + left_size > 1:\n raise ValueError('sum of `left_size` and `right_size`'\n ' should be within `[0,1]`'\n f'Got {right_size + left_size} ,'\n 'reduce the `left_size` or `right_size`')\n\n if left_size_type == float:\n left_size = math.ceil(left_size*total_size)\n else:\n left_size = float(left_size)\n\n if right_size_type == float:\n right_size = math.ceil(right_size*total_size)\n else:\n right_size = float(right_size)\n\n\n if left_size is None:\n left_size = total_size - right_size\n elif right_size is None:\n right_size = total_size - left_size\n\n if left_size + right_size > total_size:\n raise ValueError('The sum of 
`left_size` and `right_size`'\n f' should be smaller than the samples {total_size} '\n ' reduce `left_size` or `right_size` ' )\n\n \n if left_size == 0:\n raise ValueError(f'with dataset of length={total_size}'\n '`left_size`={left_size} and `right_size`={right_size} '\n 'resulting left dataset split will be empty, '\n 'adjust any of the aforementioned parameters')\n \n left_size,right_size = int(left_size) ,int(right_size)\n return left_size,right_size\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 674, "n_words": 278, "vocab_size": 115, "complexity": 25, "nloc": 51, "token_counts": 278, "n_ast_nodes": 496, "n_identifiers": 12, "random_cut": "def convert_dataset_split_sizes(left_size,right_size,total_size):\n \n\n left_size_type = type(left_size) \n right_size_type = type(right_size)\n\n\n if left_size is not None and left_size_type not in [int,float]:\n raise ValueError(f'Invalid `left_size` type Got {left_size_type}'\n 'It should be one of float,int or None') \n if right_size is not None and right_size_type not in [int,float]: \n raise ValueError(f'Invalid `right_size` type Got {right_size_type}'\n 'It should be one of float,int or None') \n \n \n if (left_size_type == int \n and (left_size <= 0 or left", "d_id": 79938, "documentation": { "docstring": "Helper function to convert left_size/right_size relative to dataset's size\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 11, "language": "en" } }, { "id": 80945, "commit_id": "f52ef6e9677b01c111b012a8725da43a2580d8f1", "repo": "awx", "path": "awx/main/managers.py", "file_name": "managers.py", "fun_name": "active_count", "commit_message": "Fixes case sensitive host count", "code": "def active_count(self):\n \n return self.order_by().exclude(inventory_sources__source='controller').values(name_lower=Lower('name')).distinct().count()\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 37, "n_ast_nodes": 68, "n_identifiers": 10, "random_cut": "def active_count(self):\n \n return self.order_by().exclude(inventory_sources__source='controller').values(name_lower=Lower('name')).distinct().count()\n", "d_id": 17116, "documentation": { "docstring": "Return count of active, unique hosts for licensing.\n Construction of query involves:\n - remove any ordering specified in model's Meta\n - Exclude hosts sourced from another Tower\n - Restrict the query to only return the name column\n - Only consider results that are unique\n - Return the count of this query\n ", "n_words": 51, "vocab_size": 37, "n_whitespaces": 105, "language": "en" } }, { "id": 249886, "commit_id": "09de2aecb05cb46e0513396e2675b24c8beedb68", "repo": "synapse", "path": "tests/handlers/test_sso.py", "file_name": "test_sso.py", "fun_name": "test_set_avatar_incorrect_mime_type", "commit_message": "Add support for handling avatar with SSO login (#13917)\n\nThis commit adds support for handling a provided avatar picture URL\r\nwhen logging in via SSO.\r\n\r\nSigned-off-by: Ashish Kumar \r\n\r\nFixes #9357.", "code": "async def test_set_avatar_incorrect_mime_type(self) -> None:\n \n handler = self.hs.get_sso_handler()\n\n # any random user works since image check is supposed to fail\n user_id = \"@sso-user:test\"\n\n self.assertFalse(\n self.get_success(handler.set_avatar(user_id, \"http://my.server/me.png\"))\n 
)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 80, "n_words": 27, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 38, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "async def test_set_avatar_incorrect_mime_type(self) -> None:\n \n handler = self.hs.get_sso_handler()\n\n # any random user works since image check is supposed to fail\n us", "d_id": 73183, "documentation": { "docstring": "Tests that saving an avatar fails when its mime type is not allowed", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 276201, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saved_model/utils.py", "file_name": "utils.py", "fun_name": "layer_uses_training_bool", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def layer_uses_training_bool(layer):\n \n if layer._expects_training_arg: # pylint: disable=protected-access\n return True\n visited = {layer}\n to_visit = list_all_layers(layer)\n while to_visit:\n layer = to_visit.pop()\n if layer in visited:\n continue\n if getattr(layer, \"_expects_training_arg\", True):\n return True\n visited.add(layer)\n to_visit.extend(list_all_layers(layer))\n return False\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 118, "n_words": 35, "vocab_size": 27, "complexity": 5, "nloc": 14, "token_counts": 69, "n_ast_nodes": 117, "n_identifiers": 10, "random_cut": "def layer_uses_training_bool(layer):\n \n if layer._expects_training_arg: # pylint: disable=protected-access\n return True\n visited = {layer}\n to_visit = list_all_layers(la", "d_id": 81585, "documentation": { "docstring": "Returns whether this layer or any of its children uses the training arg.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 176084, "commit_id": "26be7d28bdb4eb96c888e373e08f46e6b85711e3", "repo": "edgedb", "path": "tests/test_edgeql_for.py", "file_name": "test_edgeql_for.py", "fun_name": "test_edgeql_for_in_computable_09", "commit_message": "Add a `bag` type that tells assert_query_result to ignore order (#3314)\n\nassert_query_result currently supports using sets to ignore order,\r\nbut that doesn't work for objects, which can't be hashed or sorted.\r\n\r\nThere is a system for specifying a sort key for internal data, but it\r\nis way clunkier than just saying we don't care about the order.\r\n\r\nI converted some places that were using sort= to use this.", "code": "async def test_edgeql_for_in_computable_09(self):\n # This is basically test_edgeql_for_in_computable_01 but with\n # a WITH binding in front of the whole shape\n await self.assert_query_result(\n r", "url": "https://github.com/edgedb/edgedb.git", "language": "Python", "ast_errors": "\n # This is basically test_edgeql_for_in_computable_01 but with\n # a WITH binding in front of the whole shape\n await self.assert_query_result(\n r'''\n WITH\n U := (\n SELECT User {\n select_deck := (\n FOR letter IN {'I', 'B'}\n UNION (\n SELECT User.deck {User", "n_ast_errors": 2, "ast_levels": 6, "n_whitespaces": 54, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 30, "token_counts": 48, "n_ast_nodes": 34, "n_identifiers": 8, "random_cut": "async def test_edgeql_for_in_computable_09(self):\n # This", "d_id": 41674, "documentation": { "docstring": "\n 
WITH\n U := (\n SELECT User {\n select_deck := (\n FOR letter IN {'I', 'B'}\n UNION (\n SELECT User.deck {\n name,\n # just define an ad-hoc link prop", "n_words": 28, "vocab_size": 23, "n_whitespaces": 287, "language": "en" } }, { "id": 65173, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/budget_variance_report/budget_variance_report.py", "file_name": "budget_variance_report.py", "fun_name": "get_actual_details", "commit_message": "style: format code with black", "code": "def get_actual_details(name, filters):\n\tbudget_against = frappe.scrub(filters.get(\"budget_against\"))\n\tcond = \"\"\n\n\tif filters.get(\"budget_against\") == \"Cost Center\":\n\t\tcc_lft, cc_rgt = frappe.db.get_value(\"Cost Center\", name, [\"lft\", \"rgt\"])\n\t\tcond = .format(\n\t\t\tlft=cc_lft, rgt=cc_rgt\n\t\t)\n\n\tac_details = frappe.db.sql(\n\t\t.format(\n\t\t\ttab=filters.budget_against, budget_against=budget_against, cond=cond\n\t\t),\n\t\t(filters.from_fiscal_year, filters.to_fiscal_year, name),\n\t\tas_dict=1,\n\t)\n\n\tcc_actual_details = {}\n\tfor d in ac_details:\n\t\tcc_actual_details.setdefault(d.account, []).append(d)\n\n\treturn cc_actual_details\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 33, "n_words": 52, "vocab_size": 43, "complexity": 3, "nloc": 53, "token_counts": 138, "n_ast_nodes": 223, "n_identifiers": 26, "random_cut": "def get_actual_details(name, filters):\n\tbudget_against = frappe.scrub(filters.get(\"budget_against\"))\n\tcond = \"\"\n\n\tif filters.get(\"budget_against\") == \"Cost Center\":\n\t\tcc_lft, cc_rgt = frappe.db.get_value(\"Cost Center\", name, [\"lft\", \"rgt\"])\n\t\tcond = .format(\n\t\t\tlft=cc_lft, rgt=cc_rgt\n\t\t)\n\n\tac_details = frappe.db.sql(\n\t\t.format(\n\t\t\ttab=filters.budget_against, budget_against=budget_against, cond=cond\n\t\t),\n\t\t(filters.from_fiscal_year, filters.to_fiscal_year, name),\n\t\tas_dict=1,\n\t)\n\n\tcc_actual_details = {}\n\tfor d in ac_details:\n\t\tcc_actual_details.setdefault(d.account, []).append(d)\n\n\treturn cc_ac", "d_id": 13815, "documentation": { "docstring": "\n\t\t\t\tand lft >= \"{lft}\"\n\t\t\t\tand rgt <= \"{rgt}\"\n\t\t\t\n\t\t\tselect\n\t\t\t\tgl.account,\n\t\t\t\tgl.debit,\n\t\t\t\tgl.credit,\n\t\t\t\tgl.fiscal_year,\n\t\t\t\tMONTHNAME(gl.posting_date) as month_name,\n\t\t\t\tb.{budget_against} as budget_against\n\t\t\tfrom\n\t\t\t\t`tabGL Entry` gl,\n\t\t\t\t`tabBudget Account` ba,\n\t\t\t\t`tabBudget` b\n\t\t\twhere\n\t\t\t\tb.name = ba.parent\n\t\t\t\tand b.docstatus = 1\n\t\t\t\tand ba.account=gl.account\n\t\t\t\tand b.{budget_against} = gl.{budget_against}\n\t\t\t\tand gl.fiscal_year between %s and %s\n\t\t\t\tand b.{budget_against} = %s\n\t\t\t\tand exists(\n\t\t\t\t\tselect\n\t\t\t\t\t\tname\n\t\t\t\t\tfrom\n\t\t\t\t\t\t`tab{tab}`\n\t\t\t\t\twhere\n\t\t\t\t\t\tname = gl.{budget_against}\n\t\t\t\t\t\t{cond}\n\t\t\t\t)\n\t\t\t\tgroup by\n\t\t\t\t\tgl.name\n\t\t\t\torder by gl.fiscal_year\n\t\t", "n_words": 70, "vocab_size": 46, "n_whitespaces": 38, "language": "en" } }, { "id": 22864, "commit_id": "39c49e07066b2a53e176d555af6a7bf8aabb8a9c", "repo": "Python", "path": "VoiceAssistant/Project_Basic_struct/textRead.py", "file_name": "textRead.py", "fun_name": "print_index", "commit_message": "VoiceAssistant\n\nThis is Voice Assistant coded using Python which can do the following: -\r\n 1. Speak Text entered by User.\r\n 2. 
Search anything on Google.\r\n 3. Search anything on Wikipedia.\r\n 4. Read an MS Word(docx) document.\r\n 5. Read a book(PDF).\r\n 6. Can be used as a Dictator.", "code": "def print_index(toc):\r\n \r\n dash = \"-\"*(100 - 7)\r\n space = \" \"*47\r\n print(f\"{space}INDEX\")\r\n print(f\"\\n\\nName : {dash} PageNo.\\n\\n\\n\")\r\n for topic in toc:\r\n eq_dash = \"-\"*(100 - len(topic[1]))\r\n print(f\"{topic[1]} {eq_dash} {topic[2]}\")\r\n \r", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 68, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 8, "token_counts": 55, "n_ast_nodes": 131, "n_identifiers": 8, "random_cut": "def print_index(toc):\r\n \r\n dash = \"-\"*(100 - 7)\r\n spa", "d_id": 4477, "documentation": { "docstring": "Prints out the index in proper format with title name and page number\r\n\r\n Args:\r\n toc (nested list): toc[1] - Topic name\r\n toc[2] - Page number\r\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 64, "language": "en" } }, { "id": 106836, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/__init__.py", "file_name": "__init__.py", "fun_name": "pie", "commit_message": "apply black py to all python files", "code": "def pie(self, X, win=None, env=None, opts=None):\n \n\n X = np.squeeze(X)\n assert X.ndim == 1, \"X should be one-dimensional\"\n assert np.all(np.greater_equal(X, 0)), \"X cannot contain negative values\"\n\n opts = {} if opts is None else opts\n _title2str(opts)\n _assert_opts(opts)\n\n data = [\n {\n \"values\": X.tolist(),\n \"labels\": opts.get(\"legend\"),\n \"type\": \"pie\",\n }\n ]\n return self._send(\n {\n \"data\": data,\n \"win\": win,\n \"eid\": env,\n \"layout\": _opts2layout(opts),\n \"opts\": opts,\n }\n )\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 304, "n_words": 63, "vocab_size": 55, "complexity": 2, "nloc": 23, "token_counts": 128, "n_ast_nodes": 213, "n_identifiers": 18, "random_cut": "def pie(self, X, win=None, env=None, opts=None):\n \n\n X = np.squeeze(X)\n assert X.ndim == 1, \"X should be one-dimensional\"\n assert np.all(np.greater_equal(X, 0)), \"X cannot contain negative values\"\n\n opts = {} if opts is None else opts\n _title2str(opts)\n _assert_opts(opts)\n\n data = [\n ", "d_id": 22463, "documentation": { "docstring": "\n This function draws a pie chart based on the `N` tensor `X`.\n\n The following `opts` are supported:\n\n - `opts.legend`: `list` containing legend names\n ", "n_words": 23, "vocab_size": 23, "n_whitespaces": 52, "language": "en" } }, { "id": 46963, "commit_id": "04082ac091e92587b22c8323170ebe38bc68a19a", "repo": "airflow", "path": "airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py", "file_name": "kubernetes_pod.py", "fun_name": "dry_run", "commit_message": "Cleanup dup code now that k8s provider requires 2.3.0+ (#22845)", "code": "def dry_run(self) -> None:\n \n pod = self.build_pod_request_obj()\n print(yaml.dump(prune_dict(pod.to_dict(), mode='strict')))\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 8, "token_counts": 35, "n_ast_nodes": 62, "n_identifiers": 10, "random_cut": "def dry_run(self) -> None:\n \n pod = self.build_pod_request_obj()", "d_id": 9046, 
"documentation": { "docstring": "\n Prints out the pod definition that would be created by this operator.\n Does not include labels specific to the task instance (since there isn't\n one in a dry_run) and excludes all empty elements.\n ", "n_words": 33, "vocab_size": 32, "n_whitespaces": 62, "language": "en" } }, { "id": 196693, "commit_id": "9ad8ab9fe58051cf11626ba6654852fcfec60147", "repo": "sympy", "path": "sympy/stats/crv_types.py", "file_name": "crv_types.py", "fun_name": "ExponentialPower", "commit_message": "Documentation cleanup 5", "code": "def ExponentialPower(name, mu, alpha, beta):\n r\n return rv(name, ExponentialPowerDistribution, (mu, alpha, beta))\n\n\n#-------------------------------------------------------------------------------\n# F distribution ---------------------------------------------------------------\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 63, "token_counts": 28, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def ExponentialPower(name, mu, alpha, beta):\n r\n return rv(name, ExponentialPowerDistribution, (mu, alpha, beta))\n\n\n#-------------------------------------------------------------------------------\n# F distribution ----------------------------", "d_id": 48111, "documentation": { "docstring": "\n Create a Continuous Random Variable with Exponential Power distribution.\n This distribution is known also as Generalized Normal\n distribution version 1.\n\n Explanation\n ===========\n\n The density of the Exponential Power distribution is given by\n\n .. math::\n f(x) := \\frac{\\beta}{2\\alpha\\Gamma(\\frac{1}{\\beta})}\n e^{{-(\\frac{|x - \\mu|}{\\alpha})^{\\beta}}}\n\n with :math:`x \\in [ - \\infty, \\infty ]`.\n\n Parameters\n ==========\n\n mu : Real number\n A location.\n alpha : Real number,`\\alpha > 0`\n A scale.\n beta : Real number, `\\beta > 0`\n A shape.\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import ExponentialPower, density, cdf\n >>> from sympy import Symbol, pprint\n >>> z = Symbol(\"z\")\n >>> mu = Symbol(\"mu\")\n >>> alpha = Symbol(\"alpha\", positive=True)\n >>> beta = Symbol(\"beta\", positive=True)\n >>> X = ExponentialPower(\"x\", mu, alpha, beta)\n >>> pprint(density(X)(z), use_unicode=False)\n beta\n /|mu - z|\\\n -|--------|\n \\ alpha /\n beta*e\n ---------------------\n / 1 \\\n 2*alpha*Gamma|----|\n \\beta/\n >>> cdf(X)(z)\n 1/2 + lowergamma(1/beta, (Abs(mu - z)/alpha)**beta)*sign(-mu + z)/(2*gamma(1/beta))\n\n References\n ==========\n\n .. [1] https://reference.wolfram.com/language/ref/ExponentialPowerDistribution.html\n .. 
[2] https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1\n\n ", "n_words": 152, "vocab_size": 109, "n_whitespaces": 387, "language": "en" } }, { "id": 9135, "commit_id": "995b44897fe6158bb70ad03a3e79f517f65f9034", "repo": "insightface", "path": "parsing/dml_csr/utils/miou.py", "file_name": "miou.py", "fun_name": "get_palette", "commit_message": "Create miou.py", "code": "def get_palette(num_cls):\n \n\n n = num_cls\n palette = [0] * (n * 3)\n for j in range(0, n):\n lab = j\n palette[j * 3 + 0] = 0\n palette[j * 3 + 1] = 0\n palette[j * 3 + 2] = 0\n i = 0\n while lab:\n palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))\n palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))\n palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))\n i += 1\n lab >>= 3\n return palette\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 211, "n_words": 99, "vocab_size": 41, "complexity": 3, "nloc": 16, "token_counts": 161, "n_ast_nodes": 239, "n_identifiers": 8, "random_cut": "def get_palette(num_cls):\n \n\n n = num_cls\n palette = [0] * (n * 3)\n for j in range(0, n):\n lab = j\n palette[j * 3 + 0] = 0\n palette[j * 3 + 1] = 0\n palette[j * 3 + 2] = 0\n i = 0\n while lab:\n palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))\n palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))\n palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))\n i += 1\n lab >>= 3\n return palette\n\n", "d_id": 1559, "documentation": { "docstring": " Returns the color map for visualizing the segmentation mask.\n Args:\n num_cls: Number of classes\n Returns:\n The color map\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 42, "language": "en" } }, { "id": 60287, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/pycaffe.py", "file_name": "pycaffe.py", "fun_name": "_Net_forward_backward_all", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):\n \n # Batch blobs and diffs.\n all_outs = {out: [] for out in set(self.outputs + (blobs or []))}\n all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}\n forward_batches = self._batch({in_: kwargs[in_]\n for in_ in self.inputs if in_ in kwargs})\n backward_batches = self._batch({out: kwargs[out]\n for out in self.outputs if out in kwargs})\n # Collect outputs from batches (and heed lack of forward/backward batches).\n for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):\n batch_blobs = self.forward(blobs=blobs, **fb)\n batch_diffs = self.backward(diffs=diffs, **bb)\n for out, out_blobs in six.iteritems(batch_blobs):\n all_outs[out].extend(out_blobs.copy())\n for diff, out_diffs in six.iteritems(batch_diffs):\n all_diffs[diff].extend(out_diffs.copy())\n # Package in ndarray.\n for out, diff in zip(all_outs, all_diffs):\n all_outs[out] = np.asarray(all_outs[out])\n all_diffs[diff] = np.asarray(all_diffs[diff])\n # Discard padding at the end and package in ndarray.\n pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))\n if pad:\n for out, diff in zip(all_outs, all_diffs):\n all_outs[out] = all_outs[out][:-pad]\n all_diffs[diff] = all_diffs[diff][:-pad]\n return all_outs, all_diffs\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 14, "n_whitespaces": 348, "n_words": 144, "vocab_size": 90, "complexity": 15, "nloc": 23, "token_counts": 326, "n_ast_nodes": 500, "n_identifiers": 37, "random_cut": "def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):\n \n # Batch blobs and diffs.\n all_outs = {out: [] for out in set(self.outputs + (blobs or []))}\n all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}\n forward_batches = self._batch({in_: ", "d_id": 12067, "documentation": { "docstring": "\n Run net forward + backward in batches.\n\n Parameters\n ----------\n blobs: list of blobs to extract as in forward()\n diffs: list of diffs to extract as in backward()\n kwargs: Keys are input (for forward) and output (for backward) blob names\n and values are ndarrays. Refer to forward() and backward().\n Prefilled variants are called for lack of input or output blobs.\n\n Returns\n -------\n all_blobs: {blob name: blob ndarray} dict.\n all_diffs: {blob name: diff ndarray} dict.\n ", "n_words": 73, "vocab_size": 51, "n_whitespaces": 129, "language": "en" } }, { "id": 249233, "commit_id": "1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b", "repo": "synapse", "path": "tests/rest/admin/test_device.py", "file_name": "test_device.py", "fun_name": "test_user_does_not_exist", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13479)\n\nReplace\r\n- `HTTPStatus.NOT_FOUND`\r\n- `HTTPStatus.FORBIDDEN`\r\n- `HTTPStatus.UNAUTHORIZED`\r\n- `HTTPStatus.CONFLICT`\r\n- `HTTPStatus.CREATED`\r\n\r\nSigned-off-by: Dirk Klimpel ", "code": "def test_user_does_not_exist(self) -> None:\n \n url = \"/_synapse/admin/v2/users/@unknown_person:test/devices\"\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 94, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 12, "token_counts": 59, "n_ast_nodes": 96, "n_identifiers": 13, "random_cut": "def test_user_does_not_exist(self) -> None:\n \n url = \"/_synapse/admin/v2/users/@unknown_person:test/devices\"\n channe", "d_id": 72737, "documentation": { "docstring": "\n Tests that a lookup for a user that does not exist returns a 404\n ", "n_words": 14, "vocab_size": 11, "n_whitespaces": 29, "language": "en" } }, { "id": 246289, "commit_id": "d0e78af35e519ff76bd23e786007f3e7130d90f7", "repo": "synapse", "path": "synapse/replication/tcp/protocol.py", "file_name": "protocol.py", "fun_name": "send_ping", "commit_message": "Add missing type hints to synapse.replication. (#11938)", "code": "def send_ping(self) -> None:\n \n now = self.clock.time_msec()\n\n if self.time_we_closed:\n if now - self.time_we_closed > PING_TIMEOUT_MS:\n logger.info(\n \"[%s] Failed to close connection gracefully, aborting\", self.id()\n )\n assert self.transport is not None\n self.transport.abortConnection()\n else:\n if now - self.last_sent_command >= PING_TIME:\n self.send_command(PingCommand(str(now)))\n\n if (\n self.received_ping\n and now - self.last_received_command > PING_TIMEOUT_MS\n ):\n logger.info(\n \"[%s] Connection hasn't received command in %r ms. 
Closing.\",\n self.id(),\n now - self.last_received_command,\n )\n self.send_error(\"ping timeout\")\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 364, "n_words": 66, "vocab_size": 52, "complexity": 6, "nloc": 25, "token_counts": 120, "n_ast_nodes": 199, "n_identifiers": 20, "random_cut": "def send_ping(self) -> None:\n \n now = self.clock.time_msec()\n\n if self.time_we_closed:\n if now - self.time_we_closed > PING_TIMEOUT_MS:\n logger.info(\n \"[%s] Failed to close connection gracefully, aborting\", self.id()\n )\n assert self.transport is not None\n self", "d_id": 71133, "documentation": { "docstring": "Periodically sends a ping and checks if we should close the connection\n due to the other side timing out.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 33, "language": "en" } }, { "id": 139165, "commit_id": "e8fc66af348f2afd2b578fe1c6776cc88ea82499", "repo": "ray", "path": "python/ray/workflow/workflow_context.py", "file_name": "workflow_context.py", "fun_name": "workflow_logging_context", "commit_message": "[Workflow]Make workflow logs publish to the correct driver. (#24089)\n\nAll workflow tasks are executed as remote functions that submitted from WorkflowManagmentActor. WorkflowManagmentActor is a detached long-running actor whose owner is the first driver in the cluster that runs the very first workflow execution. Therefore, for new drivers that run workflows, the loggings won't be properly published back to the driver because loggings are saved and published based on job_id and the job_id is always the first driver's job_id as the ownership goes like: first_driver -> WorkflowManagmentActor -> workflow executions using remote functions.\r\n\r\nTo solve this, during workflow execution, we pass the actual driver's job_id along with execution, and re-configure the logging files on each worker that runs the remote functions. Notice that we need to do this in multiple places as a workflow task is executed with more than one remote functions that are running in different workers.", "code": "def workflow_logging_context(job_id) -> None:\n \n node = ray.worker._global_node\n original_out_file, original_err_file = node.get_log_file_handles(\n get_worker_log_file_name(\"WORKER\")\n )\n out_file, err_file = node.get_log_file_handles(\n get_worker_log_file_name(\"WORKER\", job_id)\n )\n try:\n configure_log_file(out_file, err_file)\n yield\n finally:\n configure_log_file(original_out_file, original_err_file)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 86, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 27, "token_counts": 60, "n_ast_nodes": 104, "n_identifiers": 13, "random_cut": "def workflow_logging_context(job_id) -> None:\n \n node = ray.worker._global_node\n original_out_file, original_err_file = node.get_log_file_handles(\n get_worker_log_file_name(\"WORKER\")\n )\n out_file, err_file = node.get_log_file_handles(\n get_worker_log_file_name(\"WORKER\", job_id)\n )\n try:\n ", "d_id": 31619, "documentation": { "docstring": "Initialize the workflow logging context.\n\n Workflow executions are running as remote functions from\n WorkflowManagementActor. 
Without logging redirection, workflow\n inner execution logs will be pushed to the driver that initially\n created WorkflowManagementActor rather than the driver that\n actually submits the current workflow execution.\n We use this conext manager to re-configure the log files to send\n the logs to the correct driver, and to restore the log files once\n the execution is done.\n\n Args:\n job_id: The ID of the job that submits the workflow execution.\n ", "n_words": 83, "vocab_size": 56, "n_whitespaces": 120, "language": "en" } }, { "id": 30270, "commit_id": "cf9030f843079d3f69cd1414050f8b594c84cee1", "repo": "spotify-downloader", "path": "spotdl/console/entry_point.py", "file_name": "entry_point.py", "fun_name": "console_entry_point", "commit_message": "added option to profile code\n\nfized pylint warnings", "code": "def console_entry_point():\n \n\n if \"--profile\" in sys.argv:\n with cProfile.Profile() as profile:\n entry_point()\n\n stats = pstats.Stats(profile)\n stats.sort_stats(pstats.SortKey.TIME)\n\n # Use snakeviz to visualize the profile\n stats.dump_stats(\"spotdl.profile\")\n else:\n entry_point()\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 87, "n_words": 25, "vocab_size": 24, "complexity": 2, "nloc": 9, "token_counts": 53, "n_ast_nodes": 101, "n_identifiers": 14, "random_cut": "def console_entry_point():\n \n\n if \"--profile\" in sys.argv:\n with cProfile.Profile() as profile:\n entry_point()\n\n stats = pstats.Stats(profile)\n stats.sort_stats(pstats.SortKey.TIME)\n\n ", "d_id": 5455, "documentation": { "docstring": "\n Wrapper around `entry_point` so we can profile the code\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 147576, "commit_id": "2eaa54bd763ae0e63158ae0d939633c804394b78", "repo": "ray", "path": "rllib/agents/trainer_config.py", "file_name": "trainer_config.py", "fun_name": "callbacks", "commit_message": "[RLlib] POC: Config objects instead of dicts (PPO only). 
(#23491)", "code": "def callbacks(self, callbacks_class) -> \"TrainerConfig\":\n \n self.callbacks_class = callbacks_class\n\n return self\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 14, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 3, "random_cut": "def callbacks(self, callbacks_class) -> \"TrainerConfig\":\n \n self.callbacks_class = callbacks_c", "d_id": 34012, "documentation": { "docstring": "Sets the callbacks configuration.\n\n Args:\n callbacks_class: Callbacks class, whose methods will be run during\n various phases of training and environment sample collection.\n See the `DefaultCallbacks` class and\n `examples/custom_metrics_and_callbacks.py` for more usage information.\n\n Returns:\n This updated TrainerConfig object.\n ", "n_words": 37, "vocab_size": 35, "n_whitespaces": 125, "language": "en" } }, { "id": 87194, "commit_id": "5462ee11ad11ebb9a50323befcd286816d7898c8", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_details.py", "file_name": "test_project_details.py", "fun_name": "test_get_dynamic_sampling_after_migrating_to_new_plan_default_biases", "commit_message": "feat(ds): Support new DS behaviour in project_details endpoint (#40387)\n\nSupports new adaptive dynamic sampling behaviour alongside\r\nthe deprecated dynamic sampling behaviour and achieves that\r\nthrough feature flag differentiation\r\n\r\nThis PR achieve that through the following:\r\n- Introducing a new `DynamicSamplingBiasSerializer` which is composed of\r\nid representing the bias name and a boolean flag indicating whether that\r\nparticular flag is active or not\r\n- Modifies current existing behavior for both old sampling flag and new\r\nsampling flag. Essentially the new setup entails that to be on the old\r\ndynamic sampling, the following flags need to be enabled\r\n\"organizations:server-side-sampling\" and\r\n\"organizations:server-side-sampling-ui\", and to be on the new dynamic\r\nsampling configurations, you need the following flags to be enabled\r\n\"organizations:dynamic-sampling-basic\" and\r\n\"organizations:server-side-sampling\"\r\nP.S. 1: These flags will be replaced \r\n\"organizations:server-side-sampling-ui\" ->\r\n\"organizations:dynamic-sampling-deprecated\"\r\n\"organizations:server-side-sampling-basic\" ->\r\n\"organizations:dynamic-sampling\"\r\nHence, these feature flags need to be updated once this PR lands\r\nhttps://github.com/getsentry/sentry/pull/40388\r\nP.S. 2: If a project is on the new plan and the old plan, the new plan\r\ntakes precedence\r\n- Introduces default biases that are enabled by default and can be\r\noverwritten. The motivation to do this is to be able to add new biases\r\nthat are enabled by default, and both the GET and PUT request honor this\r\nlist\r\n- `GET` and `POST` endpoint does a dictionary update of user's stored\r\nbiases on the default biases that are hardcoded, and returns them to the\r\nUI/ relay. 
This means that the introduced project option\r\n\"sentry:dynamic_sampling_biases\" might not have all the toggles\r\nenabled/disabled through the UI but only the ones that a customer chose\r\nto modify\r\n\r\n\r\nFollowup:\r\n- This new feature flag behaviour needs to be reflected in ProjectConfig\r\ncomputations", "code": "def test_get_dynamic_sampling_after_migrating_to_new_plan_default_biases(self):\n \n\n self.project.update_option(\"sentry:dynamic_sampling\", self.dynamic_sampling_data)\n\n with Feature(\n {\n self.universal_ds_flag: True,\n self.old_ds_flag: True,\n self.new_ds_flag: True,\n }\n ):\n response = self.get_success_response(\n self.organization.slug, self.project.slug, method=\"get\"\n )\n assert response.data[\"dynamicSampling\"] is None\n assert response.data[\"dynamicSamplingBiases\"] == DEFAULT_BIASES\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 184, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 14, "token_counts": 83, "n_ast_nodes": 135, "n_identifiers": 16, "random_cut": "def test_get_dynamic_sampling_after_migrating_to_new_plan_default_biases(self):\n \n\n self.project.update_option(\"sentry:dynamic_sampling\", self.dynamic_sampling_data)\n\n with Feature(\n {\n self.universal_ds_flag: True,\n self.old_ds_flag: True,\n self.new_ds_flag: True,\n }\n ):\n response = self.get_success_response(\n self.organization.slug, self.project.slug, method=\"get\"\n ", "d_id": 18249, "documentation": { "docstring": "\n Tests the case when an organization was in EA/LA and has setup previously Dynamic Sampling rules,\n and now they have migrated to an AM2 plan, but haven't manipulated the bias toggles yet so they get the\n default biases. This also ensures that they no longer receive the deprecated dynamic sampling rules.\n ", "n_words": 51, "vocab_size": 44, "n_whitespaces": 80, "language": "en" } }, { "id": 61225, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "strtobool", "commit_message": "upd; format", "code": "def strtobool(val):\n # type: (str) -> int\n \n val = val.lower()\n if val in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return 1\n elif val in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return 0\n else:\n raise ValueError(f\"invalid truth value {val!r}\")\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 77, "n_words": 38, "vocab_size": 34, "complexity": 3, "nloc": 8, "token_counts": 59, "n_ast_nodes": 117, "n_identifiers": 4, "random_cut": "def strtobool(val):\n # type: (str) -> int\n \n ", "d_id": 12450, "documentation": { "docstring": "Convert a string representation of truth to true (1) or false (0).\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. 
Raises ValueError if\n 'val' is anything else.\n ", "n_words": 39, "vocab_size": 35, "n_whitespaces": 52, "language": "en" } }, { "id": 108482, "commit_id": "3df958c760dbde3a6c576fefa7827a136385b5c3", "repo": "matplotlib", "path": "lib/matplotlib/artist.py", "file_name": "artist.py", "fun_name": "convert_xunits", "commit_message": "Update artist.py (#23150)", "code": "def convert_xunits(self, x):\n \n ax = getattr(self, 'axes', None)\n if ax is None or ax.xaxis is None:\n return x\n return ax.xaxis.convert_units(x)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 59, "n_words": 20, "vocab_size": 17, "complexity": 3, "nloc": 5, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 7, "random_cut": "def convert_xunits(self, x):\n \n ax = getattr(self, 'axes', None)\n if ax is None or ax.xaxis is None:\n ", "d_id": 23210, "documentation": { "docstring": "\n Convert *x* using the unit type of the xaxis.\n\n If the artist is not contained in an Axes or if the xaxis does not\n have units, *x* itself is returned.\n ", "n_words": 30, "vocab_size": 24, "n_whitespaces": 59, "language": "en" } }, { "id": 212222, "commit_id": "c9751009161f092b2e403d8cccccf5252c0dce1a", "repo": "bokeh", "path": "bokeh/models/widgets/sliders.py", "file_name": "sliders.py", "fun_name": "value_as_datetime", "commit_message": "Add DatetimeRangeSlider (#12034)\n\n* Add DatetimeRangeSlider\r\n\r\n* Add tests\r\n\r\n* Add docs", "code": "def value_as_datetime(self) -> tp.Tuple[datetime, datetime] | None:\n \n if self.value is None:\n return None\n v1, v2 = self.value\n if isinstance(v1, numbers.Number):\n d1 = datetime.utcfromtimestamp(v1 / 1000)\n else:\n d1 = v1\n if isinstance(v2, numbers.Number):\n d2 = datetime.utcfromtimestamp(v2 / 1000)\n else:\n d2 = v2\n return d1, d2\n\n value = NonNullable(Tuple(Datetime, Datetime), help=)\n\n value_throttled = Readonly(NonNullable(Tuple(Datetime, Datetime)), help=)\n\n start = NonNullable(Datetime, help=)\n\n end = NonNullable(Datetime, help=)\n\n step = Int(default=3_600_000, help=)\n\n format = Override(default=\"%d %b %Y %H:%M:%S\")\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 204, "n_words": 81, "vocab_size": 49, "complexity": 4, "nloc": 16, "token_counts": 87, "n_ast_nodes": 267, "n_identifiers": 26, "random_cut": "def value_as_datetime(self) -> tp.Tuple[datetime, datetime] | None:\n \n ", "d_id": 53206, "documentation": { "docstring": " Convenience property to retrieve the value tuple as a tuple of\n datetime objects.\n \n Initial or selected range.\n \n Initial or selected value, throttled to report only on mouseup.\n \n The minimum allowable value.\n \n The maximum allowable value.\n \n The step between consecutive values, in units of milliseconds.\n Default is one hour.\n ", "n_words": 48, "vocab_size": 38, "n_whitespaces": 101, "language": "en" } }, { "id": 89862, "commit_id": "ce841204ef3b20d0f6ac812ebb06aebbc63547ac", "repo": "sentry", "path": "tests/sentry/receivers/test_onboarding.py", "file_name": "test_onboarding.py", 
"fun_name": "test_first_event_with_minified_stack_trace_received", "commit_message": "ref(onboarding): Add function to record first event per project with min stack trace -(#42208)", "code": "def test_first_event_with_minified_stack_trace_received(self, record_analytics):\n \n now = timezone.now()\n project = self.create_project(first_event=now)\n project_created.send(project=project, user=self.user, sender=type(project))\n url = \"http://localhost:3000\"\n data = load_data(\"javascript\")\n data[\"tags\"] = [(\"url\", url)]\n data[\"exception\"] = {\n \"values\": [\n {\n **data[\"exception\"][\"values\"][0],\n \"raw_stacktrace\": {\n \"frames\": [\n {\n \"function\": \"o\",\n \"filename\": \"/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js\",\n \"abs_path\": \"https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js\",\n \"lineno\": 2,\n \"colno\": 37098,\n \"pre_context\": [\n \"/*! For license information please see vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd. {snip}\"\n ],\n \"context_line\": \"{snip} .apply(this,arguments);const i=o.map((e=>c(e,t)));return e.apply(this,i)}catch(e){throw l(),(0,i.$e)((n=>{n.addEventProcessor((e=>(t.mechani {snip}\",\n \"post_context\": [\n \"//# sourceMappingURL=../sourcemaps/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.fe32 {snip}\"\n ],\n \"in_app\": False,\n },\n ],\n },\n }\n ]\n }\n\n self.store_event(\n project_id=project.id,\n data=data,\n )\n\n record_analytics.assert_called_with(\n \"first_event_with_minified_stack_trace_for_project.sent\",\n user_id=self.user.id,\n organization_id=project.organization_id,\n project_id=project.id,\n platform=data[\"platform\"],\n url=url,\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 887, "n_words": 88, "vocab_size": 70, "complexity": 1, "nloc": 45, "token_counts": 198, "n_ast_nodes": 339, "n_identifiers": 23, "random_cut": "def test_first_event_with_minified_stack_trace_received(self, record_analytics):\n \n now = timezone.now()\n project = self.create_project(first_event=now)\n project_created.send(project=project, user=self.user, sender=type(project))\n url = \"http://localhost:3000\"\n data = load_data(\"javascript\")\n data[\"tags\"] = [(\"url\", url)]\n data[\"exception\"] = {\n \"values\": [\n {\n **data[\"exception\"][\"values\"][0],\n \"raw_stacktrace\": {\n \"frames\": [\n {\n \"function\": \"o\",\n \"filename\": \"/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js\",\n \"abs_path\": \"https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js\",\n \"lineno\": 2,\n \"colno\": 37098,\n ", "d_id": 18579, "documentation": { "docstring": "\n Test that an analytics event is recorded when\n a first event with minified stack trace is received\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 39, "language": "en" } }, { "id": 157103, "commit_id": "c4d35f5515191409913827fd4faa3b69a3d7399a", "repo": "dask", "path": "dask/array/backends.py", "file_name": 
"backends.py", "fun_name": "arange", "commit_message": "Backend library dispatching for IO in Dask-Array and Dask-DataFrame (#9475)", "code": "def arange(start, /, stop=None, step=1, *, dtype=None, meta=None, **kwargs):\n \n raise NotImplementedError\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 31, "n_ast_nodes": 46, "n_identifiers": 8, "random_cut": "def arange(start, /, stop=None, step=1, *, dtype=None, meta=None, **kwargs):\n \n raise NotImplementedError\n\n", "d_id": 36854, "documentation": { "docstring": "Create an ascending or descending array\n\n Returns evenly spaced values within the half-open interval\n ``[start, stop)`` as a one-dimensional array.\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 41, "language": "en" } }, { "id": 141365, "commit_id": "8affbc7be6fdce169264b8db5b0276dbcc719f6d", "repo": "ray", "path": "python/ray/tune/checkpoint_manager.py", "file_name": "checkpoint_manager.py", "fun_name": "best_checkpoints", "commit_message": "[tune/train] Consolidate checkpoint manager 3: Ray Tune (#24430)\n\n**Update**: This PR is now part 3 of a three PR group to consolidate the checkpoints.\r\n\r\n1. Part 1 adds the common checkpoint management class #24771 \r\n2. Part 2 adds the integration for Ray Train #24772\r\n3. This PR builds on #24772 and includes all changes. It moves the Ray Tune integration to use the new common checkpoint manager class.\r\n\r\nOld PR description:\r\n\r\nThis PR consolidates the Ray Train and Tune checkpoint managers. These concepts previously did something very similar but in different modules. To simplify maintenance in the future, we've consolidated the common core.\r\n\r\n- This PR keeps full compatibility with the previous interfaces and implementations. This means that for now, Train and Tune will have separate CheckpointManagers that both extend the common core\r\n- This PR prepares Tune to move to a CheckpointStrategy object\r\n- In follow-up PRs, we can further unify interfacing with the common core, possibly removing any train- or tune-specific adjustments (e.g. 
moving to setup on init rather on runtime for Ray Train)\r\n\r\nCo-authored-by: Antoni Baum ", "code": "def best_checkpoints(self):\n \n checkpoints = sorted(self._top_persisted_checkpoints, key=lambda c: c.priority)\n return [wrapped.tracked_checkpoint for wrapped in checkpoints]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 3, "token_counts": 33, "n_ast_nodes": 53, "n_identifiers": 10, "random_cut": "def best_checkpoints(self):\n \n checkpoints = sorted(self._top_persisted_checkpoints, key=lambda c: c.priority)\n return [wrappe", "d_id": 32341, "documentation": { "docstring": "Returns best PERSISTENT checkpoints, sorted by score.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 112077, "commit_id": "5136a86d11a3602b283bad15098335fc6f005ae0", "repo": "nni", "path": "nni/runtime/config.py", "file_name": "config.py", "fun_name": "get_config_directory", "commit_message": "Typehint and copyright header (#4669)", "code": "def get_config_directory() -> Path:\n \n if os.getenv('NNI_CONFIG_DIR') is not None:\n config_dir = Path(os.getenv('NNI_CONFIG_DIR')) # type: ignore\n elif sys.prefix != sys.base_prefix or Path(sys.prefix, 'conda-meta').is_dir():\n config_dir = Path(sys.prefix, 'nni')\n elif sys.platform == 'win32':\n config_dir = Path(os.environ['APPDATA'], 'nni')\n else:\n config_dir = Path.home() / '.config/nni'\n config_dir.mkdir(parents=True, exist_ok=True)\n return config_dir\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 94, "n_words": 44, "vocab_size": 34, "complexity": 5, "nloc": 15, "token_counts": 106, "n_ast_nodes": 186, "n_identifiers": 15, "random_cut": "def get_config_directory() -> Path:\n \n if os.getenv('NNI_CONFIG_DIR') is not None:\n config_dir = Path(os.getenv('NNI_CONFIG_DIR')) # type: ignore\n elif sys.prefix != sys.base_prefix or Path(sys.prefix, 'conda-meta').is_dir():\n config_dir = Path(sys.prefix, 'nni')\n elif sys.platform == 'win32':\n config_dir = Path(os.environ['APPDATA'], 'nni')\n else:\n config_dir = Path.home() / '.config/nni'\n c", "d_id": 24568, "documentation": { "docstring": "\n Get NNI config directory.\n Create it if not exist.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 19, "language": "en" } }, { "id": 247570, "commit_id": "ef3619e61d84493d98470eb2a69131d15eb1166b", "repo": "synapse", "path": "tests/storage/test_background_update.py", "file_name": "test_background_update.py", "fun_name": "test_background_update_min_batch_set_in_config", "commit_message": "Add config settings for background update parameters (#11980)", "code": "def test_background_update_min_batch_set_in_config(self):\n \n # a very long-running individual update\n duration_ms = 50\n\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n values={\"update_name\": \"test_update\", \"progress_json\": '{\"my_key\": 1}'},\n )\n )\n\n # Run the update with the long-running update item", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 124, "n_words": 30, "vocab_size": 24, "complexity": 1, "nloc": 19, "token_counts": 103, "n_ast_nodes": 71, "n_identifiers": 8, "random_cut": "def test_background_update_min_batch_set_in_config(self):\n \n # a very long-running individual 
update\n duration_ms = 50\n\n self.get_success(\n self.store.db_pool.", "d_id": 71748, "documentation": { "docstring": "\n Test that the minimum batch size set in the config is used\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 188999, "commit_id": "471b19d2aa799cd73bded23379e864dd35bec2b6", "repo": "psutil", "path": "psutil/_pswindows.py", "file_name": "_pswindows.py", "fun_name": "swap_memory", "commit_message": "Fix typos", "code": "def swap_memory():\n \n mem = cext.virtual_mem()\n\n total_phys = mem[0]\n free_phys = mem[1]\n total_system = mem[2]\n free_system = mem[3]\n\n # Despite the name PageFile refers to total system memory here\n # thus physical memory values need to be subtracted to get swap values\n total = total_system - total_phys\n free = min(total, free_system - free_phys)\n used = total - free\n percent = usage_percent(used, total, round_=1)\n return _common.sswap(total, used, free, percent, 0, 0)\n\n\n# =====================================================================\n# --- disk\n# =====================================================================\n\n\ndisk_io_counters = cext.disk_io_counters\n\n", "url": "https://github.com/giampaolo/psutil.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 114, "n_words": 79, "vocab_size": 53, "complexity": 1, "nloc": 11, "token_counts": 85, "n_ast_nodes": 142, "n_identifiers": 18, "random_cut": "def swap_memory():\n \n mem = cext.virtual_mem()\n\n total_phys = mem[0]\n free_phys = mem[1]\n total_system = mem[2]\n free_system = mem[3]\n\n # Despite the name PageFile refers to total system", "d_id": 45962, "documentation": { "docstring": "Swap system memory as a (total, used, free, sin, sout) tuple.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 65243, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/general_ledger/general_ledger.py", "file_name": "general_ledger.py", "fun_name": "get_supplier_invoice_details", "commit_message": "style: format code with black", "code": "def get_supplier_invoice_details():\n\tinv_details = {}\n\tfor d in frappe.db.sql(\n\t\t,\n\t\tas_dict=1,\n\t):\n\t\tinv_details[d.name] = d.bill_no\n\n\treturn inv_details\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 9, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 9, "token_counts": 37, "n_ast_nodes": 59, "n_identifiers": 9, "random_cut": "def get_supplier_invoice_details():\n\tinv_details = {}\n\tfor d in frappe.db.sql(\n\t\t,\n\t\tas_dict=1,\n\t):\n\t\tinv_details[d.name] = d.bill_no\n\n\treturn inv_details\n\n", "d_id": 13832, "documentation": { "docstring": " select name, bill_no from `tabPurchase Invoice`\n\t\twhere docstatus = 1 and bill_no is not null and bill_no != '' ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 19, "language": "en" } }, { "id": 209387, "commit_id": "9420c2229bf5330c2cc580f114f63f920a68db10", "repo": "scapy", "path": "scapy/contrib/dce_rpc.py", "file_name": "dce_rpc.py", "fun_name": "dce_rpc_endianess", "commit_message": "Add SPDX License identifiers (#3655)\n\n* Add SPDX License identifiers\r\n\r\n* Relicense `ldp.py` with author consent\r\n\r\nSee https://github.com/secdev/scapy/issues/3478\r\n\r\n* Apply guedou suggestions\r\n\r\n* Relicense someim under GPL2\r\n\r\n* DCE/RPC licensing", "code": "def dce_rpc_endianess(pkt):\n 
\n if pkt.endianness == 0: # big endian\n return \">\"\n elif pkt.endianness == 1: # little endian\n return \"<\"\n else:\n return \"!\"\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 58, "n_words": 23, "vocab_size": 17, "complexity": 3, "nloc": 7, "token_counts": 28, "n_ast_nodes": 56, "n_identifiers": 3, "random_cut": "def dce_rpc_endianess(pkt):\n \n if pkt.endianness == 0: # big endian\n return \">\"\n elif pkt.endianness == 1: # little endian\n return \"<\"\n ", "d_id": 52665, "documentation": { "docstring": "Determine the right endianness sign for a given DCE/RPC packet", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 266117, "commit_id": "a5308ea28e851a4ddb65a4e7ca2297b641e5891f", "repo": "netbox", "path": "netbox/utilities/utils.py", "file_name": "utils.py", "fun_name": "deserialize_object", "commit_message": "Closes #10851: New staging mechanism (#10890)\n\n* WIP\r\n\r\n* Convert checkout() context manager to a class\r\n\r\n* Misc cleanup\r\n\r\n* Drop unique constraint from Change model\r\n\r\n* Extend staging tests\r\n\r\n* Misc cleanup\r\n\r\n* Incorporate M2M changes\r\n\r\n* Don't cancel wipe out creation records when an object is deleted\r\n\r\n* Rename Change to StagedChange\r\n\r\n* Add documentation for change staging", "code": "def deserialize_object(model, fields, pk=None):\n \n content_type = ContentType.objects.get_for_model(model)\n if 'custom_fields' in fields:\n fields['custom_field_data'] = fields.pop('custom_fields')\n data = {\n 'model': '.'.join(content_type.natural_key()),\n 'pk': pk,\n 'fields': fields,\n }\n instance = list(serializers.deserialize('python', [data]))[0]\n\n return instance\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 79, "n_words": 30, "vocab_size": 25, "complexity": 2, "nloc": 11, "token_counts": 83, "n_ast_nodes": 144, "n_identifiers": 16, "random_cut": "def deserialize_object(model, fields, pk=None):\n \n content_type = ContentType.objects.get_fo", "d_id": 78298, "documentation": { "docstring": "\n Instantiate an object from the given model and field data. 
Functions as\n the complement to serialize_object().\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 26, "language": "en" } }, { "id": 310161, "commit_id": "7d85c00b91cd989dfead3246a65eb297d27e935b", "repo": "core", "path": "tests/test_setup.py", "file_name": "test_setup.py", "fun_name": "test_component_not_installed_if_requirement_fails", "commit_message": "Make setup tests async (#64456)\n\nCo-authored-by: Franck Nijhof ", "code": "async def test_component_not_installed_if_requirement_fails(hass):\n \n hass.config.skip_pip = False\n mock_integration(hass, MockModule(\"comp\", requirements=[\"package==0.0.1\"]))\n\n with patch(\"homeassistant.util.package.install_package\", return_value=False):\n assert not await setup.async_setup_component(hass, \"comp\", {})\n\n assert \"comp\" not in hass.config.components\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 45, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 6, "token_counts": 61, "n_ast_nodes": 108, "n_identifiers": 12, "random_cut": "async def test_component_not_installed_if_requirement_fails(hass):\n \n hass.config.skip_pip = False\n mock_integration(hass, MockModule(\"comp\", requirements=[\"package==0.0.1\"]))\n\n with patch(\"homeassistant.util.package.install_package\", return_value=False):\n assert not await setup.async_", "d_id": 108848, "documentation": { "docstring": "Component setup should fail if requirement can't install.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 167694, "commit_id": "9612375ca28ade056f15d4338f1bfde5d045c9fc", "repo": "pandas", "path": "pandas/core/config_init.py", "file_name": "config_init.py", "fun_name": "use_numba_cb", "commit_message": "TYP: return values in core/*.py (#47587)\n\n* TYP: return values in core/*.py\r\n\r\n* fix test\r\n\r\n* to_html\r\n\r\n* to_html part 2\r\n\r\n* DataFrame.query\r\n\r\n* more overloads\r\n\r\n* fix query?\r\n\r\n* increase stacklevel by one\r\n\r\n* fix rename_axis\r\n\r\n* and an overload for DataFrame.eval\r\n\r\n* address comments\r\n\r\n* fix typevar", "code": "def use_numba_cb(key) -> None:\n from pandas.core.util import numba_\n\n numba_.set_use_numba(cf.get_option(key))\n\n\nwith cf.config_prefix(\"compute\"):\n cf.register_option(\n \"use_bottleneck\",\n True,\n use_bottleneck_doc,\n validator=is_bool,\n cb=use_bottleneck_cb,\n )\n cf.register_option(\n \"use_numexpr\", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb\n )\n cf.register_option(\n \"use_numba\", False, use_numba_doc, validator=is_bool, cb=use_numba_cb\n )\n#\n# options from the \"display\" namespace\n\npc_precision_doc = \n\npc_colspace_doc = \n\npc_max_rows_doc = \n\npc_min_rows_doc = \n\npc_max_cols_doc = \n\npc_max_categories_doc = \n\npc_max_info_cols_doc = \n\npc_nb_repr_h_doc = \n\npc_pprint_nest_depth = \n\npc_multi_sparse_doc = \n\nfloat_format_doc = \n\nmax_colwidth_doc = \n\ncolheader_justify_doc = \n\npc_expand_repr_doc = \n\npc_show_dimensions_doc = \n\npc_east_asian_width_doc = \n\npc_ambiguous_as_wide_doc = \n\npc_latex_repr_doc = \n\npc_table_schema_doc = \n\npc_html_border_doc = \n\npc_html_use_mathjax_doc = \n\npc_max_dir_items = \n\npc_width_doc = \n\npc_chop_threshold_doc = \n\npc_max_seq_items = \n\npc_max_info_rows_doc = \n\npc_large_repr_doc = \n\npc_memory_usage_doc = \n\npc_latex_escape = \n\npc_latex_longtable = \n\npc_latex_multicolumn = \n\npc_latex_multicolumn_format = \n\npc_latex_multirow = \n\n", 
"url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 174, "n_words": 105, "vocab_size": 64, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 372, "n_identifiers": 52, "random_cut": "def use_numba_cb(key) -> None:\n from pandas.core.util import numba_\n\n numba_.set_use_numba(cf.get_option(key))\n\n\nwith cf.config_prefix(\"compute\"):\n cf.register_option(\n \"use_bottleneck\",\n True,\n use_bottleneck_doc,\n validator=is_bool,\n cb=use_bottleneck_cb,\n )\n cf.register_option(\n \"use_numexpr\", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb\n )\n cf.register_option(\n \"use_numba\", False, use_numba_doc, validator=is_bool, cb=use_numba_cb\n )\n#\n# options from the \"display\" namespace\n\npc_precision_doc = \n\npc_colspace_doc = \n\npc_max_rows_doc = \n\npc_min_rows_doc = \n\npc_max_cols_doc = \n\npc_max_categories_doc = \n\npc_max_info_cols_doc = \n\npc_nb_repr_h_doc = \n\npc_pprint_nest_depth = \n\npc_multi_sparse_doc = \n\nfloat_format_doc = \n\nmax_colwidth_doc = \n\ncolheader_justify_doc = \n\npc_expand_repr_doc = \n\npc_show_dimensions_doc = \n\npc_east_asian_width_doc = \n\npc_ambiguous_as_wide_doc = \n\npc_latex_repr_doc = \n\npc_table_schema_doc = \n\npc_html_border_doc = \n\npc_html_use_mathjax_doc = \n\npc_max_dir_items = \n\npc_width_doc = \n\npc_chop_threshold_doc = \n\npc_max_se", "d_id": 40078, "documentation": { "docstring": "\n: int\n Floating point output precision in terms of number of places after the\n decimal, for regular formatting as well as scientific notation. Similar\n to ``precision`` in :meth:`numpy.set_printoptions`.\n\n: int\n Default space for DataFrame columns.\n\n: int\n If max_rows is exceeded, switch to truncate view. Depending on\n `large_repr`, objects are either centrally truncated or printed as\n a summary view. 'None' value means unlimited.\n\n In case python/IPython is running in a terminal and `large_repr`\n equals 'truncate' this can be set to 0 and pandas will auto-detect\n the height of the terminal and print a truncated object which fits\n the screen height. The IPython notebook, IPython qtconsole, or\n IDLE do not run in a terminal and hence it is not possible to do\n correct auto-detection.\n\n: int\n The numbers of rows to show in a truncated view (when `max_rows` is\n exceeded). Ignored when `max_rows` is set to None or 0. When set to\n None, follows the value of `max_rows`.\n\n: int\n If max_cols is exceeded, switch to truncate view. Depending on\n `large_repr`, objects are either centrally truncated or printed as\n a summary view. 'None' value means unlimited.\n\n In case python/IPython is running in a terminal and `large_repr`\n equals 'truncate' this can be set to 0 and pandas will auto-detect\n the width of the terminal and print a truncated object which fits\n the screen width. 
The IPython notebook, IPython qtconsole, or IDLE\n do not run in a terminal and hence it is not possible to do\n correct auto-detection.\n\n: int\n This sets the maximum number of categories pandas should output when\n printing out a `Categorical` or a Series of dtype \"category\".\n\n: int\n max_info_columns is used in DataFrame.info method to decide if\n per column information will be printed.\n\n: boolean\n When True, IPython notebook will use html representation for\n pandas objects (if it is available).\n\n: int\n Controls the number of nested levels to process when pretty-printing\n\n: boolean\n \"sparsify\" MultiIndex display (don't display repeated\n elements in outer levels within groups)\n\n: callable\n The callable should accept a floating point number and return\n a string with the desired format of the number. This is used\n in some places like SeriesFormatter.\n See formats.format.EngFormatter for an example.\n\n: int or None\n The maximum width in characters of a column in the repr of\n a pandas data structure. When the column overflows, a \"...\"\n placeholder is embedded in the output. A 'None' value means unlimited.\n\n: 'left'/'right'\n Controls the justification of column headers. used by DataFrameFormatter.\n\n: boolean\n Whether to print out the full DataFrame repr for wide DataFrames across\n multiple lines, `max_columns` is still respected, but the output will\n wrap-around across multiple \"pages\" if its width exceeds `display.width`.\n\n: boolean or 'truncate'\n Whether to print out dimensions at the end of DataFrame repr.\n If 'truncate' is specified, only print out the dimensions if the\n frame is truncated (e.g. not display all rows and/or columns)\n\n: boolean\n Whether to use the Unicode East Asian Width to calculate the display text\n width.\n Enabling this may affect to the performance (default: False)\n\n: boolean\n Whether to handle Unicode characters belong to Ambiguous as Wide (width=2)\n (default: False)\n\n: boolean\n Whether to produce a latex DataFrame representation for jupyter\n environments that support it.\n (default: False)\n\n: boolean\n Whether to publish a Table Schema representation for frontends\n that support it.\n (default: False)\n\n: int\n A ``border=value`` attribute is inserted in the ``
    %s
    ')\n a(self.formatmon", "d_id": 56301, "documentation": { "docstring": "\n Return a formatted year as a table of tables.\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 39093, "commit_id": "f15d8b347b601069aba950a53f879e9659bd7c91", "repo": "recommenders", "path": "recommenders/models/sasrec/ssept.py", "file_name": "ssept.py", "fun_name": "predict", "commit_message": "cleanup-1", "code": "def predict(self, inputs):\n \n training = False\n user = inputs[\"user\"]\n input_seq = inputs[\"input_seq\"]\n candidate = inputs[\"candidate\"]\n\n mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1)\n seq_embeddings, positional_embeddings = self.embedding(input_seq) # (1, s, h)\n\n u0_latent = self.user_embedding_layer(user)\n u0_latent = u0_latent * (self.user_embedding_dim ** 0.5) # (1, 1, h)\n u0_latent = tf.squeeze(u0_latent, axis=0) # (1, h)\n test_user_emb = tf.tile(u0_latent, [1 + self.num_neg_test, 1]) # (101, h)\n\n u_latent = self.user_embedding_layer(user)\n u_latent = u_latent * (self.user_embedding_dim ** 0.5) # (b, 1, h)\n u_latent = tf.tile(u_latent, [1, tf.shape(input_seq)[1], 1]) # (b, s, h)\n\n seq_embeddings = tf.reshape(\n tf.concat([seq_embeddings, u_latent], 2),\n [tf.shape(input_seq)[0], -1, self.hidden_units],\n )\n seq_embeddings += positional_embeddings # (b, s, h1 + h2)\n\n seq_embeddings *= mask\n seq_attention = seq_embeddings\n seq_attention = self.encoder(seq_attention, training, mask)\n seq_attention = self.layer_normalization(seq_attention) # (b, s, h1+h2)\n seq_emb = tf.reshape(\n seq_attention,\n [tf.shape(input_seq)[0] * self.seq_max_len, self.hidden_units],\n ) # (b*s1, h1+h2)\n\n candidate_emb = self.item_embedding_layer(candidate) # (b, s2, h2)\n candidate_emb = tf.squeeze(candidate_emb, axis=0) # (s2, h2)\n candidate_emb = tf.reshape(\n tf.concat([candidate_emb, test_user_emb], 1), [-1, self.hidden_units]\n ) # (b*s2, h1+h2)\n\n candidate_emb = tf.transpose(candidate_emb, perm=[1, 0]) # (h1+h2, b*s2)\n test_logits = tf.matmul(seq_emb, candidate_emb) # (b*s1, b*s2)\n\n test_logits = tf.reshape(\n test_logits,\n [tf.shape(input_seq)[0], self.seq_max_len, 1 + self.num_neg_test],\n ) # (1, s, 101)\n test_logits = test_logits[:, -1, :] # (1, 101)\n return test_logits\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 522, "n_words": 198, "vocab_size": 95, "complexity": 1, "nloc": 40, "token_counts": 378, "n_ast_nodes": 578, "n_identifiers": 40, "random_cut": "def predict(self, inputs):\n \n training = False\n user = inputs[\"user\"]\n input_seq = inputs[\"input_seq\"]\n candidate = inputs[\"candidate\"]\n\n mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1)\n seq_embeddings, positional_embeddings = self.embedding(input_seq) # (1, s, h)\n\n u0_latent = self.user_embedding_layer(user)\n u0_latent = u0_latent * (self.user_embedding_dim ** 0.5) # (1, 1, h)\n u0_latent = tf.squeeze(u0_latent, axis=0) # (1, h)\n test_user_emb = tf.tile(u0_latent, [1 + self.num_neg_test, 1]) # (101, h)\n\n u_latent = self.user_embedding_layer(user)\n u_latent = u_latent * (self.user_embedding_dim ** 0.5) # (b, 1, h)\n u_latent = tf.tile(u_latent, [1, tf.shape(input_seq)[1], 1]) # (b, s, h)\n\n seq_embeddings = tf.reshape(\n tf.concat([seq_embeddings, u_latent], 2),\n [tf.shape(input_seq)[0], -1, self.hidden_units],\n )\n seq_embeddings", "d_id": 7106, "documentation": { "docstring": "\n Model 
prediction for candidate (negative) items\n\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 243702, "commit_id": "2ae55ccbdad9c842929fb238ea1eb81d1f999024", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "tobitmap", "commit_message": "Improve exception traceback readability", "code": "def tobitmap(self, name=\"image\"):\n \n\n self.load()\n if self.mode != \"1\":\n msg = \"not a bitmap\"\n raise ValueError(msg)\n data = self.tobytes(\"xbm\")\n return b\"\".join(\n [\n f\"#define {name}_width {self.size[0]}\\n\".encode(\"ascii\"),\n f\"#define {name}_height {self.size[1]}\\n\".encode(\"ascii\"),\n f\"static char {name}_bits[] = {{\\n\".encode(\"ascii\"),\n data,\n b\"};\",\n ]\n )\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 197, "n_words": 36, "vocab_size": 33, "complexity": 2, "nloc": 15, "token_counts": 76, "n_ast_nodes": 173, "n_identifiers": 12, "random_cut": "def tobitmap(self, name=\"image\"):\n \n\n self.load()\n if self.mode != \"1\":\n msg = \"not a bitmap\"\n raise ValueError(msg)\n data = self.tobytes(\"xbm\")\n return b\"\".join(\n [\n f\"#define {name}_width {self.size[0]}\\n\".encode(\"ascii\"),\n f\"#define {name}_height {self.size[1]}\\n\".encode(\"ascii\"),\n f\"static char {name}_bits[] = {{\\n\".encode(\"ascii\"),\n data,\n b\"};\",\n ]\n )\n", "d_id": 70075, "documentation": { "docstring": "\n Returns the image converted to an X11 bitmap.\n\n .. note:: This method only works for mode \"1\" images.\n\n :param name: The name prefix to use for the bitmap variables.\n :returns: A string containing an X11 bitmap.\n :raises ValueError: If the mode is not \"1\"\n ", "n_words": 44, "vocab_size": 35, "n_whitespaces": 87, "language": "en" } }, { "id": 177528, "commit_id": "979d54acba7c3d372c93d44c6c149700608ce8b0", "repo": "networkx", "path": "networkx/classes/digraph.py", "file_name": "digraph.py", "fun_name": "add_edges_from", "commit_message": "doc: update documentation when providing an iterator over current graph to add/remove_edges_from. 
(#6268)\n\n* doc for add_edges_from\r\n\r\n* doc for digraph\r\n\r\n* doc for multigraph\r\n\r\n* multigraph.add_nodes_from returns keylist\r\n\r\n* update docs for graph - edges\r\n\r\n* doc update: graph.add_nodes_from\r\n\r\n* doc update: graph.remove_nodes_from\r\n\r\n* doc update: graph.add_edges_from\r\n\r\n* doc update: rewording for graph.add_edges_from\r\n\r\n* doc update: graph.add_weighted_edges_from rewording\r\n\r\n* doc update: digraph updated as graph\r\n\r\n* doc update: digraph minor sync\r\n\r\n* doc update: multigraph same as graph\r\n\r\n* Update graph.py\r\n\r\n* Update digraph.py\r\n\r\n* Update multigraph.py", "code": "def add_edges_from(self, ebunch_to_add, **attr):\n \n for e in ebunch_to_add:\n ne = len(e)\n if ne == 3:\n u, v, dd = e\n elif ne == 2:\n u, v = e\n dd = {}\n else:\n raise NetworkXError(f\"Edge tuple {e} must be a 2-tuple or 3-tuple.\")\n if u not in self._succ:\n if u is None:\n raise ValueError(\"None cannot be a node\")\n self._succ[u] = self.adjlist_inner_dict_factory()\n self._pred[u] = self.adjlist_inner_dict_factory()\n self._node[u] = self.node_attr_dict_factory()\n if v not in self._succ:\n if v is None:\n raise ValueError(\"None cannot be a node\")\n self._succ[v] = self.adjlist_inner_dict_factory()\n self._pred[v] = self.adjlist_inner_dict_factory()\n self._node[v] = self.node_attr_dict_factory()\n datadict = self._adj[u].get(v, self.edge_attr_dict_factory())\n datadict.update(attr)\n datadict.update(dd)\n self._succ[u][v] = datadict\n self._pred[v][u] = datadict\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 455, "n_words": 102, "vocab_size": 55, "complexity": 8, "nloc": 27, "token_counts": 217, "n_ast_nodes": 350, "n_identifiers": 22, "random_cut": "def add_edges_from(self, ebunch_to_add, **attr):\n \n for e in ebunch_to_add:\n ne = len(e)\n if ne == 3:\n u, v, dd = e\n elif ne == 2:\n u, v = e\n dd = {}\n else:\n raise NetworkXError(f\"Edge tuple {e} must be a 2-tuple or 3-tuple.\")\n if u not in self._succ:\n if u is None:\n raise ValueError(\"None cannot be a node\")\n self._succ[u] = self.adjlist_inner_dict_factory()\n self._pred[u] = self.adjlist_inner_dict_factory()\n self._node[u] = self.node_attr_dict_factory()\n if v not in self._succ:\n if v is None:\n raise ValueError(\"None cannot be a node\")\n self._succ[v] = self.adjlist_inner_dict_factory()\n self._pred[v] = self.a", "d_id": 42422, "documentation": { "docstring": "Add all the edges in ebunch_to_add.\n\n Parameters\n ----------\n ebunch_to_add : container of edges\n Each edge given in the container will be added to the\n graph. The edges must be given as 2-tuples (u, v) or\n 3-tuples (u, v, d) where d is a dictionary containing edge data.\n attr : keyword arguments, optional\n Edge data (or labels or objects) can be assigned using\n keyword arguments.\n\n See Also\n --------\n add_edge : add a single edge\n add_weighted_edges_from : convenient way to add weighted edges\n\n Notes\n -----\n Adding the same edge twice has no effect but any edge data\n will be updated when each duplicate edge is added.\n\n Edge attributes specified in an ebunch take precedence over\n attributes specified via keyword arguments.\n\n When adding edges from an iterator over the graph you are changing,\n a `RuntimeError` can be raised with message:\n `RuntimeError: dictionary changed size during iteration`. This\n happens when the graph's underlying dictionary is modified during\n iteration. 
To avoid this error, evaluate the iterator into a separate\n object, e.g. by using `list(iterator_of_edges)`, and pass this\n object to `G.add_edges_from`.\n\n Examples\n --------\n >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc\n >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples\n >>> e = zip(range(0, 3), range(1, 4))\n >>> G.add_edges_from(e) # Add the path graph 0-1-2-3\n\n Associate data to edges\n\n >>> G.add_edges_from([(1, 2), (2, 3)], weight=3)\n >>> G.add_edges_from([(3, 4), (1, 4)], label=\"WN2898\")\n\n Evaluate an iterator over a graph if using it to modify the same graph\n\n >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])\n >>> # Grow graph by one new node, adding edges to all existing nodes.\n >>> # wrong way - will raise RuntimeError\n >>> # G.add_edges_from(((5, n) for n in G.nodes))\n >>> # right way - note that there will be no self-edge for node 5\n >>> G.add_edges_from(list((5, n) for n in G.nodes))\n ", "n_words": 305, "vocab_size": 185, "n_whitespaces": 629, "language": "en" } }, { "id": 100558, "commit_id": "bdbbad4d310fb606b6f412aa81e9f57ccd994e97", "repo": "faceswap", "path": "lib/gpu_stats/amd.py", "file_name": "amd.py", "fun_name": "_select_device", "commit_message": "Refactor lib.gpu_stats (#1218)\n\n* inital gpu_stats refactor\r\n\r\n* Add dummy CPU Backend\r\n\r\n* Update Sphinx documentation", "code": "def _select_device(self) -> None:\n \n if os.path.exists(plaidml.settings.user_settings): # pylint:disable=no-member\n self._log(\"debug\", \"Setting PlaidML devices from user_settings\")\n else:\n self._select_largest_gpu()\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 8, "token_counts": 37, "n_ast_nodes": 68, "n_identifiers": 10, "random_cut": "def _select_device(self) -> None:\n \n if os.path.exists(plaidml.settings.user_settings): # pylint:disable=no-member\n self._log(\"debug\", \"Setting PlaidML devices from user_setting", "d_id": 20022, "documentation": { "docstring": "\n If the plaidml user configuration settings exist, then set the default GPU from the\n settings file, Otherwise set the GPU to be the one with most VRAM. 
", "n_words": 27, "vocab_size": 20, "n_whitespaces": 42, "language": "en" } }, { "id": 215789, "commit_id": "a35b29b2651bf33c5d5b45e64bc7765ffde4aff4", "repo": "salt", "path": "tests/pytests/functional/modules/file/test_readlink.py", "file_name": "test_readlink.py", "fun_name": "test_readlink_non_canonical", "commit_message": "Add some funtional tests\n\nAdd functional tests for the following:\n- file.readlink\n- file.replace\n- file.symlink\n\nRemove unit tests for file.replace as they are duplicated in the added\nfunctional test", "code": "def test_readlink_non_canonical(file, source):\n \n intermediate = source.parent / \"intermediate.lnk\"\n intermediate.symlink_to(source)\n target = source.parent / \"symlink.lnk\"\n target.symlink_to(intermediate)\n try:\n result = file.readlink(path=target)\n assert result == str(intermediate)\n finally:\n intermediate.unlink()\n target.unlink()\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 75, "n_words": 26, "vocab_size": 21, "complexity": 2, "nloc": 11, "token_counts": 65, "n_ast_nodes": 114, "n_identifiers": 12, "random_cut": "def test_readlink_non_canonical(file, source):\n \n int", "d_id": 54179, "documentation": { "docstring": "\n Test readlink where there are nested symlinks and canonicalize=False\n Should resolve to the first symlink\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 22159, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "select_proxy", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def select_proxy(url, proxies):\n \n proxies = proxies or {}\n urlparts = urlparse(url)\n if urlparts.hostname is None:\n return proxies.get(urlparts.scheme, proxies.get(\"all\"))\n\n proxy_keys = [\n urlparts.scheme + \"://\" + urlparts.hostname,\n urlparts.scheme,\n \"all://\" + urlparts.hostname,\n \"all\",\n ]\n proxy = None\n for proxy_key in proxy_keys:\n if proxy_key in proxies:\n proxy = proxies[proxy_key]\n break\n\n return proxy\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 140, "n_words": 49, "vocab_size": 35, "complexity": 5, "nloc": 17, "token_counts": 91, "n_ast_nodes": 148, "n_identifiers": 11, "random_cut": "def select_proxy(url, proxies):\n \n proxies = proxies or {}\n urlparts = urlparse(url)\n if urlparts.hostname is None:\n return proxies.get(urlparts.scheme, proxies.get(\"al", "d_id": 4229, "documentation": { "docstring": "Select a proxy for the url, if applicable.\n\n :param url: The url being for the request\n :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 38, "language": "en" } }, { "id": 220693, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/sslproto.py", "file_name": "sslproto.py", "fun_name": "eof_received", "commit_message": "add python 3.10.4 for windows", "code": "def eof_received(self):\n \n try:\n if self._loop.get_debug():\n logger.debug(\"%r received EOF\", self)\n\n self._wakeup_waiter(ConnectionResetError)\n\n if not self._in_handshake:\n keep_open = self._app_protocol.eof_received()\n if keep_open:\n logger.warning('returning true from eof_received() 
'\n 'has no effect when using ssl')\n finally:\n self._transport.close()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 194, "n_words": 31, "vocab_size": 29, "complexity": 5, "nloc": 12, "token_counts": 65, "n_ast_nodes": 118, "n_identifiers": 14, "random_cut": "def eof_received(self):\n \n try:\n ", "d_id": 56085, "documentation": { "docstring": "Called when the other end of the low-level stream\n is half-closed.\n\n If this returns a false value (including None), the transport\n will close itself. If it returns a true value, closing the\n transport is up to the protocol.\n ", "n_words": 38, "vocab_size": 29, "n_whitespaces": 74, "language": "en" } }, { "id": 162332, "commit_id": "311b6615d85d3530f2709c50e4223ff3b6b14361", "repo": "yt-dlp", "path": "yt_dlp/extractor/common.py", "file_name": "common.py", "fun_name": "url_result", "commit_message": "[extractor] Improve `url_result` and related", "code": "def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs):\n \n if ie is not None:\n kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()\n if video_id is not None:\n kwargs['id'] = video_id\n if video_title is not None:\n kwargs['title'] = video_title\n return {\n **kwargs,\n '_type': 'url_transparent' if url_transparent else 'url',\n 'url': url,\n }\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 157, "n_words": 49, "vocab_size": 33, "complexity": 6, "nloc": 12, "token_counts": 94, "n_ast_nodes": 151, "n_identifiers": 10, "random_cut": "def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs):\n \n if ie is not None:\n kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()\n if video_id is not None:\n kwargs['id'] = video_id\n if video_title is not None:\n kwargs['title'] = video_title\n return {\n **kwargs,\n '_type': 'url_transparent' if url_transparent else 'url',\n 'url': url,\n }\n", "d_id": 39190, "documentation": { "docstring": "Returns a URL that points to a page that should be processed", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 110064, "commit_id": "d9d75f2bbf340034a93bdf8cd913fa83a71ece9c", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/art3d.py", "file_name": "art3d.py", "fun_name": "_shade_colors", "commit_message": "Refactor shading", "code": "def _shade_colors(color, normals, lightsource=None):\n \n if lightsource is None:\n # chosen for backwards-compatibility\n lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712)\n\n with np.errstate(invalid=\"ignore\"):\n shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True))\n @ lightsource.direction)\n mask = ~np.isnan(shade)\n\n if mask.any():\n # convert dot product to allowed shading fractions\n in_norm = mcolors.Normalize(-1, 1)\n out_norm = mcolors.Normalize(0.3, 1).inverse\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 121, "n_words": 48, "vocab_size": 41, "complexity": 3, "nloc": 19, "token_counts": 176, "n_ast_nodes": 174, "n_identifiers": 24, "random_cut": "def _shade_colors(color, normals, lightsource=None):\n \n if lightsource is None:\n # chosen for backwards-compatibility\n lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712)\n\n with 
np.errstate(invalid=\"ignore\"):\n shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True))\n @ lightsource.direction)\n mask = ~np.isnan(shade)\n", "d_id": 23904, "documentation": { "docstring": "\n Shade *color* using normal vectors given by *normals*,\n assuming a *lightsource* (using default position if not given).\n *color* can also be an array of the same length as *normals*.\n ", "n_words": 29, "vocab_size": 28, "n_whitespaces": 42, "language": "en" } }, { "id": 260641, "commit_id": "6e5ef2e9b8c64e6788428610ae884b9bf3d298a2", "repo": "scikit-learn", "path": "sklearn/feature_selection/_rfe.py", "file_name": "_rfe.py", "fun_name": "score", "commit_message": "MAINT solve long line reported by flake8 (#24065)", "code": "def score(self, X, y, **fit_params):\n \n check_is_fitted(self)\n return self.estimator_.score(self.transform(X), y, **fit_params)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 56, "n_identifiers": 8, "random_cut": "def score(self, X, y, **fit_params):\n \n check_is_fitted(self)\n return self.estimator_.score(self.transform(X), y, **fit_params)\n", "d_id": 76391, "documentation": { "docstring": "Reduce X to the selected features and return the score of the estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n y : array of shape [n_samples]\n The target values.\n\n **fit_params : dict\n Parameters to pass to the `score` method of the underlying\n estimator.\n\n .. versionadded:: 1.0\n\n Returns\n -------\n score : float\n Score of the underlying base estimator computed with the selected\n features returned by `rfe.transform(X)` and `y`.\n ", "n_words": 72, "vocab_size": 46, "n_whitespaces": 212, "language": "en" } }, { "id": 194423, "commit_id": "b046b560ef3cebbe2573327017793bc2c348aecd", "repo": "kivy", "path": "kivy/effects/scroll.py", "file_name": "scroll.py", "fun_name": "reset", "commit_message": "ScrollEffect: Fix layout when ScrollView gets resized", "code": "def reset(self, pos):\n \n self.value = pos\n self.velocity = 0\n if self.history:\n val = self.history[-1][1]\n self.history = [(time(), val)]\n", "url": "https://github.com/kivy/kivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 68, "n_words": 18, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 48, "n_ast_nodes": 77, "n_identifiers": 8, "random_cut": "def reset(self, pos):\n \n self.value = pos\n self.velocity = 0\n if self.history:\n ", "d_id": 46959, "documentation": { "docstring": "(internal) Reset the value and the velocity to the `pos`.\n Mostly used when the bounds are checked.\n ", "n_words": 17, "vocab_size": 14, "n_whitespaces": 31, "language": "en" } }, { "id": 309482, "commit_id": "b52a8ba37a5e5e05b80beddff06b116371941d86", "repo": "core", "path": "tests/components/tradfri/test_util.py", "file_name": "test_util.py", "fun_name": "test_from_fan_speed", "commit_message": "Bump pytradfri to 8.0.1 and fix fan preset mode \"Auto\" bug (#63920)\n\n* Move util functions\r\n\r\n* Fix errors\r\n\r\n* Revert changes\r\n\r\n* Fix tests\r\n\r\n* Use self.async_set_percentage()\r\n\r\n* Fix calculation functions and associated tests\r\n\r\n* Handle case of 0\r\n\r\n* Update tests/components/tradfri/test_util.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Update 
tests/components/tradfri/test_util.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Update tests/components/tradfri/test_util.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Handle case of 0\r\n\r\n* Update homeassistant/components/tradfri/fan.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "def test_from_fan_speed(fan_speed, expected_result):\n \n assert _from_fan_speed(fan_speed) == expected_result\n\n\n@pytest.mark.parametrize(\n \"percentage, expected_result\",\n [\n (1, 2),\n (100, 50),\n (50, 26),\n ],\n)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"percentage, expected_result\",\n [\n (1, 2),\n (100, 50),\n (50, 26),\n ],\n)", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 53, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 69, "n_identifiers": 7, "random_cut": "def test_from_fan_speed(fan_speed, expected_result):\n \n assert _from_fan_speed(fan_speed) == expected_result\n\n\n@pytes", "d_id": 108182, "documentation": { "docstring": "Test that we can convert fan speed to percentage value.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 91199, "commit_id": "2058dd477767e47c9fce603766a45e1fbe29c33d", "repo": "sentry", "path": "src/sentry/utils/pytest/fixtures.py", "file_name": "fixtures.py", "fun_name": "task_runner", "commit_message": "ref(proj-config): Introduce new tasks (#35238)", "code": "def task_runner():\n \n from sentry.testutils.helpers.task_runner import TaskRunner\n\n return TaskRunner\n\n\n@pytest.fixture", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 17, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 35, "n_identifiers": 7, "random_cut": "def task_runner():\n \n from sentry.testutils.helpers.task_runner import TaskRunner\n\n return Task", "d_id": 18735, "documentation": { "docstring": "Context manager that ensures Celery tasks run directly inline where invoked.\n\n While this context manager is active any Celery tasks created will run immediately at\n the callsite rather than being sent to RabbitMQ and handled by a worker.\n ", "n_words": 38, "vocab_size": 34, "n_whitespaces": 47, "language": "en" } }, { "id": 206586, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/crypto.py", "file_name": "crypto.py", "fun_name": "get_random_string", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS):\n \n return \"\".join(secrets.choice(allowed_chars) for i in range(length))\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 2, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 9, "random_cut": "def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS):\n \n return \"\".join(secrets.choice(allo", "d_id": 51579, "documentation": { "docstring": "\n Return a securely generated random string.\n\n The bit length of the returned value can be calculated with the formula:\n log_2(len(allowed_chars)^length)\n\n For example, with default `allowed_chars` (26+26+10), this gives:\n * length: 12, bit length =~ 71 bits\n * 
length: 22, bit length =~ 131 bits\n ", "n_words": 44, "vocab_size": 34, "n_whitespaces": 74, "language": "en" } }, { "id": 251953, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/test_tutils.py", "file_name": "test_tutils.py", "fun_name": "test_command_reply", "commit_message": "make it black!", "code": "def test_command_reply(tplaybook):\n \n tplaybook >> TEvent()\n tplaybook << TCommand()\n tplaybook >> tutils.reply()\n assert tplaybook\n assert tplaybook.actual[1] == tplaybook.actual[2].command\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 17, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 44, "n_ast_nodes": 69, "n_identifiers": 8, "random_cut": "def test_command_reply(tplaybook):\n \n tplaybook >> TEvent()\n tplaybook << TCommand()\n tplaybook >> tutils.reply()\n assert tplaybook\n assert tplaybook.actual[1] == tplaybook.actual[2].command\n\n", "d_id": 73901, "documentation": { "docstring": "CommandReplies can use relative offsets to point to the matching command.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 269762, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/benchmarks/distribution_util.py", "file_name": "distribution_util.py", "fun_name": "_mirrored_cross_device_ops", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _mirrored_cross_device_ops(all_reduce_alg, num_packs):\n \n if all_reduce_alg is None:\n return None\n mirrored_all_reduce_options = {\n \"nccl\": tf.distribute.NcclAllReduce,\n \"hierarchical_copy\": tf.distribute.HierarchicalCopyAllReduce,\n }\n if all_reduce_alg not in mirrored_all_reduce_options:\n raise ValueError(\n \"When used with `mirrored`, valid values for all_reduce_alg are \"\n \"[`nccl`, `hierarchical_copy`]. 
Supplied value: {}\".format(\n all_reduce_alg\n )\n )\n cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg]\n return cross_device_ops_class(num_packs=num_packs)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 152, "n_words": 47, "vocab_size": 40, "complexity": 3, "nloc": 16, "token_counts": 65, "n_ast_nodes": 110, "n_identifiers": 11, "random_cut": "def _mirrored_cross_device_ops(all_reduce_alg, num_packs):\n \n if all_reduce_alg is None:\n return None\n mirrored_all_reduce_options = {\n \"nccl\": tf.distribute.NcclAllReduce,\n \"hierarchical_copy\": tf.distribute.HierarchicalCopyAllReduce,\n }\n if al", "d_id": 80259, "documentation": { "docstring": "Return a CrossDeviceOps based on all_reduce_alg and num_packs.\n\n Args:\n all_reduce_alg: a string specifying which cross device op to pick, or None.\n num_packs: an integer specifying number of packs for the cross device op.\n\n Returns:\n tf.distribute.CrossDeviceOps object or None.\n\n Raises:\n ValueError: if `all_reduce_alg` not in [None, \"nccl\", \"hierarchical_copy\"].\n ", "n_words": 47, "vocab_size": 41, "n_whitespaces": 79, "language": "en" } }, { "id": 43461, "commit_id": "09f38ad3f6872bae5059a1de226362eb358c4a7a", "repo": "airflow", "path": "tests/providers/microsoft/azure/hooks/test_asb.py", "file_name": "test_asb.py", "fun_name": "test_delete_queue", "commit_message": "Implement Azure Service Bus Queue Operators (#24038)\n\nImplemented Azure Service Bus Queue based Operator's to create queue, send message to the queue and receive message(list of message or batch message) and delete queue in azure service \r\n- Added `AzureServiceBusCreateQueueOperator`\r\n- Added `AzureServiceBusSendMessageOperator`\r\n- Added `AzureServiceBusReceiveMessageOperator`\r\n- Added `AzureServiceBusDeleteQueueOperator`\r\n- Added Example DAG\r\n- Added Documentation\r\n- Added hooks and connection type in - provider yaml file\r\n- Added unit Test case, doc strings", "code": "def test_delete_queue(self, mock_sb_admin_client):\n \n hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)\n hook.delete_queue(self.queue_name)\n expected_calls = [mock.call().__enter__().delete_queue(self.queue_name)]\n mock_sb_admin_client.assert_has_calls(expected_calls)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 52, "n_ast_nodes": 87, "n_identifiers": 14, "random_cut": "def test_delete_queue(self, mock_sb_admin_client):\n \n hook = AdminClientHook(azure_service_bus_conn_id=sel", "d_id": 7966, "documentation": { "docstring": "\n Test Delete queue functionality by passing queue name, assert the function with values,\n mock the azure service bus function `delete_queue`\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 43, "language": "en" } }, { "id": 223772, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/message.py", "file_name": "message.py", "fun_name": "get_content_disposition", "commit_message": "add python 3.10.4 for windows", "code": "def get_content_disposition(self):\n \n value = self.get('content-disposition')\n if value is None:\n return None\n c_d = _splitparam(value)[0].lower()\n return c_d\n\n # I.e. 
def walk(self): ...\n from email.iterators import walk\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 25, "vocab_size": 20, "complexity": 2, "nloc": 6, "token_counts": 36, "n_ast_nodes": 73, "n_identifiers": 10, "random_cut": "def get_content_disposition(self):\n \n value = self.get('content-disposition')\n if value is None:\n return None\n c_d = _splitparam(value)[0].lower()\n retu", "d_id": 57056, "documentation": { "docstring": "Return the message's content-disposition if it exists, or None.\n\n The return values can be either 'inline', 'attachment' or None\n according to the rfc2183.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 44, "language": "en" } }, { "id": 198878, "commit_id": "68bd82de645a61f4bbc0b6246e70959373c9cba2", "repo": "sympy", "path": "sympy/printing/aesaracode.py", "file_name": "aesaracode.py", "fun_name": "_get_or_create", "commit_message": "fix(printing): change Aesara argument broadcastable to shape", "code": "def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):\n \n\n # Defaults\n if name is None:\n name = s.name\n if dtype is None:\n dtype = 'floatX'\n if broadcastable is None:\n broadcastable = ()\n\n key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)\n\n if key in self.cache:\n return self.cache[key]\n\n value = aet.tensor(name=name, dtype=dtype, shape=broadcastable)\n self.cache[key] = value\n return value\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 165, "n_words": 51, "vocab_size": 30, "complexity": 5, "nloc": 13, "token_counts": 107, "n_ast_nodes": 164, "n_identifiers": 13, "random_cut": "def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):\n \n\n # Defaults\n if name is None:\n name = s.name\n if dtype is None:\n dtype = 'floatX'\n if broadcastable is None:\n broadcastable = ()\n\n key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)\n\n if key in self.cache:\n return self.cache[key]\n\n value = aet.tensor(name=name, dtype=dtype, shape=broadcastable)\n self.cache[key] = value\n ", "d_id": 49056, "documentation": { "docstring": "\n Get the Aesara variable for a SymPy symbol from the cache, or create it\n if it does not exist.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 41, "language": "en" } }, { "id": 205769, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/query.py", "file_name": "query.py", "fun_name": "defer", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def defer(self, *fields):\n \n self._not_support_combined_queries(\"defer\")\n if self._fields is not None:\n raise TypeError(\"Cannot call defer() after .values() or .values_list()\")\n clone = self._chain()\n if fields == (None,):\n clone.query.clear_deferred_loading()\n else:\n clone.query.add_deferred_loading(fields)\n return clone\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 111, "n_words": 29, "vocab_size": 27, "complexity": 3, "nloc": 10, "token_counts": 62, "n_ast_nodes": 107, "n_identifiers": 11, "random_cut": "def defer(self, *fields):\n \n self._not_support_combined_queries(\"defer\")\n if self._fields is not None:\n raise TypeError(\"Cannot call defer() after .values() or .values_list()\")\n clone = 
self._chain()\n if fields == (None,):\n ", "d_id": 51203, "documentation": { "docstring": "\n Defer the loading of data for certain fields until they are accessed.\n Add the set of deferred fields to any existing set of deferred fields.\n The only exception to this is if None is passed in as the only\n parameter, in which case removal all deferrals.\n ", "n_words": 46, "vocab_size": 35, "n_whitespaces": 82, "language": "en" } }, { "id": 218848, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/pytree.py", "file_name": "pytree.py", "fun_name": "match_seq", "commit_message": "add python 3.10.4 for windows", "code": "def match_seq(self, nodes, results=None):\n \n if len(nodes) != 1:\n return False\n return self.match(nodes[0], results)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 45, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 53, "n_identifiers": 6, "random_cut": "def match_seq(self, nodes, results=None):\n \n if len(nodes) != 1:\n ", "d_id": 55500, "documentation": { "docstring": "\n Does this pattern exactly match a sequence of nodes?\n\n Default implementation for non-wildcard patterns.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 87837, "commit_id": "b3ce25d7c3ce85a9b7195f97c6d3d76c764e1808", "repo": "sentry", "path": "src/sentry/auth/access.py", "file_name": "access.py", "fun_name": "team_ids_with_membership", "commit_message": "ref(access): Remove models from Access fields (#40940)\n\nAnticipating changes for Hybrid Cloud silo boundaries, change the public\r\ninterface of the `Access` class to not expose any ORM models as\r\ndataclass fields. As a first step, replace all such objects with their\r\nraw IDs. (Credit to @corps for the underlying idea. Future steps:\r\nreplace models as method parameters; replace raw IDs with API object\r\nrepresentations.)", "code": "def team_ids_with_membership(self) -> FrozenSet[int]:\n \n return frozenset(team.id for team in self._team_memberships.keys())\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 11, "token_counts": 28, "n_ast_nodes": 46, "n_identifiers": 9, "random_cut": "def team_ids_with_membership(self) -> FrozenSet[int]:\n \n return frozenset(team.id for team in self._", "d_id": 18336, "documentation": { "docstring": "Return the IDs of teams in which the user has actual membership.\n\n This represents the set of all teams for which `has_team_membership` returns\n true. Use that method where possible and use this property only when you need\n to iterate or query for all such teams.\n\n Compare to accessible_team_ids, which is equal to this property in the\n typical case but represents a superset of IDs in case of superuser access.\n ", "n_words": 69, "vocab_size": 49, "n_whitespaces": 111, "language": "en" } }, { "id": 135117, "commit_id": "432f023642731bf53aac9b6c778f9dd7b1d82a57", "repo": "ray", "path": "rllib/models/tests/test_distributions.py", "file_name": "test_distributions.py", "fun_name": "test_gumbel_softmax", "commit_message": "[RLlib] Deprecate `AlgorithmConfig.framework(\"tfe\")`: Use `tf2` instead. 
(#29755)", "code": "def test_gumbel_softmax(self):\n \n for fw, sess in framework_iterator(frameworks=(\"tf2\", \"tf\"), session=True):\n batch_size = 1000\n num_categories = 5\n input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories))\n input_space.seed(42)\n\n # Batch of size=n and deterministic.\n inputs = input_space.sample()\n gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)\n\n expected = softmax(inputs)\n # Sample n times, expect always mean value (deterministic draw).\n out = gumbel_softmax.deterministic_sample()\n check(out, expected)\n\n # Batch of size=n and non-deterministic -> expect roughly that\n # the max-likelihood (argmax) ints are output (most of the time).\n inputs = input_space.sample()\n gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)\n expected_mean = np.mean(np.argmax(inputs, -1)).astype(np.float32)\n outs = gumbel_softmax.sample()\n if sess:\n outs = sess.run(outs)\n check(np.mean(np.argmax(outs, -1)), expected_mean, rtol=0.08)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 337, "n_words": 99, "vocab_size": 71, "complexity": 3, "nloc": 18, "token_counts": 188, "n_ast_nodes": 286, "n_identifiers": 32, "random_cut": "def test_gumbel_softmax(self):\n \n for fw, sess in framework_iterator(frameworks=(\"tf2\", \"tf\"), session=True):\n batch_size = 1000\n num_categories = 5\n input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories))\n input_space.seed(42)\n\n # Batch of size=n and deterministic.\n inputs = input_space.sample()\n gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)\n\n expected = softmax(inputs)\n # Sample n times, expect always mean value (deterministic draw).\n out = gumbel_softmax.deterministic_sample()\n check(out, expected)\n\n ", "d_id": 30547, "documentation": { "docstring": "Tests the GumbelSoftmax ActionDistribution (tf + eager only).", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 9140, "commit_id": "995b44897fe6158bb70ad03a3e79f517f65f9034", "repo": "insightface", "path": "parsing/dml_csr/utils/miou.py", "file_name": "miou.py", "fun_name": "get_confusion_matrix", "commit_message": "Create miou.py", "code": "def get_confusion_matrix(gt_label, pred_label, num_classes):\n \n index = (gt_label * num_classes + pred_label).astype('int32')\n label_count = np.bincount(index)\n confusion_matrix = np.zeros((num_classes, num_classes))\n\n for i_label in range(num_classes):\n for i_pred_label in range(num_classes):\n cur_index = i_label * num_classes + i_pred_label\n if cur_index < len(label_count):\n confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\n\n return confusion_matrix\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 105, "n_words": 43, "vocab_size": 29, "complexity": 4, "nloc": 10, "token_counts": 88, "n_ast_nodes": 138, "n_identifiers": 16, "random_cut": "def get_confusion_matrix(gt_label, pred_label, num_classes):\n \n index = (gt_label * num_classes + pred_label).astype('int32')\n label_count = np.bincount(index)\n confusion_matrix = np.zeros((num_classes, num_classes))\n\n for i_label in range(num_classes):\n for i_pred_label in range(num_classes):\n cur_index = i_label * num_classes + i_pred_label\n if cur_index < len(label_count):\n confusion_matrix[i_label, i_pred_label] = ", "d_id": 1562, "documentation": { "docstring": "\n Calcute the 
confusion matrix by given label and pred\n :param gt_label: the ground truth label\n :param pred_label: the pred label\n :param num_classes: the nunber of class\n :return: the confusion matrix\n ", "n_words": 30, "vocab_size": 19, "n_whitespaces": 49, "language": "en" } }, { "id": 161059, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg2mel/utils/nets_utils.py", "file_name": "nets_utils.py", "fun_name": "make_pad_mask", "commit_message": "Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update __init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def make_pad_mask(lengths, xs=None, length_dim=-1):\n \n if length_dim == 0:\n raise ValueError('length_dim cannot be 0: {}'.format(length_dim))\n\n if not isinstance(lengths, list):\n lengths = lengths.tolist()\n bs = int(len(lengths))\n if xs is None:\n maxlen = int(max(lengths))\n else:\n maxlen = xs.size(length_dim)\n\n seq_range = torch.arange(0, maxlen, dtype=torch.int64)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)\n seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n\n if xs is not None:\n assert xs.size(0) == bs, (xs.size(0), bs)\n\n if length_dim < 0:\n length_dim = xs.dim() + length_dim\n # ind = (:, None, ..., None, :, , None, ..., None)\n ind = tuple(slice(None) if i in (0, length_dim) else None\n for i in range(xs.dim()))\n mask = mask[ind].expand_as(xs).to(xs.device)\n return mask\n\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 232, "n_words": 103, "vocab_size": 66, "complexity": 8, "nloc": 22, "token_counts": 219, "n_ast_nodes": 347, "n_identifiers": 35, "random_cut": "def make_pad_mask(lengths, xs=None, length_dim=-1):\n \n if length_dim == 0:\n raise ValueError('length_dim cannot be 0: {}'.format(length_dim))\n\n if not isinstance(lengths, list):\n lengths = lengths.tolist()\n bs = int(len(lengths))\n if xs is None:\n maxlen = int(max(lengths))\n else:\n maxlen = xs.size(length_dim)\n\n seq_range = torch.arange(0, maxlen, dtype=torch.int64)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)\n seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n\n if xs is not None:\n assert xs.size(0) == bs, (xs.size(0), bs)\n\n if length_dim < 0:\n length_dim = xs.dim() + length_dim\n # ind = (:, None, ..., None, :, , None, ..., None)\n ind = ", "d_id": 38875, "documentation": { "docstring": "Make mask tensor containing indices of padded part.\n\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor. If set, masks will be the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor. 
See the example.\n\n Returns:\n Tensor: Mask tensor containing indices of padded part.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n Examples:\n With only lengths.\n\n >>> lengths = [5, 3, 2]\n >>> make_non_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n\n With the reference tensor.\n\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 1],\n [0, 0, 0, 1]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n With the reference tensor and dimension indicator.\n\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_pad_mask(lengths, xs, 1)\n tensor([[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)\n >>> make_pad_mask(lengths, xs, 2)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n ", "n_words": 417, "vocab_size": 87, "n_whitespaces": 1334, "language": "en" } }, { "id": 84491, "commit_id": "aa796af0a8b665ee730a059bc2594ae21cb1e828", "repo": "zulip", "path": "zerver/tests/test_upload.py", "file_name": "test_upload.py", "fun_name": "test_guess_content_type_from_filename", "commit_message": "upload: Remove `mimetype` url parameter in `get_file_info`.\n\nThis `mimetype` parameter was introduced in c4fa29a and its last\nusage removed in 5bab2a3. 
This parameter was undocumented in the\nOpenAPI endpoint documentation for `/user_uploads`, therefore\nthere shouldn't be client implementations that rely on it's\npresence.\n\nRemoves the `request.GET` call for the `mimetype` parameter and\nreplaces it by getting the `content_type` value from the file,\nwhich is an instance of Django's `UploadedFile` class and stores\nthat file metadata as a property.\n\nIf that returns `None` or an empty string, then we try to guess\nthe `content_type` from the filename, which is the same as the\nprevious behaviour when `mimetype` was `None` (which we assume\nhas been true since it's usage was removed; see above).\n\nIf unable to guess the `content_type` from the filename, we now\nfallback to \"application/octet-stream\", instead of an empty string\nor `None` value.\n\nAlso, removes the specific test written for having `mimetype` as\na url parameter in the request, and replaces it with a test that\ncovers when we try to guess `content_type` from the filename.", "code": "def test_guess_content_type_from_filename(self) -> None:\n \n data, content_type = encode_multipart_formdata({\"file\": (\"somefile\", b\"zulip!\", None)})\n result = self.api_post(\n self.example_user(\"hamlet\"), \"/api/v1/user_uploads\", data, content_type=content_type\n )\n self.assert_json_success(result)\n\n data, content_type = encode_multipart_formdata({\"file\": (\"somefile.txt\", b\"zulip!\", None)})\n result = self.api_post(\n self.example_user(\"hamlet\"), \"/api/v1/user_uploads\", data, content_type=content_type\n )\n self.assert_json_success(result)\n\n # This test will go through the code path for uploading files onto LOCAL storage\n # when Zulip is in DEVELOPMENT mode.", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 149, "n_words": 58, "vocab_size": 40, "complexity": 1, "nloc": 15, "token_counts": 100, "n_ast_nodes": 170, "n_identifiers": 9, "random_cut": "def test_guess_content_type_from_filename(self) -> None:\n \n data, content_type = encode_multipart_formdata({\"file\": (\"somefile\"", "d_id": 17843, "documentation": { "docstring": "\n Test coverage for files without content-type in the metadata;\n in which case we try to guess the content-type from the filename.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 43, "language": "en" } }, { "id": 221801, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ctypes/_aix.py", "file_name": "_aix.py", "fun_name": "get_member", "commit_message": "add python 3.10.4 for windows", "code": "def get_member(name, members):\n \n # look first for a generic match - prepend lib and append .so\n expr = rf'lib{name}\\.so'\n member = get_one_match(expr, members)\n if member:\n return member\n elif AIX_ABI == 64:\n expr = rf'lib{name}64\\.so'\n member = get_one_match(expr, members)\n if member:\n return member\n # since an exact match with .so as suffix was not found\n # look for a versioned name\n # If a versioned name is not found, look for AIX legacy member name\n member = get_version(name, members)\n if member:\n return member\n else:\n return get_legacy(members)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 166, "n_words": 85, "vocab_size": 49, "complexity": 5, "nloc": 15, "token_counts": 67, "n_ast_nodes": 121, "n_identifiers": 9, "random_cut": "def get_member(name, members):\n \n # look first 
for a generic match - prepend lib and append .so\n expr = rf'lib{name}\\.so'\n member = get_one_match(expr, members)\n if member:\n return member\n elif AIX_ABI == 64:\n expr = rf'lib{name", "d_id": 56518, "documentation": { "docstring": "\n Return an archive member matching the request in name.\n Name is the library name without any prefix like lib, suffix like .so,\n or version number.\n Given a list of members find and return the most appropriate result\n Priority is given to generic libXXX.so, then a versioned libXXX.so.a.b.c\n and finally, legacy AIX naming scheme.\n ", "n_words": 53, "vocab_size": 47, "n_whitespaces": 75, "language": "en" } }, { "id": 44137, "commit_id": "b96e4992b5df24898e169c01fe23e4cb7d32dd94", "repo": "airflow", "path": "tests/core/test_impersonation_tests.py", "file_name": "test_impersonation_tests.py", "fun_name": "check_original_docker_image", "commit_message": "Fixed tests failing on Python 3.8 (#21022)\n\nThe change #21003 broke TestDeprecation class tests by removing\r\nTestCase and leaving self.skipTest.\r\n\r\nThis change replaces self.skipTest with pytest.skipTest everywhere.", "code": "def check_original_docker_image():\n if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAGE') is None:\n raise pytest.skip(\n \n )\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 40, "n_words": 12, "vocab_size": 12, "complexity": 3, "nloc": 9, "token_counts": 33, "n_ast_nodes": 60, "n_identifiers": 8, "random_cut": "def check_original_docker_image():\n if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAG", "d_id": 8176, "documentation": { "docstring": "Adding/removing a user as part of a test is very bad for host os\n(especially if the user already existed to begin with on the OS), therefore we check if we run inside a\nthe official docker container and only allow to run the test there. This is done by checking /.dockerenv\nfile (always present inside container) and checking for PYTHON_BASE_IMAGE variable.\n", "n_words": 62, "vocab_size": 46, "n_whitespaces": 58, "language": "en" } }, { "id": 123487, "commit_id": "df4293473d2fb6e887e31522cab5aff95e201581", "repo": "sqlmap", "path": "lib/core/option.py", "file_name": "option.py", "fun_name": "_useWizardInterface", "commit_message": "Fixing DeprecationWarning (logger.warn)", "code": "def _useWizardInterface():\n \n\n if not conf.wizard:\n return\n\n logger.info(\"starting wizard interface\")\n\n while not conf.url:\n message = \"Please enter full target URL (-u): \"\n conf.url = readInput(message, default=None)\n\n message = \"%s data (--data) [Enter for None]: \" % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)\n conf.data = readInput(message, default=None)\n\n if not (any('=' in _ for _ in (conf.url, conf.data)) or '*' in conf.url):\n warnMsg = \"no GET and/or %s parameter(s) found for testing \" % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)\n warnMsg += \"(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). \"\n if not conf.crawlDepth and not conf.forms:\n warnMsg += \"Will search for forms\"\n conf.forms = True\n logger.warning(warnMsg)\n\n choice = None\n\n while choice is None or choice not in (\"\", \"1\", \"2\", \"3\"):\n message = \"Injection difficulty (--level/--risk). 
Please choose:\\n\"\n message += \"[1] Normal (default)\\n[2] Medium\\n[3] Hard\"\n choice = readInput(message, default='1')\n\n if choice == '2':\n conf.risk = 2\n conf.level = 3\n elif choice == '3':\n conf.risk = 3\n conf.level = 5\n else:\n conf.risk = 1\n conf.level = 1\n\n if not conf.getAll:\n choice = None\n\n while choice is None or choice not in (\"\", \"1\", \"2\", \"3\"):\n message = \"Enumeration (--banner/--current-user/etc). Please choose:\\n\"\n message += \"[1] Basic (default)\\n[2] Intermediate\\n[3] All\"\n choice = readInput(message, default='1')\n\n if choice == '2':\n options = WIZARD.INTERMEDIATE\n elif choice == '3':\n options = WIZARD.ALL\n else:\n options = WIZARD.BASIC\n\n for _ in options:\n conf.__setitem__(_, True)\n\n logger.debug(\"muting sqlmap.. it will do the magic for you\")\n conf.verbose = 0\n\n conf.batch = True\n conf.threads = 4\n\n dataToStdout(\"\\nsqlmap is running, please wait..\\n\\n\")\n\n kb.wizardMode = True\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 631, "n_words": 253, "vocab_size": 128, "complexity": 22, "nloc": 50, "token_counts": 350, "n_ast_nodes": 611, "n_identifiers": 37, "random_cut": "def _useWizardInterface():\n \n\n if not conf.wizard:\n return\n\n logger.info(\"starting wizard interface\")\n\n while not conf.url:\n message = \"Please enter full target URL (-u): \"\n conf.url = readInput(message, default=None)\n\n message = \"%s data (--data) [Enter for None]: \" % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)\n conf.data = readInput(message, default=None)\n\n if not (any('=' in _ for _ in (conf.url, conf.data)) or '*' in conf.url):\n warnMsg = \"no GET and/or %s parameter(s) found for testing \" % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)\n warnMsg += \"(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). \"\n if not conf.crawlDepth and not conf.forms:\n warnMsg += \"Will search for forms\"\n conf.forms = True\n logger.warning(warnMsg)\n\n choice = None\n\n while choice is None or choice not in (\"\", \"1\", \"2\", \"3\"):\n message = \"Injection difficulty (--level/--risk). Please choose:\\n\"\n message += \"[1] Normal (default)\\n[2] Medium\\n[3] Hard\"\n choice = readInput(message, default='1')\n\n if choice == '2':\n conf.risk = 2\n conf.level = 3\n elif choice == '3':\n conf.risk = 3\n conf.level = 5\n else:\n conf.risk = 1\n conf.level = 1\n\n if not conf.getAll:\n choice = None\n\n while choice is None or choice not in (\"\", \"1\", \"2\", \"3\"):\n message = \"Enumeration (--banner/--current-user/etc). 
Please choose:\\n\"\n message += \"[1] Basic (default)\\n[2] Intermediate\\n[3] All\"\n choice = readInput(message, default='1')\n\n if choice == '2':\n options = WIZARD.INTERMEDIATE\n elif choice == '3':\n options = WIZARD.ALL\n else:\n options = WIZARD.BASIC\n\n for _ in options:\n conf.__setitem__(_, Tru", "d_id": 27392, "documentation": { "docstring": "\n Presents simple wizard interface for beginner users\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 53209, "commit_id": "88a2e91018e5efe2970ba86238d69d1031350593", "repo": "prefect", "path": "src/prefect/orion/alembic/env.py", "file_name": "env.py", "fun_name": "run_migrations_online", "commit_message": "initial commit", "code": "async def run_migrations_online() -> None:\n \n\n engine = await db_interface.engine()\n\n versions_dir = context.get_x_argument(as_dictionary=True).get(\"versions_dir\", None)\n\n if versions_dir is None:\n # if version dir is not explicitly provided determine versions location from dialect\n dialect = get_dialect(engine=engine)\n if dialect.name == \"postgresql\":\n versions_dir = Path(context.script.dir / \"postgresql\")\n elif dialect.name == \"sqlite\":\n versions_dir = Path(context.script.dir / \"sqlite\")\n else:\n raise ValueError(f\"No versions dir exists for dialect: {dialect.name}\")\n\n context.script.version_locations = [versions_dir]\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 146, "n_words": 63, "vocab_size": 44, "complexity": 4, "nloc": 20, "token_counts": 117, "n_ast_nodes": 183, "n_identifiers": 16, "random_cut": "async def run_migrations_online() -> None:\n \n\n engine = await db_interface.engine()\n\n versi", "d_id": 10737, "documentation": { "docstring": "\n Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n ", "n_words": 21, "vocab_size": 21, "n_whitespaces": 34, "language": "en" } }, { "id": 64915, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/payment_order/payment_order.py", "file_name": "payment_order.py", "fun_name": "get_mop_query", "commit_message": "style: format code with black", "code": "def get_mop_query(doctype, txt, searchfield, start, page_len, filters):\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"parent\": filters.get(\"parent\"), \"start\": start, \"page_len\": page_len, \"txt\": \"%%%s%%\" % txt},\n\t)\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 16, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 50, "n_ast_nodes": 99, "n_identifiers": 13, "random_cut": "def get_mop_query(doctype, txt, searchfield, start, page_len, filters):\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"parent\": filters.get(\"parent\"), \"start\": start, \"page_len\": page_len, \"txt\": \"%%%s%%\" % txt},\n\t)\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "d_id": 13751, "documentation": { "docstring": " select mode_of_payment from `tabPayment Order Reference`\n\t\twhere parent = %(parent)s and mode_of_payment like %(txt)s\n\t\tlimit %(start)s, %(page_len)s", "n_words": 17, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 108535, 
"commit_id": "032316bc6c7798fca6c82de24167c975f237687f", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_pyplot.py", "file_name": "test_pyplot.py", "fun_name": "test_doc_pyplot_summary", "commit_message": "Cleanup documentation generation for pyplot\n\n- remove the awkward `pyplot.plotting()` function, which only served\n as a namespace to take up the docs for pyplot and output them via\n `.. autofunction`\n- Instead generate the same information using `.. autosummary::`. We\n have to list the desired methods here explicitly. I've added a test\n that these are the same as previously auto-generated in the\n `plotting()` docstring. If we change anything in pyplot, we'll be\n notified through the test failure that we have to adapt the\n autosummary list.\n- Removed the docstring generation logic\n `_setup_pyplot_info_docstrings()`. Apart from generating the\n `plotting()` docstring, this added docstrings to the pyplot colormap\n setters. Instead, we now add these docstrings directly via\n boilerplate.py\n\nCo-authored-by: Elliott Sales de Andrade ", "code": "def test_doc_pyplot_summary():\n \n pyplot_docs = Path(__file__).parent / '../../../doc/api/pyplot_summary.rst'\n if not pyplot_docs.exists():\n pytest.skip(\"Documentation sources not available\")\n\n lines = pyplot_docs.read_text()\n m = re.search(r':nosignatures:\\n\\n(.*?)\\n\\n', lines, re.DOTALL)\n doc_functions = set(line.strip() for line in m.group(1).split('\\n'))\n plot_commands = set(plt.get_plot_commands())\n missing = plot_commands.difference(doc_functions)\n if missing:\n raise AssertionError(\n f\"The following pyplot functions are not listed in the \"\n f\"documentation. Please add them to doc/api/pyplot_summary.rst: \"\n f\"{missing!r}\")\n extra = doc_functions.difference(plot_commands)\n if extra:\n raise AssertionError(\n f\"The following functions are listed in the pyplot documentation, \"\n f\"but they do not exist in pyplot. 
\"\n f\"Please remove them from doc/api/pyplot_summary.rst: {extra!r}\")\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 208, "n_words": 88, "vocab_size": 60, "complexity": 5, "nloc": 20, "token_counts": 127, "n_ast_nodes": 228, "n_identifiers": 27, "random_cut": "def test_doc_pyplot_summary():\n \n pyplot_docs = Path(__file__).parent / '../../../doc/api/pyplot_summary.rst'\n if not pyplot_docs.exists():\n pytest.skip(\"Documentation sources not available\")\n\n lines = pyplot_docs.read_text()\n m = re.search(r':nosignatures:\\n\\n(.*?)\\n\\n', lines, re.DOTALL)\n doc_functions = set(line.strip() for line in m.group(1).split('\\n'))\n plot_commands = set(plt.get_pl", "d_id": 23246, "documentation": { "docstring": "Test that pyplot_summary lists all the plot functions.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 65618, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/accounts_controller.py", "file_name": "accounts_controller.py", "fun_name": "validate_child_on_delete", "commit_message": "style: format code with black", "code": "def validate_child_on_delete(row, parent):\n\t\n\tif parent.doctype == \"Sales Order\":\n\t\tif flt(row.delivered_qty):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"Row #{0}: Cannot delete item {1} which has already been delivered\").format(\n\t\t\t\t\trow.idx, row.item_code\n\t\t\t\t)\n\t\t\t)\n\t\tif flt(row.work_order_qty):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"Row #{0}: Cannot delete item {1} which has work order assigned to it.\").format(\n\t\t\t\t\trow.idx, row.item_code\n\t\t\t\t)\n\t\t\t)\n\t\tif flt(row.ordered_qty):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"Row #{0}: Cannot delete item {1} which is assigned to customer's purchase order.\").format(\n\t\t\t\t\trow.idx, row.item_code\n\t\t\t\t)\n\t\t\t)\n\n\tif parent.doctype == \"Purchase Order\" and flt(row.received_qty):\n\t\tfrappe.throw(\n\t\t\t_(\"Row #{0}: Cannot delete item {1} which has already been received\").format(\n\t\t\t\trow.idx, row.item_code\n\t\t\t)\n\t\t)\n\n\tif flt(row.billed_amt):\n\t\tfrappe.throw(\n\t\t\t_(\"Row #{0}: Cannot delete item {1} which has already been billed.\").format(\n\t\t\t\trow.idx, row.item_code\n\t\t\t)\n\t\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 75, "n_words": 107, "vocab_size": 42, "complexity": 8, "nloc": 32, "token_counts": 161, "n_ast_nodes": 269, "n_identifiers": 16, "random_cut": "def validate_child_on_delete(row, parent):\n\t\n\tif parent.doctype == \"Sales Order\":\n\t\tif flt(row.delivered_qty):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"Row #{0}: Cannot delete item {1} which has already been delivered\").format(\n\t\t\t\t\trow.idx, row.item_code\n\t\t\t\t)\n\t\t\t)\n\t\tif flt(row.work_order_qty):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"Row #{0}: Cannot delete item {1} which has work order assigned to it.\").format(\n\t\t\t\t\trow.idx, row.item_code\n\t\t\t\t)\n\t\t\t)\n\t\tif flt(row.ordered_qty):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"Row #{0}: Ca", "d_id": 13956, "documentation": { "docstring": "Check if partially transacted item (row) is being deleted.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 101311, "commit_id": "9e503bdaa2bfe2baaea50ad2e4bf742f309d9d10", "repo": "faceswap", "path": "scripts/fsmedia.py", "file_name": "fsmedia.py", "fun_name": 
"_get_items", "commit_message": "bugfix: debug landmarks", "code": "def _get_items(self):\n \n postprocess_items = {}\n # Debug Landmarks\n if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks):\n postprocess_items[\"DebugLandmarks\"] = None\n\n # Face Filter post processing\n if ((hasattr(self._args, \"filter\") and self._args.filter is not None) or\n (hasattr(self._args, \"nfilter\") and\n self._args.nfilter is not None)):\n\n if hasattr(self._args, \"detector\"):\n detector = self._args.detector.replace(\"-\", \"_\").lower()\n else:\n detector = \"cv2_dnn\"\n if hasattr(self._args, \"aligner\"):\n aligner = self._args.aligner.replace(\"-\", \"_\").lower()\n else:\n aligner = \"cv2_dnn\"\n\n face_filter = dict(detector=detector,\n aligner=aligner,\n multiprocess=not self._args.singleprocess)\n filter_lists = {}\n if hasattr(self._args, \"ref_threshold\"):\n face_filter[\"ref_threshold\"] = self._args.ref_threshold\n for filter_type in ('filter', 'nfilter'):\n filter_args = getattr(self._args, filter_type, None)\n filter_args = None if not filter_args else filter_args\n filter_lists[filter_type] = filter_args\n face_filter[\"filter_lists\"] = filter_lists\n postprocess_items[\"FaceFilter\"] = {\"kwargs\": face_filter}\n\n logger.debug(\"Postprocess Items: %s\", postprocess_items)\n return postprocess_items\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 496, "n_words": 108, "vocab_size": 67, "complexity": 12, "nloc": 29, "token_counts": 249, "n_ast_nodes": 422, "n_identifiers": 23, "random_cut": "def _get_items(self):\n \n postprocess_items = {}\n # Debug Landmarks\n if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks):\n postprocess_items[\"DebugLandmarks\"] = None\n\n # Face Filter post processing\n if ((hasattr(self._args, \"filter\") and self._args.filter is not None) or\n (hasattr(self._args, \"nfilter\") and\n self._args.nfilter is not None)):\n\n if hasattr(self._args, \"detector\"):\n detector = self._args.detector.replace(\"-\", \"_\").lower()\n else:\n detector = \"cv2_dnn\"\n if hasattr(self._args, \"aligner\"):\n aligner = self._args.aligner.replace(\"-\", \"_\").lower()\n else:\n aligner = \"cv2_dnn\"\n\n face_filter = dict(detector=detector,\n aligner=aligner,\n multiprocess=not self._args.singleprocess)\n filter_lists = {}\n if hasattr(self._args, \"ref_threshold\"", "d_id": 20730, "documentation": { "docstring": " Check the passed in command line arguments for requested actions,\n\n For any requested actions, add the item to the actions list along with\n any relevant arguments and keyword arguments.\n\n Returns\n -------\n dict\n The name of the action to be performed as the key. 
Any action specific\n arguments and keyword arguments as the value.\n ", "n_words": 53, "vocab_size": 37, "n_whitespaces": 118, "language": "en" } }, { "id": 218532, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "_collapse_addresses_internal", "commit_message": "add python 3.10.4 for windows", "code": "def _collapse_addresses_internal(addresses):\n \n # First merge\n to_merge = list(addresses)\n subnets = {}\n while to_merge:\n net = to_merge.pop()\n supernet = net.supernet()\n existing = subnets.get(supernet)\n if existing is None:\n subnets[supernet] = net\n elif existing != net:\n # Merge consecutive subnets\n del subnets[supernet]\n to_merge.append(supernet)\n # Then iterate over resulting networks, skipping subsumed subnets\n last = None\n for net in sorted(subnets.values()):\n if last is not None:\n # Since they are sorted, last.network_address <= net.network_address\n # is a given.\n if last.broadcast_address >= net.broadcast_address:\n continue\n yield net\n last = net\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 255, "n_words": 83, "vocab_size": 56, "complexity": 7, "nloc": 19, "token_counts": 104, "n_ast_nodes": 177, "n_identifiers": 15, "random_cut": "def _collapse_addresses_internal(addresses):\n \n # First merge\n to_merge = list(addresses)\n subnets = {}\n while to_merge:\n net = to_merge.pop()\n supernet = net.supernet()\n existing = subnets.get(supernet)\n if existing is None:\n subnets[supernet] = net\n elif existing != net:\n # Merge consecutive subnets\n del subnets[supernet]\n to_merge.append(supernet)\n # Then iterate over resulting networks, skipping subsumed subnets\n last = None\n for net in sorted(subnets.values()):\n if last is not None:\n # Since they are", "d_id": 55370, "documentation": { "docstring": "Loops through the addresses, collapsing concurrent netblocks.\n\n Example:\n\n ip1 = IPv4Network('192.0.2.0/26')\n ip2 = IPv4Network('192.0.2.64/26')\n ip3 = IPv4Network('192.0.2.128/26')\n ip4 = IPv4Network('192.0.2.192/26')\n\n _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->\n [IPv4Network('192.0.2.0/24')]\n\n This shouldn't be called directly; it is called via\n collapse_addresses([]).\n\n Args:\n addresses: A list of IPv4Network's or IPv6Network's\n\n Returns:\n A list of IPv4Network's or IPv6Network's depending on what we were\n passed.\n\n ", "n_words": 57, "vocab_size": 47, "n_whitespaces": 150, "language": "en" } }, { "id": 107491, "commit_id": "f156db08eee54d285ab0fb4e031e48d078ba6aa3", "repo": "matplotlib", "path": "lib/matplotlib/backend_bases.py", "file_name": "backend_bases.py", "fun_name": "inaxes", "commit_message": "DOC: More cleanup axes -> Axes", "code": "def inaxes(self, xy):\n \n axes_list = [a for a in self.figure.get_axes()\n if a.patch.contains_point(xy) and a.get_visible()]\n if axes_list:\n axes = cbook._topmost_artist(axes_list)\n else:\n axes = None\n\n return axes\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 102, "n_words": 25, "vocab_size": 20, "complexity": 5, "nloc": 8, "token_counts": 56, "n_ast_nodes": 92, "n_identifiers": 13, "random_cut": "def inaxes(self, xy):\n \n axes_list = [a for a in self.figure.get_axes()\n if a.patch.contains_point(xy) and a.get_visible()]\n if axes_list:\n axes = 
cbook._topmost_artist(axes_list)", "d_id": 22780, "documentation": { "docstring": "\n Return the topmost visible `~.axes.Axes` containing the point *xy*.\n\n Parameters\n ----------\n xy : (float, float)\n (x, y) pixel positions from left/bottom of the canvas.\n\n Returns\n -------\n `~matplotlib.axes.Axes` or None\n The topmost visible Axes containing the point, or None if there\n is no Axes at the point.\n ", "n_words": 46, "vocab_size": 36, "n_whitespaces": 136, "language": "en" } }, { "id": 45484, "commit_id": "69f6f9e01b6df76c3c8fa266d460324163957887", "repo": "airflow", "path": "airflow/migrations/versions/98271e7606e2_add_scheduling_decision_to_dagrun_and_.py", "file_name": "98271e7606e2_add_scheduling_decision_to_dagrun_and_.py", "fun_name": "upgrade", "commit_message": "Autogenerate migration reference doc (#21601)\n\n* document airflow version in each alembic migration module and use this to autogen the doc\r\n* update each migration module to have the same description used in migration ref (so it can be used in autogen)", "code": "def upgrade():\n \n conn = op.get_bind()\n is_sqlite = bool(conn.dialect.name == \"sqlite\")\n is_mssql = bool(conn.dialect.name == \"mssql\")\n\n if is_sqlite:\n op.execute(\"PRAGMA foreign_keys=off\")\n\n with op.batch_alter_table('dag_run', schema=None) as batch_op:\n batch_op.add_column(sa.Column('last_scheduling_decision', TIMESTAMP, nullable=True))\n batch_op.create_index('idx_last_scheduling_decision', ['last_scheduling_decision'], unique=False)\n batch_op.add_column(sa.Column('dag_hash', sa.String(32), nullable=True))\n\n with op.batch_alter_table('dag', schema=None) as batch_op:\n batch_op.add_column(sa.Column('next_dagrun', TIMESTAMP, nullable=True))\n batch_op.add_column(sa.Column('next_dagrun_create_after', TIMESTAMP, nullable=True))\n # Create with nullable and no default, then ALTER to set values, to avoid table level lock\n batch_op.add_column(sa.Column('concurrency', sa.Integer(), nullable=True))\n batch_op.add_column(sa.Column('has_task_concurrency_limits', sa.Boolean(), nullable=True))\n\n batch_op.create_index('idx_next_dagrun_create_after', ['next_dagrun_create_after'], unique=False)\n\n try:\n from airflow.configuration import conf\n\n concurrency = conf.getint('core', 'dag_concurrency', fallback=16)\n except: # noqa\n concurrency = 16\n\n # Set it to true here as it makes us take the slow/more complete path, and when it's next parsed by the\n # DagParser it will get set to correct value.\n\n op.execute(\n f\n )\n\n with op.batch_alter_table('dag', schema=None) as batch_op:\n batch_op.alter_column('concurrency', type_=sa.Integer(), nullable=False)\n batch_op.alter_column('has_task_concurrency_limits', type_=sa.Boolean(), nullable=False)\n\n if is_sqlite:\n op.execute(\"PRAGMA foreign_keys=on\")\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 300, "n_words": 135, "vocab_size": 94, "complexity": 4, "nloc": 34, "token_counts": 309, "n_ast_nodes": 553, "n_identifiers": 32, "random_cut": "def upgrade():\n \n conn = op.get_bind()\n is_sqlite = bool(conn.dialect.name == \"sqlite\")\n is_mssql = bool(conn.dialect.name == \"mssql\")\n\n if is_sqlite:\n op.execute(\"PRAGMA foreign_keys=off\")\n\n with op.batch_alter_table('dag_run', schema=None) as batch_op:\n batch_op.add_column(sa.Column('last_scheduling_decision', TIMESTAMP, nullable=True))\n batch_op.create_index('idx_last_scheduling_decision', ['last_scheduling_decision'], unique=False)\n 
batch_op.add_column(sa.Column('dag_hash', sa.String(32), nullable=True))\n\n with op.batch_alter_table('dag', schema=None) as batch_op:\n batc", "d_id": 8611, "documentation": { "docstring": "Apply Add ``scheduling_decision`` to ``DagRun`` and ``DAG``\n UPDATE dag SET\n concurrency={concurrency},\n has_task_concurrency_limits={1 if is_sqlite or is_mssql else sa.true()}\n where concurrency IS NULL\n ", "n_words": 22, "vocab_size": 22, "n_whitespaces": 65, "language": "en" } }, { "id": 3889, "commit_id": "1e0ac30ebdcfce55a5644bcd486044da45c93dd6", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-orb/source_orb/source.py", "file_name": "source.py", "fun_name": "enrich_ledger_entries_with_event_data", "commit_message": "🎉 New Source: Orb (#9985)\n\n* V1 of source_orb connector\r\n\r\n* add boostrap.md file\r\n\r\n* add clause on Pagination to bootstrap.md\r\n\r\n* add SUMMARY documentation\r\n\r\n* add lookback_window_days connector parameter\r\n\r\n* Add support for start_date parameter\r\n\r\n* Add ability to transform record in order to un-nest IDs\r\n\r\n* Add support for extracting event properties based on connector configuration", "code": "def enrich_ledger_entries_with_event_data(self, ledger_entries):\n \n # Build up a list of the subset of ledger entries we are expected\n # to enrich with event metadata.\n event_id_to_ledger_entry = {}\n for entry in ledger_entries:\n maybe_event_id: Optional[str] = entry.get(\"event_id\")\n if maybe_event_id:\n event_id_to_ledger_entry[maybe_event_id] = entry\n\n # Nothing to enrich; short-circuit\n if len(event_id_to_ledger_entry) == 0:\n return ledger_entries\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 147, "n_words": 50, "vocab_size": 41, "complexity": 12, "nloc": 35, "token_counts": 261, "n_ast_nodes": 84, "n_identifiers": 10, "random_cut": "def enrich_ledger_entries_with_event_data(self, ledger_entries):\n \n # Build up a list of the subset of ledger entries we are expected\n # to enrich with event metadata.\n event_id_to_ledger_entry = {}\n for entry in ledger_entries:\n maybe_event_id: Optional[str] = entry.get(\"event_id\")\n if maybe_event_id:\n event_id_to_ledger_entry[maybe_event_id] = entry\n\n # Nothing to enrich; short-circuit\n if len(event_id_to_ledger_entry) == 0:\n return ledger_entries\n", "d_id": 591, "documentation": { "docstring": "\n Enriches a list of ledger entries with event metadata (applies only to decrements that\n have an event_id property set, i.e. 
automated decrements to the ledger applied by Orb).\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 50, "language": "en" } }, { "id": 3245, "commit_id": "61f4138eeb028287425f6007d692bf7faa808e75", "repo": "PySyft", "path": "packages/syft/tests/syft/core/adp/data_subject_ledger_test.py", "file_name": "data_subject_ledger_test.py", "fun_name": "test_cache", "commit_message": "Add tests for ledger and cache", "code": "def test_cache() -> None:\n \n ledger_store = DictLedgerStore()\n user_key = b\"1322\"\n ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key)\n\n assert (\n ledger._cache_constant2epsilon[0] == 0.05372712063485988\n ), \"The first value in the cache is incorrect\"\n assert (\n ledger._cache_constant2epsilon[1] == 0.07773597369831031\n ), \"Has the DP cache been changed?\"\n\n rdp_700k = convert_constants_to_indices(np.array([700_000]))\n assert (\n ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075\n ), \"Has the DP cache been changed?\"\n rdp_50 = convert_constants_to_indices(np.array([50]))\n assert (\n ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825\n ), \"Has the DP cache been changed?\"\n assert (\n len(ledger._cache_constant2epsilon) >= 1_200_000\n ), \"Has the cache been changed?\"\n\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 164, "n_words": 81, "vocab_size": 43, "complexity": 1, "nloc": 22, "token_counts": 139, "n_ast_nodes": 211, "n_identifiers": 16, "random_cut": "def test_cache() -> None:\n \n ledger_store = DictLedgerStore()\n user_key = b\"1322\"\n ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key)\n\n assert (\n ledger._cache_constant2epsilon[0] == 0.05372712063485988\n ), \"The first value in the cache is incorrect\"\n assert (\n ledger._cache_constant2epsilon[1] == 0.07773597369831031\n ), \"Has the DP cache been changed?\"\n\n rdp_700k = convert_constants_to_indices(np.array([700_000]))\n assert (\n ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075\n ), \"Has the DP cache been changed?\"\n rdp_50 = convert_constants_to_indices(np.array([50]))\n assert (\n ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825\n ), \"Has the DP cache bee", "d_id": 413, "documentation": { "docstring": "Ensure the most up to date RDP-to-epsilon cache is being used.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 20452, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/lexers/__init__.py", "file_name": "__init__.py", "fun_name": "get_lexer_for_mimetype", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def get_lexer_for_mimetype(_mime, **options):\n \n for modname, name, _, _, mimetypes in LEXERS.values():\n if _mime in mimetypes:\n if name not in _lexer_cache:\n 
_load_lexers(modname)\n return _lexer_cache[name](**options)\n for cls in find_plugin_lexers():\n if _mime in cls.mimetypes:\n return cls(**options)\n raise ClassNotFound('no lexer for mimetype %r found' % _mime)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 116, "n_words": 42, "vocab_size": 31, "complexity": 6, "nloc": 10, "token_counts": 77, "n_ast_nodes": 123, "n_identifiers": 14, "random_cut": "def get_lexer_for_mimetype(_mime, **options):\n \n for modname, name, _, _, mimetypes in LEXERS.values():\n if _mime in mimetypes:\n if name not in _lexer_cache:\n _load_lexers(modname)\n return _lexer_cache[name](**options)\n for cls in find_plugin_lexers():\n ", "d_id": 3380, "documentation": { "docstring": "Get a lexer for a mimetype.\n\n Raises ClassNotFound if not found.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 260406, "commit_id": "9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/tests/test_glm.py", "file_name": "test_glm.py", "fun_name": "test_glm_regression", "commit_message": "TST tight tests for GLMs (#23619)\n\nCo-authored-by: Olivier Grisel ", "code": "def test_glm_regression(solver, fit_intercept, glm_dataset):\n \n model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset\n params = dict(\n alpha=alpha,\n fit_intercept=fit_intercept,\n # While _GeneralizedLinearRegressor exposes the solver parameter, public\n # estimators currently do not, and lbfgs is the only solver anyway.\n # TODO: Expose solver as soon as we have a second solver to choose from.\n # solver=solver, # only lbfgs available\n tol=1e-12,\n max_iter=1000,\n )\n\n model = clone(model).set_params(**params)\n X = X[:, :-1] # remove intercept\n if fit_intercept:\n coef = coef_with_intercept\n intercept = coef[-1]\n coef = coef[:-1]\n else:\n coef = coef_without_intercept\n intercept = 0\n\n model.fit(X, y)\n\n rtol = 5e-5\n assert model.intercept_ == pytest.approx(intercept, rel=rtol)\n assert_allclose(model.coef_, coef, rtol=rtol)\n\n # Same with sample_weight.\n model = (\n clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))\n )\n assert model.intercept_ == pytest.approx(intercept, rel=rtol)\n assert_allclose(model.coef_, coef, rtol=rtol)\n\n\n@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [True, False])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [True, False])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 276, "n_words": 127, "vocab_size": 89, "complexity": 2, "nloc": 26, "token_counts": 201, "n_ast_nodes": 344, "n_identifiers": 34, "random_cut": "def test_glm_regression(solver, fit_intercept, glm_dataset):\n \n model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset\n params = dict(\n alpha=alpha,\n fit_intercept=fit_intercept,\n # While _GeneralizedLinearRegressor exposes the solver parameter, public\n # estimators currently do not, and lbfgs is the only solver anyw", "d_id": 76230, "documentation": { "docstring": "Test that GLM converges for all solvers to correct solution.\n\n We work with a simple constructed data set with known solution.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 27, "language": "en" } }, { "id": 100425, "commit_id": 
"c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "setup.py", "file_name": "setup.py", "fun_name": "_cuda_check", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _cuda_check(self):\n \n with Popen(\"nvcc -V\", shell=True, stdout=PIPE, stderr=PIPE) as chk:\n stdout, stderr = chk.communicate()\n if not stderr:\n version = re.search(r\".*release (?P\\d+\\.\\d+)\",\n stdout.decode(locale.getpreferredencoding()))\n self.cuda_version = version.groupdict().get(\"cuda\", None)\n locate = \"where\" if self._os == \"windows\" else \"which\"\n path = os.popen(f\"{locate} nvcc\").read()\n if path:\n path = path.split(\"\\n\")[0] # Split multiple entries and take first found\n while True: # Get Cuda root folder\n path, split = os.path.split(path)\n if split == \"bin\":\n break\n self.cuda_path = path\n return\n\n # Failed to load nvcc, manual check\n getattr(self, f\"_cuda_check_{self._os}\")()\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 332, "n_words": 81, "vocab_size": 65, "complexity": 6, "nloc": 18, "token_counts": 149, "n_ast_nodes": 271, "n_identifiers": 27, "random_cut": "def _cuda_check(self):\n \n with Popen(\"nvcc -V\", shell=True, stdout=PIPE, stderr=PIPE) as chk:\n stdout, stderr = chk.communicate()\n if not stderr:\n version = re.search(r\".*release (?P\\d+\\.\\d+)\",\n stdout.decode(locale.getpreferredencoding()))\n self.cuda_version = version.groupdict().get(\"cuda\", None)\n locate = \"where\" if self._os == \"windows\" else \"which\"\n path = os.popen(f\"{locate} nvcc\").read()\n if path:\n path = path.split(\"", "d_id": 19908, "documentation": { "docstring": " Obtain the location and version of Cuda and populate :attr:`cuda_version` and\n :attr:`cuda_path`\n\n Initially just calls `nvcc -V` to get the installed version of Cuda currently in use.\n If this fails, drills down to more OS specific checking methods.\n ", "n_words": 38, "vocab_size": 31, "n_whitespaces": 67, "language": "en" } }, { "id": 45097, "commit_id": "46a337c8cda6fcc515fffe9a4e4cc324edaefa0a", "repo": "airflow", "path": "tests/models/test_taskinstance.py", "file_name": "test_taskinstance.py", "fun_name": "test_map_product_same", "commit_message": "Implement mapped value unpacking (#21641)", "code": "def test_map_product_same(self, dag_maker, session):\n \n outputs = []\n\n with dag_maker(dag_id=\"product_same\", session=session) as dag:\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 33, "n_words": 12, "vocab_size": 12, 
"complexity": 2, "nloc": 20, "token_counts": 177, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "def test_map_product_same(self, dag_maker, session):\n \n outputs = ", "d_id": 8477, "documentation": { "docstring": "Test a mapped task can refer to the same source multiple times.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 152834, "commit_id": "1f92336be768d235c18a82acb2195b7135101ae7", "repo": "stable-diffusion-webui", "path": "modules/deepbooru.py", "file_name": "deepbooru.py", "fun_name": "get_deepbooru_tags", "commit_message": "refactored the deepbooru module to improve speed on running multiple interogations in a row. Added the option to generate deepbooru tags for textual inversion preproccessing.", "code": "def get_deepbooru_tags(pil_image, threshold=0.5):\n \n from modules import shared # prevents circular reference\n create_deepbooru_process(threshold)\n shared.deepbooru_process_return[\"value\"] = -1\n shared.deepbooru_process_queue.put(pil_image)\n while shared.deepbooru_process_return[\"value\"] == -1:\n time.sleep(0.2)\n release_process()\n return ret\n\n", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 24, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 61, "n_ast_nodes": 100, "n_identifiers": 13, "random_cut": "def get_deepbooru_tags(pil_image, threshold=0.5):\n \n from modules import shared # prevents circular reference\n create_deepbooru_process(threshold)\n shared.deepbooru_process_return[\"value\"] = -1\n shared.deepbooru_proces", "d_id": 35196, "documentation": { "docstring": "\n This method is for running only one image at a time for simple use. 
Used to the img2img interrogate.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 27, "language": "en" } }, { "id": 116615, "commit_id": "0dadd5cecec68f252a08637f695b0e4b573b316f", "repo": "mindsdb", "path": "tests/unit/test_executor.py", "file_name": "test_executor.py", "fun_name": "test_update_from_select", "commit_message": "support of update command\n#2454", "code": "def test_update_from_select(self, mock_handler):\n self.set_handler(mock_handler, name='pg', tables={'tasks': self.df})\n\n # --- use predictor ---\n predictor = {\n 'name': 'task_model',\n 'predict': 'p',\n 'dtypes': {\n 'p': dtype.float,\n 'a': dtype.integer,\n 'b': dtype.categorical,\n 'c': dtype.datetime\n },\n 'predicted_value': 'ccc'\n }\n self.set_predictor(predictor)\n sql = \n\n ret = self.command_executor.execute_command(\n parse_sql(sql, dialect='mindsdb'))\n assert ret.error_code is None\n\n # 1 select and 2 updates\n assert mock_handler().query.call_count == 3\n\n # second is update\n assert mock_handler().query.call_args_list[1][0][0].to_string() == \"update table2 set a1=1, c1='ccc' where (a1 = 1) AND (b1 = 'ccc')\"\n\n\n # @patch('mindsdb.integrations.handlers.postgres_handler.Handler')\n # def test_union_type_mismatch(self, mock_handler):\n # self.set_handler(mock_handler, name='pg', tables={'tasks': self.df})\n #\n # sql = \n # from mindsdb.api.mysql.mysql_proxy.utilities import ErSqlWrongArguments\n # with pytest.raises(ErSqlWrongArguments):\n # self.command_executor.execute_command(parse_sql(sql, dialect='mindsdb'))\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 360, "n_words": 101, "vocab_size": 71, "complexity": 1, "nloc": 37, "token_counts": 135, "n_ast_nodes": 247, "n_identifiers": 25, "random_cut": "def test_update_from_select(self, mock_handler):\n self.set_handler(mock_handler, name='pg', tables={'tasks': self.df})\n\n # --- use predictor ---\n predictor = {\n 'name': 'task_model',\n 'predict': 'p',\n 'dtypes': {\n 'p': dtype.float,\n 'a': dtype.integer,\n 'b': dtype.categorical,\n 'c': dtype.datetime", "d_id": 25796, "documentation": { "docstring": "\n update \n pg.table2 \n set\n a1 = df.a,\n c1 = df.c\n from \n (\n SELECT model.a as a, model.b as b, model.p as c\n FROM pg.tasks as t\n JOIN mindsdb.task_model as model\n WHERE t.a=1 \n )\n as df\n where \n table2.a1 = df.a \n and table2.b1 = df.b \n \n # SELECT a, b FROM pg.tasks\n # UNION\n # SELECT b, a FROM pg.tasks\n # ", "n_words": 57, "vocab_size": 38, "n_whitespaces": 410, "language": "en" } }, { "id": 65565, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py", "file_name": "supplier_scorecard_variable.py", "fun_name": "get_total_shipments", "commit_message": "style: format code with black", "code": "def get_total_shipments(scorecard):\n\t\n\tsupplier = frappe.get_doc(\"Supplier\", scorecard.supplier)\n\n\t# Look up all PO Items with delivery dates between our dates\n\tdata = frappe.db.sql(\n\t\t,\n\t\t{\"supplier\": supplier.name, \"start_date\": scorecard.start_date, \"end_date\": scorecard.end_date},\n\t\tas_dict=0,\n\t)[0][0]\n\n\tif not data:\n\t\tdata = 0\n\treturn data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 27, "n_words": 38, "vocab_size": 33, "complexity": 2, "nloc": 20, "token_counts": 68, 
"n_ast_nodes": 114, "n_identifiers": 12, "random_cut": "def get_total_shipments(scorecard):\n\t\n\tsupplier = frappe.get_doc(\"Supplier\", scorecard.supplier)\n\n\t# Loo", "d_id": 13942, "documentation": { "docstring": "Gets the total number of ordered shipments to arrive in the period (based on Purchase Receipts)\n\t\t\tSELECT\n\t\t\t\tCOUNT(po_item.base_amount)\n\t\t\tFROM\n\t\t\t\t`tabPurchase Order Item` po_item,\n\t\t\t\t`tabPurchase Order` po\n\t\t\tWHERE\n\t\t\t\tpo.supplier = %(supplier)s\n\t\t\t\tAND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s\n\t\t\t\tAND po_item.docstatus = 1\n\t\t\t\tAND po_item.parent = po.name", "n_words": 44, "vocab_size": 37, "n_whitespaces": 33, "language": "en" } }, { "id": 196909, "commit_id": "3a56f9bb1642dda441f65b3713635a8e98150247", "repo": "sympy", "path": "sympy/utilities/source.py", "file_name": "source.py", "fun_name": "source", "commit_message": "Update the deprecation for source()", "code": "def source(object):\n \n print('In file: %s' % inspect.getsourcefile(object))\n print(inspect.getsource(object))\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 17, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "def source(object):\n \n print('In file: %s' % inspect.getsourcefile(", "d_id": 48243, "documentation": { "docstring": "\n Prints the source code of a given object.\n\n .. deprecated:: 1.3\n\n The ``source()`` function is deprecated. Use ``inspect.getsource()`` or\n ``??`` in IPython/Jupyter instead.\n\n ", "n_words": 23, "vocab_size": 23, "n_whitespaces": 45, "language": "en" } }, { "id": 277080, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/tf_utils.py", "file_name": "tf_utils.py", "fun_name": "validate_axis", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def validate_axis(axis, input_shape):\n \n input_shape = tf.TensorShape(input_shape)\n rank = input_shape.rank\n if not rank:\n raise ValueError(\n f\"Input has undefined rank. Received: input_shape={input_shape}\"\n )\n\n # Convert axis to list and resolve negatives\n if isinstance(axis, int):\n axis = [axis]\n else:\n axis = list(axis)\n for idx, x in enumerate(axis):\n if x < 0:\n axis[idx] = rank + x\n\n # Validate axes\n for x in axis:\n if x < 0 or x >= rank:\n raise ValueError(\n \"Invalid value for `axis` argument. \"\n \"Expected 0 <= axis < inputs.rank (with \"\n f\"inputs.rank={rank}). Received: axis={tuple(axis)}\"\n )\n if len(axis) != len(set(axis)):\n raise ValueError(f\"Duplicate axis: {tuple(axis)}\")\n return axis\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 272, "n_words": 98, "vocab_size": 65, "complexity": 9, "nloc": 24, "token_counts": 119, "n_ast_nodes": 224, "n_identifiers": 16, "random_cut": "def validate_axis(axis, input_shape):\n \n input_shape = tf.TensorShape(input_shape)\n rank = input_shape.rank\n if not rank:\n raise ValueError(\n f\"Input has undefined rank. 
Received: input_shape={input_shape}\"\n )\n\n # Convert axis to list and resolve negatives\n if isinstance(axis, int):\n axis = [axis]\n else:\n axis = list(axis)\n for idx, x in enumerate(axis):\n if x < 0:\n axis[idx] = rank + x\n\n # Va", "d_id": 81856, "documentation": { "docstring": "Validate an axis value and returns its standardized form.\n\n Args:\n axis: Value to validate. Can be an integer or a list/tuple of integers.\n Integers may be negative.\n input_shape: Reference input shape that the axis/axes refer to.\n\n Returns:\n Normalized form of `axis`, i.e. a list with all-positive values.\n ", "n_words": 47, "vocab_size": 43, "n_whitespaces": 78, "language": "en" } }, { "id": 20375, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/formatters/latex.py", "file_name": "latex.py", "fun_name": "_filter_to", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _filter_to(self, it, pred):\n \n buf = ''\n idx = 0\n for i, t, v in it:\n if pred(t):\n if buf:\n yield idx, None, buf\n buf = ''\n yield i, t, v\n else:\n if not buf:\n idx = i\n buf += v\n if buf:\n yield idx, None, buf\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 232, "n_words": 47, "vocab_size": 25, "complexity": 6, "nloc": 15, "token_counts": 70, "n_ast_nodes": 116, "n_identifiers": 9, "random_cut": "def _filter_to(self, it, pred):\n \n ", "d_id": 3351, "documentation": { "docstring": " Keep only the tokens that match `pred`, merge the others together ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 12, "language": "en" } }, { "id": 291261, "commit_id": "8a8732f0bc2a7cd891a3ddaff3edbe9c246d6ebf", "repo": "core", "path": "homeassistant/components/mqtt/mixins.py", "file_name": "mixins.py", "fun_name": "entity_registry_enabled_default", "commit_message": "Strict type hints for MQTT integration (#82317)\n\n* Strict type hints for MQTT integration\r\n\r\n* Fix errors\r\n\r\n* Additional corrections\r\n\r\n* Use cv.template to avoid untyped calls\r\n\r\n* Enable strict typing policy for MQTT integration\r\n\r\n* Use ignore[no-untyped-call]\r\n\r\n* Use # type: ignore[unreachable]\r\n\r\n* Correct cast\r\n\r\n* Refactor getting discovery_payload\r\n\r\n* Remove unused type ignore comments", "code": "def entity_registry_enabled_default(self) -> bool:\n \n return bool(self._config[CONF_ENABLED_BY_DEFAULT])\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def entity_registry_enabled_default(self) -> bool:\n \n return bool(self._config[CONF_ENABLED_BY_DEFAULT])\n", "d_id": 90371, "documentation": { "docstring": "Return if the entity should be enabled when first 
added to the entity registry.", "n_words": 14, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 196300, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/polygon.py", "file_name": "polygon.py", "fun_name": "bisectors", "commit_message": "Updated import locations", "code": "def bisectors(self):\n \n # use lines containing sides so containment check during\n # intersection calculation can be avoided, thus reducing\n # the processing time for calculating the bisectors\n s = [Line(l) for l in self.sides]\n v = self.vertices\n c = self.incenter\n l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0])\n l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0])\n l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0])\n return {v[0]: l1, v[1]: l2, v[2]: l3}\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 139, "n_words": 62, "vocab_size": 53, "complexity": 2, "nloc": 8, "token_counts": 143, "n_ast_nodes": 213, "n_identifiers": 15, "random_cut": "def bisectors(self):\n \n # use lines containing sides so containment check during\n # intersection calculation can be avoided, thus reducing\n # the processing time for calculating the bisectors\n s = [Line(l) for l in self.sides]\n v = self.vertices\n c = self.incenter\n l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0])\n l2 = Segme", "d_id": 47800, "documentation": { "docstring": "The angle bisectors of the triangle.\n\n An angle bisector of a triangle is a straight line through a vertex\n which cuts the corresponding angle in half.\n\n Returns\n =======\n\n bisectors : dict\n Each key is a vertex (Point) and each value is the corresponding\n bisector (Segment).\n\n See Also\n ========\n\n sympy.geometry.point.Point, sympy.geometry.line.Segment\n\n Examples\n ========\n\n >>> from sympy import Point, Triangle, Segment\n >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)\n >>> t = Triangle(p1, p2, p3)\n >>> from sympy import sqrt\n >>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1))\n True\n\n ", "n_words": 91, "vocab_size": 63, "n_whitespaces": 232, "language": "en" } }, { "id": 256545, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "ui/utils.py", "file_name": "utils.py", "fun_name": "query", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. 
Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def query(query, filters={}, top_k_reader=5, top_k_retriever=5) -> Tuple[List[Dict[str, Any]], Dict[str, str]]:\n \n\n url = f\"{API_ENDPOINT}/{DOC_REQUEST}\"\n params = {\"filters\": filters, \"Retriever\": {\"top_k\": top_k_retriever}, \"Reader\": {\"top_k\": top_k_reader}}\n req = {\"query\": query, \"params\": params}\n response_raw = requests.post(url, json=req)\n\n if response_raw.status_code >= 400 and response_raw.status_code != 503:\n raise Exception(f\"{vars(response_raw)}\")\n\n response = response_raw.json()\n if \"errors\" in response:\n raise Exception(\", \".join(response[\"errors\"]))\n\n # Format response\n results = []\n answers = response[\"answers\"]\n for answer in answers:\n if answer.get(\"answer\", None):\n results.append(\n {\n \"context\": \"...\" + answer[\"context\"] + \"...\",\n \"answer\": answer.get(\"answer\", None),\n \"source\": answer[\"meta\"][\"name\"],\n \"relevance\": round(answer[\"score\"] * 100, 2),\n \"document\": [doc for doc in response[\"documents\"] if doc[\"id\"] == answer[\"document_id\"]][0],\n \"offset_start_in_doc\": answer[\"offsets_in_document\"][0][\"start\"],\n \"_raw\": answer,\n }\n )\n else:\n results.append(\n {\n \"context\": None,\n \"answer\": None,\n \"document\": None,\n \"relevance\": round(answer[\"score\"] * 100, 2),\n \"_raw\": answer,\n }\n )\n return results, response\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 523, "n_words": 124, "vocab_size": 89, "complexity": 8, "nloc": 40, "token_counts": 297, "n_ast_nodes": 521, "n_identifiers": 30, "random_cut": "def query(query, filters={}, top_k_reader=5, top_k_retriever=5) -> Tuple[List[Dict[str, Any]], Dict[str, str]]:\n \n\n url = f\"{API_ENDPOINT}/{DOC_REQUEST}\"\n params = {\"filters\": filters, \"Retriever\": {\"top_k\": top_k_retriever}, \"Reader\": {\"top_k\": top_k_reader}}\n req = {\"query\": query, \"params\": params}\n response_raw = requests.post(url, json=req)\n\n if response_raw.status_code >= 400 and response_raw.status_code != 503:\n raise Exception(f\"{vars(response_raw)}\")\n\n response = ", "d_id": 74890, "documentation": { "docstring": "\n Send a query to the REST API and parse the answer.\n Returns both a ready-to-use representation of the results and the raw JSON.\n ", "n_words": 23, "vocab_size": 18, "n_whitespaces": 33, "language": "en" } }, { "id": 298548, "commit_id": "e2bbdb26be42d9b82538f5964819489e6f7aa656", "repo": "core", "path": "homeassistant/components/daikin/climate.py", "file_name": "climate.py", "fun_name": "format_target_temperature", "commit_message": "Daikin AC : Round to nearest half degree (#70446) (#70452)", "code": "def format_target_temperature(target_temperature):\n \n return str(round(float(target_temperature) * 2, 0) / 2).rstrip(\"0\").rstrip(\".\")\n\n", "url": "https://github.com/home-assistant/core.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 33, "n_ast_nodes": 59, "n_identifiers": 6, "random_cut": "def format_target_temperature(target_temperature):\n \n return str(round(float(target_temperature) * 2, 0) / 2).r", "d_id": 97491, "documentation": { "docstring": "Format target temperature to be sent to the Daikin unit, rounding to nearest half degree.", "n_words": 15, "vocab_size": 13, "n_whitespaces": 14, "language": "en" } }, { "id": 67274, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/uae_vat_201/uae_vat_201.py", "file_name": "uae_vat_201.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data(filters=None):\n\t\n\tdata = []\n\temirates, amounts_by_emirate = append_vat_on_sales(data, filters)\n\tappend_vat_on_expenses(data, filters)\n\treturn data, emirates, amounts_by_emirate\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 11, "n_words": 16, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 34, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def get_data(filters=None):\n\t\n\tdata = []\n\temirates, amounts_by_emirate = append_vat_on_sales(data, filters)\n\tappend_vat_on_expenses(data, filters)\n\t", "d_id": 14468, "documentation": { "docstring": "Returns the list of dictionaries. Each dictionary is a row in the datatable and chart data.", "n_words": 16, "vocab_size": 15, "n_whitespaces": 15, "language": "en" } }, { "id": 57112, "commit_id": "b13e269bdebd6248023455e7f1ccb24669cbfe3e", "repo": "prefect", "path": "src/prefect/utilities/callables.py", "file_name": "callables.py", "fun_name": "dict", "commit_message": "Move parameter schema utilities to prefect.utilites.callables", "code": "def dict(self, *args, **kwargs):\n \n kwargs.setdefault(\"exclude_none\", True)\n return super().dict(*args, **kwargs)\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 56, "n_identifiers": 6, "random_cut": "def dict(self, *args, **kwargs):\n \n kwargs.setdefault(\"exclude_none\", True)\n r", "d_id": 11620, "documentation": { "docstring": "Exclude `None` fields by default to comply with\n the OpenAPI spec.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 25, "language": "en" } }, { "id": 3326, "commit_id": "c5d4a973631ccae7918b9d7881f875a265f30619", "repo": "airbyte", "path": "airbyte-integrations/bases/base-normalization/normalization/transform_catalog/stream_processor.py", "file_name": "stream_processor.py", "fun_name": "extract_column_names", "commit_message": "🐛 Fix normalization issue with quoted & case sensitive columns (#9317)", "code": "def extract_column_names(self) -> Dict[str, Tuple[str, str]]:\n \n fields = []\n for field in self.properties.keys():\n if not is_airbyte_column(field):\n fields.append(field)\n result = {}\n field_names = set()\n for field in fields:\n field_name = self.name_transformer.normalize_column_name(field, in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True)\n if 
field_name_lookup in field_names:\n # TODO handle column name duplicates or collisions deterministically in this stream\n for i in range(1, 1000):\n field_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=True)\n if field_name_lookup not in field_names:\n break\n field_names.add(field_name_lookup)\n result[field] = (field_name, jinja_name)\n return result\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 353, "n_words": 83, "vocab_size": 51, "complexity": 7, "nloc": 28, "token_counts": 178, "n_ast_nodes": 294, "n_identifiers": 24, "random_cut": "def extract_column_names(self) -> Dict[str, Tuple[str, str]]:\n \n fields = []\n for field in self.properties.keys():\n if not is_airbyte_column(field):\n fields.append(field)\n result = {}\n field_names = set()\n for field in fields:\n field_name = self.name_transformer.normalize_column_name(field, in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True)\n if field_name_lookup in field_names:\n # TODO handle column name duplicates or collisions deterministically in this stream\n for i in range(1, 1000):\n field_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=True)\n if field_name", "d_id": 443, "documentation": { "docstring": "\n Generate a mapping of JSON properties to normalized SQL Column names, handling collisions and avoid duplicate names\n\n The mapped value to a field property is a tuple where:\n - the first value is the normalized \"raw\" column name\n - the second value is the normalized quoted column name to be used in jinja context\n ", "n_words": 54, "vocab_size": 38, "n_whitespaces": 92, "language": "en" } }, { "id": 176574, "commit_id": "db20f63bd3f16dedb6c660dbc6fbc89e89892c82", "repo": "networkx", "path": "networkx/algorithms/shortest_paths/generic.py", "file_name": "generic.py", "fun_name": "_build_paths_from_predecessors", "commit_message": "Add a space in an error (#5601)\n\n* Add a space in an error\r\n\r\n* Fix style errors", "code": "def _build_paths_from_predecessors(sources, target, pred):\n \n if target not in pred:\n raise nx.NetworkXNoPath(f\"Target {target} cannot be reached from given sources\")\n\n seen = {target}\n stack = [[target, 0]]\n top = 0\n while top >= 0:\n node, i = stack[top]\n if node in sources:\n yield [p for p, n in reversed(stack[: top + 1])]\n if len(pred[node]) > i:\n stack[top][1] = i + 1\n next = pred[node][i]\n if next in seen:\n continue\n else:\n seen.add(next)\n top += 1\n if top == len(stack):\n stack.append([next, 0])\n else:\n stack[top][:] = [next, 0]\n else:\n seen.discard(node)\n top -= 1\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 311, "n_words": 88, "vocab_size": 62, "complexity": 8, "nloc": 25, "token_counts": 170, "n_ast_nodes": 271, "n_identifiers": 19, "random_cut": "def 
_build_paths_from_predecessors(sources, target, pred):\n \n if", "d_id": 41976, "documentation": { "docstring": "Compute all simple paths to target, given the predecessors found in\n pred, terminating when any source in sources is found.\n\n Parameters\n ----------\n sources : set\n Starting nodes for path.\n\n target : node\n Ending node for path.\n\n pred : dict\n A dictionary of predecessor lists, keyed by node\n\n Returns\n -------\n paths : generator of lists\n A generator of all paths between source and target.\n\n Raises\n ------\n NetworkXNoPath\n If `target` cannot be reached from `source`.\n\n Notes\n -----\n There may be many paths between the sources and target. If there are\n cycles among the predecessors, this function will not produce all\n possible paths because doing so would produce infinitely many paths\n of unbounded length -- instead, we only produce simple paths.\n\n See Also\n --------\n shortest_path\n single_source_shortest_path\n all_pairs_shortest_path\n all_shortest_paths\n bellman_ford_path\n ", "n_words": 126, "vocab_size": 92, "n_whitespaces": 237, "language": "en" } }, { "id": 287747, "commit_id": "1b144c0e4dd683e3b47668a89da5eb6da4ae5e08", "repo": "core", "path": "homeassistant/components/bluetooth/models.py", "file_name": "models.py", "fun_name": "is_connected", "commit_message": "Update to bleak 0.18.0 (#79008)", "code": "def is_connected(self) -> bool:\n \n return self._backend is not None and self._backend.is_connected\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 21, "n_ast_nodes": 35, "n_identifiers": 4, "random_cut": "def is_connected(self) -> bool:\n \n return self._backend is not None and self._backend.is_connected\n", "d_id": 86935, "documentation": { "docstring": "Return True if the client is connected to a device.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 21422, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", "file_name": "tarfile.py", "fun_name": "_create_gnu_long_header", "commit_message": "Vendor in pip 22.1.2", "code": "def _create_gnu_long_header(cls, name, type, encoding, errors):\n \n name = name.encode(encoding, errors) + NUL\n\n info = {}\n info[\"name\"] = \"././@LongLink\"\n info[\"type\"] = type\n info[\"size\"] = len(name)\n info[\"magic\"] = GNU_MAGIC\n\n # create extended header + name blocks.\n return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \\\n cls._create_payload(name)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 120, "n_words": 42, "vocab_size": 32, "complexity": 1, "nloc": 9, "token_counts": 78, "n_ast_nodes": 126, "n_identifiers": 14, "random_cut": "def _create_gnu_long_header(cls, name, type, encoding, errors):\n \n name = name.encode(encoding, errors) + NUL\n\n info = {}\n info[\"name\"] = \"././@LongLink\"\n info[\"type\"] = type\n info[\"size\"] = len(name)\n info[\"magic\"] = GNU_MAGIC\n\n # create extended header + name blocks.\n return cls._create_he", "d_id": 3823, "documentation": { "docstring": "Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence\n for name.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 25, "language": "en" } }, { "id": 199652, "commit_id": 
"93e4d381d35cd4c21a3a8d713c157f8fb21f725b", "repo": "sympy", "path": "sympy/polys/appellseqs.py", "file_name": "appellseqs.py", "fun_name": "genocchi_poly", "commit_message": "Custom Appell sequence functions and a doctest", "code": "def genocchi_poly(n, x=None, polys=False):\n \n if n < 0:\n raise ValueError(\"Cannot generate Genocchi polynomial of degree %s\" % (n-1))\n poly = DMP(dup_genocchi(int(n), ZZ), ZZ)\n if x is not None:\n poly = Poly.new(poly, x)\n else:\n poly = PurePoly.new(poly, Dummy('x'))\n return poly if polys else poly.as_expr()\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 82, "n_words": 43, "vocab_size": 36, "complexity": 4, "nloc": 9, "token_counts": 87, "n_ast_nodes": 139, "n_identifiers": 15, "random_cut": "def genocchi_poly(n, x=None, polys=False):\n \n if n < 0:\n raise ValueError(\"Cannot generate Genocchi polynomial of degree %s\" % (n-1))\n poly = DMP(dup_genocchi(int(n), ZZ), ", "d_id": 49318, "documentation": { "docstring": "Generates the Genocchi polynomial `\\operatorname{G}_n(x)`.\n\n `\\operatorname{G}_n(x)` is twice the difference between the plain and\n central Bernoulli polynomials, so has degree `n-1`:\n\n .. math :: \\operatorname{G}_n(x) = 2 (\\operatorname{B}_n(x) -\n \\operatorname{B}_n^c(x))\n\n The factor of 2 in the definition endows `\\operatorname{G}_n(x)` with\n integer coefficients.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial plus one.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "n_words": 70, "vocab_size": 58, "n_whitespaces": 128, "language": "en" } }, { "id": 119314, "commit_id": "e085370ec4137cf0f73c5163cb664bc4e1c46082", "repo": "jax", "path": "jax/_src/third_party/scipy/signal_helper.py", "file_name": "signal_helper.py", "fun_name": "_triage_segments", "commit_message": "Add some functions for spectral analysis.\n\nThis commit adds \"stft\", \"csd\", and \"welch\" functions in scipy.signal.", "code": "def _triage_segments(window, nperseg, input_length):\n \n # parse window; if array like, then set nperseg = win.shape\n if isinstance(window, (str, tuple)):\n # if nperseg not specified\n if nperseg is None:\n nperseg = 256 # then change to default\n if nperseg > input_length:\n warnings.warn(f'nperseg = {nperseg} is greater than input length '\n f' = {input_length}, using nperseg = {nperseg}')\n nperseg = input_length\n win = jnp.array(osp_signal.get_window(window, nperseg))\n else:\n win = jnp.asarray(window)\n if len(win.shape) != 1:\n raise ValueError('window must be 1-D')\n if input_length < win.shape[-1]:\n raise ValueError('window is longer than input signal')\n if nperseg is None:\n nperseg = win.shape[0]\n elif nperseg is not None:\n if nperseg != win.shape[0]:\n raise ValueError(\"value specified for nperseg is different\"\n \" from length of window\")\n return win, nperseg\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 237, "n_words": 118, "vocab_size": 69, "complexity": 9, "nloc": 22, "token_counts": 142, "n_ast_nodes": 248, "n_identifiers": 18, "random_cut": "def _triage_segments(window, nperseg, input_length):\n \n # parse window; if array like, then set nperseg = win.shape\n if isinstance(window, (str, tuple)):\n # if nperseg not specified\n if nperseg is None:\n nperseg = 256 # then change to default\n if nperseg > input_length:\n 
warnings.warn(f'nperseg = {nperseg} is greater than input length '\n f' = {input_length}, using nperseg = {nperseg}')\n nperseg = input_length\n win = jnp.array(osp_signal.get_window(window, nperseg))\n else:\n win = jnp.asarray(window)\n if len(win.shape) != 1:\n raise Va", "d_id": 26585, "documentation": { "docstring": "\n Parses window and nperseg arguments for spectrogram and _spectral_helper.\n This is a helper function, not meant to be called externally.\n Parameters\n ----------\n window : string, tuple, or ndarray\n If window is specified by a string or tuple and nperseg is not\n specified, nperseg is set to the default of 256 and returns a window of\n that length.\n If instead the window is array_like and nperseg is not specified, then\n nperseg is set to the length of the window. A ValueError is raised if\n the user supplies both an array_like window and a value for nperseg but\n nperseg does not equal the length of the window.\n nperseg : int\n Length of each segment\n input_length: int\n Length of input signal, i.e. x.shape[-1]. Used to test for errors.\n Returns\n -------\n win : ndarray\n window. If function was called with string or tuple than this will hold\n the actual array used as a window.\n nperseg : int\n Length of each segment. If window is str or tuple, nperseg is set to\n 256. If window is array_like, nperseg is set to the length of the\n 6\n window.\n ", "n_words": 182, "vocab_size": 88, "n_whitespaces": 270, "language": "en" } }, { "id": 34811, "commit_id": "44b21f117bcf71e3d88a11c3523c94b27949fdbf", "repo": "transformers", "path": "src/transformers/modeling_utils.py", "file_name": "modeling_utils.py", "fun_name": "register_for_auto_class", "commit_message": "Save code of registered custom models (#15379)\n\n* Allow dynamic modules to use relative imports\r\n\r\n* Work for configs\r\n\r\n* Fix last merge conflict\r\n\r\n* Save code of registered custom objects\r\n\r\n* Map strings to strings\r\n\r\n* Fix test\r\n\r\n* Add tokenizer\r\n\r\n* Rework tests\r\n\r\n* Tests\r\n\r\n* Ignore fixtures py files for tests\r\n\r\n* Tokenizer test + fix collection\r\n\r\n* With full path\r\n\r\n* Rework integration\r\n\r\n* Fix typo\r\n\r\n* Remove changes in conftest\r\n\r\n* Test for tokenizers\r\n\r\n* Add documentation\r\n\r\n* Update docs/source/custom_models.mdx\r\n\r\nCo-authored-by: Lysandre Debut \r\n\r\n* Add file structure and file content\r\n\r\n* Add more doc\r\n\r\n* Style\r\n\r\n* Update docs/source/custom_models.mdx\r\n\r\nCo-authored-by: Suraj Patil \r\n\r\n* Address review comments\r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Suraj Patil ", "code": "def register_for_auto_class(cls, auto_class=\"AutoModel\"):\n \n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class\n\n\n# To update the docstring, we need to copy the method, otherwise we change the original docstring.\nPreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub)\nPreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format(\n object=\"model\", object_class=\"AutoModel\", object_files=\"model checkpoint\"\n)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 113, "n_words": 57, "vocab_size": 47, "complexity": 3, "nloc": 7, "token_counts": 
52, "n_ast_nodes": 150, "n_identifiers": 21, "random_cut": "def register_for_auto_class(cls, auto_class=\"AutoModel\"):\n \n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class\n\n\n# To update the docstring, we need to copy the method, otherwise we change the original docstring.\nPreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub)\nPreTrainedModel.push_to_hub.__doc__ = PreTra", "d_id": 6345, "documentation": { "docstring": "\n Register this class with a given auto class. This should only be used for custom models as the ones in the\n library are already mapped with an auto class.\n\n Args:\n auto_class (`str` or `type`, *optional*, defaults to `\"AutoModel\"`):\n The auto class to register this new model with.\n ", "n_words": 47, "vocab_size": 39, "n_whitespaces": 102, "language": "en" } }, { "id": 336999, "commit_id": "4d1cce2fd01056515f0f353322a231164a4a5c5d", "repo": "diffusers", "path": "src/diffusers/utils/import_utils.py", "file_name": "import_utils.py", "fun_name": "is_accelerate_available", "commit_message": "add accelerate to load models with smaller memory footprint (#361)\n\n* add accelerate to load models with smaller memory footprint\r\n\r\n* remove low_cpu_mem_usage as it is reduntant\r\n\r\n* move accelerate init weights context to modelling utils\r\n\r\n* add test to ensure results are the same when loading with accelerate\r\n\r\n* add tests to ensure ram usage gets lower when using accelerate\r\n\r\n* move accelerate logic to single snippet under modelling utils and remove it from configuration utils\r\n\r\n* format code using to pass quality check\r\n\r\n* fix imports with isor\r\n\r\n* add accelerate to test extra deps\r\n\r\n* only import accelerate if device_map is set to auto\r\n\r\n* move accelerate availability check to diffusers import utils\r\n\r\n* format code\r\n\r\nCo-authored-by: Patrick von Platen ", "code": "def is_accelerate_available():\n return _accelerate_available\n\n\n# docstyle-ignore\nFLAX_IMPORT_ERROR = \n\n# docstyle-ignore\nINFLECT_IMPORT_ERROR = \n\n# docstyle-ignore\nPYTORCH_IMPORT_ERROR = \n\n# docstyle-ignore\nONNX_IMPORT_ERROR = \n\n# docstyle-ignore\nSCIPY_IMPORT_ERROR = \n\n# docstyle-ignore\nTENSORFLOW_IMPORT_ERROR = \n\n# docstyle-ignore\nTRANSFORMERS_IMPORT_ERROR = \n\n# docstyle-ignore\nUNIDECODE_IMPORT_ERROR = \n\n\nBACKENDS_MAPPING = OrderedDict(\n [\n (\"flax\", (is_flax_available, FLAX_IMPORT_ERROR)),\n (\"inflect\", (is_inflect_available, INFLECT_IMPORT_ERROR)),\n (\"onnx\", (is_onnx_available, ONNX_IMPORT_ERROR)),\n (\"scipy\", (is_scipy_available, SCIPY_IMPORT_ERROR)),\n (\"tf\", (is_tf_available, TENSORFLOW_IMPORT_ERROR)),\n (\"torch\", (is_torch_available, PYTORCH_IMPORT_ERROR)),\n (\"transformers\", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)),\n (\"unidecode\", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)),\n ]\n)\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 120, "n_words": 66, "vocab_size": 44, "complexity": 1, "nloc": 2, "token_counts": 6, "n_ast_nodes": 199, "n_identifiers": 20, "random_cut": "def is_accelerate_available():\n return _accelerate_available\n\n\n# docstyle-ignore\nFLAX_IMPORT_ERROR = \n\n# docstyle-ignore\nINFLECT_IMPORT_ERROR = \n\n# 
docstyle-ignore\nPYTORCH_IMPORT_ERROR = \n\n# docstyle-ignore\nONNX_IMPORT_ERROR = \n\n# docstyle-ignore\nSCIPY_IMPORT_ERROR = \n\n# docstyle-ignore\nTENSORFLOW_IMPORT_ERROR = \n\n# docstyle-ignore\nTRANSFORMERS_IMPORT_ERROR = \n\n# docstyle-ignore\nUNIDECODE_IMP", "d_id": 120941, "documentation": { "docstring": "\n{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://github.com/google/flax and follow the ones that match your environment.\n\n{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install\ninflect`\n\n{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.\n\n{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip\ninstall onnxruntime`\n\n{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install\nscipy`\n\n{0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://www.tensorflow.org/install and follow the ones that match your environment.\n\n{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip\ninstall transformers`\n\n{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install\nUnidecode`\n", "n_words": 197, "vocab_size": 44, "n_whitespaces": 181, "language": "en" } }, { "id": 108330, "commit_id": "c0cb163c627fe52e38311954226e3349f34f6914", "repo": "matplotlib", "path": "lib/matplotlib/text.py", "file_name": "text.py", "fun_name": "set_horizontalalignment", "commit_message": "Document text alignment\n\nCloses #21571.", "code": "def set_horizontalalignment(self, align):\n \n _api.check_in_list(['center', 'right', 'left'], align=align)\n self._horizontalalignment = align\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "def set_horizontalalignment(self, align):\n \n _api.check_i", "d_id": 23143, "documentation": { "docstring": "\n Set the horizontal alignment relative to the anchor point.\n\n See also :doc:`/gallery/text_labels_and_annotations/text_alignment`.\n\n Parameters\n ----------\n align : {'left', 'center', 'right'}\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 62, "language": "en" } }, { "id": 220817, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/tasks.py", "file_name": "tasks.py", "fun_name": "_wrap_awaitable", "commit_message": "add python 3.10.4 for windows", "code": "def _wrap_awaitable(awaitable):\n \n return (yield from awaitable.__await__())\n\n_wrap_awaitable._is_coroutine = _is_coroutine\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 14, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 37, "n_identifiers": 4, "random_cut": "def _wrap_awaitable(awaitable):\n \n return (yield from 
awaitable.__await__())\n\n_wrap_awaitable._is_coroutine = _is_coroutine\n\n", "d_id": 56128, "documentation": { "docstring": "Helper for asyncio.ensure_future().\n\n Wraps awaitable (an object with __await__) into a coroutine\n that will later be wrapped in a Task by ensure_future().\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 31, "language": "en" } }, { "id": 148280, "commit_id": "0e6c042e29cbbe429d81c9c1af3c75c261f00980", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/util.py", "file_name": "util.py", "fun_name": "normalize_file", "commit_message": "[Bugfix] fix invalid excluding of Black (#24042)\n\n- We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options\r\n- Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.", "code": "def normalize_file(file, separators=None):\n\t\n\t# Normalize path separators.\n\tif separators is None:\n\t\tseparators = NORMALIZE_PATH_SEPS\n\n\t# Convert path object to string.\n\tnorm_file = str(file)\n\n\tfor sep in separators:\n\t\tnorm_file = norm_file.replace(sep, posixpath.sep)\n\n\t# Remove current directory prefix.\n\tif norm_file.startswith('./'):\n\t\tnorm_file = norm_file[2:]\n\n\treturn norm_file\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 31, "n_words": 43, "vocab_size": 32, "complexity": 4, "nloc": 9, "token_counts": 58, "n_ast_nodes": 98, "n_identifiers": 10, "random_cut": "def normalize_file(file, separators=None):\n\t\n\t# Normalize path separators.\n\t", "d_id": 34216, "documentation": { "docstring": "\n\tNormalizes the file path to use the POSIX path separator (i.e., ``'/'``).\n\n\t*file* (:class:`str` or :class:`pathlib.PurePath`) is the file path.\n\n\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t:data:`None`) optionally contains the path separators to normalize.\n\tThis does not need to include the POSIX path separator (``'/'``), but\n\tincluding it will not affect the results. Default is :data:`None` for\n\t:data:`NORMALIZE_PATH_SEPS`. 
To prevent normalization, pass an empty\n\tcontainer (e.g., an empty tuple ``()``).\n\n\tReturns the normalized file path (:class:`str`).\n\t", "n_words": 75, "vocab_size": 54, "n_whitespaces": 66, "language": "en" } }, { "id": 250235, "commit_id": "e2a1adbf5d11288f2134ced1f84c6ffdd91a9357", "repo": "synapse", "path": "synapse/types/state.py", "file_name": "state.py", "fun_name": "wildcard_types", "commit_message": "Allow selecting \"prejoin\" events by state keys (#14642)\n\n* Declare new config\r\n\r\n* Parse new config\r\n\r\n* Read new config\r\n\r\n* Don't use trial/our TestCase where it's not needed\r\n\r\nBefore:\r\n\r\n```\r\n$ time trial tests/events/test_utils.py > /dev/null\r\n\r\nreal\t0m2.277s\r\nuser\t0m2.186s\r\nsys\t0m0.083s\r\n```\r\n\r\nAfter:\r\n```\r\n$ time trial tests/events/test_utils.py > /dev/null\r\n\r\nreal\t0m0.566s\r\nuser\t0m0.508s\r\nsys\t0m0.056s\r\n```\r\n\r\n* Helper to upsert to event fields\r\n\r\nwithout exceeding size limits.\r\n\r\n* Use helper when adding invite/knock state\r\n\r\nNow that we allow admins to include events in prejoin room state with\r\narbitrary state keys, be a good Matrix citizen and ensure they don't\r\naccidentally create an oversized event.\r\n\r\n* Changelog\r\n\r\n* Move StateFilter tests\r\n\r\nshould have done this in #14668\r\n\r\n* Add extra methods to StateFilter\r\n\r\n* Use StateFilter\r\n\r\n* Ensure test file enforces typed defs; alphabetise\r\n\r\n* Workaround surprising get_current_state_ids\r\n\r\n* Whoops, fix mypy", "code": "def wildcard_types(self) -> List[str]:\n \n return [t for t, state_keys in self.types.items() if state_keys is None]\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 15, "vocab_size": 14, "complexity": 3, "nloc": 8, "token_counts": 31, "n_ast_nodes": 50, "n_identifiers": 8, "random_cut": "def wildcard_types(self) -> List[str]:\n \n return [t for t, state_keys in self.types.items() if state_keys is No", "d_id": 73332, "documentation": { "docstring": "Returns a list of event types which require us to fetch all state keys.\n This will be empty unless `has_wildcards` returns True.\n\n Returns:\n A list of event types.\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 60, "language": "en" } }, { "id": 115389, "commit_id": "0e22eac78f7dd836a0e16b343d1bd02d039a3b6b", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/snowflake_handler/snowflake_handler.py", "file_name": "snowflake_handler.py", "fun_name": "get_columns", "commit_message": "Add snowflake connector", "code": "def get_columns(self, table_name) -> Response:\n \n q = f\"SHOW COLUMNS IN TABLE {table_name};\"\n result = self.native_query(q)\n return result\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 45, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 24, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def get_columns(self, table_name) -> Response:\n \n ", "d_id": 25438, "documentation": { "docstring": "\n List the columns in the tabels for which the user have access\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 27, "language": "en" } }, { "id": 65562, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py", "file_name": "supplier_scorecard_variable.py", 
"fun_name": "get_on_time_shipments", "commit_message": "style: format code with black", "code": "def get_on_time_shipments(scorecard):\n\t\n\n\tsupplier = frappe.get_doc(\"Supplier\", scorecard.supplier)\n\n\t# Look up all PO Items with delivery dates between our dates\n\ttotal_items_delivered_on_time = frappe.db.sql(\n\t\t,\n\t\t{\"supplier\": supplier.name, \"start_date\": scorecard.start_date, \"end_date\": scorecard.end_date},\n\t\tas_dict=0,\n\t)[0][0]\n\n\tif not total_items_delivered_on_time:\n\t\ttotal_items_delivered_on_time = 0\n\treturn total_items_delivered_on_time\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 27, "n_words": 38, "vocab_size": 33, "complexity": 2, "nloc": 26, "token_counts": 68, "n_ast_nodes": 114, "n_identifiers": 12, "random_cut": "def get_on_time_shipments(scorecard):\n\t\n\n\tsupplier = frappe.get_d", "d_id": 13939, "documentation": { "docstring": "Gets the number of late shipments (counting each item) in the period (based on Purchase Receipts vs POs)\n\t\t\tSELECT\n\t\t\t\tCOUNT(pr_item.qty)\n\t\t\tFROM\n\t\t\t\t`tabPurchase Order Item` po_item,\n\t\t\t\t`tabPurchase Receipt Item` pr_item,\n\t\t\t\t`tabPurchase Order` po,\n\t\t\t\t`tabPurchase Receipt` pr\n\t\t\tWHERE\n\t\t\t\tpo.supplier = %(supplier)s\n\t\t\t\tAND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s\n\t\t\t\tAND po_item.schedule_date <= pr.posting_date\n\t\t\t\tAND po_item.qty = pr_item.qty\n\t\t\t\tAND pr_item.docstatus = 1\n\t\t\t\tAND pr_item.purchase_order_item = po_item.name\n\t\t\t\tAND po_item.parent = po.name\n\t\t\t\tAND pr_item.parent = pr.name", "n_words": 69, "vocab_size": 51, "n_whitespaces": 52, "language": "en" } }, { "id": 85099, "commit_id": "4e4689949438735622bdf669f05d218c671e7e01", "repo": "zulip", "path": "zerver/webhooks/bitbucket3/tests.py", "file_name": "tests.py", "fun_name": "test_commit_comment_deleted", "commit_message": "webhooks: Pick a more reasonable length for short sha.\n\n7 characters are not enough for large projects, so we change\nit to reasonably longer. As an example, The Linux kernel needs\nat least 11 characters of sha in its shortened form to identify\na revision. We pick 11 so it should work for most of the projects.\n\nSigned-off-by: Zixuan James Li ", "code": "def test_commit_comment_deleted(self) -> None:\n expected_message = \n self.check_webhook(\"commit_comment_deleted\", TOPIC, expected_message)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 38, "n_identifiers": 5, "random_cut": "def test_commit_comment_deleted(self) -> None:\n expected_message = \n self.check_webhook(\"commit_comment_deleted\", TOPIC, expected_message)\n", "d_id": 17931, "documentation": { "docstring": "[hypro999](http://139.59.64.214:7990/users/hypro999) deleted their comment on [508d1b67f1f](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\\n~~~ quote\\n~~Just an arbitrary comment on a commit. 
Nothing to see here...~~\\n~~~", "n_words": 17, "vocab_size": 15, "n_whitespaces": 16, "language": "en" } }, { "id": 58372, "commit_id": "8a4560e237b90a7b64c6bb77b6cb3ee9a6648e33", "repo": "prefect", "path": "src/prefect/agent.py", "file_name": "agent.py", "fun_name": "get_work_queues", "commit_message": "Agents support multiple queues", "code": "async def get_work_queues(self) -> Optional[UUID]:\n \n work_queues = []\n for name in self.work_queues:\n try:\n # support IDs and names\n if isinstance(name, UUID):\n work_queue = await self.client.read_work_queue(id=name)\n else:\n work_queue = await self.client.read_work_queue_by_name(name)\n except ObjectNotFound:\n work_queue = await self.client.create_work_queue(\n name=name, return_id=False\n )\n work_queues.append(work_queue)\n return work_queues\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 235, "n_words": 42, "vocab_size": 34, "complexity": 4, "nloc": 17, "token_counts": 86, "n_ast_nodes": 142, "n_identifiers": 16, "random_cut": "async def get_work_queues(self) -> Optional[UUID]:\n \n work_queues = []\n for name in self.work_queues:\n try:\n # support IDs and names\n if isinstance(name, UUID):\n work_queue = await self.client.read_work_queue(id=name)\n else:\n work_queue = await self.client.read_work_queue_by_name(name)\n except ObjectNotFound:\n work_queue = await self.client.create_work_queue(\n name=name, return_id=False\n ", "d_id": 11760, "documentation": { "docstring": "\n Loads the work queue objects corresponding to the agent's target work queues. If any of them don't exist, they are created.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 36, "language": "en" } }, { "id": 3356, "commit_id": "f83eca58eaf2129d21b5796a301732ab22675130", "repo": "airbyte", "path": "airbyte-cdk/python/unit_tests/sources/test_abstract_source.py", "file_name": "test_abstract_source.py", "fun_name": "test_read_nonexistent_stream_raises_exception", "commit_message": "CDK: Fix typing errors (#9037)\n\n* fix typing, drop AirbyteLogger\r\n\r\n* format\r\n\r\n* bump the version\r\n\r\n* use logger instead of fixture logger\r\n\r\nCo-authored-by: Eugene Kulak \r\nCo-authored-by: auganbay ", "code": "def test_read_nonexistent_stream_raises_exception(mocker):\n \n s1 = MockStream(name=\"s1\")\n s2 = MockStream(name=\"this_stream_doesnt_exist_in_the_source\")\n\n mocker.patch.object(MockStream, \"get_json_schema\", return_value={})\n\n src = MockSource(streams=[s1])\n catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s2, SyncMode.full_refresh)])\n with pytest.raises(KeyError):\n list(src.read(logger, {}, catalog))\n\n\nGLOBAL_EMITTED_AT = 1\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 53, "n_words": 26, "vocab_size": 22, "complexity": 1, "nloc": 8, "token_counts": 86, "n_ast_nodes": 150, "n_identifiers": 24, "random_cut": "def test_read_nonexistent_stream_raises_exception(mocker):\n \n s1 = MockStream(name=\"s1\")\n s2 = MockStream(name=\"this_stream_doesnt_exist_in_the_source\")\n\n mocker.patch.object(MockStream, \"get_json_schema\", return_value={})\n\n ", "d_id": 458, "documentation": { "docstring": "Tests that attempting to sync a stream which the source does not return from the `streams` method raises an exception", "n_words": 20, "vocab_size": 19, "n_whitespaces": 19, "language": "en" } }, { "id": 62021, "commit_id": 
"f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py", "file_name": "locators.py", "fun_name": "_should_queue", "commit_message": "upd; format", "code": "def _should_queue(self, link, referrer, rel):\n \n scheme, netloc, path, _, _, _ = urlparse(link)\n if path.endswith(self.source_extensions + self.binary_extensions +\n self.excluded_extensions):\n result = False\n elif self.skip_externals and not link.startswith(self.base_url):\n result = False\n elif not referrer.startswith(self.base_url):\n result = False\n elif rel not in ('homepage', 'download'):\n result = False\n elif scheme not in ('http', 'https', 'ftp'):\n result = False\n elif self._is_platform_dependent(link):\n result = False\n else:\n host = netloc.split(':', 1)[0]\n if host.lower() == 'localhost':\n result = False\n else:\n result = True\n logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,\n referrer, result)\n return result\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 339, "n_words": 89, "vocab_size": 51, "complexity": 9, "nloc": 24, "token_counts": 168, "n_ast_nodes": 272, "n_identifiers": 24, "random_cut": "def _should_queue(self, link, referrer, rel):\n \n scheme, netloc, path, _, _, _ = urlparse(link)\n if path.endswith(self.source_extensions + self.binary_extensions +\n ", "d_id": 12831, "documentation": { "docstring": "\n Determine whether a link URL from a referring page and with a\n particular \"rel\" attribute should be queued for scraping.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 42, "language": "en" } }, { "id": 112790, "commit_id": "f60d3d5e294510d99c65ba3292822cbb922adbf8", "repo": "nni", "path": "nni/runtime/tuner_command_channel/legacy.py", "file_name": "legacy.py", "fun_name": "receive", "commit_message": "WebSocket (step 1) - Python client (#4806)", "code": "def receive():\n \n header = _in_file.read(16)\n _logger.debug('Received command, header: [%s]', header)\n if header is None or len(header) < 16:\n # Pipe EOF encountered\n _logger.debug('Pipe EOF encountered')\n return None, None\n length = int(header[2:])\n data = _in_file.read(length)\n command = CommandType(header[:2])\n data = data.decode('utf8')\n _logger.debug('Received command, data: [%s]', data)\n return command, data\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 99, "n_words": 48, "vocab_size": 34, "complexity": 3, "nloc": 12, "token_counts": 91, "n_ast_nodes": 157, "n_identifiers": 13, "random_cut": "def receive():\n \n header = _in_file.read(16)\n _logger.debug('Received command, header: [%s]', header)\n if header is None or len(header) < 16:\n # Pipe EOF encountered\n _logger.", "d_id": 24762, "documentation": { "docstring": "Receive a command from Training Service.\n Returns a tuple of command (CommandType) and payload (str)\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 21, "language": "en" } }, { "id": 264446, "commit_id": "7c105019d8ae9205051c302e7499b33a455f9176", "repo": "netbox", "path": "netbox/utilities/templatetags/builtins/filters.py", "file_name": "filters.py", "fun_name": "bettertitle", "commit_message": "Closes #8600: Document built-in template tags & filters", "code": "def bettertitle(value):\n \n return ' '.join([w[0].upper() + w[1:] for w in value.split()])\n\n\n@register.filter()", 
"url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@register.filter()", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 17, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 2, "token_counts": 36, "n_ast_nodes": 72, "n_identifiers": 8, "random_cut": "def bettertitle(value):\n \n return ' '.join([w[0].upper() + w[1:] for w in value.s", "d_id": 77732, "documentation": { "docstring": "\n Alternative to the builtin title(). Ensures that the first letter of each word is uppercase but retains the\n original case of all others.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 33, "language": "en" } }, { "id": 60201, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py", "file_name": "pascal_multilabel_datalayers.py", "fun_name": "load_pascal_annotation", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def load_pascal_annotation(index, pascal_root):\n \n classes = ('__background__', # always index 0\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n class_to_ind = dict(zip(classes, xrange(21)))\n\n filename = osp.join(pascal_root, 'Annotations', index + '.xml')\n # print 'Loading: {}'.format(filename)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 161, "n_words": 45, "vocab_size": 41, "complexity": 2, "nloc": 33, "token_counts": 317, "n_ast_nodes": 153, "n_identifiers": 11, "random_cut": "def load_pascal_annotation(index, pascal_root):\n \n classes = ('__background__', # always index 0\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'trai", "d_id": 12009, "documentation": { "docstring": "\n This code is borrowed from Ross Girshick's FAST-RCNN code\n (https://github.com/rbgirshick/fast-rcnn).\n It parses the PASCAL .xml metadata files.\n See publication for further details: (http://arxiv.org/abs/1504.08083).\n\n Thanks Ross!\n\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 44, "language": "en" } }, { "id": 155189, "commit_id": "193505fdf0c984743397ba3df56262f30aee13a8", "repo": "modin", "path": "modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py", "file_name": "partition.py", "fun_name": "get", "commit_message": "FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)\n\nSigned-off-by: Igoshev, Iaroslav ", "code": "def get(self):\n \n logger = get_logger()\n logger.debug(f\"ENTER::Partition.get::{self._identity}\")\n if len(self.call_queue):\n self.drain_call_queue()\n result = UnidistWrapper.materialize(self._data)\n logger.debug(f\"EXIT::Partition.get::{self._identity}\")\n return result\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 15, "vocab_size": 13, "complexity": 2, "nloc": 8, "token_counts": 50, "n_ast_nodes": 101, "n_identifiers": 13, "random_cut": "def get(self):\n \n logger = get_logger()\n logger.debug(f\"ENTER::Partition.get::{self._identity}\")\n if len(self.call_queue):\n ", "d_id": 
36280, "documentation": { "docstring": "\n Get the object wrapped by this partition out of the object store.\n\n Returns\n -------\n pandas.DataFrame\n The object from the object store.\n ", "n_words": 21, "vocab_size": 15, "n_whitespaces": 68, "language": "en" } }, { "id": 207491, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/test_actions.py", "file_name": "test_actions.py", "fun_name": "test_custom_function_action_no_perm_response", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_custom_function_action_no_perm_response(self):\n \n action_data = {\n ACTION_CHECKBOX_NAME: [self.s1.pk],\n \"action\": \"no_perm\",\n \"index\": 0,\n }\n response = self.client.post(\n reverse(\"admin:admin_views_externalsubscriber_changelist\"), action_data\n )\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.content, b\"No permission to perform this action\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 120, "n_words": 27, "vocab_size": 25, "complexity": 1, "nloc": 11, "token_counts": 64, "n_ast_nodes": 107, "n_identifiers": 13, "random_cut": "def test_custom_function_action_no_perm_response(self):\n \n action_data = {\n ACTION_CHECKBOX_NAME: [self.s1.pk],\n \"action\": \"no_perm\",\n \"index\": 0,\n }\n response = self.client.post(\n reverse(\"admin:admin_views_externalsubscriber_changelist", "d_id": 51981, "documentation": { "docstring": "A custom action may returns an HttpResponse with a 403 code.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 196391, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/matrices.py", "file_name": "matrices.py", "fun_name": "limit", "commit_message": "Moved imports to higher level", "code": "def limit(self, *args):\n \n return self.applyfunc(lambda x: x.limit(*args))\n\n\n# https://github.com/sympy/sympy/pull/12854", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 22, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 44, "n_identifiers": 5, "random_cut": "def limit(self, *args):\n ", "d_id": 47891, "documentation": { "docstring": "Calculate the limit of each element in the matrix.\n ``args`` will be passed to the ``limit`` function.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> from sympy.abc import x, y\n >>> M = Matrix([[x, y], [1, 0]])\n >>> M.limit(x, 2)\n Matrix([\n [2, y],\n [1, 0]])\n\n See Also\n ========\n\n integrate\n diff\n ", "n_words": 50, "vocab_size": 39, "n_whitespaces": 155, "language": "en" } }, { "id": 290875, "commit_id": "b6586d5c34bf7ea5c30fbb1b62c438078ea14f39", "repo": "core", "path": "tests/components/number/test_init.py", "file_name": "test_init.py", "fun_name": "test_device_classes_aligned", "commit_message": "Align number and sensor device classes (#81909)\n\n* Align number and sensor device classes\r\n\r\n* Add tests\r\n\r\n* Tweak tests", "code": "def test_device_classes_aligned():\n \n\n non_numeric_device_classes = {\n SensorDeviceClass.DATE,\n SensorDeviceClass.DURATION,\n SensorDeviceClass.TIMESTAMP,\n }\n\n for device_class in SensorDeviceClass:\n if device_class in non_numeric_device_classes:\n continue\n\n assert hasattr(NumberDeviceClass, device_class.name)\n assert getattr(NumberDeviceClass, 
device_class.name).value == device_class.value\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 26, "vocab_size": 23, "complexity": 3, "nloc": 11, "token_counts": 56, "n_ast_nodes": 86, "n_identifiers": 12, "random_cut": "def test_device_classes_aligned():\n \n\n non_numeric_device_classes = {\n SensorDeviceClass.DATE,\n SensorDeviceClass.DURATION,\n SensorDeviceClass.TIMESTAMP,\n }\n\n for ", "d_id": 89988, "documentation": { "docstring": "Make sure all sensor device classes are also available in NumberDeviceClass.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 30539, "commit_id": "5d0cc0a092f93640e1d83baaf1c738768481d208", "repo": "OCRmyPDF", "path": "tests/test_main.py", "file_name": "test_main.py", "fun_name": "valid_tess_config", "commit_message": "tests: Extract some test fixtures for better clarity", "code": "def valid_tess_config(outdir):\n cfg_file = outdir / 'test.cfg'\n with cfg_file.open('w') as f:\n f.write(\n \n )\n yield cfg_file\n\n", "url": "https://github.com/ocrmypdf/OCRmyPDF.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 11, "token_counts": 28, "n_ast_nodes": 57, "n_identifiers": 6, "random_cut": "def valid_tess_config(outdir):\n cfg_file = outdir / 'test.cfg'\n ", "d_id": 5628, "documentation": { "docstring": "\\\nload_system_dawg 0\nlanguage_model_penalty_non_dict_word 0\nlanguage_model_penalty_non_freq_dict_word 0\n", "n_words": 7, "vocab_size": 5, "n_whitespaces": 3, "language": "en" } }, { "id": 204883, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/operations.py", "file_name": "operations.py", "fun_name": "year_lookup_bounds_for_datetime_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def year_lookup_bounds_for_datetime_field(self, value, iso_year=False):\n \n if iso_year:\n first = datetime.datetime.fromisocalendar(value, 1, 1)\n second = datetime.datetime.fromisocalendar(\n value + 1, 1, 1\n ) - datetime.timedelta(microseconds=1)\n else:\n first = datetime.datetime(value, 1, 1)\n second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)\n if settings.USE_TZ:\n tz = timezone.get_current_timezone()\n first = timezone.make_aware(first, tz)\n second = timezone.make_aware(second, tz)\n first = self.adapt_datetimefield_value(first)\n second = self.adapt_datetimefield_value(second)\n return [first, second]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 211, "n_words": 59, "vocab_size": 37, "complexity": 3, "nloc": 16, "token_counts": 142, "n_ast_nodes": 212, "n_identifiers": 17, "random_cut": "def year_lookup_bounds_for_datetime_field(self, value, iso_year=False):\n \n if iso_year:\n first = datetime.datetime.fromisocalendar(value, 1, 1)\n second = datetime.datetime.fromisocalendar(\n ", "d_id": 50955, "documentation": { "docstring": "\n Return a two-elements list with the lower and upper bound to be used\n with a BETWEEN operator to query a DateTimeField value using a year\n lookup.\n\n `value` is an int, containing the looked-up year.\n If `iso_year` is True, return bounds for ISO-8601 week-numbering years.\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 87, "language": "en" } }, { "id": 199694, 
"commit_id": "d1d46df73ebaad94089847558d00a8b7269f554d", "repo": "sympy", "path": "sympy/polys/orthopolys.py", "file_name": "orthopolys.py", "fun_name": "gegenbauer_poly", "commit_message": "Run orthopolys and appellseqs through a common interface\n\nIncluding unifying the two Chebyshev generators into one function.\nThere are also two kinds of Hermite polynomials, and they too share the\nsame recurrence, but the second type He_n(x) (aka the probabilist,\nreduced or small polynomials) will not be added here.", "code": "def gegenbauer_poly(n, a, x=None, polys=False):\n r\n return named_poly(n, dup_gegenbauer, None, \"Gegenbauer polynomial\", (x, a), polys)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 15, "token_counts": 36, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "def gegenbauer_poly(n, a, x=None, polys=False):\n r\n return named_poly(n, dup_ge", "d_id": 49350, "documentation": { "docstring": "Generates the Gegenbauer polynomial `C_n^{(a)}(x)`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n a\n Decides minimal domain for the list of coefficients.\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "n_words": 40, "vocab_size": 32, "n_whitespaces": 82, "language": "en" } }, { "id": 297864, "commit_id": "cb13418babd21a1e9584978b0c523f1b1e4e1cb0", "repo": "core", "path": "homeassistant/components/homekit_controller/connection.py", "file_name": "connection.py", "fun_name": "async_update", "commit_message": "String formatting and max line length - Part 2 (#84393)", "code": "async def async_update(self, now=None):\n \n if not self.pollable_characteristics:\n self.async_update_available_state()\n _LOGGER.debug(\n \"HomeKit connection not polling any characteristics: %s\", self.unique_id\n )\n return\n\n if self._polling_lock.locked():\n if not self._polling_lock_warned:\n _LOGGER.warning(\n (\n \"HomeKit controller update skipped as previous poll still in\"\n \" flight: %s\"\n ),\n self.unique_id,\n )\n self._polling_lock_warned = True\n return\n\n if self._polling_lock_warned:\n _LOGGER.info(\n (\n \"HomeKit controller no longer detecting back pressure - not\"\n \" skipping poll: %s\"\n ),\n self.unique_id,\n )\n self._polling_lock_warned = False\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 441, "n_words": 68, "vocab_size": 49, "complexity": 8, "nloc": 44, "token_counts": 177, "n_ast_nodes": 158, "n_identifiers": 13, "random_cut": "async def async_update(self, now=None):\n \n if not self.pollable_characteristics:\n self.async_update_available_state()\n _LOGGER.debug(\n \"HomeKit connection not polling any characteristics: %s\", self.unique_id\n )\n return\n\n if self._polling_lock.locked():\n if not self._polling_lock_warned:\n _LOGGER.warning(\n (\n \"HomeKit controller update skipped as previous poll still in\"\n \" flight: %s\"\n ),\n self.unique_id,\n )\n self._polling_lock_warned = True\n return\n\n if self._polling_lock_warned:\n _LOGGER.info(\n (\n \"HomeKit controller no longer detecting back pressure - not\"\n \"", "d_id": 96818, "documentation": { "docstring": "Poll state of all entities attached to this bridge/accessory.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 60892, "commit_id": 
"f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py", "file_name": "lazy_wheel.py", "fun_name": "_merge", "commit_message": "upd; format", "code": "def _merge(self, start, end, left, right):\n # type: (int, int, int, int) -> Iterator[Tuple[int, int]]\n \n lslice, rslice = self._left[left:right], self._right[left:right]\n i = start = min([start]+lslice[:1])\n end = max([end]+rslice[-1:])\n for j, k in zip(lslice, rslice):\n if j > i:\n yield i, j-1\n i = k + 1\n if i <= end:\n yield i, end\n self._left[left:right], self._right[left:right] = [start], [end]\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 162, "n_words": 58, "vocab_size": 43, "complexity": 4, "nloc": 11, "token_counts": 128, "n_ast_nodes": 197, "n_identifiers": 16, "random_cut": "def _merge(self, start, end, left, right):\n # type: (int, int, int, int) -> Iterator[Tuple[int, int]]\n \n lslice, rslice = self._left[left:right], self._right[left:right]\n i = start = min([start]+lslice[:1])\n end = max([end]+rslice[-1:])\n for j, k in zip(lslice, rslice):\n if j > i:\n yield i, j-1\n ", "d_id": 12324, "documentation": { "docstring": "Return an iterator of intervals to be fetched.\n\n Args:\n start (int): Start of needed interval\n end (int): End of needed interval\n left (int): Index of first overlapping downloaded data\n right (int): Index after last overlapping downloaded data\n ", "n_words": 37, "vocab_size": 25, "n_whitespaces": 95, "language": "en" } }, { "id": 266891, "commit_id": "4867ac217ba0164b433d0927488d153e116d175d", "repo": "ansible", "path": "lib/ansible/utils/collection_loader/_collection_finder.py", "file_name": "_collection_finder.py", "fun_name": "is_python_identifier", "commit_message": "Code cleanup for type hinting issues.", "code": "def is_python_identifier(self): # type: (str) -> bool\n \n # Ref: https://stackoverflow.com/a/55802320/595220\n return bool(re.match(_VALID_IDENTIFIER_STRING_REGEX, self))\n\n\nPB_EXTENSIONS = ('.yml', '.yaml')\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 38, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def is_python_identifier(self): # type: (str) -> bool\n \n # Ref: https://stackoverflow.com/a/55802320/5", "d_id": 78643, "documentation": { "docstring": "Determine whether the given string is a Python identifier.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 60901, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py", "file_name": "lazy_wheel.py", "fun_name": "_stream_response", "commit_message": "upd; format", "code": "def _stream_response(self, start, end, base_headers=HEADERS):\n # type: (int, int, Dict[str, str]) -> Response\n \n headers = base_headers.copy()\n headers['Range'] = f'bytes={start}-{end}'\n # TODO: Get range requests to be correctly cached\n headers['Cache-Control'] = 'no-cache'\n return self._session.get(self._url, headers=headers, stream=True)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 84, "n_words": 35, 
"vocab_size": 32, "complexity": 1, "nloc": 5, "token_counts": 53, "n_ast_nodes": 97, "n_identifiers": 12, "random_cut": "def _stream_response(self, start, end, base_headers=HEADERS):\n # type: (int, int, Dict[str, str]) -> Response\n \n headers = base_headers.copy()\n headers['Range'] = f'bytes={start}-{end}'\n ", "d_id": 12333, "documentation": { "docstring": "Return HTTP response to a range request from start to end.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 321142, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/webengine/webenginetab.py", "file_name": "webenginetab.py", "fun_name": "_inject_greasemonkey_scripts", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def _inject_greasemonkey_scripts(self, scripts):\n \n if sip.isdeleted(self._widget):\n return\n\n # Since we are inserting scripts into a per-tab collection,\n # rather than just injecting scripts on page load, we need to\n # make sure we replace existing scripts, not just add new ones.\n # While, taking care not to remove any other scripts that might\n # have been added elsewhere, like the one for stylesheets.\n page_scripts = self._widget.page().scripts()\n self._remove_all_greasemonkey_scripts()\n\n seen_names = set()\n for script in scripts:\n while script.full_name() in seen_names:\n script.dedup_suffix += 1\n seen_names.add(script.full_name())\n\n new_script = QWebEngineScript()\n\n try:\n world = int(script.jsworld)\n if not 0 <= world <= qtutils.MAX_WORLD_ID:\n log.greasemonkey.error(\n f\"script {script.name} has invalid value for '@qute-js-world'\"\n f\": {script.jsworld}, should be between 0 and \"\n f\"{qtutils.MAX_WORLD_ID}\")\n continue\n except ValueError:\n try:\n world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]]\n except KeyError:\n log.greasemonkey.error(\n f\"script {script.name} has invalid value for '@qute-js-world'\"\n f\": {script.jsworld}\")\n continue\n new_script.setWorldId(world)\n\n # Corresponds to \"@run-at document-end\" which is the default according to\n # https://wiki.greasespot.net/Metadata_Block#.40run-at - however,\n # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as\n # default.\n #\n # NOTE that this needs to be done before setSourceCode, so that\n # QtWebEngine's parsing of GreaseMonkey tags will override it if there is a\n # @run-at comment.\n new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)\n\n new_script.setSourceCode(script.code())\n new_script.setName(script.full_name())\n new_script.setRunsOnSubFrames(script.runs_on_sub_frames)\n\n if script.needs_document_end_workaround():\n log.greasemonkey.debug(\n f\"Forcing @run-at document-end for {script.name}\")\n new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)\n\n log.greasemonkey.debug(f'adding script: {new_script.name()}')\n page_scripts.insert(new_script)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 856, "n_words": 203, "vocab_size": 145, "complexity": 8, "nloc": 38, "token_counts": 232, "n_ast_nodes": 453, "n_identifiers": 44, "random_cut": "def _inject_greasemonkey_scripts(self, scripts):\n \n if sip.isdeleted(self._widget):\n return\n\n # Since we are inserting scripts into a per-tab collection,\n # rather than just injecting scripts on page load, we need to\n # make sure we replace existing scripts, not 
just add new ones.\n # While, taking care not to remove any other scripts that might\n # have been added elsewhere, like the one for stylesheets.\n page_scripts = self._widget.page().scripts()\n self._remove_all_greasemonkey_scripts()\n\n seen_names = set()\n for script in scripts:\n while script.full_name() in seen_names:\n script.dedup_suffix += 1\n seen_names.add(script.full_name())\n\n new_script = QWebEngineScript()\n\n try:\n world = int(script.jsworld)\n if not 0 <= world <= qtutils.MAX_WORLD_ID:\n log.greasemonkey.error(\n f\"script {script.name} has invalid value for '@qute-js-world'\"\n f\": {script.jsworld}, should be between 0 and \"\n f\"{qtutils.MAX_WORLD_ID}\")\n continue\n except ValueError:\n try:\n world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]]\n except KeyError:\n log.greasemonkey.error(\n f\"script {script.name} has invalid value for '@qute-js-world'\"\n f\": {script.jsworld}\")\n continue\n new_script.setWorldId(world)\n\n # Corresponds to \"@run-at document-end\" which is the default according to\n # https://wiki.greasespot.net/Metadata_Block#.40run-at - however,\n # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as\n # default.\n #\n # NOTE that this needs to be done before setSourceCode, so that\n # QtWebEngine's pars", "d_id": 117562, "documentation": { "docstring": "Register user JavaScript files with the current tab.\n\n Args:\n scripts: A list of GreasemonkeyScripts.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 39, "language": "en" } }, { "id": 100813, "commit_id": "ff6b0209dd5ad57b81b0aca570df7f39a7119bfb", "repo": "faceswap", "path": "plugins/train/model/_base/model.py", "file_name": "model.py", "fun_name": "config", "commit_message": "Refactoring and TravisCI to Github Actions (#1239)\n\n* refactor training\r\n\r\n* travis to actions", "code": "def config(self) -> dict:\n \n global _CONFIG # pylint: disable=global-statement\n if not _CONFIG:\n model_name = self._config_section\n logger.debug(\"Loading config for: %s\", model_name)\n _CONFIG = Config(model_name, configfile=self._configfile).config_dict\n return _CONFIG\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 88, "n_words": 26, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 43, "n_ast_nodes": 73, "n_identifiers": 12, "random_cut": "def config(self) -> dict:\n \n global _CONFIG # pylint: disable=global-statement\n if not _CONFIG:\n model_name = self._config_secti", "d_id": 20264, "documentation": { "docstring": " dict: The configuration dictionary for current plugin, as set by the user's\n configuration settings. 
", "n_words": 14, "vocab_size": 13, "n_whitespaces": 22, "language": "en" } }, { "id": 66806, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/stock_entry_enhancements.py", "file_name": "stock_entry_enhancements.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"stock_entry\")\n\tif frappe.db.has_column(\"Stock Entry\", \"add_to_transit\"):\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\n\t\tfrappe.reload_doc(\"stock\", \"doctype\", \"warehouse_type\")\n\t\tif not frappe.db.exists(\"Warehouse Type\", \"Transit\"):\n\t\t\tdoc = frappe.new_doc(\"Warehouse Type\")\n\t\t\tdoc.name = \"Transit\"\n\t\t\tdoc.insert()\n\n\t\tfrappe.reload_doc(\"stock\", \"doctype\", \"stock_entry_type\")\n\t\tfrappe.delete_doc_if_exists(\"Stock Entry Type\", \"Send to Warehouse\")\n\t\tfrappe.delete_doc_if_exists(\"Stock Entry Type\", \"Receive at Warehouse\")\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 29, "n_words": 44, "vocab_size": 31, "complexity": 3, "nloc": 26, "token_counts": 109, "n_ast_nodes": 210, "n_identifiers": 12, "random_cut": "def execute():\n\tfrappe.relo", "d_id": 14342, "documentation": { "docstring": "\n UPDATE `tabStock Entry` SET\n stock_entry_type = 'Material Transfer',\n purpose = 'Material Transfer',\n add_to_transit = 1 WHERE stock_entry_type = 'Send to Warehouse'\n UPDATE `tabStock Entry` SET\n stock_entry_type = 'Material Transfer',\n purpose = 'Material Transfer'\n WHERE stock_entry_type = 'Receive at Warehouse'\n ", "n_words": 39, "vocab_size": 18, "n_whitespaces": 139, "language": "en" } }, { "id": 67038, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/projects/utils.py", "file_name": "utils.py", "fun_name": "query_task", "commit_message": "style: format code with black", "code": "def query_task(doctype, txt, searchfield, start, page_len, filters):\n\tfrom frappe.desk.reportview import build_match_conditions\n\n\tsearch_string = \"%%%s%%\" % txt\n\torder_by_string = \"%s%%\" % txt\n\tmatch_conditions = build_match_conditions(\"Task\")\n\tmatch_conditions = (\"and\" + match_conditions) if match_conditions else \"\"\n\n\treturn frappe.db.sql(\n\t\t\n\t\t% (searchfield, \"%s\", \"%s\", match_conditions, \"%s\", searchfield, \"%s\", searchfield, \"%s\", \"%s\"),\n\t\t(search_string, search_string, order_by_string, order_by_string, start, page_len),\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 53, "vocab_size": 37, "complexity": 2, "nloc": 18, "token_counts": 96, "n_ast_nodes": 150, "n_identifiers": 16, "random_cut": "def query_task(doctype, txt, searchfield, start, page_len, filters):\n\tfrom frappe.desk.reportview import build_match_conditions\n\n\tsearch_string = \"%%%s%%\" % txt\n\torder_by_string = \"%s%%\" % txt\n\tmatch_conditions = build_match_conditions(\"Task\")\n\tmatch_conditions = (\"and\" + match_conditions) if match_conditions else \"\"\n\n\treturn frappe.db.sql(\n\t\t\n\t\t% (searchfield, \"%s\", \"%s\", match_condi", "d_id": 14415, "documentation": { "docstring": "select name, subject from `tabTask`\n\t\twhere (`%s` like %s or `subject` like %s) %s\n\t\torder by\n\t\t\tcase when `subject` like %s then 0 else 1 end,\n\t\t\tcase 
when `%s` like %s then 0 else 1 end,\n\t\t\t`%s`,\n\t\t\tsubject\n\t\tlimit %s, %s", "n_words": 41, "vocab_size": 25, "n_whitespaces": 33, "language": "en" } }, { "id": 275145, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/mixed_precision/policy.py", "file_name": "policy.py", "fun_name": "_parse_name", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _parse_name(self, name):\n \n if name.endswith(\"_float32_vars\"):\n error_msg = (\n \"Policies ending in '_float32_vars' have been removed \"\n \"from TensorFlow.\"\n )\n if name in (\"infer_float32_vars\", \"infer_with_float32_vars\"):\n error_msg += (\n \" Please use the 'mixed_float16' or 'mixed_bfloat16' \"\n \"policy instead.\"\n )\n elif name == \"float16_with_float32_vars\":\n error_msg += \" Please use the 'mixed_float16' policy instead.\"\n elif name == \"bfloat16_with_float32_vars\":\n error_msg += \" Please use the 'mixed_bfloat16' policy instead.\"\n error_msg += \" Got policy name: '%s'\" % name\n raise ValueError(error_msg)\n\n if name == \"mixed_float16\":\n return \"float16\", \"float32\"\n elif name == \"mixed_bfloat16\":\n return \"bfloat16\", \"float32\"\n elif name == \"_infer\":\n # The \"_infer\" policy exists only for compatibility with TF 1, where\n # \"_infer\" is the default. The behavior matches the behavior of TF 1's\n # behavior before policies were introduced. With \"_infer\", the computation\n # and variable dtype are inferred from the first input the first time the\n # layer is called. Once the layer is called for the first time, the\n # layer's policy will change to the dtype of the first input, and it will\n # no longer have the \"_infer\" policy.\n #\n # The infer policy should be considered an implementation detail and may\n # be removed in the future.\n return None, None\n\n try:\n dtype = tf.as_dtype(name).name\n except TypeError:\n error = (\n \"Cannot convert value %s to a mixed precision Policy. 
\"\n \"Valid policies include 'mixed_float16', 'mixed_bfloat16', \"\n \"and the name of any dtype such as 'float32'.\" % (name,)\n )\n raise ValueError(error)\n return dtype, dtype\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 735, "n_words": 242, "vocab_size": 132, "complexity": 9, "nloc": 33, "token_counts": 126, "n_ast_nodes": 256, "n_identifiers": 11, "random_cut": "def _parse_name(self, name):\n \n if name.endswith(\"_float32_vars\"):\n error_msg = (\n \"Policies ending in '_float32_vars' have been removed \"\n \"from TensorFlow.\"\n )\n if name in (\"infer_float32_vars\", \"infer_with_float32_vars\"):\n error_msg += (\n \" Please use the 'mixed_float16' or 'mixed_bfloat16' \"\n \"policy instead.\"\n ", "d_id": 81318, "documentation": { "docstring": "Parses a Policy name into a compute and variable dtype.\n\n Args:\n name: The name of the policy:\n\n Returns:\n The (compute_dtype, variable_dtype) pair.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 61, "language": "en" } }, { "id": 54365, "commit_id": "1a3defa3a4ee74fcea9ae5fa4edf6e5eed134930", "repo": "prefect", "path": "src/prefect/engine.py", "file_name": "engine.py", "fun_name": "reraise_exceptions_as_crashes", "commit_message": "Move state utilities to `prefect.states`", "code": "def reraise_exceptions_as_crashes():\n \n try:\n yield\n except BaseException as exc:\n state = exception_to_crashed_state(exc)\n raise Crash(message=state.message, cause=exc, state=state) from exc\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 47, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 6, "token_counts": 38, "n_ast_nodes": 64, "n_identifiers": 8, "random_cut": "def reraise_exceptions_as_crashes():\n \n try:\n yield\n except", "d_id": 11047, "documentation": { "docstring": "\n Detect crashes during this context, wrapping unexpected exceptions into `Crash`\n signals.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 21, "language": "en" } }, { "id": 44069, "commit_id": "75755d7f65fb06c6e2e74f805b877774bfa7fcda", "repo": "airflow", "path": "scripts/in_container/run_resource_check.py", "file_name": "run_resource_check.py", "fun_name": "resoure_check", "commit_message": "Verify enough resources for breeze (#20763)\n\nVerify resources, memory, cpus and disk for Docker in Python.", "code": "def resoure_check():\n \n MINIMUM_ALLOWED_MEMORY = 4\n MINIMUM_ALLOWED_CPUS = 2\n MINIMUM_ALLOWED_DISK = 20\n print(\"\\nChecking resources.\\n\")\n\n # Memory current available\n svmem = psutil.virtual_memory()\n mem_available = get_size(svmem.available)\n\n # Cpus current available\n cpus_available = psutil.cpu_count(logical=True)\n\n # Disk current available\n partitions = psutil.disk_partitions()\n partition_usage = psutil.disk_usage(partitions[0].mountpoint)\n disk_available = get_size(partition_usage.free)\n\n resources: Dict[str, Resource] = {\n 'Memory': Resource(current=mem_available, minimumAllowed=MINIMUM_ALLOWED_MEMORY),\n 'Cpus': Resource(current=cpus_available, minimumAllowed=MINIMUM_ALLOWED_CPUS),\n 'Disk': Resource(current=disk_available, minimumAllowed=MINIMUM_ALLOWED_DISK),\n }\n return resources\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 132, "n_words": 60, "vocab_size": 45, "complexity": 1, "nloc": 17, "token_counts": 
123, "n_ast_nodes": 206, "n_identifiers": 27, "random_cut": "def resoure_check():\n \n MINIMUM_ALLOWED_MEMORY = 4\n MINIMUM_ALLOWED_CPUS = 2\n MINIMUM_ALLOWED_DISK = 20\n print(\"\\nChecking r", "d_id": 8135, "documentation": { "docstring": "\n Use gsutil to get resources in bytes for memory and disk\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 198085, "commit_id": "dcb6e3c69f4e47f2fdb10a2ef0ede2cc6c8f2e06", "repo": "sympy", "path": "sympy/functions/elementary/trigonometric.py", "file_name": "trigonometric.py", "fun_name": "_pi_coeff", "commit_message": "replace S.Pi with pi; cache InverseTrigonometric tables", "code": "def _pi_coeff(arg, cycles=1):\n r\n arg = sympify(arg)\n if arg is pi:\n return S.One\n elif not arg:\n return S.Zero\n elif arg.is_Mul:\n cx = arg.coeff(pi)\n if cx:\n c, x = cx.as_coeff_Mul() # pi is not included as coeff\n if c.is_Float:\n # recast exact binary fractions to Rationals\n f = abs(c) % 1\n if f != 0:\n p = -int(round(log(f, 2).evalf()))\n m = 2**p\n cm = c*m\n i = int(cm)\n if i == cm:\n c = Rational(i, m)\n cx = c*x\n else:\n c = Rational(int(c))\n cx = c*x\n if x.is_integer:\n c2 = c % 2\n if c2 == 1:\n return x\n elif not c2:\n if x.is_even is not None: # known parity\n return S.Zero\n return Integer(2)\n else:\n return c2*x\n return cx\n elif arg.is_zero:\n return S.Zero\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 618, "n_words": 122, "vocab_size": 70, "complexity": 13, "nloc": 72, "token_counts": 201, "n_ast_nodes": 327, "n_identifiers": 31, "random_cut": "def _pi_coeff(arg, cycles=1):\n r\n arg = sympify(arg)\n if arg is pi:\n return S.One\n elif not arg:\n return S.Zero\n elif arg.is_Mul:\n cx = arg.coeff(pi)\n if cx:\n c, x = cx.as_coeff_Mul() # pi is not included as coeff\n if c.is_Float:\n # recast exact binary fractions to Rationals\n f = abs(c) % 1\n if f != 0:\n p = -int(round(log(f, 2).evalf()))\n m = 2**p\n cm = c*m\n i = int(cm)\n if i == cm:\n c = Rational(i, m)\n cx = c*x\n else:\n c = Rational(int(c))\n cx = c*x\n if x.is_integer:\n c2 = c % 2\n if c2 == 1:\n return x\n elif not c2:\n ", "d_id": 48786, "documentation": { "docstring": "\n When arg is a Number times $\\pi$ (e.g. $3\\pi/2$) then return the Number\n normalized to be in the range $[0, 2]$, else `None`.\n\n When an even multiple of $\\pi$ is encountered, if it is multiplying\n something with known parity then the multiple is returned as 0 otherwise\n as 2.\n\n Examples\n ========\n\n >>> from sympy.functions.elementary.trigonometric import _pi_coeff\n >>> from sympy import pi, Dummy\n >>> from sympy.abc import x\n >>> _pi_coeff(3*x*pi)\n 3*x\n >>> _pi_coeff(11*pi/7)\n 11/7\n >>> _pi_coeff(-11*pi/7)\n 3/7\n >>> _pi_coeff(4*pi)\n 0\n >>> _pi_coeff(5*pi)\n 1\n >>> _pi_coeff(5.0*pi)\n 1\n >>> _pi_coeff(5.5*pi)\n 3/2\n >>> _pi_coeff(2 + pi)\n\n >>> _pi_coeff(2*Dummy(integer=True)*pi)\n 2\n >>> _pi_coeff(2*Dummy(even=True)*pi)\n 0\n\n ", "n_words": 98, "vocab_size": 68, "n_whitespaces": 189, "language": "en" } }, { "id": 294616, "commit_id": "c1a2be72fc8b76b55cfde1823c5688100e397369", "repo": "core", "path": "homeassistant/components/generic/config_flow.py", "file_name": "config_flow.py", "fun_name": "async_test_still", "commit_message": "Generic IP Camera configflow 2 (#52360)\n\nCo-authored-by: J. 
Nick Koston ", "code": "async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]:\n \n fmt = None\n if not (url := info.get(CONF_STILL_IMAGE_URL)):\n return {}, None\n if not isinstance(url, template_helper.Template) and url:\n url = cv.template(url)\n url.hass = hass\n try:\n url = url.async_render(parse_result=False)\n except TemplateError as err:\n _LOGGER.error(\"Error parsing template %s: %s\", url, err)\n return {CONF_STILL_IMAGE_URL: \"template_error\"}, None\n verify_ssl = info.get(CONF_VERIFY_SSL)\n auth = generate_auth(info)\n try:\n async_client = get_async_client(hass, verify_ssl=verify_ssl)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]:\n \"\"\"Verify that the still image is valid before we create an entity.\"\"\"\n fmt = None\n if not (url := info.get(CONF_STILL_IMAGE_URL)):\n return {}, None\n if not isinstance(url, template_helper.Template) and url:\n url = cv.template(url)\n url.hass = hass\n try:\n url = url.async_render(parse_result=False)\n except TemplateError as err:\n _LOGGER.error(\"Error parsing template %s: %s\", url, err)\n return {CONF_STILL_IMAGE_URL: \"template_error\"}, None\n verify_ssl = info.get(CONF_VERIFY_SSL)\n auth = generate_auth(info)\n try:\n async_client = get_async_client(hass, verify_ssl=verify_ssl)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 139, "n_words": 63, "vocab_size": 50, "complexity": 8, "nloc": 40, "token_counts": 253, "n_ast_nodes": 208, "n_identifiers": 27, "random_cut": "async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]:\n \n fmt = None\n if not (url := info.get(CONF_STILL_IMAGE_URL)):\n return {}, None\n if not isinstance(url, template_helper.Template) and url:\n url = cv.template(url)\n url.hass = hass\n try:\n url = url.async_render(parse_result=False)\n except TemplateError as err:\n _LOGGER.error(\"Error parsing template %s: %s\", url, err)\n return {CONF_STILL_IMAGE_URL: \"template_error\"}, None\n verify_ssl = info.get(CONF_VERIFY_SSL)\n au", "d_id": 93650, "documentation": { "docstring": "Verify that the still image is valid before we create an entity.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 219698, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "normalize", "commit_message": "add python 3.10.4 for windows", "code": "def normalize(self, a):\n \n a = _convert_other(a, raiseit=True)\n return a.normalize(context=self)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 6, "random_cut": "def normalize(self, a):\n \n a = _convert_other(a, raiseit=True)\n return a.norm", "d_id": 55723, "documentation": { "docstring": "normalize reduces an operand to its simplest form.\n\n Essentially a plus operation with all trailing zeros removed from the\n result.\n\n >>> ExtendedContext.normalize(Decimal('2.1'))\n Decimal('2.1')\n >>> ExtendedContext.normalize(Decimal('-2.0'))\n Decimal('-2')\n >>> ExtendedContext.normalize(Decimal('1.200'))\n Decimal('1.2')\n >>> ExtendedContext.normalize(Decimal('-120'))\n Decimal('-1.2E+2')\n >>> ExtendedContext.normalize(Decimal('120.00'))\n Decimal('1.2E+2')\n >>> 
ExtendedContext.normalize(Decimal('0.00'))\n Decimal('0')\n >>> ExtendedContext.normalize(6)\n Decimal('6')\n ", "n_words": 41, "vocab_size": 35, "n_whitespaces": 160, "language": "en" } }, { "id": 116214, "commit_id": "9a0e918bba3439959112a7fd8e5210276b5ac255", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/druid_handler/druid_handler.py", "file_name": "druid_handler.py", "fun_name": "get_tables", "commit_message": "implemented the get_tables() and get_columns() methods", "code": "def get_tables(self) -> StatusResponse:\n \n\n query = \n result = self.native_query(query)\n df = result.data_frame\n\n df = df[['TABLE_NAME' 'TABLE_TYPE']]\n result.data_frame = df.rename(columns={'TABLE_NAME': 'table_name', 'TABLE_TYPE': 'table_type'})\n\n return result\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 24, "vocab_size": 17, "complexity": 1, "nloc": 15, "token_counts": 55, "n_ast_nodes": 103, "n_identifiers": 10, "random_cut": "def get_tables(self) -> StatusResponse:\n \n\n query = \n result = self.native_query(query)\n df = result.data_frame\n\n ", "d_id": 25698, "documentation": { "docstring": "\n Return list of entities that will be accessible as tables.\n Returns:\n HandlerResponse\n \n SELECT *\n FROM INFORMATION_SCHEMA.TABLES\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 79, "language": "en" } }, { "id": 34338, "commit_id": "ac227093e41cecb07c7e0f2fc9a504850907bd06", "repo": "transformers", "path": "tests/test_feature_extraction_vilt.py", "file_name": "test_feature_extraction_vilt.py", "fun_name": "get_expected_values", "commit_message": "Add ViLT (#14895)\n\n* First commit\r\n\r\n* Add conversion script\r\n\r\n* Make conversion script work for base model\r\n\r\n* More improvements\r\n\r\n* Update conversion script, works for vqa\r\n\r\n* Add indexing argument to meshgrid\r\n\r\n* Make conversion script work for ViltForPreTraining\r\n\r\n* Add ViltForPreTraining to docs\r\n\r\n* Fix device issue\r\n\r\n* Add processor\r\n\r\n* Add MinMaxResize to feature extractor\r\n\r\n* Implement call method of ViltProcessor\r\n\r\n* Fix tests\r\n\r\n* Add integration test\r\n\r\n* Add loss calculation for VQA\r\n\r\n* Improve tests\r\n\r\n* Improve some more tests\r\n\r\n* Debug tests\r\n\r\n* Small improvements\r\n\r\n* Add support for attention_mask\r\n\r\n* Remove mask_it\r\n\r\n* Add pixel_mask\r\n\r\n* Add tests for ViltFeatureExtractor\r\n\r\n* Improve tests\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning to conversion script\r\n\r\n* Minor fixes\r\n\r\n* Add support for image_embeds, update docstrings to markdown\r\n\r\n* Update docs to markdown\r\n\r\n* Improve conversion script\r\n\r\n* Rename ViltForPreTraining to ViltForMaskedLM\r\n\r\n* Improve conversion script\r\n\r\n* Convert docstrings to markdown\r\n\r\n* Fix code example of retrieval model\r\n\r\n* Properly convert masked language model\r\n\r\n* Add integration test for nlvr\r\n\r\n* Fix code quality\r\n\r\n* Apply suggestions from code review\r\n\r\n* Add copied from statements\r\n\r\n* Fix pretrained_config_archive_map\r\n\r\n* Fix docs\r\n\r\n* Add model to README\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply more suggestions from code review\r\n\r\n* Make code more readable\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning to the tests\r\n\r\n* Rename 
ViltForVisualQuestionAnswering to ViltForQuestionAnswering\r\n\r\n* Replace pixel_values_2 by single tensor\r\n\r\n* Add hidden_states and attentions\r\n\r\n* Fix one more test\r\n\r\n* Fix all tests\r\n\r\n* Update year\r\n\r\n* Fix rebase issues\r\n\r\n* Fix another rebase issue\r\n\r\n* Remove ViltForPreTraining from auto mapping\r\n\r\n* Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval\r\n\r\n* Make it possible to use BertTokenizerFast in the processor\r\n\r\n* Use BertTokenizerFast by default\r\n\r\n* Rename ViltForNaturalLanguageVisualReasoning, define custom model output\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def get_expected_values(self, image_inputs, batched=False):\n \n if not batched:\n image = image_inputs[0]\n if isinstance(image, Image.Image):\n w, h = image.size\n else:\n h, w = image.shape[1], image.shape[2]\n scale = self.size / min(w, h)\n if h < w:\n newh, neww = self.size, scale * w\n else:\n newh, neww = scale * h, self.size\n\n max_size = int((1333 / 800) * self.size)\n if max(newh, neww) > max_size:\n scale = max_size / max(newh, neww)\n newh = newh * scale\n neww = neww * scale\n\n newh, neww = int(newh + 0.5), int(neww + 0.5)\n expected_height, expected_width = (\n newh // self.size_divisor * self.size_divisor,\n neww // self.size_divisor * self.size_divisor,\n )\n\n else:\n expected_values = []\n for image in image_inputs:\n expected_height, expected_width = self.get_expected_values([image])\n expected_values.append((expected_height, expected_width))\n expected_height = max(expected_values, key=lambda item: item[0])[0]\n expected_width = max(expected_values, key=lambda item: item[1])[1]\n\n return expected_height, expected_width\n\n\n@require_torch\n@require_vision", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@require_torch\n@require_vision", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 487, "n_words": 131, "vocab_size": 69, "complexity": 6, "nloc": 30, "token_counts": 249, "n_ast_nodes": 409, "n_identifiers": 27, "random_cut": "def get_expected_values(self, image_inputs, batched=False):\n \n if not batched:\n image = image_inputs[0]\n if isinstance(image, Image.Image):\n w, h = image.size\n else:\n h, w = image.shape[1], image.shape[2]\n scale = self.size / min(w, h)\n if h < w:\n newh, neww = self.size, scale * w\n else:\n newh, neww = scale * h, self.size\n\n max_size = int((1333 / 800) * self.size)\n if max(newh, neww) > max_size:\n scale = max_size / max(newh, neww)\n newh = newh * scale\n neww = neww * scale\n\n newh, neww = int(newh + 0.5), int(neww + 0.5)\n expected_height, expected_width = (\n newh // self.size_divisor * self.size_divisor,\n neww // self.size_divisor * self.size_divisor,\n )\n\n else:\n expected_values = []\n for image in image_inputs:\n expecte", "d_id": 6261, "documentation": { "docstring": "\n This function computes the expected height and width when providing images to ViltFeatureExtractor,\n assuming do_resize is set to True with a scalar size and size_divisor.\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 47, "language": "en" } }, { "id": 244366, "commit_id": "9c5b3331ac8edbfa328922fbab45c382380da540", "repo": "mmdetection", "path": "mmdet/models/detectors/base.py", "file_name": "base.py", "fun_name": "preprocss_testing_data", "commit_message": "Simplify api of one-stage detector", "code": "def preprocss_testing_data(self, data):\n \n\n num_augs = len(data[0]['img'])\n batch_size = 
len(data)\n aug_batch_imgs = []\n aug_batch_data_samples = []\n\n # adjust `images` and `data_samples` to a list of list\n # outer list is test-time augmentation and inter list\n # is batch dimension\n for aug_index in range(num_augs):\n batch_imgs = []\n batch_data_samples = []\n for batch_index in range(batch_size):\n single_img = data[batch_index]['img'][aug_index]\n\n # to gpu and normalize\n single_img = single_img.to(self.device)\n if self.to_rgb and single_img[0].size(0) == 3:\n single_img = single_img[[2, 1, 0], ...]\n single_img = (single_img - self.pixel_mean) / self.pixel_std\n\n batch_imgs.append(single_img)\n batch_data_samples.append(\n data[batch_index]['data_sample'][aug_index])\n aug_batch_imgs.append(stack_batch(batch_imgs))\n aug_batch_data_samples.append(batch_data_samples)\n\n return aug_batch_imgs, aug_batch_data_samples\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 358, "n_words": 90, "vocab_size": 61, "complexity": 5, "nloc": 20, "token_counts": 164, "n_ast_nodes": 265, "n_identifiers": 22, "random_cut": "def preprocss_testing_data(self, data):\n \n\n num_augs = len(data[0]['img'])\n batch_size = len(data)\n aug_batch_imgs = []\n aug_batch_data_samples = []\n\n # adjust `images` and `data_samples` to a list of list\n # outer list is test-time augmentation and inter list\n # is batch dimension\n for aug_index in range(num_augs):\n batch_imgs = []\n batch_data_samples = []\n for batch_index in range(batch_size):\n single_img = data[batch_index]['img'][aug_index]\n\n # to gpu and normalize\n single_img = single_img.to(self.device)\n if self.to_rgb and single_img[0].size(0) == 3:\n single_img = single_img[[2, 1, 0], ...]\n single_img = (single_img - self.pixel_mean) / self.pixel_std\n\n batch_imgs.append(single_img)\n batch_data_samples.append(\n data[batch_index]", "d_id": 70355, "documentation": { "docstring": " Process input data during training and testing phases.\n Args:\n data (list[dict]): The data to be processed, which\n comes from dataloader. The list indicate the batch dimension.\n Each dict contains these keys:\n\n - `img` (list[Tensor]): Image tensor with different test-time\n augmentation.\n - `data_sample` (list[:obj:`GeneralData`]): Meta information\n and annotations under different test-time augmentation.\n\n\n Returns:\n tuple: It should contain 2 items.\n\n - aug_batch_imgs (list[Tensor]): List of batch image\n tensor. The list indicate the test-time augmentations.\n Note that the batch size always is 1\n when do the augtest.\n - aug_batch_data_samples\n (list[list[:obj:`GeneralData`]], Optional):\n The Data Samples. It usually includes information such as\n `gt_instance`. Return None If the input datas does not\n contain `data_sample`. 
The outer list indicate the\n number of augmentations and inter list indicate the\n batch dimension.\n ", "n_words": 123, "vocab_size": 86, "n_whitespaces": 457, "language": "en" } }, { "id": 124307, "commit_id": "ea47d97a548504bdb6ff1afdb1021b0bc54d5dfa", "repo": "ray", "path": "python/ray/widgets/render.py", "file_name": "render.py", "fun_name": "list_templates", "commit_message": "[Core] Add HTML reprs for `ClientContext` and `WorkerContext` (#25730)", "code": "def list_templates() -> List[pathlib.Path]:\n \n return (pathlib.Path(__file__).parent / \"templates\").glob(\"*.html.j2\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 8, "token_counts": 30, "n_ast_nodes": 54, "n_identifiers": 7, "random_cut": "def list_templates() -> List[pathlib.Path]:\n \n return (pathlib.Path(__file__).parent / \"templates\").glob(\"*.html.j2\")\n", "d_id": 27573, "documentation": { "docstring": "List the available HTML templates.\n\n Returns:\n List[pathlib.Path]: A list of files with .html.j2 extensions inside\n ./templates/\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 56, "language": "en" } }, { "id": 266796, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/python_requirements.py", "file_name": "python_requirements.py", "fun_name": "usable_pip_file", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def usable_pip_file(path): # type: (t.Optional[str]) -> bool\n \n return bool(path) and os.path.exists(path) and bool(os.path.getsize(path))\n\n\n# Cryptography\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 21, "n_words": 15, "vocab_size": 13, "complexity": 3, "nloc": 2, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 6, "random_cut": "def usable_pip_file(path): # type: (t.Optional[str", "d_id": 78599, "documentation": { "docstring": "Return True if the specified pip file is usable, otherwise False.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 268903, "commit_id": "8bb1b365ca6bb21b32a1ee1654eecb02570970ac", "repo": "keras", "path": "keras/metrics/metrics.py", "file_name": "metrics.py", "fun_name": "binary_accuracy", "commit_message": "reverting binary accuracy to original", "code": "def binary_accuracy(y_true, y_pred, threshold=0.5):\n \n y_pred = tf.convert_to_tensor(y_pred)\n threshold = tf.cast(threshold, y_pred.dtype)\n y_pred = tf.cast(y_pred > threshold, y_pred.dtype)\n return backend.mean(tf.equal(y_true, y_pred), 
axis=-1)\n\n\n@keras_export('keras.metrics.categorical_accuracy')\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export('keras.metrics.categorical_accuracy')\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 26, "n_words": 23, "vocab_size": 19, "complexity": 1, "nloc": 5, "token_counts": 67, "n_ast_nodes": 123, "n_identifiers": 16, "random_cut": "def binary_accuracy(y_true, y_pred, threshold=0.5):\n \n y_pred = tf.convert_to_tensor(y_pred)\n threshold = tf.cast(threshold, y_pred.dtype)\n y_pred = tf.cast(y_pred > threshol", "d_id": 79764, "documentation": { "docstring": "Calculates how often predictions match binary labels.\n\n Standalone usage:\n >>> y_true = [[1], [1], [0], [0]]\n >>> y_pred = [[1], [1], [0], [0]]\n >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)\n >>> assert m.shape == (4,)\n >>> m.numpy()\n array([1., 1., 1., 1.], dtype=float32)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n threshold: (Optional) Float representing the threshold for deciding whether\n prediction values are 1 or 0.\n\n Returns:\n Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`\n ", "n_words": 86, "vocab_size": 61, "n_whitespaces": 113, "language": "en" } }, { "id": 2224, "commit_id": "fd3b9772cb97127f9f356c1e854dc3b4a436402d", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/node_service/oblv/oblv_messages.py", "file_name": "oblv_messages.py", "fun_name": "_object2proto", "commit_message": "Changes for publishing data to enclave", "code": "def _object2proto(self) -> SyftOblvClient_PB:\n \n return SyftOblvClient_PB(\n token=self.token,\n oblivious_user_id=self.oblivious_user_id,\n cookies=self.cookies,\n headers=self.headers,\n timeout=self.timeout,\n verify_ssl=self.verify_ssl,\n )\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 100, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 20, "token_counts": 48, "n_ast_nodes": 71, "n_identifiers": 9, "random_cut": "def _object2proto(self) -> SyftOblvClient_PB:\n \n return SyftOblvClient_PB(\n token=self.token,\n oblivious_user_id=self.oblivious_user_id,\n cookies=self.cookies,\n h", "d_id": 267, "documentation": { "docstring": "Returns a protobuf serialization of self.\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n :return: returns a protobuf object\n :rtype: SyftOblvClient_PB\n .. note::\n This method is purely an internal method. 
Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "n_words": 68, "vocab_size": 56, "n_whitespaces": 150, "language": "en" } }, { "id": 83633, "commit_id": "fcf82bf0477d7b5c6fe6d26f2458a5acef43dae2", "repo": "zulip", "path": "zerver/tests/test_digest.py", "file_name": "test_digest.py", "fun_name": "test_bulk_handle_digest_email_skips_deactivated_users", "commit_message": "digest: Don't send emails to deactivated users, even if queued.", "code": "def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None:\n \n realm = get_realm(\"zulip\")\n hamlet = self.example_user(\"hamlet\")\n user_ids = list(\n UserProfile.objects.filter(is_bot=False, realm=realm).values_list(\"id\", flat=True)\n )\n\n do_deactivate_user(hamlet, acting_user=None)\n\n with mock.patch(\"zerver.lib.digest.enough_traffic\", return_value=True), mock.patch(\n \"zerver.lib.digest.send_future_email\"\n ) as mock_send_email:\n bulk_handle_digest_email(user_ids, 1)\n\n emailed_user_ids = [\n call_args[1][\"to_user_ids\"][0] for call_args in mock_send_email.call_args_list\n ]\n\n self.assertEqual(\n set(emailed_user_ids), set(user_id for user_id in user_ids if user_id != hamlet.id)\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 189, "n_words": 50, "vocab_size": 41, "complexity": 4, "nloc": 21, "token_counts": 129, "n_ast_nodes": 213, "n_identifiers": 28, "random_cut": "def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None:\n \n realm = get_realm(\"zulip\")\n hamlet = self.example_user(\"hamlet\")\n user_ids = list(\n UserProfile.objects.filter(is_bot=False, realm=realm).values_list(\"id\", flat=True)\n )\n\n do_deactivate_us", "d_id": 17696, "documentation": { "docstring": "\n A user id may be added to the queue before the user is deactivated. 
In such a case,\n the function responsible for sending the email should correctly skip them.\n ", "n_words": 29, "vocab_size": 25, "n_whitespaces": 51, "language": "en" } }, { "id": 181599, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/driver_tests.py", "file_name": "driver_tests.py", "fun_name": "test_driver_4", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_driver_4():\n \n args_list = [\n 'tests/tests.csv',\n '-is', ',',\n '-target', 'class',\n '-g', '1',\n '-p', '2',\n '-cv', '3',\n '-s', '42',\n '-config', 'TPOT light',\n '-v', '3'\n ]\n args = _get_arg_parser().parse_args(args_list)\n with captured_output() as (out, err):\n tpot_driver(args)\n ret_stdout = out.getvalue()\n\n assert \"TPOT settings\" in ret_stdout\n assert \"Final Pareto front testing scores\" in ret_stdout\n try:\n ret_val = float(ret_stdout.split('\\n')[-2].split('\\t')[1])\n except Exception:\n ret_val = -float('inf')\n assert ret_val > 0.0\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 263, "n_words": 62, "vocab_size": 51, "complexity": 2, "nloc": 23, "token_counts": 123, "n_ast_nodes": 229, "n_identifiers": 15, "random_cut": "def test_driver_4():\n \n args_list = [\n 'tests/tests.csv',\n '-is', ',',\n '-target', 'class',\n '-g', '1',\n '-p', '2',\n '-cv', '3',\n '-s', '42',\n '-config', 'TPOT light',\n '-v', '3'\n ]\n args = _get_arg_parser", "d_id": 43388, "documentation": { "docstring": "Assert that the tpot_driver() in TPOT driver outputs normal result with verbosity = 3.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 249884, "commit_id": "09de2aecb05cb46e0513396e2675b24c8beedb68", "repo": "synapse", "path": "tests/handlers/test_sso.py", "file_name": "test_sso.py", "fun_name": "test_set_avatar", "commit_message": "Add support for handling avatar with SSO login (#13917)\n\nThis commit adds support for handling a provided avatar picture URL\r\nwhen logging in via SSO.\r\n\r\nSigned-off-by: Ashish Kumar \r\n\r\nFixes #9357.", "code": "async def test_set_avatar(self) -> None:\n \n handler = self.hs.get_sso_handler()\n\n # Create a new user to set avatar for\n reg_handler = self.hs.get_registration_handler()\n user_id = self.get_success(reg_handler.register_user(approved=True))\n\n self.assertTrue(\n self.get_success(handler.set_avatar(user_id, \"http://my.server/me.png\"))\n )\n\n # Ensure avatar is set on this newly created user,\n # so no need to compare for the exact image\n profile_handler = self.hs.get_profile_handler()\n profile = self.get_success(profile_handler.get_profile(user_id))\n self.assertIsNot(profile[\"avatar_url\"], None)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 150, "n_words": 55, "vocab_size": 45, "complexity": 1, "nloc": 11, "token_counts": 92, "n_ast_nodes": 158, "n_identifiers": 18, "random_cut": "async def test_set_avatar(self) -> None:\n \n handler = self.hs.get_sso_handler()\n\n # Create a new user to set avatar for\n reg_handler = s", "d_id": 73181, "documentation": { "docstring": "Tests successfully setting the avatar of a newly created user", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 125674, "commit_id": "8d7b865614f3635def12c42b653f8acd8b4ae56a", 
"repo": "ray", "path": "python/ray/tune/examples/wandb_example.py", "file_name": "wandb_example.py", "fun_name": "tune_decorated", "commit_message": "[air/tuner/docs] Update docs for Tuner() API 2a: Tune examples (non-docs) (#26931)\n\nSplitting up #26884: This PR includes changes to use Tuner() instead of tune.run() for all examples included in python/ray/tune/examples\r\n\r\nSigned-off-by: xwjiang2010 \r\nSigned-off-by: Kai Fricke \r\n\r\nCo-authored-by: xwjiang2010 \r\nCo-authored-by: Richard Liaw ", "code": "def tune_decorated(api_key_file):\n \n tuner = tune.Tuner(\n decorated_train_function,\n tune_config=tune.TuneConfig(\n metric=\"loss\",\n mode=\"min\",\n ),\n param_space={\n \"mean\": tune.grid_search([1, 2, 3, 4, 5]),\n \"sd\": tune.uniform(0.2, 0.8),\n \"wandb\": {\"api_key_file\": api_key_file, \"project\": \"Wandb_example\"},\n },\n )\n tuner.fit()\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 130, "n_words": 28, "vocab_size": 28, "complexity": 1, "nloc": 14, "token_counts": 87, "n_ast_nodes": 136, "n_identifiers": 14, "random_cut": "def tune_decorated(api_key_file):\n \n tuner = tune.Tuner(\n decorated_train_function,\n tune_config=tune.TuneConfig(\n metric=\"loss\",\n mode=\"min\",", "d_id": 27945, "documentation": { "docstring": "Example for using the @wandb_mixin decorator with the function API", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 60540, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py", "file_name": "parser.py", "fun_name": "format_usage", "commit_message": "upd; format", "code": "def format_usage(self, usage):\n # type: (str) -> str\n \n msg = \"\\nUsage: {}\\n\".format(self.indent_lines(textwrap.dedent(usage), \" \"))\n return msg\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 45, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 56, "n_identifiers": 8, "random_cut": "def format_usage(self, usage):\n # type: (str) -> str\n \n msg = \"\\nUsage: {}\\n\".format(self.indent_lines(text", "d_id": 12200, "documentation": { "docstring": "\n Ensure there is only one newline between usage and the first heading\n if there is no description.\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 39, "language": "en" } }, { "id": 198195, "commit_id": "a69c49bec6caf2cb460dc4eedf0fec184db92f0e", "repo": "sympy", "path": "sympy/matrices/expressions/matexpr.py", "file_name": "matexpr.py", "fun_name": "from_index_summation", "commit_message": "Rename files for array expression conversions in order to avoid naming conflicts in TAB-completion of the corresponding functions", "code": "def from_index_summation(expr, first_index=None, last_index=None, dimensions=None):\n r\n from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array\n from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix\n first_indices = []\n if first_index is not None:\n first_indices.append(first_index)\n if last_index is not None:\n first_indices.append(last_index)\n arr = convert_indexed_to_array(expr, first_indices=first_indices)\n return convert_array_to_matrix(arr)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 112, "n_words": 35, "vocab_size": 28, "complexity": 3, "nloc": 50, "token_counts": 86, "n_ast_nodes": 126, "n_identifiers": 16, "random_cut": "def from_index_summation(expr, first_index=None, last_index=None, dimensions=None):\n r\n from sympy.tensor.array.expressions.from_indexed_to_array import convert_index", "d_id": 48817, "documentation": { "docstring": "\n Parse expression of matrices with explicitly summed indices into a\n matrix expression without indices, if possible.\n\n This transformation expressed in mathematical notation:\n\n `\\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \\Longrightarrow \\mathbf{A}\\cdot \\mathbf{B}`\n\n Optional parameter ``first_index``: specify which free index to use as\n the index starting the expression.\n\n Examples\n ========\n\n >>> from sympy import MatrixSymbol, MatrixExpr, Sum\n >>> from sympy.abc import i, j, k, l, N\n >>> A = MatrixSymbol(\"A\", N, N)\n >>> B = MatrixSymbol(\"B\", N, N)\n >>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1))\n >>> MatrixExpr.from_index_summation(expr)\n A*B\n\n Transposition is detected:\n\n >>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1))\n >>> MatrixExpr.from_index_summation(expr)\n A.T*B\n\n Detect the trace:\n\n >>> expr = Sum(A[i, i], (i, 0, N-1))\n >>> MatrixExpr.from_index_summation(expr)\n Trace(A)\n\n More complicated expressions:\n\n >>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1))\n >>> MatrixExpr.from_index_summation(expr)\n A*B.T*A.T\n ", "n_words": 133, "vocab_size": 90, "n_whitespaces": 330, "language": "en" } }, { "id": 114699, "commit_id": "5c2ce68a8eb8b992ab841db3d3a6b4694ecd244b", "repo": "mindsdb", "path": "mindsdb/integrations/mysql_handler/mysql_handler.py", "file_name": "mysql_handler.py", "fun_name": "get_views", "commit_message": "Update mysql handler", "code": "def get_views(self):\n \n q = f\"SHOW FULL TABLES IN {self.database} WHERE TABLE_TYPE LIKE 'VIEW';\"\n result = self.native_query(q)\n return result\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 20, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def get_views(self):\n \n ", "d_id": 25253, "documentation": { "docstring": "\n Get more information about specific database views\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 133814, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/qmix/qmix_policy.py", "file_name": "qmix_policy.py", "fun_name": "_mac", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _mac(model, obs, h):\n \n B, n_agents = obs.size(0), obs.size(1)\n if not isinstance(obs, dict):\n obs = {\"obs\": obs}\n obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()}\n h_flat = [s.reshape([B * n_agents, -1]) for s in h]\n q_flat, h_flat = model(obs_agents_as_batches, h_flat, None)\n return q_flat.reshape([B, n_agents, -1]), [\n s.reshape([B, n_agents, -1]) for s in h_flat\n ]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 93, "n_words": 55, "vocab_size": 41, "complexity": 5, "nloc": 10, "token_counts": 130, "n_ast_nodes": 198, "n_identifiers": 18, "random_cut": "def _mac(model, obs, 
h):\n \n B, n_agents = obs.size(0), obs.size(1)\n if not isinstance(obs, dict):\n obs = {\"obs\": obs}\n obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()}\n h_flat = [s.reshape([B * n_agents, -1]) for s in h]\n q_flat, h_flat = model(obs_agents_as_batches, h_flat, None)\n return q_flat.reshape([B, n_agents, -1]), [\n s.reshape([B, n_agents, -1]) for s in h_fl", "d_id": 30118, "documentation": { "docstring": "Forward pass of the multi-agent controller.\n\n Args:\n model: TorchModelV2 class\n obs: Tensor of shape [B, n_agents, obs_size]\n h: List of tensors of shape [B, n_agents, h_size]\n\n Returns:\n q_vals: Tensor of shape [B, n_agents, n_actions]\n h: Tensor of shape [B, n_agents, h_size]\n ", "n_words": 41, "vocab_size": 23, "n_whitespaces": 85, "language": "en" } }, { "id": 100294, "commit_id": "30872ef265c0fc29465f4c3a0778d0049f8c3897", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_get_count", "commit_message": "alignments tool - Don't re-analyze video if metadata in alignments", "code": "def _get_count(self):\n \n has_meta = all(val is not None for val in self._alignments.video_meta_data.values())\n retval = len(self._alignments.video_meta_data[\"pts_time\"]) if has_meta else None\n logger.debug(\"Frame count from alignments file: (has_meta: %s, %s\", has_meta, retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 66, "n_words": 31, "vocab_size": 27, "complexity": 3, "nloc": 5, "token_counts": 56, "n_ast_nodes": 91, "n_identifiers": 12, "random_cut": "def _get_count(self):\n \n has_meta = all(val is not None for val in self._alignments.video_meta_data.values())\n retval = len(self._alignments.video_meta_data[\"pts_time\"]) if has_meta else None\n logger.debug(\"Frame count from alignments file: (has", "d_id": 19794, "documentation": { "docstring": " If the alignments file has been run through the manual tool, then it will hold video\n meta information, meaning that the count of frames in the alignment file can be relied\n on to be accurate.\n\n Returns\n -------\n int or ``None``\n For video input which contain video meta-data in the alignments file then the count of\n frames is returned. 
In all other cases ``None`` is returned\n ", "n_words": 65, "vocab_size": 47, "n_whitespaces": 122, "language": "en" } }, { "id": 181659, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/one_hot_encoder_tests.py", "file_name": "one_hot_encoder_tests.py", "fun_name": "test_sparse1_with_non_sparse_components", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_sparse1_with_non_sparse_components():\n \n fit_then_transform(\n sparse1_paratial_1h.todense(),\n sparse1,\n categorical_features=[True, False]\n )\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 38, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 6, "token_counts": 23, "n_ast_nodes": 38, "n_identifiers": 6, "random_cut": "def test_sparse1_with_non_sparse_components():\n \n fit_then_transform(\n ", "d_id": 43447, "documentation": { "docstring": "Test fit_transform a sparse matrix with specifying categorical_features.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 160160, "commit_id": "729ad4f92420231e2a7009b3223c6c7620b8b808", "repo": "numpy", "path": "numpy/f2py/tests/test_f2py2e.py", "file_name": "test_f2py2e.py", "fun_name": "test_mod_gen_f77", "commit_message": "TST: Initialize f2py2e tests of the F2PY CLI (#20668)\n\nIncreases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.\r\n\r\nMore importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.", "code": "def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):\n \n MNAME = \"hi\"\n foutl = get_io_paths(hello_world_f90, mname=MNAME)\n ipath = foutl.f90inp\n monkeypatch.setattr(sys, \"argv\", f'f2py {ipath} -m {MNAME}'.split())\n with util.switchdir(ipath.parent):\n f2pycli()\n\n # Always generate C module\n assert Path.exists(foutl.cmodf)\n # File contains a function, check for F77 wrappers\n assert Path.exists(foutl.wrap77)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 41, "vocab_size": 37, "complexity": 1, "nloc": 9, "token_counts": 74, "n_ast_nodes": 134, "n_identifiers": 21, "random_cut": "def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):\n \n MNAME = \"hi\"\n foutl = get_io_paths(hello_world_f90, mname=MNAME)\n ipath = foutl.f90inp\n monkeypatch.setattr(sys, \"argv\", f'f2py {ipath} -m {MNAME}'.split())\n with util.switchdir(ipath.parent):\n f2pycli()\n\n # Always generate C module\n assert Path.exists(fou", "d_id": 38532, "documentation": { "docstring": "Checks the generation of files based on a module name\n CLI :: -m\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 266774, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/delegation.py", "file_name": "delegation.py", "fun_name": "delegate", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None\n \n assert isinstance(args, EnvironmentConfig)\n\n with delegation_context(args, host_state):\n if isinstance(args, TestConfig):\n args.metadata.ci_provider = get_ci_provider().code\n\n make_dirs(ResultType.TMP.path)\n\n with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:\n args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))\n args.metadata.to_file(args.metadata_path)\n\n try:\n delegate_command(args, host_state, exclude, require)\n finally:\n args.metadata_path = None\n else:\n delegate_command(args, host_state, exclude, require)\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 217, "n_words": 51, "vocab_size": 39, "complexity": 3, "nloc": 15, "token_counts": 146, "n_ast_nodes": 232, "n_identifiers": 31, "random_cut": "def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None\n \n assert isinstance(args, EnvironmentConfig)\n\n with delegation_context(args, host_state):\n if isinstance(args, TestConfig):\n args.metadata.ci_provider = get_ci_provider().code\n\n make_dirs(ResultType.TMP.path)\n\n with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:\n args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))\n args.metadata.to_file(args.metadata_path)\n\n try:\n delegate_command(args, host_state, exclude, require)\n finally:\n args.metadata_path = None\n else:\n delegate_command(args, host_state, exclude, require)\n\n", "d_id": 78577, "documentation": { "docstring": "Delegate execution of ansible-test to another environment.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 37514, "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_torch_non_multi_gpu", "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", "code": "def require_torch_non_multi_gpu(test_case):\n \n if not is_torch_available():\n return unittest.skip(\"test requires PyTorch\")(test_case)\n\n import torch\n\n return unittest.skipUnless(torch.cuda.device_count() < 2, \"test requires 0 or 1 GPU\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 40, "n_words": 21, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 44, "n_ast_nodes": 79, "n_identifiers": 9, "random_cut": "def require_torch_non_multi_gpu(test_case):\n \n if not 
is_torch_available():\n return unittest.skip(\"test requires PyTorch\")(test_case)\n\n import torch\n\n return unittest.skipUnless(torch.cuda.device_count() < 2, \"test requires 0 or 1 GPU\")(test_cas", "d_id": 6819, "documentation": { "docstring": "\n Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 20, "language": "en" } }, { "id": 212800, "commit_id": "47047700dd76c40c4471635a7de5f770d5c23c02", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "_ReturnKeyHandler", "commit_message": "If an element is disabled, then don't generate events for it (specifically for Input element in this case)", "code": "def _ReturnKeyHandler(self, event):\n \n # if the element is disabled, ignore the event\n if self.Disabled:\n return\n\n MyForm = self.ParentForm\n button_element = self._FindReturnKeyBoundButton(MyForm)\n if button_element is not None:\n button_element.ButtonCallBack()\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 91, "n_words": 27, "vocab_size": 21, "complexity": 3, "nloc": 7, "token_counts": 38, "n_ast_nodes": 65, "n_identifiers": 9, "random_cut": "def _ReturnKeyHandler(self, event):\n \n # if the element is disabled, ignore the ", "d_id": 53412, "documentation": { "docstring": "\n Internal callback for the ENTER / RETURN key. Results in calling the ButtonCallBack for element that has the return key bound to it, just as if button was clicked.\n\n :param event:\n :type event:\n\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 62, "language": "en" } }, { "id": 164367, "commit_id": "419331c598a097896edae40bc0687e4127f97b6b", "repo": "pandas", "path": "pandas/tests/frame/conftest.py", "file_name": "conftest.py", "fun_name": "uint64_frame", "commit_message": "⬆️ UPGRADE: Autoupdate pre-commit config (#45752)\n\nCo-authored-by: MarcoGorelli ", "code": "def uint64_frame():\n \n return DataFrame(\n {\"A\": np.arange(3), \"B\": [2**63, 2**63 + 5, 2**63 + 10]}, dtype=np.uint64\n )\n\n\n@pytest.fixture", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 32, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 45, "n_ast_nodes": 80, "n_identifiers": 8, "random_cut": "def uint64_frame():\n \n return DataFrame(\n {", "d_id": 39556, "documentation": { "docstring": "\n Fixture for DataFrame with uint64 values\n\n Columns are ['A', 'B']\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 20, "language": "en" } }, { "id": 200995, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/annotations/tests.py", "file_name": "tests.py", "fun_name": "test_null_annotation", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_null_annotation(self):\n \n book = Book.objects.annotate(\n no_value=Value(None, output_field=IntegerField())\n ).first()\n self.assertIsNone(book.no_value)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 48, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 5, "token_counts": 39, "n_ast_nodes": 66, "n_identifiers": 12, "random_cut": "def test_null_annotation(self):\n \n ", "d_id": 49839, "documentation": { "docstring": "\n Annotating None onto a model round-trips\n 
", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 176161, "commit_id": "dec723f072eb997a497a159dbe8674cd39999ee9", "repo": "networkx", "path": "networkx/generators/small.py", "file_name": "small.py", "fun_name": "icosahedral_graph", "commit_message": "Docstrings for the small.py module (#5240)\n\n* added description for the first 5 small graphs\r\n\r\n* modified descriptions based on comment and added description for two more functions\r\n\r\n* added doctrings to all the functions\r\n\r\n* Minor touchups.\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def icosahedral_graph(create_using=None):\n \n description = [\n \"adjacencylist\",\n \"Platonic Icosahedral Graph\",\n 12,\n [\n [2, 6, 8, 9, 12],\n [3, 6, 7, 9],\n [4, 7, 9, 10],\n [5, 7, 10, 11],\n [6, 7, 11, 12],\n [7, 12],\n [],\n [9, 10, 11, 12],\n [10],\n [11],\n [12],\n [],\n ],\n ]\n G = make_small_undirected_graph(description, create_using)\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 233, "n_words": 51, "vocab_size": 37, "complexity": 1, "nloc": 22, "token_counts": 117, "n_ast_nodes": 150, "n_identifiers": 5, "random_cut": "def icosahedral_graph(create_using=None):\n \n description = [\n \"adjacencylist\",\n \"Platonic Icosahedral Graph\",\n 12,\n [\n [2, 6, 8, 9, 12],\n [3, 6, 7, 9],\n [4, 7, 9, 10],\n [5, 7, 10, 11],\n [", "d_id": 41731, "documentation": { "docstring": "\n Returns the Platonic Icosahedral graph.\n\n The icosahedral graph has 12 nodes and 30 edges. It is a Platonic graph\n whose nodes have the connectivity of the icosahedron. It is undirected,\n regular and Hamiltonian [1]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Icosahedral graph with 12 nodes and 30 edges.\n\n References\n ----------\n .. 
[1] https://mathworld.wolfram.com/IcosahedralGraph.html\n ", "n_words": 73, "vocab_size": 52, "n_whitespaces": 129, "language": "en" } }, { "id": 323029, "commit_id": "aa82dc06668ddca275e3a350d4c2793e4961086c", "repo": "PaddleNLP", "path": "examples/biomedical/cblue/train_spo.py", "file_name": "train_spo.py", "fun_name": "evaluate", "commit_message": "[ehealth] fix problems for dynamic2static", "code": "def evaluate(model, criterion, metric, data_loader):\n \n model.eval()\n metric.reset()\n losses = []\n for batch in tqdm(data_loader):\n input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch\n max_batch_len = input_ids.shape[-1]\n ent_mask = paddle.unsqueeze(masks, axis=2)\n spo_mask = paddle.matmul(ent_mask, ent_mask, transpose_y=True)\n spo_mask = paddle.unsqueeze(spo_mask, axis=1)\n\n logits = model(input_ids, token_type_ids, position_ids)\n\n ent_loss = criterion(\n logits[0], ent_label[0], weight=ent_mask, reduction='sum')\n spo_loss = criterion(\n logits[1], spo_label[0], weight=spo_mask, reduction='sum')\n loss = ent_loss + spo_loss\n losses.append(loss.numpy())\n lengths = paddle.sum(masks, axis=-1)\n correct = metric.compute(lengths, logits[0], logits[1], ent_label[1],\n spo_label[1])\n metric.update(correct)\n results = metric.accumulate()\n print('eval loss: %.5f, entity f1: %.5f, spo f1: %.5f' %\n (np.mean(losses), results['entity'][2], results['spo'][2]))\n model.train()\n metric.reset()\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 274, "n_words": 93, "vocab_size": 69, "complexity": 2, "nloc": 26, "token_counts": 256, "n_ast_nodes": 389, "n_identifiers": 44, "random_cut": "def evaluate(model, criterion, metric, data_loader):\n \n model.eval()\n metric.reset()\n losses = []\n for batch in tqdm(data_loader):\n input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch\n max_batch_len = input_ids.shape[-1]\n ent_mask = paddle.unsqueeze(masks, axis=2)\n spo_mask = paddle.matmul(ent_mask, ent_mask, transpose_y=True)\n spo_mask = paddle.unsqueeze(spo_mask, axis=1)\n\n logits = model(input_ids, token_type_ids, position_ids)\n\n ent_loss = criterion(\n logits[0], ent_label[0], weight=ent_mask, reduction='sum')\n spo_loss = criterion(\n logits[1], spo_label[0], weight=spo_mask, reduction='sum')\n loss = ent_loss + spo_loss\n losses.append(loss.numpy())\n lengths = paddle.sum(masks, axis=-1)\n correct = metric.compute(lengths, logits[0], logits[1], ent_label[1],\n spo_label[1])\n metric.update(correct)\n results = metric.accumulate()\n print('eval loss: %.5f, entity f1: %.5f, spo f1: %.5f' %\n (np.mean(losses), results['entity'][2], results['spo'][2]))\n model.train()\n metric.reset()\n\n", "d_id": 118342, "documentation": { "docstring": "\n Given a dataset, it evals model and compute the metric.\n Args:\n model(obj:`paddle.nn.Layer`): A model to classify texts.\n dataloader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.\n criterion(`paddle.nn.functional`): It can compute the loss.\n metric(obj:`paddle.metric.Metric`): The evaluation metric.\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 72, "language": "en" } }, { "id": 205717, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/options.py", "file_name": "options.py", "fun_name": "get_fields", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_fields(self, 
include_parents=True, include_hidden=False):\n \n if include_parents is False:\n include_parents = PROXY_PARENTS\n return self._get_fields(\n include_parents=include_parents, include_hidden=include_hidden\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 66, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 35, "n_ast_nodes": 55, "n_identifiers": 6, "random_cut": "def get_fields(self, include_parents=True, include_hidden=False):\n \n if include_parents is False:\n include_parents = PROXY_PARENTS\n return self._get_fields(\n ", "d_id": 51174, "documentation": { "docstring": "\n Return a list of fields associated to the model. By default, include\n forward and reverse fields, fields derived from inheritance, but not\n hidden fields. The returned fields can be changed using the parameters:\n\n - include_parents: include fields derived from inheritance\n - include_hidden: include fields that have a related_name that\n starts with a \"+\"\n ", "n_words": 53, "vocab_size": 40, "n_whitespaces": 123, "language": "en" } }, { "id": 204309, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/sessions/backends/file.py", "file_name": "file.py", "fun_name": "_key_to_file", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _key_to_file(self, session_key=None):\n \n if session_key is None:\n session_key = self._get_or_create_session_key()\n\n # Make sure we're not vulnerable to directory traversal. Session keys\n # should always be md5s, so they should never contain directory\n # components.\n if not set(session_key).issubset(VALID_KEY_CHARS):\n raise InvalidSessionKey(\"Invalid characters in session key\")\n\n return os.path.join(self.storage_path, self.file_prefix + session_key)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 119, "n_words": 48, "vocab_size": 41, "complexity": 3, "nloc": 6, "token_counts": 56, "n_ast_nodes": 96, "n_identifiers": 13, "random_cut": "def _key_to_file(self, session_key=None):\n \n if session_key is None:\n session_key ", "d_id": 50690, "documentation": { "docstring": "\n Get the file associated with this session key.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 61197, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/hashes.py", "file_name": "hashes.py", "fun_name": "check_against_chunks", "commit_message": "upd; format", "code": "def check_against_chunks(self, chunks):\n # type: (Iterator[bytes]) -> None\n \n gots = {}\n for hash_name in self._allowed.keys():\n try:\n gots[hash_name] = hashlib.new(hash_name)\n except (ValueError, TypeError):\n raise InstallationError(f\"Unknown hash name: {hash_name}\")\n\n for chunk in chunks:\n for hash in gots.values():\n hash.update(chunk)\n\n for hash_name, got in gots.items():\n if got.hexdigest() in self._allowed[hash_name]:\n return\n self._raise(gots)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 200, "n_words": 47, "vocab_size": 38, "complexity": 7, "nloc": 14, "token_counts": 101, "n_ast_nodes": 167, "n_identifiers": 20, "random_cut": "def check_against_chunks(self, chunks):\n # type: 
(Iterator[bytes]) -> None\n \n gots = {}\n for hash_name in self._allowed.keys():\n try:\n gots[hash_name] = hashlib.new(hash_name)\n except (ValueError, TypeError):\n raise Installati", "d_id": 12436, "documentation": { "docstring": "Check good hashes against ones built from iterable of chunks of\n data.\n\n Raise HashMismatch if none match.\n\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 244125, "commit_id": "3b2e9655631a2edd28bb94c640bd6a74c0bfad55", "repo": "mmdetection", "path": "mmdet/models/losses/cross_entropy_loss.py", "file_name": "cross_entropy_loss.py", "fun_name": "_expand_onehot_labels", "commit_message": "[Fix] Fix reduction=mean in CELoss. (#7449)\n\n* [Fix] Fix ignore in CELoss.\r\n\r\n* add ut\r\n\r\n* fix and add comments\r\n\r\n* add avg_non_ignore option\r\n\r\n* bce avg\r\n\r\n* fix lint", "code": "def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):\n \n bin_labels = labels.new_full((labels.size(0), label_channels), 0)\n valid_mask = (labels >= 0) & (labels != ignore_index)\n inds = torch.nonzero(\n valid_mask & (labels < label_channels), as_tuple=False)\n\n if inds.numel() > 0:\n bin_labels[inds, labels[inds]] = 1\n\n valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),\n label_channels).float()\n if label_weights is None:\n bin_label_weights = valid_mask\n else:\n bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)\n bin_label_weights *= valid_mask\n\n return bin_labels, bin_label_weights, valid_mask\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 169, "n_words": 61, "vocab_size": 42, "complexity": 3, "nloc": 15, "token_counts": 147, "n_ast_nodes": 223, "n_identifiers": 19, "random_cut": "def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):\n \n bin_labels = labels.new_full((labels.size(0), label_channels), 0)\n valid_mask = (labels >= 0) & (labels != ignore_index)\n inds = torch.nonzero(\n valid_mask & (labels < label_channels), as_tuple=False)\n\n if inds.numel() > 0:\n bin_labels[inds, labels[inds]] = 1\n\n valid_mask = valid_mask.view(-1, 1).expand(", "d_id": 70252, "documentation": { "docstring": "Expand onehot labels to match the size of prediction.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 271751, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_test.py", "file_name": "training_test.py", "fun_name": "test_sequence_input_types", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_sequence_input_types(self, input_type):\n \n if not tf.executing_eagerly():\n self.skipTest(\"Improved checking is only present in data_adapter.\")\n\n xy_function, x_function = self._make_sequence_input_functions(\n input_type\n )\n fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {}\n if input_type == \"generator\":\n fit_kwargs[\"steps_per_epoch\"] = 4\n evaluate_kwargs[\"steps\"] = 4\n predict_kwargs[\"steps\"] = 4\n\n model = test_utils.get_small_mlp(1, 1, 1)\n model.compile(\n loss=\"mse\",\n optimizer=\"sgd\",\n run_eagerly=test_utils.should_run_eagerly(),\n )\n\n model.fit(xy_function(use_namedtuple=False), **fit_kwargs)\n model.evaluate(xy_function(use_namedtuple=False), **evaluate_kwargs)\n model.predict(x_function(use_namedtuple=False), **predict_kwargs)\n", "url": 
"https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 227, "n_words": 55, "vocab_size": 44, "complexity": 3, "nloc": 20, "token_counts": 144, "n_ast_nodes": 239, "n_identifiers": 24, "random_cut": "def test_sequence_input_types(self, input_type):\n \n if not tf.executing_eagerly():\n self.skipTest(\"Improved checking is only present in data_adapter.\")\n\n xy_function, x_function = self._make_sequence_input_functions(\n input_type\n )\n fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {}\n if input_type == \"generator\":\n fit_kwargs[\"steps_per_epoch\"] = 4\n evaluate_kwargs[\"steps\"] = 4\n predict_kwargs[\"steps\"] = 4\n\n model = test_utils.get_small_mlp(1, 1, 1)\n model.compile(\n loss=\"mse\",\n optimizer=\"sgd\",\n run_eagerly=test_utils.should_run_eagerly(),\n )\n\n model.fit(xy_function(use_namedtuple=", "d_id": 80852, "documentation": { "docstring": "Ensure that namedtuples and tuples are plumbed identically.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 68000, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/utils.py", "file_name": "utils.py", "fun_name": "get_stock_value_on", "commit_message": "style: format code with black", "code": "def get_stock_value_on(warehouse=None, posting_date=None, item_code=None):\n\tif not posting_date:\n\t\tposting_date = nowdate()\n\n\tvalues, condition = [posting_date], \"\"\n\n\tif warehouse:\n\n\t\tlft, rgt, is_group = frappe.db.get_value(\"Warehouse\", warehouse, [\"lft\", \"rgt\", \"is_group\"])\n\n\t\tif is_group:\n\t\t\tvalues.extend([lft, rgt])\n\t\t\tcondition += \"and exists (\\\n\t\t\t\tselect name from `tabWarehouse` wh where wh.name = sle.warehouse\\\n\t\t\t\tand wh.lft >= %s and wh.rgt <= %s)\"\n\n\t\telse:\n\t\t\tvalues.append(warehouse)\n\t\t\tcondition += \" AND warehouse = %s\"\n\n\tif item_code:\n\t\tvalues.append(item_code)\n\t\tcondition += \" AND item_code = %s\"\n\n\tstock_ledger_entries = frappe.db.sql(\n\t\t.format(\n\t\t\tcondition\n\t\t),\n\t\tvalues,\n\t\tas_dict=1,\n\t)\n\n\tsle_map = {}\n\tfor sle in stock_ledger_entries:\n\t\tif not (sle.item_code, sle.warehouse) in sle_map:\n\t\t\tsle_map[(sle.item_code, sle.warehouse)] = flt(sle.stock_value)\n\n\treturn sum(sle_map.values())\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 70, "n_words": 100, "vocab_size": 75, "complexity": 7, "nloc": 35, "token_counts": 172, "n_ast_nodes": 294, "n_identifiers": 25, "random_cut": "def get_stock_value_on(warehouse=None, posting_date=None, item_code=None):\n\tif not posting_date:\n\t\tposting_date = nowdate()\n\n\tvalues, condition = [posting_date], \"\"\n\n\tif warehouse:\n\n\t\tlft, rgt, is_group = frappe.db.get_value(\"Warehouse\", w", "d_id": 14704, "documentation": { "docstring": "\n\t\tSELECT item_code, stock_value, name, warehouse\n\t\tFROM `tabStock Ledger Entry` sle\n\t\tWHERE posting_date <= %s {0}\n\t\t\tand is_cancelled = 0\n\t\tORDER BY timestamp(posting_date, posting_time) DESC, creation DESC\n\t", "n_words": 26, "vocab_size": 26, "n_whitespaces": 21, "language": "en" } }, { "id": 215084, "commit_id": "f1c37893caf90738288e789c3233ab934630254f", "repo": "salt", "path": "salt/modules/aixpkg.py", "file_name": "aixpkg.py", "fun_name": "_is_installed_rpm", "commit_message": "Working tests for install", "code": "def 
_is_installed_rpm(name):\n \n log.debug(f\"_is_installed_rpm '{name}'\")\n cmd = [\"/usr/bin/rpm\", \"-q\", name]\n return __salt__[\"cmd.retcode\"](cmd) == 0\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 62, "n_identifiers": 6, "random_cut": "def _is_installed_rpm(name):\n \n log.debug(f\"_is_installed_rpm '{name}'\")\n cmd = [\"/usr/bin/rpm\", \"-q\", name]\n return __sal", "d_id": 53802, "documentation": { "docstring": "\n Returns True if the rpm package is installed. Otherwise returns False.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 148283, "commit_id": "0e6c042e29cbbe429d81c9c1af3c75c261f00980", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/util.py", "file_name": "util.py", "fun_name": "iter_tree_files", "commit_message": "[Bugfix] fix invalid excluding of Black (#24042)\n\n- We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options\r\n- Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.", "code": "def iter_tree_files(root, on_error=None, follow_links=None):\n\t\n\tif on_error is not None and not callable(on_error):\n\t\traise TypeError(\"on_error:{!r} is not callable.\".format(on_error))\n\n\tif follow_links is None:\n\t\tfollow_links = True\n\n\tfor entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links):\n\t\tif not entry.is_dir(follow_links):\n\t\t\tyield entry.path\n\n\n# Alias `iter_tree_files()` as `iter_tree()`.\niter_tree = iter_tree_files\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 35, "n_words": 45, "vocab_size": 36, "complexity": 6, "nloc": 8, "token_counts": 81, "n_ast_nodes": 136, "n_identifiers": 14, "random_cut": "def iter_tree_files(root, on_error=None, follow_links=None):\n\t\n\tif on_error is not None and not callable(on_error):\n\t\traise TypeError(\"on_error:{!r} is not callable.\".format(on_error))\n\n\tif follow_links is None:\n\t\tfollow_links = True\n\n\tfor entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links):\n\t\tif not entry.is_dir(follow_links):\n\t\t\tyield entry.path\n\n\n# Alias `iter_tree_files()` as `iter_tree()`.\niter_tree = iter_tree_files\n\n", "d_id": 34219, "documentation": { "docstring": "\n\tWalks the specified directory for all files.\n\n\t*root* (:class:`str`) is the root directory to search for files.\n\n\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n\toptionally is the error handler for file-system exceptions. It will be\n\tcalled with the exception (:exc:`OSError`). Reraise the exception to\n\tabort the walk. Default is :data:`None` to ignore file-system\n\texceptions.\n\n\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether\n\tto walk symbolic links that resolve to directories. 
Default is\n\t:data:`None` for :data:`True`.\n\n\tRaises :exc:`RecursionError` if recursion is detected.\n\n\tReturns an :class:`~collections.abc.Iterable` yielding the path to\n\teach file (:class:`str`) relative to *root*.\n\t", "n_words": 90, "vocab_size": 59, "n_whitespaces": 77, "language": "en" } }, { "id": 60970, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/req/constructors.py", "file_name": "constructors.py", "fun_name": "_get_url_from_path", "commit_message": "upd; format", "code": "def _get_url_from_path(path, name):\n # type: (str, str) -> Optional[str]\n \n if _looks_like_path(name) and os.path.isdir(path):\n if is_installable_dir(path):\n return path_to_url(path)\n raise InstallationError(\n f\"Directory {name!r} is not installable. Neither 'setup.py' \"\n \"nor 'pyproject.toml' found.\"\n )\n if not is_archive_file(path):\n return None\n if os.path.isfile(path):\n return path_to_url(path)\n urlreq_parts = name.split('@', 1)\n if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):\n # If the path contains '@' and the part before it does not look\n # like a path, try to treat it as a PEP 440 URL req instead.\n return None\n logger.warning(\n 'Requirement %r looks like a filename, but the '\n 'file does not exist',\n name\n )\n return path_to_url(path)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 240, "n_words": 100, "vocab_size": 73, "complexity": 8, "nloc": 21, "token_counts": 108, "n_ast_nodes": 191, "n_identifiers": 16, "random_cut": "def _get_url_from_path(path, name):\n # type: (str, str) -> Optional[str]\n \n if _looks_like_path(name) and os.path.isdir(path):\n if is_installable_dir(path):\n return path_to_url(path)\n raise InstallationError(\n f\"Directory {name!r} is not installable. Neither 'setup.py' \"\n \"nor 'pyproject.toml' found.\"\n )\n if not is_archive_file(path):\n return None\n if os.path.isfile(path):\n return path_to_url(path)\n urlreq_parts = name.split('@', 1)\n", "d_id": 12362, "documentation": { "docstring": "\n First, it checks whether a provided path is an installable directory\n (e.g. it has a setup.py). If it is, returns the path.\n\n If false, check if the path is an archive file (such as a .whl).\n The function checks if the path is a file. 
If false, if the path has\n an @, it will treat it as a PEP 440 URL requirement and return the path.\n ", "n_words": 67, "vocab_size": 39, "n_whitespaces": 86, "language": "en" } }, { "id": 215091, "commit_id": "f1c37893caf90738288e789c3233ab934630254f", "repo": "salt", "path": "tests/pytests/unit/modules/test_aixpkg.py", "file_name": "test_aixpkg.py", "fun_name": "test_install_fileset_with_bff_extension", "commit_message": "Working tests for install", "code": "def test_install_fileset_with_bff_extension():\n \n installp_call = MagicMock(return_value={\"retcode\": 0, \"stdout\": \"\"})\n fileset_pkg_name = (\n \"/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff\"\n )\n list_pkgs_mock = MagicMock(\n side_effect=[{\"bos.rte.printers\": \"7.1.6.0\"}, {\"bos.rte.printers\": \"7.2.4.0\"}]\n )\n with patch(\"pathlib.Path.is_file\", return_value=True):\n with patch.dict(\n aixpkg.__salt__,\n {\"cmd.run_all\": installp_call, \"config.get\": MagicMock(return_value=False)},\n ), patch.object(aixpkg, \"list_pkgs\", list_pkgs_mock):\n result = aixpkg.install(fileset_pkg_name)\n assert installp_call.call_count == 1\n installp_call.assert_any_call(\n \"/usr/sbin/installp -acYXg -d /cecc/repos/aix72/TL3/BASE/installp/ppc bos.rte.printers_7.2.2.0.bff\",\n python_shell=False,\n )\n expected = {\"bos.rte.printers\": {\"old\": \"7.1.6.0\", \"new\": \"7.2.4.0\"}}\n assert result == expected\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 228, "n_words": 61, "vocab_size": 49, "complexity": 1, "nloc": 21, "token_counts": 137, "n_ast_nodes": 248, "n_identifiers": 18, "random_cut": "def test_install_fileset_with_bff_extension():\n \n installp_call = MagicMock(return_value={\"retcode\": 0, \"stdout\": \"\"})\n fileset_pkg_name = (\n \"/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff\"\n )\n list_pkgs_mock = MagicMock(\n side_effect=[{\"bos.rte.printers\": \"7.1.6.0\"}, {\"bos.rte.printers\": \"7.2.4.0\"}]\n )\n with patch(\"pathlib.Path.is_file\", return_value=True):\n with patch.dict(\n aixpkg.__salt__,\n {\"cmd.run_all\": installp_call, \"config.get\": MagicMock(return_value=False)},\n ), patch.object(aixpkg, \"list_pkgs\", list_pkgs_mock):\n result = aixpkg.install(fileset_pkg_name)\n assert installp_call.call_count == ", "d_id": 53808, "documentation": { "docstring": "\n Test install of fileset with bff extension\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 66174, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/leave_block_list/leave_block_list.py", "file_name": "leave_block_list.py", "fun_name": "is_user_in_allow_list", "commit_message": "style: format code with black", "code": "def is_user_in_allow_list(block_list):\n\treturn frappe.session.user in frappe.db.sql_list(\n\t\t,\n\t\tblock_list,\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 4, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def is_user_in_allow_list(block_list):\n\treturn frappe.ses", "d_id": 14123, "documentation": { "docstring": "select allow_user\n\t\tfrom `tabLeave Block List Allow` where parent=%s", "n_words": 9, "vocab_size": 9, "n_whitespaces": 7, "language": "en" } }, { "id": 216276, "commit_id": 
"3c7e1ec1f08abd7cd1ba78ad7880acb6ba6fdce7", "repo": "salt", "path": "tests/pytests/functional/transport/server/test_req_channel.py", "file_name": "test_req_channel.py", "fun_name": "test_normalization", "commit_message": "Fix minion unit tests, specifically .../tests/pytests/test_minion.py", "code": "def test_normalization(push_channel):\n \n types = {\n \"list\": list,\n }\n msgs = [\n {\"list\": tuple([1, 2, 3])},\n ]\n for msg in msgs:\n ret = push_channel.send(msg, timeout=5, tries=1)\n for key, value in ret[\"load\"].items():\n assert types[key] == type(value)\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 34, "vocab_size": 30, "complexity": 3, "nloc": 11, "token_counts": 78, "n_ast_nodes": 124, "n_identifiers": 15, "random_cut": "def test_normalization(push_channel):\n \n types = {\n \"list\": list,\n }\n msgs = [\n {\"list\": tuple([1, 2, 3])},\n ]\n for msg in msgs:\n ret = push_channel.send(msg, timeout=5, tries=1)\n ", "d_id": 54495, "documentation": { "docstring": "\n Since we use msgpack, we need to test that list types are converted to lists\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 22, "language": "en" } }, { "id": 168206, "commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", "repo": "pandas", "path": "pandas/core/arrays/datetimes.py", "file_name": "datetimes.py", "fun_name": "to_perioddelta", "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", "code": "def to_perioddelta(self, freq) -> TimedeltaArray:\n \n # Deprecaation GH#34853\n warnings.warn(\n \"to_perioddelta is deprecated and will be removed in a \"\n \"future version. \"\n \"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.\",\n FutureWarning,\n # stacklevel chosen to be correct for when called from DatetimeIndex\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n from pandas.core.arrays.timedeltas import TimedeltaArray\n\n if self._ndarray.dtype != \"M8[ns]\":\n raise NotImplementedError(\"Only supported for nanosecond resolution.\")\n\n i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8\n m8delta = i8delta.view(\"m8[ns]\")\n return TimedeltaArray(m8delta)\n\n # -----------------------------------------------------------------\n # Properties - Vectorized Timestamp Properties/Methods\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 219, "n_words": 73, "vocab_size": 63, "complexity": 2, "nloc": 27, "token_counts": 87, "n_ast_nodes": 152, "n_identifiers": 24, "random_cut": "def to_perioddelta(self, freq) -> TimedeltaArray:\n \n # Deprecaation GH#34853\n warnings.warn(\n \"to_perioddelta is deprecated and will be removed in a \"\n \"future version. 
\"\n \"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.\",\n FutureWarning,\n # stacklevel chosen to be correct for when called from DatetimeIndex\n stacklevel=find_stack_level(inspect.curre", "d_id": 40230, "documentation": { "docstring": "\n Calculate deltas between self values and self converted to Periods at a freq.\n\n Used for vectorized offsets.\n\n Parameters\n ----------\n freq : Period frequency\n\n Returns\n -------\n TimedeltaArray/Index\n ", "n_words": 26, "vocab_size": 25, "n_whitespaces": 90, "language": "en" } }, { "id": 22678, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "linear-algebra-python/src/lib.py", "file_name": "lib.py", "fun_name": "set", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def set(self, components):\n \n if len(components) > 0:\n self.__components = components\n else:\n raise Exception(\"please give any vector\")\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 6, "random_cut": "def set(self, components):\n \n if len(components) > 0:\n ", "d_id": 4407, "documentation": { "docstring": "\n input: new components\n changes the components of the vector.\n replace the components with newer one.\n ", "n_words": 15, "vocab_size": 11, "n_whitespaces": 44, "language": "en" } }, { "id": 112742, "commit_id": "cbac2c5c0f7606aca8ccf08fbd418ffe3adfe427", "repo": "nni", "path": "nni/algorithms/compression/v2/pytorch/pruning/tools/base.py", "file_name": "base.py", "fun_name": "get_best_result", "commit_message": "[Compression] fix typehints (#4800)", "code": "def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]:\n \n if self._best_task_id is not None:\n compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth'))\n compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth'))\n with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f:\n config_list = json_tricks.load(f)\n return self._best_task_id, compact_model, compact_model_masks, self._best_score, config_list\n return None\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 123, "n_words": 43, "vocab_size": 35, "complexity": 2, "nloc": 15, "token_counts": 128, "n_ast_nodes": 199, "n_identifiers": 24, "random_cut": "def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]:\n \n if self._best_task_id is not None:\n compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth'))\n compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth'))\n with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f:\n config_list = json_tricks.load(f)\n return self._best_task_id,", "d_id": 24742, "documentation": { "docstring": "\n Returns\n -------\n Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]]\n If self._best_task_id is not None,\n return best task id, best compact model, masks on the compact model, score, config list used in this task.\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 84, "language": "en" } }, { "id": 
243757, "commit_id": "2ae55ccbdad9c842929fb238ea1eb81d1f999024", "repo": "Pillow", "path": "src/PIL/ImageFont.py", "file_name": "ImageFont.py", "fun_name": "set_variation_by_axes", "commit_message": "Improve exception traceback readability", "code": "def set_variation_by_axes(self, axes):\n \n try:\n self.font.setvaraxes(axes)\n except AttributeError as e:\n msg = \"FreeType 2.9.1 or greater is required\"\n raise NotImplementedError(msg) from e\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 21, "vocab_size": 21, "complexity": 2, "nloc": 6, "token_counts": 33, "n_ast_nodes": 58, "n_identifiers": 9, "random_cut": "def set_variation_by_axes(self, axes):\n \n try:\n self.font.setvaraxes(", "d_id": 70113, "documentation": { "docstring": "\n :param axes: A list of values for each axis.\n :exception OSError: If the font is not a variation font.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 221111, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "dispatch_line", "commit_message": "add python 3.10.4 for windows", "code": "def dispatch_line(self, frame):\n \n if self.stop_here(frame) or self.break_here(frame):\n self.user_line(frame)\n if self.quitting: raise BdbQuit\n return self.trace_dispatch\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 57, "n_words": 14, "vocab_size": 13, "complexity": 4, "nloc": 5, "token_counts": 40, "n_ast_nodes": 66, "n_identifiers": 9, "random_cut": "def dispatch_line(self, frame):\n \n if self.stop_here(frame) or self.break_here(frame):\n self.user_line(frame)\n if self.quitting: raise BdbQuit\n return self.trace_dispatch\n", "d_id": 56214, "documentation": { "docstring": "Invoke user function and return trace function for line event.\n\n If the debugger stops on the current line, invoke\n self.user_line(). Raise BdbQuit if self.quitting is set.\n Return self.trace_dispatch to continue tracing in this scope.\n ", "n_words": 34, "vocab_size": 32, "n_whitespaces": 62, "language": "en" } }, { "id": 138497, "commit_id": "9ee24530abf1b5e3239869b5257dd7b678337b90", "repo": "ray", "path": "python/ray/data/impl/plan.py", "file_name": "plan.py", "fun_name": "clear", "commit_message": "[Datasets] [Out-of-Band Serialization: 2/3] Refactor `ExecutionPlan` to maintain complete lineage and eagerly unlink block references. (#23931)\n\nThis PR refactors ExecutionPlan to maintain complete stage lineage, even for eagerly computed datasets, while ensuring that block references are unlinked as early as possible in order to more eagerly release block memory. 
This PR is the final precursor to adding the actual out-of-band serialization APIs (PR 3/3).\r\n\r\nThe fully lineage has to be maintained, even for eagerly computed datasets, since the lineage is needed for out-of-band serialization of datasets.", "code": "def clear(self) -> None:\n \n self._in_blocks.clear()\n self._snapshot_blocks = None\n self._snapshot_stats = None\n # We're erasing the snapshot, so put all stages into the \"after snapshot\"\n # bucket.\n self._stages_after_snapshot = (\n self._stages_before_snapshot + self._stages_after_snapshot\n )\n self._stages_before_snapshot = []\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 110, "n_words": 36, "vocab_size": 28, "complexity": 1, "nloc": 11, "token_counts": 44, "n_ast_nodes": 76, "n_identifiers": 7, "random_cut": "def clear(self) -> None:\n \n self._in_blocks.clear()\n self._snapshot_blocks = None\n self._snapshot_stats = None\n #", "d_id": 31440, "documentation": { "docstring": "Clear all cached block references of this plan, including input blocks.\n\n This will render the plan un-executable unless the root is a LazyBlockList.", "n_words": 23, "vocab_size": 22, "n_whitespaces": 29, "language": "en" } }, { "id": 260331, "commit_id": "24c2448cc7687fbacbc3a9af13f47a935dfcbeeb", "repo": "scikit-learn", "path": "sklearn/cluster/_birch.py", "file_name": "_birch.py", "fun_name": "_global_clustering", "commit_message": "MAINT validate parameters in Birch (#23593)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: jeremiedbb ", "code": "def _global_clustering(self, X=None):\n \n clusterer = self.n_clusters\n centroids = self.subcluster_centers_\n compute_labels = (X is not None) and self.compute_labels\n\n # Preprocessing for the global clustering.\n not_enough_centroids = False\n if isinstance(clusterer, Integral):\n clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)\n # There is no need to perform the global clustering step.\n if len(centroids) < self.n_clusters:\n not_enough_centroids = True\n\n # To use in predict to avoid recalculation.\n self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)\n\n if clusterer is None or not_enough_centroids:\n self.subcluster_labels_ = np.arange(len(centroids))\n if not_enough_centroids:\n warnings.warn(\n \"Number of subclusters found (%d) by BIRCH is less \"\n \"than (%d). Decrease the threshold.\"\n % (len(centroids), self.n_clusters),\n ConvergenceWarning,\n )\n else:\n # The global clustering step that clusters the subclusters of\n # the leaves. 
It assumes the centroids of the subclusters as\n # samples and finds the final centroids.\n self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)\n\n if compute_labels:\n self.labels_ = self._predict(X)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 446, "n_words": 131, "vocab_size": 88, "complexity": 8, "nloc": 23, "token_counts": 151, "n_ast_nodes": 249, "n_identifiers": 25, "random_cut": "def _global_clustering(self, X=None):\n \n clusterer = self.n_clusters\n centroids = self.subcluster_centers_\n compute_labels = (X is not None) and self.compute_la", "d_id": 76190, "documentation": { "docstring": "\n Global clustering for the subclusters obtained after fitting\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 47930, "commit_id": "1dfae80412377eef0a38637535d6a1d3393cc4fe", "repo": "airflow", "path": "tests/www/views/test_views_connection.py", "file_name": "test_views_connection.py", "fun_name": "test_prefill_form_backcompat", "commit_message": "Enable use of custom conn extra fields without prefix (#22607)\n\nPreviously, connection \"extra\" fields which were added as custom fields in the\r\nwebserver connection form had to be named with prefix `extra____`.\r\nThis was because custom fields are registered globally on the connection view model,\r\nso the prefix was necessary to prevent collisions.\r\n\r\nBut the prefix is ugly and cumbersome in the `extra` field. So now what we do is\r\nadd this prefix when defining the field internally in the model, and strip it when\r\nsaving the connection.\r\n\r\nThis doesn't change any providers -- each of those will have to be updated in order to use no-prefix custom fields, with special care to handle backcompat.", "code": "def test_prefill_form_backcompat(extras, expected):\n \n mock_form = mock.Mock()\n mock_form.data = {\"conn_id\": \"test\", \"extra\": json.dumps(extras), \"conn_type\": \"test\"}\n cmv = ConnectionModelView()\n cmv.extra_fields = ['extra__test__my_param']\n\n # this is set by `lazy_add_provider_discovered_options_to_connection_form`\n cmv.extra_field_name_mapping['extra__test__my_param'] = 'my_param'\n\n cmv.prefill_form(form=mock_form, pk=1)\n assert mock_form.extra__test__my_param.data == expected\n\n\n@pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field'])\n@mock.patch('airflow.utils.module_loading.import_string')\n@mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock)", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field'])\n@mock.patch('airflow.utils.module_loading.import_string')\n@mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 65, "n_words": 41, "vocab_size": 37, "complexity": 1, "nloc": 8, "token_counts": 77, "n_ast_nodes": 197, "n_identifiers": 23, "random_cut": "def test_prefill_form_backcompat(extras, expected):\n \n mock_form = mock.Mock()\n mock_form.data = {\"conn_id\": \"test\", \"extra\": json.dumps(extras), \"conn_type\": \"test\"}\n cmv = ConnectionModelView()\n cmv.extra_fields = ['extra__test__my_param']\n\n # this is set by `lazy_add_provider_discovered_options_to_connection_form`\n cmv.extra_field_name", "d_id": 9302, "documentation": { "docstring": "\n When 
populating custom fields in the connection form we should first check for the non-prefixed\n value (since prefixes in extra are deprecated) and then fallback to the prefixed value.\n\n Either way, the field is known internally to the model view as the prefixed value.\n ", "n_words": 44, "vocab_size": 35, "n_whitespaces": 57, "language": "en" } }, { "id": 93030, "commit_id": "522d6f27c28dc5fd4d996ed605865c42fbda0da8", "repo": "sentry", "path": "src/sentry/search/utils.py", "file_name": "utils.py", "fun_name": "tokenize_query", "commit_message": "ref: replace legacy compat.map with list comprehensions (#36372)", "code": "def tokenize_query(query):\n \n result = defaultdict(list)\n query_params = defaultdict(list)\n tokens = split_query_into_tokens(query)\n for token in tokens:\n if token.upper() in [\"OR\", \"AND\"] or token.strip(\"()\") == \"\":\n continue\n\n state = \"query\"\n for idx, char in enumerate(token):\n next_char = token[idx + 1] if idx < len(token) - 1 else None\n if idx == 0 and char in ('\"', \"'\", \":\"):\n break\n if char == \":\":\n if next_char in (\":\", \" \"):\n state = \"query\"\n else:\n state = \"tags\"\n break\n query_params[state].append(token)\n\n if \"query\" in query_params:\n result[\"query\"] = [format_query(query) for query in query_params[\"query\"]]\n for tag in query_params[\"tags\"]:\n key, value = format_tag(tag)\n result[key].append(value)\n return dict(result)\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 313, "n_words": 98, "vocab_size": 63, "complexity": 13, "nloc": 25, "token_counts": 185, "n_ast_nodes": 322, "n_identifiers": 24, "random_cut": "def tokenize_query(query):\n \n result = defaultdict(list)\n query_params = defaultdict(list)\n tokens = split_query_into_tokens(query)\n for token in tokens:\n if token.upper() in [\"OR\", \"AND\"] or token.strip(\"()\") == \"\":\n continue\n\n state = \"query\"\n for idx, char in enumerate(token):\n next_char = token[idx + 1] if idx < len(token) - 1 else None\n if idx == 0 and char in ('\"', \"'\", \":\"):\n break\n if char == \":\":\n if next_char in (\":\", \" \"):\n state = \"query\"\n", "d_id": 18967, "documentation": { "docstring": "\n Tokenizes a standard Sentry search query.\n\n Example:\n >>> query = 'is:resolved foo bar tag:value'\n >>> tokenize_query(query)\n {\n 'is': ['resolved'],\n 'query': ['foo', 'bar'],\n 'tag': ['value'],\n }\n\n Has a companion implementation in static/app/utils/tokenizeSearch.tsx\n ", "n_words": 31, "vocab_size": 29, "n_whitespaces": 77, "language": "en" } }, { "id": 280311, "commit_id": "c9068087d9142bab573e0c300bf9874a957accff", "repo": "keras", "path": "keras/saving/saving_api.py", "file_name": "saving_api.py", "fun_name": "save_model", "commit_message": "Prepare public API surface for v3 saving.\n\nPiperOrigin-RevId: 484397600", "code": "def save_model(model, filepath, overwrite=True, save_format=None, **kwargs):\n \n save_format = get_save_format(filepath, save_format)\n if save_format not in (\"keras\", \"tf\", \"h5\", \"keras_v3\"):\n raise ValueError(\n \"Unknown `save_format` argument. Expected one of \"\n \"'keras', 'tf', or 'h5'. 
\"\n f\"Received: save_format{save_format}\"\n )\n if save_format == \"keras_v3\" or (\n saving_lib.saving_v3_enabled() and save_format == \"keras\"\n ):\n # If file exists and should not be overwritten.\n try:\n exists = os.path.exists(filepath)\n except TypeError:\n exists = False\n if exists and not overwrite:\n proceed = io_utils.ask_to_proceed_with_overwrite(filepath)\n if not proceed:\n return\n if kwargs:\n raise ValueError(\n \"The following argument(s) are not supported \"\n f\"with the native Keras format: {list(kwargs.keys())}\"\n )\n saving_lib.save_model(model, filepath)\n else:\n # Legacy case\n return legacy_sm_saving_lib.save_model(\n model,\n filepath,\n overwrite=overwrite,\n save_format=save_format,\n **kwargs,\n )\n\n\n@keras_export(\"keras.models.load_model\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.models.load_model\")", "n_ast_errors": 1, "ast_levels": 18, "n_whitespaces": 410, "n_words": 110, "vocab_size": 80, "complexity": 10, "nloc": 33, "token_counts": 144, "n_ast_nodes": 273, "n_identifiers": 21, "random_cut": "def save_model(model, filepath, overwrite=True, save_format=None, **kwargs):\n \n save_format = get_save_format(filepath, save_format)\n if save_format not in (\"keras\", \"tf\", \"h5\", \"keras_v3\"):\n raise ValueError(\n \"Unknown `save_format` argument. Expecte", "d_id": 83344, "documentation": { "docstring": "Saves a model as a TensorFlow SavedModel or HDF5 file.\n\n See the [Serialization and Saving guide](\n https://keras.io/guides/serialization_and_saving/) for details.\n\n Args:\n model: Keras model instance to be saved.\n filepath: `str` or `pathlib.Path` object. Path where to save the model.\n overwrite: Whether we should overwrite any existing model at the target\n location, or instead ask the user via an interactive prompt.\n save_format: Either `\"keras\"`, `\"tf\"`, `\"h5\"`,\n indicating whether to save the model\n in the native Keras format (`.keras`),\n in the TensorFlow SavedModel format (referred to as \"SavedModel\"\n below), or in the legacy HDF5 format (`.h5`).\n Defaults to `\"tf\"` in TF 2.X, and `\"h5\"` in TF 1.X.\n\n SavedModel format arguments:\n include_optimizer: Only applied to SavedModel and legacy HDF5 formats.\n If False, do not save the optimizer state. Defaults to True.\n signatures: Only applies to SavedModel format. Signatures to save\n with the SavedModel. See the `signatures` argument in\n `tf.saved_model.save` for details.\n options: Only applies to SavedModel format.\n `tf.saved_model.SaveOptions` object that specifies SavedModel\n saving options.\n save_traces: Only applies to SavedModel format. When enabled, the\n SavedModel will store the function traces for each layer. This\n can be disabled, so that only the configs of each layer are stored.\n Defaults to `True`. 
Disabling this will decrease serialization time\n and reduce file size, but it requires that all custom layers/models\n implement a `get_config()` method.\n\n Example:\n\n ```python\n model = tf.keras.Sequential([\n tf.keras.layers.Dense(5, input_shape=(3,)),\n tf.keras.layers.Softmax()])\n model.save(\"model.keras\")\n loaded_model = tf.keras.models.load_model(\"model.keras\")\n x = tf.random.uniform((10, 3))\n assert np.allclose(model.predict(x), loaded_model.predict(x))\n ```\n\n Note that `model.save()` is an alias for `tf.keras.models.save_model()`.\n\n The SavedModel or HDF5 file contains:\n\n - The model's configuration (architecture)\n - The model's weights\n - The model's optimizer's state (if any)\n\n Thus models can be reinstantiated in the exact same state, without any of\n the code used for model definition or training.\n\n Note that the model weights may have different scoped names after being\n loaded. Scoped names include the model/layer names, such as\n `\"dense_1/kernel:0\"`. It is recommended that you use the layer properties to\n access specific variables, e.g. `model.get_layer(\"dense_1\").kernel`.\n\n __SavedModel serialization format__\n\n With `save_format=\"tf\"`, the model and all trackable objects attached\n to the it (e.g. layers and variables) are saved as a TensorFlow SavedModel.\n The model config, weights, and optimizer are included in the SavedModel.\n Additionally, for every Keras layer attached to the model, the SavedModel\n stores:\n\n * The config and metadata -- e.g. name, dtype, trainable status\n * Traced call and loss functions, which are stored as TensorFlow\n subgraphs.\n\n The traced functions allow the SavedModel format to save and load custom\n layers without the original class definition.\n\n You can choose to not save the traced functions by disabling the\n `save_traces` option. This will decrease the time it takes to save the model\n and the amount of disk space occupied by the output SavedModel. If you\n enable this option, then you _must_ provide all custom class definitions\n when loading the model. 
See the `custom_objects` argument in\n `tf.keras.models.load_model`.\n ", "n_words": 472, "vocab_size": 267, "n_whitespaces": 847, "language": "en" } }, { "id": 176745, "commit_id": "99d31932bd7388aadfa54305c116ca0c9261a67e", "repo": "networkx", "path": "networkx/algorithms/tree/tests/test_mst.py", "file_name": "test_mst.py", "fun_name": "test_random_spanning_tree_additive_small", "commit_message": "Moved random_spanning_tree to public API (#5656)\n\nAdds two new functions random_spanning_tree and\r\ntotal_spanning_tree_weight to public networkx API, accessible\r\nfrom the main namespace.\r\n\r\nThese functions had previously been defined, tested, and used internally\r\nin the TSP package, but have now been added to the public API as they\r\nare generally applicable.\r\n\r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Ross Barnowski ", "code": "def test_random_spanning_tree_additive_small():\n \n pytest.importorskip(\"numpy\")\n\n edges = {\n (0, 1): 1,\n (0, 2): 1,\n (0, 5): 3,\n (1, 2): 2,\n (1, 4): 3,\n (2, 3): 3,\n (5, 3): 4,\n (5, 4): 5,\n (4, 3): 4,\n }\n\n # Build the graph\n G = nx.Graph()\n for u, v in edges:\n G.add_edge(u, v, weight=edges[(u, v)])\n\n solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)]\n solution = nx.Graph()\n solution.add_edges_from(solution_edges)\n\n sampled_tree = nx.random_spanning_tree(\n G, weight=\"weight\", multiplicative=False, seed=37\n )\n\n assert nx.utils.edges_equal(solution.edges, sampled_tree.edges)\n\n\n@pytest.mark.slow", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@pytest.mark.slow", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 193, "n_words": 78, "vocab_size": 57, "complexity": 2, "nloc": 23, "token_counts": 201, "n_ast_nodes": 295, "n_identifiers": 22, "random_cut": "def test_random_spanning_tree_additive_small():\n \n pytest.importorskip(\"numpy\")\n\n edges = {\n (0, 1): 1,\n (0, 2): 1,\n (0, 5): 3,\n (1, 2): 2,\n (1, 4): 3,\n (2, 3): 3,\n (5, 3): 4,\n (5, 4): 5,\n (4, 3): 4,\n }\n\n # Build the graph\n G = nx.Graph()\n for u, v in edges:\n G.add_edge(u, v, weight=edges[(u, v)])\n\n solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)]\n solution = nx.Graph()\n solution.add_edges_from(solution_edges)\n\n sampled_tree = nx.random_spanning_tree(\n G, weight=\"weight\", multiplicative=False, seed=37\n )\n\n assert nx.utils.edge", "d_id": 42077, "documentation": { "docstring": "\n Sample a single spanning tree from the additive method.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 192979, "commit_id": "6ca9c76adb6daf2695d603ad623a9cf1c4f4806f", "repo": "vision", "path": "references/optical_flow/utils.py", "file_name": "utils.py", "fun_name": "sequence_loss", "commit_message": "Upgrade usort to `1.0.2` and black to 22.3.0 (#5106)\n\n* upgrade usort to\r\n\r\n* Also update black\r\n\r\n* Actually use 1.0.2\r\n\r\n* Apply pre-commit\r\n\r\nCo-authored-by: Nicolas Hug ", "code": "def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400):\n \n\n if gamma > 1:\n raise ValueError(f\"Gamma should be < 1, got {gamma}.\")\n\n # exlude invalid pixels and extremely large diplacements\n flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()\n valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)\n\n valid_flow_mask = valid_flow_mask[:, None, :, :]\n\n flow_preds = torch.stack(flow_preds) # shape = (num_flow_updates, batch_size, 2, H, W)\n\n abs_diff = (flow_preds - flow_gt).abs()\n abs_diff = (abs_diff * 
valid_flow_mask).mean(axis=(1, 2, 3, 4))\n\n num_predictions = flow_preds.shape[0]\n weights = gamma ** torch.arange(num_predictions - 1, -1, -1).to(flow_gt.device)\n flow_loss = (abs_diff * weights).sum()\n\n return flow_loss\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 133, "n_words": 86, "vocab_size": 65, "complexity": 2, "nloc": 13, "token_counts": 157, "n_ast_nodes": 244, "n_identifiers": 24, "random_cut": "def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400):\n \n\n if gamma > 1:\n raise ValueError(f\"Gamma should be < 1, got {gamma}.\")\n\n # exlude invalid pixels and extremely large diplacements\n flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()\n valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)\n\n valid_flow_mask = valid_flow_mask[:, None, :, :]\n\n flow_preds = torch.stack(flow_preds) # shape = (num_flow_updates, batch_size, 2, H, W)\n\n abs_diff = (flow_preds - flow_gt).abs()\n abs_diff = (abs_diff * valid_flow_mask).mean(axis=(1, 2, 3, 4))\n\n num_predictions = flow_preds.shape[0]\n weights = gamma ** torch.arange(num_predictions - 1, -1, -1).to(flow_gt.", "d_id": 46922, "documentation": { "docstring": "Loss function defined over sequence of flow predictions", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 34756, "commit_id": "09f9d07271297e97f5a0495fcf7e9cc107fedbdd", "repo": "transformers", "path": "src/transformers/modeling_tf_utils.py", "file_name": "modeling_tf_utils.py", "fun_name": "booleans_processing", "commit_message": "Misfiring tf warnings (#15442)\n\n* Fix spurious warning in TF TokenClassification models\r\n\r\n* Fixing one last spurious warning\r\n\r\n* Removing outdated warning altogether", "code": "def booleans_processing(config, **kwargs):\n \n final_booleans = {}\n\n if tf.executing_eagerly():\n final_booleans[\"output_attentions\"] = (\n kwargs[\"output_attentions\"] if kwargs[\"output_attentions\"] is not None else config.output_attentions\n )\n final_booleans[\"output_hidden_states\"] = (\n kwargs[\"output_hidden_states\"]\n if kwargs[\"output_hidden_states\"] is not None\n else config.output_hidden_states\n )\n final_booleans[\"return_dict\"] = (\n kwargs[\"return_dict\"] if kwargs[\"return_dict\"] is not None else config.return_dict\n )\n\n if \"use_cache\" in kwargs:\n final_booleans[\"use_cache\"] = (\n kwargs[\"use_cache\"] if kwargs[\"use_cache\"] is not None else getattr(config, \"use_cache\", None)\n )\n else:\n final_booleans[\"output_attentions\"] = config.output_attentions\n final_booleans[\"output_hidden_states\"] = config.output_hidden_states\n\n if kwargs.get(\"return_dict\", None) not in (None, True):\n tf_logger.warning(\n \"The parameter `return_dict` cannot be set in graph mode and will always be set to `True`.\"\n )\n final_booleans[\"return_dict\"] = True\n\n if \"use_cache\" in kwargs:\n final_booleans[\"use_cache\"] = getattr(config, \"use_cache\", None)\n\n return final_booleans\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 347, "n_words": 108, "vocab_size": 51, "complexity": 13, "nloc": 38, "token_counts": 247, "n_ast_nodes": 322, "n_identifiers": 13, "random_cut": "def booleans_processing(config, **kwargs):\n \n final_booleans = {}\n\n if tf.executing_eagerly():\n final_booleans[\"output_attentions\"] = (\n kwargs[\"output_attentions\"] if 
kwargs[\"output_attentions\"] is not None else config.output_attentions\n )\n final_booleans[\"output_hidden_states\"] = (\n kwargs[\"output_hidden_states\"]\n if kwargs[\"output_hidden_states\"] is not None\n else config.output_hidden_states\n )\n final_booleans[\"return_dict\"] = ", "d_id": 6324, "documentation": { "docstring": "\n Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or\n graph)\n\n Args:\n config ([`PretrainedConfig`]):\n The config of the running model.\n **kwargs:\n The boolean parameters\n\n Returns:\n A dictionary with the proper values for each boolean\n ", "n_words": 45, "vocab_size": 36, "n_whitespaces": 104, "language": "en" } }, { "id": 46950, "commit_id": "9769a65c20f6028d640061efacbc5bfeb5ebaf3d", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_dagrun_root_fail_unfinished", "commit_message": "Fixed backfill interference with scheduler (#22701)\n\nCo-authored-by: Dmirty Suvorov ", "code": "def test_dagrun_root_fail_unfinished(self):\n \n # TODO: this should live in test_dagrun.py\n # Run both the failed and successful tasks\n dag_id = 'test_dagrun_states_root_fail_unfinished'\n dag = self.dagbag.get_dag(dag_id)\n dr = dag.create_dagrun(\n run_type=DagRunType.SCHEDULED,\n execution_date=DEFAULT_DATE,\n state=None,\n )\n self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', dr.run_id)\n\n with pytest.raises(AirflowException):\n dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec)\n\n # Mark the successful task as never having run since we want to see if the\n # dagrun will be in a running state despite having an unfinished task.\n with create_session() as session:\n ti = dr.get_task_instance('test_dagrun_unfinished', session=session)\n ti.state = State.NONE\n session.commit()\n dr.update_state()\n assert dr.state == State.RUNNING\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 259, "n_words": 84, "vocab_size": 70, "complexity": 1, "nloc": 17, "token_counts": 128, "n_ast_nodes": 215, "n_identifiers": 33, "random_cut": "def test_dagrun_root_fail_unfinished(self):\n \n # TODO: this should live in test_dagrun.py\n # Run both the failed and successful tasks\n dag_id = 'test_dagrun_states_root_fail_unfinished'\n dag = self.dagbag.get_", "d_id": 9045, "documentation": { "docstring": "\n DagRuns with one unfinished and one failed root task -> RUNNING\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 168328, "commit_id": "8b72297c8799725e98cb2c6aee664325b752194f", "repo": "pandas", "path": "pandas/plotting/_core.py", "file_name": "_core.py", "fun_name": "_get_call_args", "commit_message": "DEPR: `sort_columns` in `plot` (#47563) (#48073)", "code": "def _get_call_args(backend_name, data, args, kwargs):\n \n if isinstance(data, ABCSeries):\n arg_def = [\n (\"kind\", \"line\"),\n (\"ax\", None),\n (\"figsize\", None),\n (\"use_index\", True),\n (\"title\", None),\n (\"grid\", None),\n (\"legend\", False),\n (\"style\", None),\n (\"logx\", False),\n (\"logy\", False),\n (\"loglog\", False),\n (\"xticks\", None),\n (\"yticks\", None),\n (\"xlim\", None),\n (\"ylim\", None),\n (\"rot\", None),\n (\"fontsize\", None),\n (\"colormap\", None),\n (\"table\", False),\n (\"yerr\", None),\n (\"xerr\", None),\n (\"label\", None),\n (\"secondary_y\", False),\n (\"xlabel\", None),\n 
(\"ylabel\", None),\n ]\n elif isinstance(data, ABCDataFrame):\n arg_def = [\n (\"x\", None),\n (\"y\", None),\n (\"kind\", \"line\"),\n (\"ax\", None),\n (\"subplots\", False),\n (\"sharex\", None),\n (\"sharey\", False),\n (\"layout\", None),\n (\"figsize\", None),\n (\"use_index\", True),\n (\"title\", None),\n (\"grid\", None),\n (\"legend\", True),\n (\"style\", None),\n (\"logx\", False),\n (\"logy\", False),\n (\"loglog\", False),\n (\"xticks\", None),\n (\"yticks\", None),\n (\"xlim\", None),\n (\"ylim\", None),\n (\"rot\", None),\n (\"fontsize\", None),\n (\"colormap\", None),\n (\"table\", False),\n (\"yerr\", None),\n (\"xerr\", None),\n (\"secondary_y\", False),\n (\"sort_columns\", False),\n (\"xlabel\", None),\n (\"ylabel\", None),\n ]\n else:\n raise TypeError(\n f\"Called plot accessor for type {type(data).__name__}, \"\n \"expected Series or DataFrame\"\n )\n\n if \"sort_columns\" in itertools.chain(args, kwargs.keys()):\n warnings.warn(\n \"`sort_columns` is deprecated and will be removed in a future \"\n \"version.\",\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n\n if args and isinstance(data, ABCSeries):\n positional_args = str(args)[1:-1]\n keyword_args = \", \".join(\n [f\"{name}={repr(value)}\" for (name, _), value in zip(arg_def, args)]\n )\n msg = (\n \"`Series.plot()` should not be called with positional \"\n \"arguments, only keyword arguments. The order of \"\n \"positional arguments will change in the future. \"\n f\"Use `Series.plot({keyword_args})` instead of \"\n f\"`Series.plot({positional_args})`.\"\n )\n raise TypeError(msg)\n\n pos_args = {name: value for (name, _), value in zip(arg_def, args)}\n if backend_name == \"pandas.plotting._matplotlib\":\n kwargs = dict(arg_def, **pos_args, **kwargs)\n else:\n kwargs = dict(pos_args, **kwargs)\n\n x = kwargs.pop(\"x\", None)\n y = kwargs.pop(\"y\", None)\n kind = kwargs.pop(\"kind\", \"line\")\n return x, y, kind, kwargs\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1553, "n_words": 266, "vocab_size": 142, "complexity": 9, "nloc": 97, "token_counts": 570, "n_ast_nodes": 924, "n_identifiers": 38, "random_cut": "def _get_call_args(backend_name, data, args, kwargs):\n \n if isinstance(data, ABCSeries):\n arg_def = [\n (\"kind\", \"line\"),\n (\"ax\", None),\n (\"figsize\", None),\n (\"use_index\", True),\n (\"title\", None),\n (\"grid\", None),\n (\"legend\", False),\n (\"style\", None),\n (\"logx\", False),\n (\"logy\", False),\n (\"loglog\", False),\n (\"xticks\", None),\n (\"yticks\", None),\n (\"xlim\", None),\n (\"ylim\", None),\n (\"rot\", None),\n (\"fontsize\", None),\n (\"colormap\", None),\n (\"table\", False),\n (\"yerr\", None),\n (\"xerr\", None),\n (\"label\", None),\n (\"secondary_y\", False),\n (\"xlabel\", None),\n (\"ylabel\", None),\n ]\n elif isinstance(data, ABCDataFrame):\n arg_def = [\n (\"x\", None),\n (\"y\", None),\n (\"kind\", \"line\"),\n (\"ax\", None),\n (\"subplots\", False),\n (\"sharex\", None),\n (\"sharey\", False),\n (\"layout\", None),\n (\"figsize\", None),\n (\"use_index\", True),\n (\"title\", None),\n (\"grid\", None),\n (\"legend\", True),\n (\"style\", None),\n (\"logx\", False),\n (\"logy\", False),\n (\"loglog\", False),\n (\"xticks\", None),\n (\"yticks\", None),\n (\"xlim\", None),\n (\"ylim\", None),\n (\"rot\", None),\n (\"fontsize\", None),\n (\"colormap\", None),\n (\"table\", False),\n (\"yerr\", None),\n (\"xerr\", None),\n 
(\"secondary_y\", False),\n (\"sort_columns\", False),\n (\"xlabel\", None),\n (\"ylabel\", None),\n ]\n else:\n raise TypeError(\n f\"Called plot accessor for type {type(data).__name__}, \"\n \"expected Series or DataFrame\"\n )\n\n if \"sort_columns\" in itertools.chain(args, kwargs.keys()):\n warnings.warn(\n \"`sort_columns` is deprecated and will be removed in a future \"\n \"version.\",\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n\n if args and isinstance(data, ABCSeries):\n positional_args = str(args)[1:-1]\n keyword_args = \", \".join(\n [f\"{name}={repr(value)}\" for (name, _), value in zip(arg_def, args)]\n )\n msg = (\n \"`Series.plot()` should not be called with positional \"\n \"arguments, only keyword arguments. The order of \"", "d_id": 40287, "documentation": { "docstring": "\n This function makes calls to this accessor `__call__` method compatible\n with the previous `SeriesPlotMethods.__call__` and\n `DataFramePlotMethods.__call__`. Those had slightly different\n signatures, since `DataFramePlotMethods` accepted `x` and `y`\n parameters.\n ", "n_words": 28, "vocab_size": 27, "n_whitespaces": 71, "language": "en" } }, { "id": 202094, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/cache/tests_async.py", "file_name": "tests_async.py", "fun_name": "test_ahas_key", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "async def test_ahas_key(self):\n \n await cache.aset(\"hello1\", \"goodbye1\")\n self.assertIs(await cache.ahas_key(\"hello1\"), False)\n self.assertIs(await cache.ahas_key(\"goodbye1\"), False)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 40, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 43, "n_ast_nodes": 80, "n_identifiers": 6, "random_cut": "async def test_ahas_key(self):\n \n await cache.aset(\"hello1\", \"goodbye1\")\n self.assertIs(await cache.ahas_key(\"hello1\"), False)\n self.assertIs(await cache.ahas_key(\"goodbye1\"), False)\n", "d_id": 50048, "documentation": { "docstring": "ahas_key() doesn't ever return True for the dummy cache backend.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 297101, "commit_id": "68e454712dae5b65599ef12a025bc4446f7e3e6e", "repo": "core", "path": "homeassistant/components/gree/climate.py", "file_name": "climate.py", "fun_name": "min_temp", "commit_message": "Use UnitOfTemperature in climate entities [g-l] (#83127)\n\n* Use UnitOfTemperature in climate entities [g-l]\r\n\r\n* Adjust gree\r\n\r\n* Adjust honeywell", "code": "def min_temp(self) -> float:\n \n if self.temperature_unit == UnitOfTemperature.CELSIUS:\n return TEMP_MIN\n return TEMP_MIN_F\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 44, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 8, "random_cut": "def min_temp(self) -> float:\n \n if self.temperature_unit == UnitOfTemperature.CELSIUS:\n return TEMP_MIN\n return TEMP_MIN_F\n", "d_id": 96070, "documentation": { "docstring": "Return the minimum temperature supported by the device.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 195878, "commit_id": "46ba104ee0f9cb35b54c2f5f5591cfabb26d0301", "repo": "sympy", "path": "sympy/core/expr.py", "file_name": 
"expr.py", "fun_name": "nseries", "commit_message": "Fixed failing doctest", "code": "def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0):\n \n if x and x not in self.free_symbols:\n return self\n if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):\n return self.series(x, x0, n, dir, cdir=cdir)\n else:\n return self._eval_nseries(x, n=n, logx=logx, cdir=cdir)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 111, "n_words": 49, "vocab_size": 40, "complexity": 6, "nloc": 7, "token_counts": 91, "n_ast_nodes": 135, "n_identifiers": 11, "random_cut": "def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0):\n \n if x and x not in self.free_symbols:\n return self\n if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):\n return self.series(x, x0, n, dir, cdir=cdir)\n else:\n return self._eval_nseries(x, n=n, logx=logx, cdir=cdir)\n", "d_id": 47462, "documentation": { "docstring": "\n Wrapper to _eval_nseries if assumptions allow, else to series.\n\n If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is\n called. This calculates \"n\" terms in the innermost expressions and\n then builds up the final series just by \"cross-multiplying\" everything\n out.\n\n The optional ``logx`` parameter can be used to replace any log(x) in the\n returned series with a symbolic value to avoid evaluating log(x) at 0. A\n symbol to use in place of log(x) should be provided.\n\n Advantage -- it's fast, because we do not have to determine how many\n terms we need to calculate in advance.\n\n Disadvantage -- you may end up with less terms than you may have\n expected, but the O(x**n) term appended will always be correct and\n so the result, though perhaps shorter, will also be correct.\n\n If any of those assumptions is not met, this is treated like a\n wrapper to series which will try harder to return the correct\n number of terms.\n\n See also lseries().\n\n Examples\n ========\n\n >>> from sympy import sin, log, Symbol\n >>> from sympy.abc import x, y\n >>> sin(x).nseries(x, 0, 6)\n x - x**3/6 + x**5/120 + O(x**6)\n >>> log(x+1).nseries(x, 0, 5)\n x - x**2/2 + x**3/3 - x**4/4 + O(x**5)\n\n Handling of the ``logx`` parameter --- in the following example the\n expansion fails since ``sin`` does not have an asymptotic expansion\n at -oo (the limit of log(x) as x approaches 0):\n\n >>> e = sin(log(x))\n >>> e.nseries(x, 0, 6)\n Traceback (most recent call last):\n ...\n PoleError: ...\n ...\n >>> logx = Symbol('logx')\n >>> e.nseries(x, 0, 6, logx=logx)\n sin(logx)\n\n In the following example, the expansion works but only returns self\n unless the ``logx`` parameter is used:\n\n >>> e = x**y\n >>> e.nseries(x, 0, 2)\n x**y\n >>> e.nseries(x, 0, 2, logx=logx)\n exp(logx*y)\n\n ", "n_words": 294, "vocab_size": 182, "n_whitespaces": 610, "language": "en" } }, { "id": 187229, "commit_id": "d09112ab1f6db6aa605650fe1ff6a3028344f90d", "repo": "streamlink", "path": "tests/test_api_validate.py", "file_name": "test_api_validate.py", "fun_name": "test_getitem_error", "commit_message": "plugin.api.validate: rewrite tests\n\nCompletely rewrite tests using pytest, with full coverage", "code": "def test_getitem_error(self, exception):\n container = self.Container(exception(\"failure\"))\n with pytest.raises(validate.ValidationError) as cm:\n validate.validate(validate.get(\"foo\", default=\"default\"), 
container)\n assert_validationerror(cm.value, )\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 10, "token_counts": 55, "n_ast_nodes": 96, "n_identifiers": 14, "random_cut": "def test_getitem_error(self, exception):\n container = self.Container(exception(\"failure\"))\n with pytest.raises(validate.ValidationError) as cm:", "d_id": 45750, "documentation": { "docstring": "\n ValidationError(GetItemSchema):\n Could not get key 'foo' from object Container\n Context:\n failure\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 71, "language": "en" } }, { "id": 245986, "commit_id": "92e2eb355bc964af5e798281bcb4cb9179fdaecc", "repo": "mmdetection", "path": "mmdet/models/task_modules/prior_generators/point_generator.py", "file_name": "point_generator.py", "fun_name": "num_base_priors", "commit_message": "[Doc]: Add typehint and update docstring for task modules (#9468)\n\n* part 11\r\n\r\n* part 11\r\n\r\n* part 11\r\n\r\n* part 11", "code": "def num_base_priors(self) -> List[int]:\n \n return [1 for _ in range(len(self.strides))]\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 8, "random_cut": "def num_base_priors(self) -> List[int]:\n ", "d_id": 70935, "documentation": { "docstring": "list[int]: The number of priors (points) at a point\n on the feature grid", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 207420, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_utils/test_logentry.py", "file_name": "test_logentry.py", "fun_name": "test_logentry_change_message_localized_datetime_input", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_logentry_change_message_localized_datetime_input(self):\n \n post_data = {\n \"site\": self.site.pk,\n \"title\": \"Changed\",\n \"hist\": \"Some content\",\n \"created_0\": \"12/03/2008\",\n \"created_1\": \"11:54\",\n }\n with translation.override(\"fr\"):\n change_url = reverse(\n \"admin:admin_utils_article_change\", args=[quote(self.a1.pk)]\n )\n response = self.client.post(change_url, post_data)\n self.assertRedirects(\n response, reverse(\"admin:admin_utils_article_changelist\")\n )\n logentry = LogEntry.objects.filter(\n content_type__model__iexact=\"article\"\n ).latest(\"id\")\n self.assertEqual(logentry.get_change_message(), \"Changed Title and History.\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 243, "n_words": 43, "vocab_size": 39, "complexity": 1, "nloc": 20, "token_counts": 113, "n_ast_nodes": 206, "n_identifiers": 24, "random_cut": "def test_logentry_change_message_localized_datetime_input(self):\n \n post_data = {\n \"site\": self.site.pk,\n \"title\": \"Changed\",\n \"hist\": \"Some content\",\n \"created_0\": \"12/03/2008\",\n \"created_1\": \"11:54\",\n }\n with translation.override(\"fr\"):\n change_url = reverse(\n \"admin:admin_utils_article_change\", args=[quote(self.a1.pk)]\n )\n response = self.client.post(change_url, post_data)\n self.assertRedirects(\n response, reverse(\"admin:admin_utils_article_changelist\")\n )\n logentry = 
LogEntry.objects.filter(\n content_type__model__iexact=\"article\"\n ).latest(\"id\")\n self.assertEqual(logentry.get_change_message(), \"Changed Tit", "d_id": 51961, "documentation": { "docstring": "\n Localized date/time inputs shouldn't affect changed form data detection.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 263954, "commit_id": "8ee5afa1ea56906b30ba2ea4578082c61a1f94e2", "repo": "pyinstaller", "path": "PyInstaller/depend/bytecode.py", "file_name": "bytecode.py", "fun_name": "_cleanup_code", "commit_message": "depend: adjust bytecode scanner for python 3.11\n\nPython 3.11 removed CALL_FUNCTION and CALL_METHOD opcodes, replacing\nthem with PRECALL + CALL. For our purposes, it should be enough to\nmatch PRECALL only (as both opcodes have same parameter, i.e., the\nargument count).\n\nIn addition, the bytecode is now peppered with CACHE instructions,\nwhich we filter out in pre-processing phase to avoid complicating\nthe regex rules.\n\nFurthermore, the low bit of argument to LOAD_GLOBAL opcode must\nnow be ignored.", "code": "def _cleanup_code(code):\n return code # Nothing to do here\n\n # language=PythonVerboseRegExp\n _call_function_bytecode = bytecode_regex(\n rb\n )\nelse:\n # Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides\n # unless show_caches=True is used). Dealing with these CACHE instructions in regex rules is going to render them\n # unreadable, so instead we pre-process the bytecode and filter the offending opcodes out.", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "else:", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 97, "n_words": 66, "vocab_size": 52, "complexity": 1, "nloc": 2, "token_counts": 7, "n_ast_nodes": 32, "n_identifiers": 5, "random_cut": "def _cleanup_code(code):\n return code # Nothing to do here\n\n # language=PythonVerboseRegExp\n _call_function_bytecode = bytecode_regex(\n rb\n )\nelse:\n # Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides\n # unless show_caches=True is used). Dealing with these CACHE instructions in regex rules is goin", "d_id": 77523, "documentation": { "docstring": "\n # Matches `global_function('some', 'constant', 'arguments')`.\n\n # Load the global function. In code with >256 of names, this may require extended name references.\n ((?:`EXTENDED_ARG`.)*\n (?:`LOAD_NAME`|`LOAD_GLOBAL`|`LOAD_FAST`).)\n\n # For foo.bar.whizz(), the above is the 'foo', below is the 'bar.whizz'.\n ((?:(?:`EXTENDED_ARG`.)*\n (?:`LOAD_METHOD`|`LOAD_ATTR`).)*)\n\n # Load however many arguments it takes. These (for now) must all be constants.\n # Again, code with >256 constants may need extended enumeration.\n ((?:(?:`EXTENDED_ARG`.)*\n `LOAD_CONST`.)*)\n\n # Call the function. The parameter is the argument count (which may also be >256) if CALL_FUNCTION or\n # CALL_METHOD are used. 
For CALL_FUNCTION_EX, the parameter are flags.\n ((?:`EXTENDED_ARG`.)*\n (?:`CALL_FUNCTION`|`CALL_METHOD`|`CALL_FUNCTION_EX`).)\n ", "n_words": 94, "vocab_size": 66, "n_whitespaces": 207, "language": "en" } }, { "id": 21469, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", "file_name": "tarfile.py", "fun_name": "filemode", "commit_message": "Vendor in pip 22.1.2", "code": "def filemode(mode):\n \n perm = []\n for table in filemode_table:\n for bit, char in table:\n if mode & bit == bit:\n perm.append(char)\n break\n else:\n perm.append(\"-\")\n return \"\".join(perm)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 104, "n_words": 26, "vocab_size": 24, "complexity": 4, "nloc": 10, "token_counts": 51, "n_ast_nodes": 89, "n_identifiers": 9, "random_cut": "def filemode(mode):\n \n perm = []\n for table in filemode_table", "d_id": 3858, "documentation": { "docstring": "Convert a file's mode to a string of the form\n -rwxrwxrwx.\n Used by TarFile.list()\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 9415, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py", "file_name": "upfirdn_2d.py", "fun_name": "filter_2d", "commit_message": "initialize ostec", "code": "def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'):\n r\n\n k = _setup_kernel(k) * gain\n p = k.shape[0] - 1\n return _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)\n\n#----------------------------------------------------------------------------\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 25, "vocab_size": 23, "complexity": 1, "nloc": 21, "token_counts": 54, "n_ast_nodes": 105, "n_identifiers": 12, "random_cut": "def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'):\n r\n\n k = _setup_kernel(k) * gain\n ", "d_id": 1612, "documentation": { "docstring": "Filter a batch of 2D images with the given FIR filter.\n\n Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`\n and filters each image with the given filter. The filter is normalized so that\n if the input pixels are constant, they will be scaled by the specified `gain`.\n Pixels outside the image are assumed to be zero.\n\n Args:\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).\n gain: Scaling factor for signal magnitude (default: 1.0).\n data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).\n impl: Name of the implementation to use. 
Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the same shape and datatype as `x`.\n ", "n_words": 130, "vocab_size": 83, "n_whitespaces": 232, "language": "en" } }, { "id": 66513, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v10_0/set_currency_in_pricing_rule.py", "file_name": "set_currency_in_pricing_rule.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doctype(\"Pricing Rule\")\n\n\tcurrency = frappe.db.get_default(\"currency\")\n\tfor doc in frappe.get_all(\"Pricing Rule\", fields=[\"company\", \"name\"]):\n\t\tif doc.company:\n\t\t\tcurrency = frappe.get_cached_value(\"Company\", doc.company, \"default_currency\")\n\n\t\tfrappe.db.sql(\n\t\t\t, (currency, doc.name)\n\t\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 17, "n_words": 26, "vocab_size": 24, "complexity": 3, "nloc": 9, "token_counts": 73, "n_ast_nodes": 126, "n_identifiers": 13, "random_cut": "def execute():\n\tfrappe.reload_doctype(\"Pricing Rule\")\n\n\tcurrency = frappe.db.get_default(\"currency\")\n\tfor doc in frappe.get_all(\"Pricing Rule\", fields=[\"company\", \"name\"]):\n\t\tif doc.company:\n\t\t\tcurrenc", "d_id": 14198, "documentation": { "docstring": "update `tabPricing Rule` set currency = %s where name = %s", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 51941, "commit_id": "2e373966a7fd3119c205350fb14d0b7bfe74185d", "repo": "PaddleHub", "path": "modules/image/Image_editing/super_resolution/swinir_l_real_sr_x4/swinir.py", "file_name": "swinir.py", "fun_name": "forward", "commit_message": "add swinir_l_real_sr_x4 (#2076)\n\n* git add swinir_l_real_sr_x4\r\n\r\n* fix typo\r\n\r\n* fix typo\r\n\r\nCo-authored-by: chenjian ", "code": "def forward(self, x, mask=None):\n \n B_, N, C = x.shape\n qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4))\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n q = q * self.scale\n attn = (q @ k.transpose((0, 1, 3, 2)))\n\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.reshape(\n (-1, ))].reshape((self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1],\n -1)) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.transpose((2, 0, 1)) # nH, Wh*Ww, Wh*Ww\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n nW = mask.shape[0]\n attn = attn.reshape((B_ // nW, nW, self.num_heads, N, N)) + mask.unsqueeze(1).unsqueeze(0)\n attn = attn.reshape((-1, self.num_heads, N, N))\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose((0, 2, 1, 3)).reshape((B_, N, C))\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 341, "n_words": 131, "vocab_size": 80, "complexity": 2, "nloc": 23, "token_counts": 288, "n_ast_nodes": 490, "n_identifiers": 27, "random_cut": "def forward(self, x, mask=None):\n \n B_, N, C = x.shape\n qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4))\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as 
tuple)\n\n q = q * self.scale\n attn = (q @ k.transpose((0, 1, 3, 2)))\n\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.reshape(\n (-1, ))].reshape((self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1],\n -1)) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.transpose((2, 0, 1)) # nH, Wh*Ww, Wh*Ww\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n nW = mask.shape[0]\n attn = attn.reshape((B_ // nW, nW, self.num_heads, N, N)) + mask.unsqueeze(1).unsqueeze(0)\n ", "d_id": 10437, "documentation": { "docstring": "\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 58, "language": "en" } }, { "id": 248546, "commit_id": "2959184a42398277ff916206235b844a8f7be5d7", "repo": "synapse", "path": "tests/test_event_auth.py", "file_name": "test_event_auth.py", "fun_name": "test_join_rules_invite", "commit_message": "EventAuthTestCase: build events for the right room version\n\nIn practice, when we run the auth rules, all of the events have the right room\nversion. Let's stop building Room V1 events for these tests and use the right\nversion.", "code": "def test_join_rules_invite(self):\n \n creator = \"@creator:example.com\"\n pleb = \"@joiner:example.com\"\n\n auth_events = {\n (\"m.room.create\", \"\"): _create_event(RoomVersions.V6, creator),\n (\"m.room.member\", creator): _join_event(RoomVersions.V6, creator),\n (\"m.room.join_rules\", \"\"): _join_rules_event(\n RoomVersions.V6, creator, \"invite\"\n ),\n }\n\n # A join without an invite is rejected.\n with self.assertRaises(AuthError):\n event_auth.check_auth_rules_for_event(\n RoomVersions.V6,\n _join_event(RoomVersions.V6, pleb),\n auth_events.values(),\n )\n\n # A user cannot be force-joined to a room.\n with self.assertRaises(AuthError):\n event_auth.check_auth_rules_for_event(\n RoomVersions.V6,\n _member_event(RoomVersions.V6, pleb, \"join\", sender=creator),\n auth_events.values(),\n )\n\n # Banned should be rejected.\n auth_events[(\"m.room.member\", pleb)] = _member_event(\n RoomVersions.V6, pleb, \"ban\"\n )\n with self.assertRaises(AuthError):\n event_auth.check_auth_rules_for_event(\n RoomVersions.V6,\n _join_event(RoomVersions.V6, pleb),\n auth_events.values(),\n )\n\n # A user who left cannot re-join.\n auth_events[(\"m.room.member\", pleb)] = _member_event(\n RoomVersions.V6, pleb, \"leave\"\n )\n with self.assertRaises(AuthError):\n event_auth.check_auth_rules_for_event(\n RoomVersions.V6,\n _join_event(RoomVersions.V6, pleb),\n auth_events.values(),\n )\n\n # A user can send a join if they're in the room.\n auth_events[(\"m.room.member\", pleb)] = _member_event(\n RoomVersions.V6, pleb, \"join\"\n )\n event_auth.check_auth_rules_for_event(\n RoomVersions.V6,\n _join_event(RoomVersions.V6, pleb),\n auth_events.values(),\n )\n\n # A user can accept an invite.\n auth_events[(\"m.room.member\", pleb)] = _member_event(\n RoomVersions.V6, pleb, \"invite\", sender=creator\n )\n event_auth.check_auth_rules_for_event(\n RoomVersions.V6,\n _join_event(RoomVersions.V6, pleb),\n auth_events.values(),\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 780, "n_words": 154, "vocab_size": 69, "complexity": 1, "nloc": 56, "token_counts": 325, "n_ast_nodes": 518, "n_identifiers": 17, 
"random_cut": "def test_join_rules_invite(self):\n \n creator = \"@creator:example.com\"\n pleb = \"@joiner:example.com\"\n\n auth_events = {\n (\"m.room.create\", \"\"): _create_event(RoomVersions.V6, creator),\n (\"m.room.member\", creator): _join_event(RoomVersions.V6, creator),\n (\"m.room.join_rules\", \"\"): _join_rules_event(\n ", "d_id": 72338, "documentation": { "docstring": "\n Test joining an invite only room.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 194765, "commit_id": "2d062907bcf416150e36879a2246218babad28b1", "repo": "ParlAI", "path": "parlai/crowdsourcing/tasks/pairwise_per_turn_eval/worlds.py", "file_name": "worlds.py", "fun_name": "validate_onboarding", "commit_message": "Create Per-Turn Evaluation Folder in ParlAI (#4323)\n\n* Auto fixes\r\n\r\n* Remove worker blocklists\r\n\r\n* Add __init__.py\r\n\r\n* Add __init__.py\r\n\r\n* Lint fix\r\n\r\n* Rename task configs\r\n\r\n* More lint error fixes\r\n\r\n* Update Per Turn Eval README with link to paper\r\n\r\n* Add configs to example\r\n\r\n* Remove commented out lines\r\n\r\n* More README improvements\r\n\r\n* Add bibtex to README\r\n\r\n* address comment", "code": "def validate_onboarding(data):\n \n logging.info(f\"Validating onboarding data {data}\")\n messages = data['outputs']['messages']\n if len(messages) == 0:\n return False\n status_message = messages[-2]\n if status_message is None:\n return False\n submitted_data = status_message.get('data')\n if submitted_data is None:\n return False\n final_status = submitted_data.get('final_status')\n return final_status == ONBOARD_SUCCESS\n\n\n# TODO: find a better way to avoid duplicating this from model_chat world.py", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 103, "n_words": 53, "vocab_size": 37, "complexity": 4, "nloc": 13, "token_counts": 73, "n_ast_nodes": 132, "n_identifiers": 11, "random_cut": "def validate_onboarding(data):\n \n logging.info(f\"Validating onboarding data {data}\")\n messages = data['outputs']['messages']\n if len(messages) == 0:\n return False\n status_message = messages[-2]\n if status_message is None:\n return False\n submitted_data = status_message.get('data')\n if submitted_data is None:\n return False\n final_status = submitted_data.get('final_status')\n return final_sta", "d_id": 47065, "documentation": { "docstring": "\n Check the contents of the data to ensure they are valid.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 18, "language": "en" } }, { "id": 300869, "commit_id": "8f4caf414124f380a8f5e1d54aedb54a8f6c5c05", "repo": "core", "path": "tests/helpers/test_event.py", "file_name": "test_event.py", "fun_name": "test_async_track_entity_registry_updated_event_with_empty_list", "commit_message": "Clean up accessing event helpers via hass (#72011)", "code": "async def test_async_track_entity_registry_updated_event_with_empty_list(hass):\n \n unsub_single = async_track_entity_registry_updated_event(\n hass, [], ha.callback(lambda event: None)\n )\n unsub_single2 = async_track_entity_registry_updated_event(\n hass, [], ha.callback(lambda event: None)\n )\n\n unsub_single2()\n unsub_single()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 58, "n_words": 23, "vocab_size": 15, "complexity": 1, "nloc": 9, "token_counts": 50, "n_ast_nodes": 84, "n_identifiers": 8, "random_cut": "async 
def test_async_track_entity_registry_updated_event_with_empty_list(hass):\n \n unsub_single = async_track_entity_registry_updated_event(\n hass, [], ha.callback(lambda event: None)\n )\n unsub_single2 = async_track_entity_registry_updated_event(\n hass, [], ha.callback(lamb", "d_id": 99723, "documentation": { "docstring": "Test async_track_entity_registry_updated_event passing an empty list of entities.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 130296, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/tls_utils.py", "file_name": "tls_utils.py", "fun_name": "generate_self_signed_tls_certs", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def generate_self_signed_tls_certs():\n \n try:\n from cryptography import x509\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives import hashes, serialization\n from cryptography.hazmat.primitives.asymmetric import rsa\n from cryptography.x509.oid import NameOID\n except ImportError:\n raise ImportError(\n \"Using `Security.temporary` requires `cryptography`, please \"\n \"install it using either pip or conda\"\n )\n key = rsa.generate_private_key(\n public_exponent=65537, key_size=2048, backend=default_backend()\n )\n key_contents = key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ).decode()\n\n ray_interal = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"ray-internal\")])\n # This is the same logic used by the GCS server to acquire a\n # private/interal IP address to listen on. 
If we just use localhost +\n # 127.0.0.1 then we won't be able to connect to the GCS and will get\n # an error like \"No match found for server name: 192.168.X.Y\"\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n private_ip_address = s.getsockname()[0]\n s.close()\n altnames = x509.SubjectAlternativeName(\n [\n x509.DNSName(\n socket.gethostbyname(socket.gethostname())\n ), # Probably 127.0.0.1\n x509.DNSName(\"127.0.0.1\"),\n x509.DNSName(private_ip_address), # 192.168.*.*\n x509.DNSName(\"localhost\"),\n ]\n )\n now = datetime.datetime.utcnow()\n cert = (\n x509.CertificateBuilder()\n .subject_name(ray_interal)\n .issuer_name(ray_interal)\n .add_extension(altnames, critical=False)\n .public_key(key.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(now)\n .not_valid_after(now + datetime.timedelta(days=365))\n .sign(key, hashes.SHA256(), default_backend())\n )\n\n cert_contents = cert.public_bytes(serialization.Encoding.PEM).decode()\n\n return cert_contents, key_contents\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 25, "n_whitespaces": 484, "n_words": 167, "vocab_size": 132, "complexity": 2, "nloc": 49, "token_counts": 324, "n_ast_nodes": 522, "n_identifiers": 67, "random_cut": "def generate_self_signed_tls_certs():\n \n try:\n from cryptography import x509\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives import hashes, serialization\n from cryptography.hazmat.primitives.asymmetric import rsa\n from cryptography.x509.oid import NameOID\n except ImportError:\n raise ImportError(\n \"Using `Security.temporary` requires `cryptography`, please \"\n \"install it using either pip or conda\"\n )\n key = rsa.generate_private_key(\n public_exponent=65537, key_size=2048, backend=default_backend()\n )\n key_contents = key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n enc", "d_id": 29220, "documentation": { "docstring": "Create self-signed key/cert pair for testing.\n\n This method requires the library ``cryptography`` be installed.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 20, "language": "en" } }, { "id": 284585, "commit_id": "a6f7e111e68346aeab315985b3704c2710693b38", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/government/gov_controller.py", "file_name": "gov_controller.py", "fun_name": "print_help", "commit_message": "Bounty Hunter mood: 11 bugs fixed (#1853)\n\n* fix #1850\r\n\r\n* fix #1831\r\n\r\n* add extra check to Reddit API keys\r\n\r\n* ignore warning message to update praw api\r\n\r\n* improve OpenBB links\r\n\r\n* fix quick performance only on stocks class because I'm James bitch\r\n\r\n* fix quick performance only on stocks class because I'm James bitch\r\n\r\n* fix #1829\r\n\r\n* fix #1821\r\n\r\n* add messari to keys - fix #1819\r\n\r\n* example of multiple oclumns to check on options/chains\r\n\r\n* minor improvement in xlabel re. 
#1814\r\n\r\n* remove repeated command\r\n\r\n* fix #1698\r\n\r\n* fix line too long\r\n\r\n* fix #1814 fr now\r\n\r\n* fix tests", "code": "def print_help(self):\n \n has_ticker_start = \"[unvl]\" if not self.ticker else \"\"\n has_ticker_end = \"[/unvl]\" if not self.ticker else \"\"\n help_text = f\n console.print(text=help_text, menu=\"Stocks - Government\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 25, "vocab_size": 18, "complexity": 3, "nloc": 24, "token_counts": 42, "n_ast_nodes": 96, "n_identifiers": 10, "random_cut": "def print_help(self):\n \n has_ticker_start = \"[unvl]\" if not self.ticker else \"\"\n has_ticker_end = \"[/unvl]\" if not self.ticker else \"\"\n ", "d_id": 84835, "documentation": { "docstring": "Print help\n[src][QuiverQuant][/src]\n\n[info]Explore:[/info][cmds]\n lasttrades last trades\n topbuys show most purchased stocks\n topsells show most sold stocks\n lastcontracts show last government contracts given out\n qtrcontracts quarterly government contracts analysis\n toplobbying top corporate lobbying tickers\n\n load load a specific ticker for analysis[/cmds]\n\n[param]Ticker: [/param]{self.ticker or None}{has_ticker_start}[cmds]\n\n gtrades show government trades for ticker\n contracts show government contracts for ticker\n histcont show historical quarterly government contracts for ticker\n lobbying corporate lobbying details for ticker[/cmds]{has_ticker_end}\n ", "n_words": 71, "vocab_size": 42, "n_whitespaces": 191, "language": "en" } }, { "id": 67844, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/reorder_item.py", "file_name": "reorder_item.py", "fun_name": "get_item_warehouse_projected_qty", "commit_message": "style: format code with black", "code": "def get_item_warehouse_projected_qty(items_to_consider):\n\titem_warehouse_projected_qty = {}\n\n\tfor item_code, warehouse, projected_qty in frappe.db.sql(\n\t\t.format(\n\t\t\t\", \".join([\"%s\"] * len(items_to_consider))\n\t\t),\n\t\titems_to_consider,\n\t):\n\n\t\tif item_code not in item_warehouse_projected_qty:\n\t\t\titem_warehouse_projected_qty.setdefault(item_code, {})\n\n\t\tif warehouse not in item_warehouse_projected_qty.get(item_code):\n\t\t\titem_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty)\n\n\t\twarehouse_doc = frappe.get_doc(\"Warehouse\", warehouse)\n\n\t\twhile warehouse_doc.parent_warehouse:\n\t\t\tif not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse):\n\t\t\t\titem_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt(\n\t\t\t\t\tprojected_qty\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\titem_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty)\n\t\t\twarehouse_doc = frappe.get_doc(\"Warehouse\", warehouse_doc.parent_warehouse)\n\n\treturn item_warehouse_projected_qty\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 38, "n_words": 60, "vocab_size": 44, "complexity": 6, "nloc": 24, "token_counts": 166, "n_ast_nodes": 265, "n_identifiers": 18, "random_cut": "def get_item_warehouse_projected_qty(items_to_consider):\n\titem_warehouse_projected_qty = {}\n\n\tfor item_code, warehouse, projected_qty in frappe.db.sql(\n\t\t.format(\n\t\t\t\", \".join([\"%s\"] * le", "d_id": 14639, "documentation": { 
"docstring": "select item_code, warehouse, projected_qty\n\t\tfrom tabBin where item_code in ({0})\n\t\t\tand (warehouse != \"\" and warehouse is not null)", "n_words": 19, "vocab_size": 18, "n_whitespaces": 16, "language": "en" } }, { "id": 21619, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/typing_extensions.py", "file_name": "typing_extensions.py", "fun_name": "_collect_type_vars", "commit_message": "Vendor in pip 22.1.2", "code": "def _collect_type_vars(types, typevar_types=None):\n \n if typevar_types is None:\n typevar_types = typing.TypeVar\n tvars = []\n for t in types:\n if (\n isinstance(t, typevar_types) and\n t not in tvars and\n not _is_unpack(t)\n ):\n tvars.append(t)\n if _should_collect_from_parameters(t):\n tvars.extend([t for t in t.__parameters__ if t not in tvars])\n return tuple(tvars)\n\n\nNoReturn = typing.NoReturn\n\n# Some unconstrained type variables. These are used by the container types.\n# (These are not for export.)\nT = typing.TypeVar('T') # Any type.\nKT = typing.TypeVar('KT') # Key type.\nVT = typing.TypeVar('VT') # Value type.\nT_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.\nT_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.\n\nClassVar = typing.ClassVar\n\n# On older versions of typing there is an internal class named \"Final\".\n# 3.8+\nif hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):\n Final = typing.Final\n# 3.7\nelse:", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 225, "n_words": 132, "vocab_size": 89, "complexity": 9, "nloc": 14, "token_counts": 86, "n_ast_nodes": 293, "n_identifiers": 27, "random_cut": "def _collect_type_vars(types, typevar_types=None):\n \n if typevar_types is None:\n typevar_types = typing.TypeVar\n tvars = []\n for t in types:\n if (\n isinstance(t, typevar_types) and\n t not in tvars and\n not _is_unpack(t)\n ):\n tvars.append(t)\n if _should_collect_from_parameters(t):\n tvars.extend([t for t in t.__parameters__ if t not in tvars])\n return tuple(tvars)\n\n\nNoReturn = typing.NoReturn\n\n# Some unconstrained type variables. These are used by the container types.\n# (These are not for export.)\nT = typing.TypeVar('T') # Any type.\nKT = typing.TypeVar('KT') # Key type.\nVT = typing.TypeVar('VT') # Value type.\nT_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.\nT_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.\n\nClassVar = typing.ClassVar\n\n# On older versions of typing there is an internal class named \"Final\".\n# 3.8+\nif hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):\n Final = typing.Final\n# 3.7\nelse:", "d_id": 3959, "documentation": { "docstring": "Collect all type variable contained in types in order of\n first appearance (lexicographic order). 
For example::\n\n _collect_type_vars((T, List[S, T])) == (T, S)\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 35, "language": "en" } }, { "id": 166238, "commit_id": "90140f055892a46f473bd26affab88a7f171e394", "repo": "pandas", "path": "pandas/core/exchange/from_dataframe.py", "file_name": "from_dataframe.py", "fun_name": "from_dataframe", "commit_message": "ENH: Implement DataFrame interchange protocol (#46141)", "code": "def from_dataframe(df, allow_copy=True):\n \n if isinstance(df, pd.DataFrame):\n return df\n\n if not hasattr(df, \"__dataframe__\"):\n raise ValueError(\"`df` does not support __dataframe__\")\n\n return _from_dataframe(df.__dataframe__(allow_copy=allow_copy))\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 20, "vocab_size": 17, "complexity": 3, "nloc": 6, "token_counts": 48, "n_ast_nodes": 81, "n_identifiers": 10, "random_cut": "def from_dataframe(df, allow_copy=True):\n \n if isinstance(df, pd.DataFrame):\n return df\n\n if not hasattr(df, \"__dataframe__\"):\n ", "d_id": 39794, "documentation": { "docstring": "\n Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.\n\n Parameters\n ----------\n df : DataFrameXchg\n Object supporting the exchange protocol, i.e. `__dataframe__` method.\n allow_copy : bool, default: True\n Whether to allow copying the memory to perform the conversion\n (if false then zero-copy approach is requested).\n\n Returns\n -------\n pd.DataFrame\n ", "n_words": 48, "vocab_size": 42, "n_whitespaces": 97, "language": "en" } }, { "id": 61340, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py", "file_name": "wheel.py", "fun_name": "parse_wheel", "commit_message": "upd; format", "code": "def parse_wheel(wheel_zip, name):\n # type: (ZipFile, str) -> Tuple[str, Message]\n \n try:\n info_dir = wheel_dist_info_dir(wheel_zip, name)\n metadata = wheel_metadata(wheel_zip, info_dir)\n version = wheel_version(metadata)\n except UnsupportedWheel as e:\n raise UnsupportedWheel(\"{} has an invalid wheel, {}\".format(name, str(e)))\n\n check_compatibility(version, name)\n\n return info_dir, metadata\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 85, "n_words": 39, "vocab_size": 35, "complexity": 2, "nloc": 9, "token_counts": 62, "n_ast_nodes": 103, "n_identifiers": 14, "random_cut": "def parse_wheel(wheel_zip, name):\n # type: (ZipFile, str) -> Tuple[str, Message]\n \n try:\n info_dir = wheel_dist_info_dir(wheel_zip, name)\n metadata = wheel", "d_id": 12522, "documentation": { "docstring": "Extract information from the provided wheel, ensuring it meets basic\n standards.\n\n Returns the name of the .dist-info directory and the parsed WHEEL metadata.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 32, "language": "en" } }, { "id": 34177, "commit_id": "1144d336b689d1710534b245697e41be7a168075", "repo": "transformers", "path": "utils/style_doc.py", "file_name": "style_doc.py", "fun_name": "style_docstrings_in_code", "commit_message": "Copies and docstring styling (#15202)\n\n* Style docstrings when making/checking copies\r\n\r\n* Polish", "code": "def style_docstrings_in_code(code, max_len=119):\n \n # fmt: off\n splits = code.split('\\\"\\\"\\\"')\n splits = [\n (s if i % 2 == 0 or 
_re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len))\n for i, s in enumerate(splits)\n ]\n black_errors = \"\\n\\n\".join([s[1] for s in splits if isinstance(s, tuple) and len(s[1]) > 0])\n splits = [s[0] if isinstance(s, tuple) else s for s in splits]\n clean_code = '\\\"\\\"\\\"'.join(splits)\n # fmt: on\n\n return clean_code, black_errors\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 114, "n_words": 70, "vocab_size": 48, "complexity": 9, "nloc": 10, "token_counts": 131, "n_ast_nodes": 212, "n_identifiers": 17, "random_cut": "def style_docstrings_in_code(code, max_len=119):\n \n # fmt: off\n splits = code.split('\\\"\\\"\\\"')\n splits = [\n (s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len))\n for i, s in enumerate(splits)\n ]\n black_errors = \"\\n\\n\".join([s[1] for s in splits if isinstance(s, tuple) and len(s[1]) > 0])\n splits = [s[0] if isinstance(s, tuple) else s for s in splits]\n clean_code = '\\\"\\\"\\\"'.join(splits)\n # fmt: on\n\n return clean_code, black_errors\n\n", "d_id": 6207, "documentation": { "docstring": "\n Style all docstrings in some code.\n\n Args:\n code (`str`): The code in which we want to style the docstrings.\n max_len (`int`): The maximum number of characters per line.\n\n Returns:\n `Tuple[str, str]`: A tuple with the clean code and the black errors (if any)\n ", "n_words": 43, "vocab_size": 37, "n_whitespaces": 77, "language": "en" } }, { "id": 148116, "commit_id": "d5d2ef424965b2cfdc62a97674dbca7abe3af34b", "repo": "ray", "path": "python/ray/_private/utils.py", "file_name": "utils.py", "fun_name": "check_version_info", "commit_message": "[Core] Add a utility to check GCS / Ray cluster health (#23382)\n\n* Provide a utility to ping a Ray cluster and verify it has the same Ray version. This is useful to check if a Ray cluster is available at a given address, without connecting to the cluster with the more heavyweight ray.init(). This utility is integrated with ray memory to provide a better error message when the Ray cluster is unavailable. 
There seem to be user demand for exposing this as an API as well.\r\n* Improve the error message when the address provided to Ray does not contain port.", "code": "def check_version_info(cluster_metadata):\n \n cluster_version_info = (\n cluster_metadata[\"ray_version\"],\n cluster_metadata[\"python_version\"],\n )\n version_info = compute_version_info()\n if version_info != cluster_version_info:\n node_ip_address = ray._private.services.get_node_ip_address()\n error_message = (\n \"Version mismatch: The cluster was started with:\\n\"\n \" Ray: \" + cluster_version_info[0] + \"\\n\"\n \" Python: \" + cluster_version_info[1] + \"\\n\"\n \"This process on node \" + node_ip_address + \" was started with:\" + \"\\n\"\n \" Ray: \" + version_info[0] + \"\\n\"\n \" Python: \" + version_info[1] + \"\\n\"\n )\n raise RuntimeError(error_message)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 208, "n_words": 73, "vocab_size": 39, "complexity": 2, "nloc": 17, "token_counts": 90, "n_ast_nodes": 176, "n_identifiers": 12, "random_cut": "def check_version_info(cluster_metadata):\n \n cluster_version_info = (\n cluster_metadata[\"ray_version\"],\n cluster_metadata[\"python_version\"],\n )\n version_info = compute_version_info()\n if version_info != cluster_version_info:\n node_ip_address = ray._private.services.get_node_ip_address()\n error_message = (\n \"Version mismatch: The cluster was started with:\\n\"\n \" Ray: \" + cluster_version_info[0] + \"\\n\"\n \" Python: \" + cluster_version_info[1] + \"\\n\"\n \"This process on node \" + node_ip_address + \" was started with:\" + \"\\n\"\n \" Ray: \" + version_info[0] + \"\\n\"\n \" Python: \" + version_info[1] ", "d_id": 34179, "documentation": { "docstring": "Check if the Python and Ray versions stored in GCS matches this process.\n Args:\n cluster_metadata: Ray cluster metadata from GCS.\n\n Raises:\n Exception: An exception is raised if there is a version mismatch.\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 55, "language": "en" } }, { "id": 20690, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/rich/__init__.py", "file_name": "__init__.py", "fun_name": "get_console", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def get_console() -> \"Console\":\n \n global _console\n if _console is None:\n from .console import Console\n\n _console = Console()\n\n return _console\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 12, "token_counts": 26, "n_ast_nodes": 50, "n_identifiers": 4, "random_cut": "def get_console() -> \"Console\":\n \n ", "d_id": 3481, "documentation": { "docstring": "Get a global :class:`~rich.console.Console` instance. 
This function is used when Rich requires a Console,\n and hasn't been explicitly given one.\n\n Returns:\n Console: A console instance.\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 41, "language": "en" } }, { "id": 60852, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/models/link.py", "file_name": "link.py", "fun_name": "is_hash_allowed", "commit_message": "upd; format", "code": "def is_hash_allowed(self, hashes):\n # type: (Optional[Hashes]) -> bool\n \n if hashes is None or not self.has_hash:\n return False\n # Assert non-None so mypy knows self.hash_name and self.hash are str.\n assert self.hash_name is not None\n assert self.hash is not None\n\n return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)\n\n\n# TODO: Relax this comparison logic to ignore, for example, fragments.", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 111, "n_words": 52, "vocab_size": 40, "complexity": 3, "nloc": 6, "token_counts": 49, "n_ast_nodes": 79, "n_identifiers": 7, "random_cut": "def is_hash_allowed(self, hashes):\n # type: (Optional[Hashes]) -> bool\n \n if hashes is None or not self.has_hash:\n return False\n # Assert non-None so mypy knows self.hash_name and self.hash are str.\n assert self.hash_name is not None\n assert self.h", "d_id": 12300, "documentation": { "docstring": "\n Return True if the link has a hash and it is allowed.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 260218, "commit_id": "e98e0353787f87ce10d6d47e643bbefe9b6a8ddd", "repo": "scikit-learn", "path": "sklearn/metrics/_ranking.py", "file_name": "_ranking.py", "fun_name": "coverage_error", "commit_message": "FIX Ensure correct sklearn.metrics.coverage_error error message for 1D array (#23548)\n\n* Change input array to ensure_2d=True\n\n* Reshape input list to 2D if metric is coverage_error\n\n* Add test for error message with 1D array on coverage_error\n\n* Modify 1D error message test\n\n* Use parametrize to test different 1d arrays\n\n* Explain why reshape in test_regression_thresholded_inf_nan_input\n\n* Add changelog entry for this fix\n\n* Add test comments to sklearn/metrics/tests/test_ranking.py\n\nCo-authored-by: Julien Jerphanion \n\nCo-authored-by: Julien Jerphanion ", "code": "def coverage_error(y_true, y_score, *, sample_weight=None):\n \n y_true = check_array(y_true, ensure_2d=True)\n y_score = check_array(y_score, ensure_2d=True)\n check_consistent_length(y_true, y_score, sample_weight)\n\n y_type = type_of_target(y_true, input_name=\"y_true\")\n if y_type != \"multilabel-indicator\":\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n if y_true.shape != y_score.shape:\n raise ValueError(\"y_true and y_score have different shape\")\n\n y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))\n y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))\n coverage = (y_score >= y_min_relevant).sum(axis=1)\n coverage = coverage.filled(0)\n\n return np.average(coverage, weights=sample_weight)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 60, "vocab_size": 46, "complexity": 3, "nloc": 14, "token_counts": 153, "n_ast_nodes": 244, "n_identifiers": 28, "random_cut": "def coverage_error(y_true, y_score, 
*, sample_weight=None):\n \n y_true = check_array(y_true, ensure_2d=True)\n y_score = check_array(y_score, ensure_2d=True)\n check_consistent_length(y_true, y_score, sample_weight)\n\n y_type = type_of_target(y_true, input_name=\"y_true\")\n if y_type != \"multilabel-indicator\":\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n if y_true.shape != y_score.shape:\n raise ValueError(\"y_true and y_score have different shape\")\n\n y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))\n y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))\n coverage = (y_score >= y_min_relevant).sum(axis=1)\n coverage = coverage.filled(0)\n\n return np.average(coverage, weights=sample_weight)\n\n", "d_id": 76126, "documentation": { "docstring": "Coverage error measure.\n\n Compute how far we need to go through the ranked scores to cover all\n true labels. The best value is equal to the average number\n of labels in ``y_true`` per sample.\n\n Ties in ``y_scores`` are broken by giving maximal rank that would have\n been assigned to all tied values.\n\n Note: Our implementation's score is 1 greater than the one given in\n Tsoumakas et al., 2010. This extends it to handle the degenerate case\n in which an instance has 0 true labels.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples, n_labels)\n True binary labels in binary indicator format.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates of the positive\n class, confidence values, or non-thresholded measure of decisions\n (as returned by \"decision_function\" on some classifiers).\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n coverage_error : float\n\n References\n ----------\n .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).\n Mining multi-label data. In Data mining and knowledge discovery\n handbook (pp. 667-685). Springer US.\n\n ", "n_words": 179, "vocab_size": 144, "n_whitespaces": 297, "language": "en" } }, { "id": 241685, "commit_id": "8a549a550cb10189ff1db382f546a40cd1c6c5b3", "repo": "lightning", "path": "pytorch_lightning/callbacks/progress/base.py", "file_name": "base.py", "fun_name": "total_predict_batches", "commit_message": "Integrate progress tracking into the progress bar (#11213)", "code": "def total_predict_batches(self) -> int:\n \n return sum(self.trainer.num_predict_batches)\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 17, "n_ast_nodes": 30, "n_identifiers": 6, "random_cut": "def total_predict_batches(self) -> int:\n \n return sum(self.trainer.num_predict_batches)\n", "d_id": 69654, "documentation": { "docstring": "The total number of prediction batches, which may change from epoch to epoch.\n\n Use this to set the total number of iterations in the progress bar. 
Can return ``inf`` if the predict dataloader\n is of infinite size.\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 58, "language": "en" } }, { "id": 104793, "commit_id": "1904d0c0a3a96330d9b870cdca3e9a3a137f2977", "repo": "datasets", "path": "src/datasets/dataset_dict.py", "file_name": "dataset_dict.py", "fun_name": "shape", "commit_message": "Add code examples for DatasetDict (#4245)\n\n* 📝 add code examples for DatasetDict\r\n\r\n* 🖍 apply quentin review", "code": "def shape(self) -> Dict[str, Tuple[int]]:\n \n self._check_values_type()\n return {k: dataset.shape for k, dataset in self.items()}\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 14, "token_counts": 39, "n_ast_nodes": 62, "n_identifiers": 10, "random_cut": "def shape(self) -> Dict[str, Tuple[int]]:\n \n self._check_values_type()\n return {k: datas", "d_id": 21975, "documentation": { "docstring": "Shape of each split of the dataset (number of columns, number of rows).\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\")\n >>> ds.shape\n {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}\n ```\n ", "n_words": 36, "vocab_size": 29, "n_whitespaces": 92, "language": "en" } }, { "id": 36018, "commit_id": "50dd314d939a86f3a81e19af01459f449fbaeeca", "repo": "transformers", "path": "src/transformers/onnx/config.py", "file_name": "config.py", "fun_name": "is_torch_support_available", "commit_message": "Add ONNX export for ViT (#15658)\n\n* Add ONNX support for ViT\r\n\r\n* Refactor to use generic preprocessor\r\n\r\n* Add vision dep to tests\r\n\r\n* Extend ONNX slow tests to ViT\r\n\r\n* Add dummy image generator\r\n\r\n* Use model_type to determine modality\r\n\r\n* Add deprecation warnings for tokenizer argument\r\n\r\n* Add warning when overwriting the preprocessor\r\n\r\n* Add optional args to docstrings\r\n\r\n* Add minimum PyTorch version to OnnxConfig\r\n\r\n* Refactor OnnxConfig class variables from CONSTANT_NAME to snake_case\r\n\r\n* Add reasonable value for default atol\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def is_torch_support_available(self) -> bool:\n \n if is_torch_available():\n from transformers.file_utils import torch_version\n\n return torch_version >= self.torch_onnx_minimum_version\n else:\n return False\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 71, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 12, "token_counts": 29, "n_ast_nodes": 50, "n_identifiers": 8, "random_cut": "def is_torch_support_available(self) -> bool:\n \n if is_torch_available():\n from transformers.file_utils import t", "d_id": 6560, "documentation": { "docstring": "\n The minimum PyTorch version required to export the model.\n\n Returns:\n `bool`: Whether the installed version of PyTorch is compatible with the model.\n ", "n_words": 22, "vocab_size": 17, "n_whitespaces": 55, "language": "en" } }, { "id": 3887, "commit_id": "1e0ac30ebdcfce55a5644bcd486044da45c93dd6", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-orb/source_orb/source.py", "file_name": "source.py", "fun_name": "check_connection", "commit_message": "🎉 New Source: Orb (#9985)\n\n* V1 of source_orb connector\r\n\r\n* add boostrap.md file\r\n\r\n* add 
clause on Pagination to bootstrap.md\r\n\r\n* add SUMMARY documentation\r\n\r\n* add lookback_window_days connector parameter\r\n\r\n* Add support for start_date parameter\r\n\r\n* Add ability to transform record in order to un-nest IDs\r\n\r\n* Add support for extracting event properties based on connector configuration", "code": "def check_connection(self, logger, config) -> Tuple[bool, any]:\n \n auth_header = TokenAuthenticator(token=config[\"api_key\"]).get_auth_header()\n ping_url = ORB_API_BASE_URL + \"ping\"\n ping_response = requests.get(ping_url, headers=auth_header)\n try:\n ping_response.raise_for_status()\n return True, None\n except Exception as e:\n return False, e\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 106, "n_words": 31, "vocab_size": 28, "complexity": 2, "nloc": 13, "token_counts": 69, "n_ast_nodes": 114, "n_identifiers": 20, "random_cut": "def check_connection(self, logger, config) -> Tuple[bool, any]:\n \n auth_header = TokenAuthenticator(token=config[\"api_key\"]).get_auth_header()\n ping_url = ORB_API_BASE_URL + \"ping\"\n ping_response = requests.get(ping_url, headers=", "d_id": 589, "documentation": { "docstring": "\n Makes a request to the /ping endpoint, which validates that the authentication credentials are appropriate.\n API Docs: https://docs.withorb.com/reference/ping\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 75517, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/backends/database/postgres/postgres.py", "file_name": "postgres.py", "fun_name": "add_items_upsert", "commit_message": "Reformat with black", "code": "def add_items_upsert(self, content_type_pk, indexers):\n compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection)\n title_sql = []\n autocomplete_sql = []\n body_sql = []\n data_params = []\n\n for indexer in indexers:\n data_params.extend((content_type_pk, indexer.id))\n\n # Compile title value\n value = compiler.prepare_value(\n IndexEntry._meta.get_field(\"title\"), indexer.title\n )\n sql, params = value.as_sql(compiler, self.connection)\n title_sql.append(sql)\n data_params.extend(params)\n\n # Compile autocomplete value\n value = compiler.prepare_value(\n IndexEntry._meta.get_field(\"autocomplete\"), indexer.autocomplete\n )\n sql, params = value.as_sql(compiler, self.connection)\n autocomplete_sql.append(sql)\n data_params.extend(params)\n\n # Compile body value\n value = compiler.prepare_value(\n IndexEntry._meta.get_field(\"body\"), indexer.body\n )\n sql, params = value.as_sql(compiler, self.connection)\n body_sql.append(sql)\n data_params.extend(params)\n\n data_sql = \", \".join(\n [\n \"(%%s, %%s, %s, %s, %s, 1.0)\" % (a, b, c)\n for a, b, c in zip(title_sql, autocomplete_sql, body_sql)\n ]\n )\n\n with self.connection.cursor() as cursor:\n cursor.execute(\n \n % (IndexEntry._meta.db_table, data_sql),\n data_params,\n )\n\n self._refresh_title_norms()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 555, "n_words": 112, "vocab_size": 67, "complexity": 3, "nloc": 47, "token_counts": 260, "n_ast_nodes": 417, "n_identifiers": 37, "random_cut": "def add_items_upsert(self, content_type_pk, indexers):\n compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection)\n title_sql = []\n autocomplete_sql ", "d_id": 
16416, "documentation": { "docstring": "\n INSERT INTO %s (content_type_id, object_id, title, autocomplete, body, title_norm)\n (VALUES %s)\n ON CONFLICT (content_type_id, object_id)\n DO UPDATE SET title = EXCLUDED.title,\n title_norm = 1.0,\n autocomplete = EXCLUDED.autocomplete,\n body = EXCLUDED.body\n ", "n_words": 30, "vocab_size": 26, "n_whitespaces": 193, "language": "en" } }, { "id": 94500, "commit_id": "7bbb85a0d95d23620228a02bb4401fc09658f5f1", "repo": "sentry", "path": "tests/sentry/sentry_metrics/test_all_indexers.py", "file_name": "test_all_indexers.py", "fun_name": "test_already_created_plus_written_results", "commit_message": "ref(metrics): Split caching out of indexers, random test refactoring [sns-1606] (#37714)", "code": "def test_already_created_plus_written_results(indexer, indexer_cache) -> None:\n \n org_id = 1234\n\n raw_indexer = indexer\n indexer = CachingIndexer(indexer_cache, indexer)\n\n v0 = raw_indexer.record(use_case_id, org_id, \"v1.2.0\")\n v1 = raw_indexer.record(use_case_id, org_id, \"v1.2.1\")\n v2 = raw_indexer.record(use_case_id, org_id, \"v1.2.2\")\n\n expected_mapping = {\"v1.2.0\": v0, \"v1.2.1\": v1, \"v1.2.2\": v2}\n\n results = indexer.bulk_record(\n use_case_id=use_case_id, org_strings={org_id: {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\"}}\n )\n assert len(results[org_id]) == len(expected_mapping) == 3\n\n for string, id in results[org_id].items():\n assert expected_mapping[string] == id\n\n results = indexer.bulk_record(\n use_case_id=use_case_id,\n org_strings={org_id: {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\", \"v1.2.3\"}},\n )\n v3 = raw_indexer.resolve(use_case_id, org_id, \"v1.2.3\")\n expected_mapping[\"v1.2.3\"] = v3\n\n assert len(results[org_id]) == len(expected_mapping) == 4\n\n for string, id in results[org_id].items():\n assert expected_mapping[string] == id\n\n fetch_meta = results.get_fetch_metadata()\n assert_fetch_type_for_tag_string_set(\n fetch_meta[org_id], FetchType.CACHE_HIT, {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\"}\n )\n assert_fetch_type_for_tag_string_set(fetch_meta[org_id], FetchType.FIRST_SEEN, {\"v1.2.3\"})\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 216, "n_words": 108, "vocab_size": 62, "complexity": 3, "nloc": 32, "token_counts": 257, "n_ast_nodes": 411, "n_identifiers": 27, "random_cut": "def test_already_created_plus_written_results(indexer, indexer_cache) -> None:\n \n org_id = 1234\n\n raw_indexer = indexer\n indexer = CachingIndexer(indexer_cache, indexer)\n\n v0 = raw_indexer.record(use_case_id, org_id, \"v1.2.0\")\n v1 = raw_indexer.record(use_case_id, org_id, \"v1.2.1\")\n v2 = raw_indexer.record(use_case_id, org_id, \"v1.2.2\")\n\n expected_mapping = {\"v1.2.0\": v0, \"v1.2.1\": v1, \"v1.2.2\": v2}\n\n results = indexer.bulk_record(\n use_case_id=use_case_id, org_strings={org_id: {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\"}}\n )\n assert len(results[org_id]) == len(expected_mapping) == 3\n\n for string, id in results[org_id].items():\n assert expected_mapping[string] == id\n\n results = indexer.bulk_record(\n use_case_", "d_id": 19097, "documentation": { "docstring": "\n Test that we correctly combine db read results with db write results\n for the same organization.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 26, "language": "en" } }, { "id": 219596, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_osx_support.py", "file_name": "_osx_support.py", "fun_name": 
"compiler_fixup", "commit_message": "add python 3.10.4 for windows", "code": "def compiler_fixup(compiler_so, cc_args):\n \n stripArch = stripSysroot = False\n\n compiler_so = list(compiler_so)\n\n if not _supports_universal_builds():\n # OSX before 10.4.0, these don't support -arch and -isysroot at\n # all.\n stripArch = stripSysroot = True\n else:\n stripArch = '-arch' in cc_args\n stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot'))\n\n if stripArch or 'ARCHFLAGS' in os.environ:\n while True:\n try:\n index = compiler_so.index('-arch')\n # Strip this argument and the next one:\n del compiler_so[index:index+2]\n except ValueError:\n break\n\n elif not _supports_arm64_builds():\n # Look for \"-arch arm64\" and drop that\n for idx in reversed(range(len(compiler_so))):\n if compiler_so[idx] == '-arch' and compiler_so[idx+1] == \"arm64\":\n del compiler_so[idx:idx+2]\n\n if 'ARCHFLAGS' in os.environ and not stripArch:\n # User specified different -arch flags in the environ,\n # see also distutils.sysconfig\n compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()\n\n if stripSysroot:\n while True:\n indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]\n if not indices:\n break\n index = indices[0]\n if compiler_so[index] == '-isysroot':\n # Strip this argument and the next one:\n del compiler_so[index:index+2]\n else:\n # It's '-isysroot/some/path' in one arg\n del compiler_so[index:index+1]\n\n # Check if the SDK that is used during compilation actually exists,\n # the universal build requires the usage of a universal SDK and not all\n # users have that installed by default.\n sysroot = None\n argvar = cc_args\n indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')]\n if not indices:\n argvar = compiler_so\n indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]\n\n for idx in indices:\n if argvar[idx] == '-isysroot':\n sysroot = argvar[idx+1]\n break\n else:\n sysroot = argvar[idx][len('-isysroot'):]\n break\n\n if sysroot and not os.path.isdir(sysroot):\n sys.stderr.write(f\"Compiling with an SDK that doesn't seem to exist: {sysroot}\\n\")\n sys.stderr.write(\"Please check your Xcode installation\\n\")\n sys.stderr.flush()\n\n return compiler_so\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 740, "n_words": 268, "vocab_size": 135, "complexity": 29, "nloc": 49, "token_counts": 357, "n_ast_nodes": 613, "n_identifiers": 32, "random_cut": "def compiler_fixup(compiler_so, cc_args):\n \n stripArch = stripSysroot = False\n\n compiler_so = list(compiler_so)\n\n if not _supports_universal_builds():\n # OSX before 10.4.0, these don't support -arch and -isysroot at\n # all.\n stripArch = stripSysroot = True\n else:\n stripArch = '-arch' in cc_args\n stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot'))\n\n if stripArch or 'ARCHFLAGS' in os.environ:\n while True:\n try:\n index = compiler_so.index('-arch')\n # Strip this argument and the next one:\n del compiler_so[index:index+2]\n except ValueError:\n break\n\n elif not _supports_arm64_builds():\n # Look for \"-arch arm64\" and drop that\n for idx in reversed(range(len(compiler_so))):\n if compiler_so[idx] == '-arch' and compiler_so[idx+1] == \"arm64\":\n del compiler_so[idx:idx+2]\n\n if 'ARCHFLAGS' in os.environ and not stripArch:\n # User specified different -arch flags in the environ,\n # see also distutils.sysconfig\n compiler_so = 
compiler_so + os.environ['ARCHFLAGS'].split()\n\n if stripSysroot:\n while True:\n indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]\n if not indices:\n break\n index = indices[0]\n if compiler_so[index] == '-isysroot':\n # Strip this argument and the next one:\n del compiler_so[index:index+2]\n else:\n # It's '-isysroot/some/path' in one arg\n del compiler_so[index:index+1]\n\n # Check if the SDK that is used during compilation actually exists,\n # the universal build requires the usage of a universal SDK and not all\n # users have that install", "d_id": 55634, "documentation": { "docstring": "\n This function will strip '-isysroot PATH' and '-arch ARCH' from the\n compile flags if the user has specified one them in extra_compile_flags.\n\n This is needed because '-arch ARCH' adds another architecture to the\n build, without a way to remove an architecture. Furthermore GCC will\n barf if multiple '-isysroot' arguments are present.\n ", "n_words": 51, "vocab_size": 43, "n_whitespaces": 70, "language": "en" } }, { "id": 74277, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_page_model.py", "file_name": "test_page_model.py", "fun_name": "test_copy_page_with_excluded_parental_and_child_relations", "commit_message": "Reformat with black", "code": "def test_copy_page_with_excluded_parental_and_child_relations(self):\n \n\n try:\n # modify excluded fields for this test\n EventPage.exclude_fields_in_copy = [\n \"advert_placements\",\n \"categories\",\n \"signup_link\",\n ]\n\n # set up data\n christmas_event = EventPage.objects.get(url_path=\"/home/events/christmas/\")\n summer_category = EventCategory.objects.create(name=\"Summer\")\n holiday_category = EventCategory.objects.create(name=\"Holidays\")\n\n # add URL (to test excluding a basic field)\n christmas_event.signup_link = \"https://christmas-is-awesome.com/rsvp\"\n\n # add parental many to many relations\n christmas_event.categories = (summer_category, holiday_category)\n christmas_event.save()\n\n # Copy it\n new_christmas_event = christmas_event.copy(\n update_attrs={\n \"title\": \"New christmas event\",\n \"slug\": \"new-christmas-event\",\n }\n )\n\n # check that the signup_link was NOT copied\n self.assertEqual(\n christmas_event.signup_link, \"https://christmas-is-awesome.com/rsvp\"\n )\n self.assertEqual(new_christmas_event.signup_link, \"\")\n\n # check that original event is untouched\n self.assertEqual(\n christmas_event.categories.count(),\n 2,\n \"Child objects (parental many to many) defined on the superclass were removed from the original page\",\n )\n\n # check that parental many to many are NOT copied\n self.assertEqual(\n new_christmas_event.categories.count(),\n 0,\n \"Child objects (parental many to many) were copied but should be excluded\",\n )\n\n # check that child objects on original event were left untouched\n self.assertEqual(\n christmas_event.advert_placements.count(),\n 1,\n \"Child objects defined on the original superclass were edited when copied\",\n )\n\n # check that child objects were NOT copied\n self.assertEqual(\n new_christmas_event.advert_placements.count(),\n 0,\n \"Child objects defined on the superclass were copied and should not be\",\n )\n\n finally:\n # reset excluded fields for future tests\n EventPage.exclude_fields_in_copy = []\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 889, "n_words": 197, 
"vocab_size": 107, "complexity": 2, "nloc": 45, "token_counts": 190, "n_ast_nodes": 337, "n_identifiers": 22, "random_cut": "def test_copy_page_with_excluded_parental_and_child_relations(self):\n \n\n try:\n # modify excluded fields for this test\n EventPage.exclude_fields_in_copy = [\n \"advert_placements\",\n \"categories\",\n \"signup_link\",\n ]\n\n # set up data\n christmas_event = EventPage.objects.get(url_path=\"/home/events/christmas/\")\n summer_category = EventCategory.objects.create(name=\"Summer\")\n holiday_category = EventCategory.objects.create(name=\"Holidays\")\n\n # add URL (to test excluding a basic field)\n christmas_event.signup_link = \"https://christmas-is-awesome.com/rsvp\"\n\n # add parenta", "d_id": 16243, "documentation": { "docstring": "Test that a page will be copied with parental and child relations removed if excluded.", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 207694, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_app_model_in_list_body_class", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_app_model_in_list_body_class(self):\n \n response = self.client.get(reverse(\"admin:admin_views_section_changelist\"))\n self.assertContains(response, ' Tuple[str, str, str]\n \n return _distro.version_parts(best)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 3, "random_cut": "def version_parts(best=False):\n # type: (bool) -> Tuple[s", "d_id": 3214, "documentation": { "docstring": "\n Return the version of the current OS distribution as a tuple\n ``(major, minor, build_number)`` with items as follows:\n\n * ``major``: The result of :func:`distro.major_version`.\n\n * ``minor``: The result of :func:`distro.minor_version`.\n\n * ``build_number``: The result of :func:`distro.build_number`.\n\n For a description of the *best* parameter, see the :func:`distro.version`\n method.\n ", "n_words": 47, "vocab_size": 32, "n_whitespaces": 75, "language": "en" } }, { "id": 114734, "commit_id": "cf75c4186e1caa36b18c9ddffce98da94b9904e6", "repo": "mindsdb", "path": "mindsdb/integrations/mssql_handler/mssql_handler.py", "file_name": "mssql_handler.py", "fun_name": "check_status", "commit_message": "Add sql server handler", "code": "def check_status(self):\n \n status = {\n 'success': False\n }\n try:\n con = self.__connect()\n with closing(con) as con:\n #TODO: best way to check con.connected ? 
\n status['success'] = True\n except Exception as e:\n log.error(f'Error connecting to SQL Server {self.database}, {e}!')\n status['error'] = e\n return status\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 182, "n_words": 42, "vocab_size": 36, "complexity": 2, "nloc": 12, "token_counts": 56, "n_ast_nodes": 116, "n_identifiers": 11, "random_cut": "def check_status(self):\n \n status = {\n 'success': False\n }\n try:\n con = s", "d_id": 25267, "documentation": { "docstring": "\n Check the connection of the SQL Server database\n :return: success status and error message if error occurs\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 39, "language": "en" } }, { "id": 275885, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/model_config.py", "file_name": "model_config.py", "fun_name": "model_from_config", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def model_from_config(config, custom_objects=None):\n \n if isinstance(config, list):\n raise TypeError(\n \"`model_from_config` expects a dictionary, not a list. \"\n f\"Received: config={config}. Did you meant to use \"\n \"`Sequential.from_config(config)`?\"\n )\n from keras.layers import deserialize # pylint: disable=g-import-not-at-top\n\n return deserialize(config, custom_objects=custom_objects)\n\n\n@keras_export(\"keras.models.model_from_yaml\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.models.model_from_yaml\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 96, "n_words": 37, "vocab_size": 35, "complexity": 2, "nloc": 9, "token_counts": 41, "n_ast_nodes": 85, "n_identifiers": 10, "random_cut": "def model_from_config(config, custom_objects=None):\n \n if isinstance(config, list):\n raise TypeError(\n \"`model_from_config` expects a dictionary, not a list. 
\"\n f\"Received: co", "d_id": 81500, "documentation": { "docstring": "Instantiates a Keras model from its config.\n\n Usage:\n ```\n # for a Functional API model\n tf.keras.Model().from_config(model.get_config())\n\n # for a Sequential model\n tf.keras.Sequential().from_config(model.get_config())\n ```\n\n Args:\n config: Configuration dictionary.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\n Returns:\n A Keras model instance (uncompiled).\n\n Raises:\n TypeError: if `config` is not a dictionary.\n ", "n_words": 57, "vocab_size": 45, "n_whitespaces": 140, "language": "en" } }, { "id": 74269, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_page_model.py", "file_name": "test_page_model.py", "fun_name": "test_golden_path", "commit_message": "Reformat with black", "code": "def test_golden_path(self):\n \n with self.assertNumQueries(0):\n result = self.page.cached_content_type\n self.assertEqual(result, ContentType.objects.get(id=self.page.content_type_id))\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 41, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 71, "n_identifiers": 12, "random_cut": "def test_golden_path(self):\n \n with self.assertNumQueries(0):\n result = self.page.cached_", "d_id": 16242, "documentation": { "docstring": "\n The return value should match the value you'd get\n if fetching the ContentType from the database,\n and shouldn't trigger any database queries when\n the ContentType is already in memory.\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 65, "language": "en" } }, { "id": 88971, "commit_id": "8078d89b46841c7f7a57cc49a4b9cafb42b12ce0", "repo": "sentry", "path": "src/sentry/lang/javascript/processor.py", "file_name": "processor.py", "fun_name": "fold_function_name", "commit_message": "ref(processor): Fold occurences of property names in function_name (#41697)\n\nFold multiple consecutive occurrences of the same property name into a\r\nsingle group, excluding the last component.\r\n\r\n```\r\nfoo | foo\r\nfoo.foo | foo.foo\r\nfoo.foo.foo | {foo#2}.foo\r\nbar.foo.foo | bar.foo.foo\r\nbar.foo.foo.foo | bar.{foo#2}.foo\r\nbar.foo.foo.onError | bar.{foo#2}.onError\r\nbar.bar.bar.foo.foo.onError | {bar#3}.{foo#2}.onError\r\nbar.foo.foo.bar.bar.onError | bar.{foo#2}.{bar#2}.onError\r\n```\r\n\r\nThis is mostly done for React, where some frames have function name like\r\n`.children.children.children.onSubmitError` when function is a\r\nprop passed down the component stack.", "code": "def fold_function_name(function_name):\n \n\n parts = function_name.split(\".\")\n\n if len(parts) == 1:\n return function_name\n\n tail = parts.pop()\n grouped = [list(g) for _, g in groupby(parts)]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 22, "vocab_size": 20, "complexity": 3, "nloc": 8, "token_counts": 53, "n_ast_nodes": 82, "n_identifiers": 12, "random_cut": "def fold_function_name(function_name):\n \n\n parts = function_name.split(\".\")\n\n if len(parts) == 1:\n retu", "d_id": 18484, "documentation": { "docstring": "\n Fold multiple consecutive occurences of the same property name into a single group, excluding the last component.\n\n foo | foo\n foo.foo | 
foo.foo\n foo.foo.foo | {foo#2}.foo\n bar.foo.foo | bar.foo.foo\n bar.foo.foo.foo | bar.{foo#2}.foo\n bar.foo.foo.onError | bar.{foo#2}.onError\n bar.bar.bar.foo.foo.onError | {bar#3}.{foo#2}.onError\n bar.foo.foo.bar.bar.onError | bar.{foo#2}.{bar#2}.onError\n ", "n_words": 41, "vocab_size": 30, "n_whitespaces": 72, "language": "en" } }, { "id": 200507, "commit_id": "807f499971f9c298bc6bacbb08bcb19299fbb42c", "repo": "sympy", "path": "sympy/integrals/transforms.py", "file_name": "transforms.py", "fun_name": "_laplace_rule_exp", "commit_message": "Fixed Issue #24294", "code": "def _laplace_rule_exp(f, t, s, doit=True, **hints):\n \n hints.pop('simplify', True)\n a = Wild('a', exclude=[t])\n\n y = Wild('y')\n z = Wild('z')\n k, func = f.as_independent(t, as_Add=False)\n ma1 = func.match(exp(y)*z)\n if ma1:\n ma2 = ma1[y].collect(t).match(a*t)\n if ma2:\n debug('_laplace_apply_rules match:')\n debug(' f: %s ( %s, %s )'%(f, ma1, ma2))\n debug(' rule: multiply with exp (1.5)')\n L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=doit, **hints)\n try:\n r, p, c = L\n return (k*r, p+ma2[a], c)\n except TypeError:\n return k*L\n return None\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 238, "n_words": 73, "vocab_size": 59, "complexity": 4, "nloc": 20, "token_counts": 178, "n_ast_nodes": 283, "n_identifiers": 28, "random_cut": "def _laplace_rule_exp(f, t, s, doit=True, **hints):\n \n hints.pop('simplify', True)\n a = Wild('a', exclude=[t])\n\n y = Wild('y')\n z = Wild('z')\n k, func = f.as_independent(t, as_Add=False)\n ma1 = func.match(exp(y)*z)\n if ma1:\n ma2 = ma1[y].collect(t).match(a*t)\n if ma2:\n debug('_laplace_apply_rules match:')\n debug(' f: %s ( %s, %s )'%(f, ma1, ma2))\n debug(' rule: multiply with exp (1.5)')\n L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=do", "d_id": 49675, "documentation": { "docstring": "\n This internal helper function tries to transform a product containing the\n `exp` function and returns `None` if it cannot do it.\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 31, "language": "en" } }, { "id": 60370, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/scripts/cpp_lint.py", "file_name": "cpp_lint.py", "fun_name": "FindEndOfExpressionInLine", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):\n \n for i in xrange(startpos, len(line)):\n if line[i] == startchar:\n depth += 1\n elif line[i] == endchar:\n depth -= 1\n if depth == 0:\n return (i + 1, 0)\n return (-1, depth)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 68, "n_words": 37, "vocab_size": 29, "complexity": 5, "nloc": 9, "token_counts": 69, "n_ast_nodes": 103, "n_identifiers": 9, "random_cut": "def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):\n \n for i in xrange(startpos, len(line)):\n if line[i] == startchar:\n depth += 1\n elif line[i] == endchar:\n depth -= 1\n if depth == 0:\n return (i + 1, 0)\n return (-1, depth)\n\n", "d_id": 12099, "documentation": { "docstring": "Find the position just after the matching endchar.\n\n Args:\n line: a CleansedLines line.\n startpos: start searching at this position.\n depth: nesting level at startpos.\n startchar: expression opening 
character.\n endchar: expression closing character.\n\n Returns:\n On finding matching endchar: (index just after matching endchar, 0)\n Otherwise: (-1, new depth at end of this line)\n ", "n_words": 52, "vocab_size": 41, "n_whitespaces": 76, "language": "en" } }, { "id": 260762, "commit_id": "d7c978b764c6aafb65cc28757baf3f64da2cae34", "repo": "scikit-learn", "path": "sklearn/neighbors/tests/test_nca.py", "file_name": "test_nca.py", "fun_name": "test_toy_example_collapse_points", "commit_message": "MAINT Parameters validation for NeighborhoodComponentsAnalysis (#24195)\n\nCo-authored-by: jeremie du boisberranger ", "code": "def test_toy_example_collapse_points():\n \n rng = np.random.RandomState(42)\n input_dim = 5\n two_points = rng.randn(2, input_dim)\n X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]])\n y = [0, 0, 1]\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 40, "n_words": 22, "vocab_size": 18, "complexity": 1, "nloc": 15, "token_counts": 132, "n_ast_nodes": 99, "n_identifiers": 14, "random_cut": "def test_toy_example_collapse_points():\n \n rng = np.random.RandomState(42)\n input_dim = 5\n two_points = rng.randn(2, input_dim)\n ", "d_id": 76472, "documentation": { "docstring": "Test on a toy example of three points that should collapse\n\n We build a simple example: two points from the same class and a point from\n a different class in the middle of them. On this simple example, the new\n (transformed) points should all collapse into one single point. Indeed, the\n objective is 2/(1 + exp(d/2)), with d the euclidean distance between the\n two samples from the same class. This is maximized for d=0 (because d>=0),\n with an objective equal to 1 (loss=-1.).\n\n ", "n_words": 83, "vocab_size": 60, "n_whitespaces": 104, "language": "en" } }, { "id": 93936, "commit_id": "f31b57cbc5ec359c8ef9c6459d3d9d8ffcd6e8d9", "repo": "sentry", "path": "tests/sentry/sentry_metrics/test_batch.py", "file_name": "test_batch.py", "fun_name": "_get_string_indexer_log_records", "commit_message": "ref(metrics_indexer): Improve typing, introduce more dataclasses, fix org_id namespacing bug in metadata [INGEST-1380] (#37170)", "code": "def _get_string_indexer_log_records(caplog):\n \n return [\n (\n rec.message,\n {\n k: v\n for k, v in rec.__dict__.items()\n if k\n in (\n \"string_type\",\n \"is_global_quota\",\n \"num_global_quotas\",\n \"num_global_quotas\",\n \"org_batch_size\",\n )\n },\n )\n for rec in caplog.records\n ]\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 264, "n_words": 31, "vocab_size": 24, "complexity": 4, "nloc": 19, "token_counts": 54, "n_ast_nodes": 88, "n_identifiers": 9, "random_cut": "def _get_string_indexer_log_records(caplog):\n \n return [\n (\n rec.message,\n {\n k: v\n for k, v in rec.__dict__.items()\n if k\n ", "d_id": 19027, "documentation": { "docstring": "\n Get all log records and relevant extra arguments for easy snapshotting.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 294963, "commit_id": "ab0abdc988ac101217ba043909c4be8b33101ab3", "repo": "core", "path": "tests/components/subaru/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "test_registered_pin_required", "commit_message": "Add 2FA support for Subaru integration setup (#68753)\n\n* Add 2FA support for Subaru integration 
setup\r\n\r\n* Update config flow to abort with 2FA request fail", "code": "async def test_registered_pin_required(hass, user_form):\n \n with patch(MOCK_API_CONNECT, return_value=True), patch(\n MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock\n ) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True):\n mock_device_registered.return_value = True\n await hass.config_entries.flow.async_configure(\n user_form[\"flow_id\"], user_input=TEST_CREDS\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 71, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 8, "token_counts": 61, "n_ast_nodes": 100, "n_identifiers": 16, "random_cut": "async def test_registered_pin_required(hass, user_form):\n \n with patch(MOCK_API_CONNECT, return_value=True), patch(\n MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock\n ) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True):\n mock_device_registered.return_value = True\n await hass.config_entries.flow.async_configure(\n ", "d_id": 93990, "documentation": { "docstring": "Test if the device is already registered and PIN required.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 249920, "commit_id": "854a6884d81c95297bf93badcddc00a4cab93418", "repo": "synapse", "path": "tests/replication/test_pusher_shard.py", "file_name": "test_pusher_shard.py", "fun_name": "test_send_push_multiple_workers", "commit_message": "Modernize unit tests configuration settings for workers. (#14568)\n\nUse the newer foo_instances configuration instead of the\r\ndeprecated flags to enable specific features (e.g. start_pushers).", "code": "def test_send_push_multiple_workers(self):\n \n http_client_mock1 = Mock(spec_set=[\"post_json_get_json\"])\n http_client_mock1.post_json_get_json.side_effect = (\n lambda *_, **__: defer.succeed({})\n )\n\n self.make_worker_hs(\n \"synapse.app.generic_worker\",\n {\n \"worker_name\": \"pusher1\",\n \"pusher_instances\": [\"pusher1\", \"pusher2\"],\n },\n proxied_blacklisted_http_client=http_client_mock1,\n )\n\n http_client_mock2 = Mock(spec_set=[\"post_json_get_json\"])\n http_client_mock2.post_json_get_json.side_effect = (\n lambda *_, **__: defer.succeed({})\n )\n\n self.make_worker_hs(\n \"synapse.app.generic_worker\",\n {\n \"worker_name\": \"pusher2\",\n \"pusher_instances\": [\"pusher1\", \"pusher2\"],\n },\n proxied_blacklisted_http_client=http_client_mock2,\n )\n\n # We choose a user name that we know should go to pusher1.\n event_id = self._create_pusher_and_send_msg(\"user2\")\n\n # Advance time a bit, so the pusher will register something has happened\n self.pump()\n\n http_client_mock1.post_json_get_json.assert_called_once()\n http_client_mock2.post_json_get_json.assert_not_called()\n self.assertEqual(\n http_client_mock1.post_json_get_json.call_args[0][0],\n \"https://push.example.com/_matrix/push/v1/notify\",\n )\n self.assertEqual(\n event_id,\n http_client_mock1.post_json_get_json.call_args[0][1][\"notification\"][\n \"event_id\"\n ],\n )\n\n http_client_mock1.post_json_get_json.reset_mock()\n http_client_mock2.post_json_get_json.reset_mock()\n\n # Now we choose a user name that we know should go to pusher2.\n event_id = self._create_pusher_and_send_msg(\"user4\")\n\n # Advance time a bit, so the pusher will register something has happened\n self.pump()\n\n http_client_mock1.post_json_get_json.assert_not_called()\n 
http_client_mock2.post_json_get_json.assert_called_once()\n self.assertEqual(\n http_client_mock2.post_json_get_json.call_args[0][0],\n \"https://push.example.com/_matrix/push/v1/notify\",\n )\n self.assertEqual(\n event_id,\n http_client_mock2.post_json_get_json.call_args[0][1][\"notification\"][\n \"event_id\"\n ],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 674, "n_words": 133, "vocab_size": 71, "complexity": 1, "nloc": 55, "token_counts": 278, "n_ast_nodes": 473, "n_identifiers": 22, "random_cut": "def test_send_push_multiple_workers(self):\n \n http_client_mock1 = Mock(spec_set=[\"post_json_get_json\"])\n http_client_mock1.post_jso", "d_id": 73197, "documentation": { "docstring": "Test that registration works when using sharded pusher workers.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 277639, "commit_id": "8401e08334d4b1f102a6ee9479738bacfee0600c", "repo": "keras", "path": "keras/layers/reshaping/reshape.py", "file_name": "reshape.py", "fun_name": "_fix_unknown_dimension", "commit_message": "reduce layers line-too-long", "code": "def _fix_unknown_dimension(self, input_shape, output_shape):\n \n output_shape = list(output_shape)\n msg = (\n \"total size of new array must be unchanged, \"\n \"input_shape = {}, output_shape = {}\".format(\n input_shape, output_shape\n )\n )\n\n known, unknown = 1, None\n for index, dim in enumerate(output_shape):\n if dim < 0:\n if unknown is None:\n unknown = index\n else:\n raise ValueError(\n f\"There must be at most one unknown dimension in \"\n f\"output_shape. Received: output_shape={output_shape}.\"\n )\n else:\n known *= dim\n\n original = np.prod(input_shape, dtype=int)\n if unknown is not None:\n if known == 0 or original % known != 0:\n raise ValueError(msg)\n output_shape[unknown] = original // known\n elif original != known:\n raise ValueError(msg)\n return output_shape\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 441, "n_words": 105, "vocab_size": 65, "complexity": 8, "nloc": 28, "token_counts": 128, "n_ast_nodes": 212, "n_identifiers": 18, "random_cut": "def _fix_unknown_dimension(self, input_shape, output_shape):\n \n output_shape = list(output_shape)\n msg = (\n \"total size of new array must be unchanged, \"\n \"input_shape = {}, output_shape = {}\".format(\n input_shape, output_shape\n )\n )\n\n known, unknown = 1, None\n for index, dim in enumerate(output_shape):\n if dim < 0:\n if unknown is None:\n unknown = index\n else:\n raise ValueError(\n f\"There must be at most one unknown dimension in \"\n f\"output_shape. 
Received: output_shape={output_shape}.\"\n )\n else:\n known *= dim\n\n original = np.prod(input_shape, dtype=int)\n if unknown is not None:\n if known == 0 or original % known != 0:\n raise ValueError(msg)\n output_shape[unknown] = original // known\n elif original != known:\n raise ValueError(msg)\n return output_", "d_id": 82113, "documentation": { "docstring": "Find and replace a missing dimension in an output shape.\n\n This is a near direct port of the internal Numpy function\n `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\n Args:\n input_shape: Shape of array being reshaped\n output_shape: Desired shape of the array with at most a single -1\n which indicates a dimension that should be derived from the input\n shape.\n\n Returns:\n The new output shape with a -1 replaced with its computed value.\n\n Raises:\n ValueError: If the total array size of the output_shape is\n different than the input_shape, or more than one unknown dimension\n is specified.\n ", "n_words": 91, "vocab_size": 65, "n_whitespaces": 209, "language": "en" } }, { "id": 159898, "commit_id": "1e6b72b42292e62c1c86e4f77e30324e43aaa218", "repo": "numpy", "path": "numpy/lib/tests/test_loadtxt.py", "file_name": "test_loadtxt.py", "fun_name": "test_converter_with_unicode_dtype", "commit_message": "TST,STY: Add small additional tests for converters/usecols\n\nAlso fix style a bit to silence linter (hopefully), removes some\nblack style, but I am not too opinionated about that :)", "code": "def test_converter_with_unicode_dtype():\n \n txt = StringIO('abc,def\\nrst,xyz')\n conv = bytes.upper\n res = np.loadtxt(\n txt, dtype=np.dtype(\"U3\"), converters=conv, delimiter=\",\")\n expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])\n assert_equal(res, expected)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 52, "n_words": 23, "vocab_size": 20, "complexity": 1, "nloc": 7, "token_counts": 67, "n_ast_nodes": 118, "n_identifiers": 15, "random_cut": "def test_converter_with_unicode_dtype():\n \n txt = StringIO('abc,def\\nrst,xyz')\n conv = bytes.upper\n res = np.loadtxt(\n txt, dtype=np.dtype(\"U3\"), converters=conv, delimiter=\",\")\n expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])\n assert_equal(res, expec", "d_id": 38437, "documentation": { "docstring": "\n With the default 'bytes' encoding, tokens are encoded prior to being\n passed to the converter. 
This means that the output of the converter may\n be bytes instead of unicode as expected by `read_rows`.\n\n This test checks that outputs from the above scenario are properly decoded\n prior to parsing by `read_rows`.\n ", "n_words": 50, "vocab_size": 37, "n_whitespaces": 69, "language": "en" } }, { "id": 53272, "commit_id": "36e7e0838aeaffc9492b330297e4905f3ab4b11f", "repo": "prefect", "path": "src/prefect/orion/database/alembic_commands.py", "file_name": "alembic_commands.py", "fun_name": "alembic_stamp", "commit_message": "code review revisions pt3", "code": "def alembic_stamp(revision):\n \n # lazy import for performance\n import alembic.command\n\n alembic.command.stamp(alembic_config(), revision=revision)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 6, "random_cut": "def alembic_stamp(revision):\n \n # lazy import for performance\n import alembic.command\n\n alemb", "d_id": 10767, "documentation": { "docstring": "\n Stamp the revision table with the given revision; don’t run any migrations\n\n Args:\n revision: The revision passed to `alembic stamp`.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 37, "language": "en" } }, { "id": 122886, "commit_id": "4b587fa1f0049db5366fd04812ab940d80a71a22", "repo": "jax", "path": "jax/_src/pjit.py", "file_name": "pjit.py", "fun_name": "unflatten_superdims", "commit_message": "Move `pjit.py` to `jax/_src` in preparation for merging the `jit` and `pjit` frontend APIs\n\nPiperOrigin-RevId: 495944279", "code": "def unflatten_superdims(assignment):\n \n def check(cond):\n if cond: return\n raise NotImplementedError(\"Failed to convert OpSharding into a ShardingSpec. \"\n \"Please open a bug report!\")\n flat_assignment = np.asarray(assignment, dtype=np.int64)\n check(flat_assignment[0] == 0)\n dims = []\n while flat_assignment.size > 1:\n stride = flat_assignment[1]\n for i in range(len(flat_assignment)):\n if flat_assignment[i] != i * stride: break\n else:\n # After this loop i should point to an \"element after the sequence\", so\n # we have to increment it if the whole array is a strided sequence.\n i += 1\n size = i\n dims.append((size, stride))\n assert size > 1 # Ensure progress\n flat_assignment = flat_assignment[::size]\n return dims\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 182, "n_words": 98, "vocab_size": 74, "complexity": 4, "nloc": 16, "token_counts": 101, "n_ast_nodes": 192, "n_identifiers": 17, "random_cut": "def unflatten_superdims(assignment):\n \n def check(cond):\n if cond: return\n raise NotImplementedError(\"Failed to convert OpSharding into a ShardingSpec. 
\"\n \"Please open a bug report!\")\n flat_assignment = np.asarray(assignment, dtype=np.int64)\n check(flat_assignment[0] == 0)\n dims = []\n while flat_assignment.size > 1:\n stride = flat_assignment[1]\n for i in range(len(flat_assignment)):\n if flat_assignment[i] != i * stride: break\n else:\n # After this loop i should point to an \"element after the sequence\", so\n # we have t", "d_id": 27261, "documentation": { "docstring": "Unflatten a list of dimension sizes and their strides that generates assignment.\n\n If this function succeeds for a given ``assignment``, then the following property\n should be satisfied::\n\n dims_with_strides = unflatten_superdims(assignment)\n base_array = np.arange(map(fst, sorted(dims_with_strides, key=snd, reverse=True)))\n assignment == base_array.transpose(argsort(dims_with_strides, key=snd, reverse=True)).flatten()\n\n That is, the returned dimensions list all sizes of the base array (with strides\n indicating their initial order). The order of dimensions in the list corresponds\n to the permutation that applied to the base array generates the assignment.\n ", "n_words": 79, "vocab_size": 56, "n_whitespaces": 94, "language": "en" } }, { "id": 86128, "commit_id": "bbd7137b3d379744265f46564d5878490046dd3b", "repo": "sentry", "path": "tests/sentry/event_manager/test_event_manager.py", "file_name": "test_event_manager.py", "fun_name": "test_perf_issue_no_associate_error_event", "commit_message": "chore(perf issues): Check group type before adding event (#39171)\n\nEnsure the group type matches the kind of event before association, e.g.\r\ndon't put an error event on a performance issue and vice versa.", "code": "def test_perf_issue_no_associate_error_event(self):\n \n self.project.update_option(\"sentry:performance_issue_creation_rate\", 1.0)\n\n with mock.patch(\"sentry_sdk.tracing.Span.containing_transaction\"), self.feature(\n {\n \"projects:performance-suspect-spans-ingestion\": True,\n \"organizations:performance-issues-ingest\": True,\n }\n ):\n manager = EventManager(make_event())\n manager.normalize()\n event = manager.save(self.project.id)\n assert len(event.groups) == 1\n\n # sneakily make the group type wrong\n group = event.groups[0]\n group.type = GroupType.PERFORMANCE_N_PLUS_ONE.value\n group.save()\n manager = EventManager(make_event())\n manager.normalize()\n event = manager.save(self.project.id)\n\n assert len(event.groups) == 0\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 262, "n_words": 50, "vocab_size": 35, "complexity": 1, "nloc": 19, "token_counts": 132, "n_ast_nodes": 222, "n_identifiers": 21, "random_cut": "def test_perf_issue_no_associate_error_event(self):\n \n self.project.update_option(\"sentry:performance_issue_creation_rate\", 1.0)\n\n with mock.patch(\"sentry_sdk.tracing.Span.containing_transaction\"), self.feature(\n {\n \"projects:performance-suspect-spans-ingestion\": True,\n \"organizations:performance-issues-ingest\": True,\n }\n ):\n manager = EventManager(make_event())\n manager.normalize()\n event = manager.save(self.project.id)\n assert len(event.groups) == 1\n\n # sneakily make the group type wrong\n group = event.groups[0]\n group.type = GroupType.PERFORMANCE_N_PLUS_ONE.value\n group.save()\n manager = EventManager(make_event())\n manager.normalize()\n event = manager.", "d_id": 18080, "documentation": { "docstring": "Test that you can't associate an error event with a performance issue", "n_words": 12, 
"vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 131286, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_autoscaler.py", "file_name": "test_autoscaler.py", "fun_name": "testNodeTerminatedDuringUpdate", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def testNodeTerminatedDuringUpdate(self):\n \n cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG)\n cluster_config[\"available_node_types\"][\"ray.worker.default\"][\"min_workers\"] = 2\n cluster_config[\"worker_start_ray_commands\"] = [\"ray_start_cmd\"]\n\n # Don't need the extra node type or a docker config.\n cluster_config[\"head_node_type\"] = [\"ray.worker.default\"]\n del cluster_config[\"available_node_types\"][\"ray.head.default\"]\n del cluster_config[\"docker\"]\n\n config_path = self.write_config(cluster_config)\n\n self.provider = MockProvider()\n runner = MockProcessRunner()\n lm = LoadMetrics()\n mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())\n autoscaler = MockAutoscaler(\n config_path,\n lm,\n MockNodeInfoStub(),\n max_failures=0,\n process_runner=runner,\n update_interval_s=0,\n prom_metrics=mock_metrics,\n )\n\n # Scale up to two up-to-date workers\n autoscaler.update()\n self.waitForNodes(2)\n self.provider.finish_starting_nodes()\n autoscaler.update()\n self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})\n\n # Mark both nodes as unhealthy\n for _ in range(5):\n if autoscaler.updaters:\n time.sleep(0.05)\n autoscaler.update()\n\n lm.last_heartbeat_time_by_ip[\"172.0.0.0\"] = 0\n lm.last_heartbeat_time_by_ip[\"172.0.0.1\"] = 0\n\n # Expect both updates to be successful, no nodes in updating state\n assert mock_metrics.successful_updates.inc.call_count == 2\n assert mock_metrics.worker_update_time.observe.call_count == 2\n mock_metrics.updating_nodes.set.assert_called_with(0)\n assert not autoscaler.updaters\n\n # Set up process runner to terminate worker 0 during missed heartbeat\n # recovery and also cause the updater to fail.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 476, "n_words": 134, "vocab_size": 99, "complexity": 3, "nloc": 89, "token_counts": 545, "n_ast_nodes": 392, "n_identifiers": 45, "random_cut": "def testNodeTerminatedDuringUpdate(self):\n \n cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG)\n cluster_config[\"available_node_types\"][\"ray.worker.default\"][\"min_workers\"] = 2\n cluster_config[\"worker_start_ray_commands\"] = [\"ray_start_cmd\"]\n\n # Don't need the extra node type or a docker config.\n cluster_config[\"head_node_type\"] = [\"ray.worker.default\"]\n del cluster_config[\"available_node_types\"][\"ray.head.default\"]\n del cluster_config[\"docker\"]\n\n config_path = self.write_config(cluster_config)\n\n self.provider = MockProvider()\n runner = MockProcessRunner()\n lm = LoadMetrics()\n mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())\n autoscaler = MockAutoscaler(\n config_path,\n lm,\n MockNodeInfoStub(),\n max_failures=0,\n process_runner=runner,\n update_interval_s=0,\n prom_metrics=mock_metrics,\n )\n\n # Scale up to two up-to-date workers\n autoscaler.update()\n self.waitForNodes(2)\n self.provider.finish_starting_nodes()\n autoscaler.update()\n self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})\n\n # Mark both nodes as unhealthy\n for _ in range(5):\n if 
autoscaler.updaters:\n time.sleep(0.05)\n autoscaler.update()\n\n lm.last_heartbeat_time_by_ip[\"172.0.0.0\"] = 0\n lm.last_hear", "d_id": 29495, "documentation": { "docstring": "\n Tests autoscaler handling a node getting terminated during an update\n triggered by the node missing a heartbeat.\n\n Extension of testRecoverUnhealthyWorkers.\n\n In this test, two nodes miss a heartbeat.\n One of them (node 0) is terminated during its recovery update.\n The other (node 1) just fails its update.\n\n When processing completed updates, the autoscaler terminates node 1\n but does not try to terminate node 0 again.\n ", "n_words": 65, "vocab_size": 51, "n_whitespaces": 129, "language": "en" } }, { "id": 284059, "commit_id": "670402396e7e25e95bd6497affb143565d9bd4ea", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/overview/overview_controller.py", "file_name": "overview_controller.py", "fun_name": "call_cr", "commit_message": "Replaces coingecko deprecated commands (#1650)\n\n* removes cgproducts and cgplatforms and replaces with cr\r\n\r\n* add ignore word\r\n\r\n* added .openbb script\r\n\r\n* reverted crypto change\r\n\r\n* doc\r\n\r\n* failing tests\r\n\r\n* trying chart and fixed minh issues\r\n\r\n* Create barh\r\n\r\n* Fix ticker labels\r\n\r\n* fix test\r\n\r\n* loanscan mock\r\n\r\n* defi test\r\n\r\n* defi test\r\n\r\n* Fix defi test\r\n\r\nCo-authored-by: Minh Hoang \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>", "code": "def call_cr(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"cr\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-t\",\n \"--type\",\n dest=\"type\",\n type=str,\n help=\"Select interest rate type\",\n default=\"supply\",\n choices=[\"borrow\", \"supply\"],\n )\n parser.add_argument(\n \"-c\",\n \"--cryptocurrrencies\",\n dest=\"cryptos\",\n type=loanscan_model.check_valid_coin,\n help=f,\n default=\"BTC,ETH,USDT,USDC\",\n )\n\n parser.add_argument(\n \"-p\",\n \"--platforms\",\n dest=\"platforms\",\n type=loanscan_model.check_valid_platform,\n help=f,\n default=\"BlockFi,Ledn,SwissBorg,Youhodler\",\n )\n\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-t\")\n\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED, limit=10\n )\n if ns_parser:\n loanscan_view.display_crypto_rates(\n rate_type=ns_parser.type,\n cryptos=ns_parser.cryptos,\n platforms=ns_parser.platforms,\n limit=ns_parser.limit,\n export=ns_parser.export,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 529, "n_words": 66, "vocab_size": 56, "complexity": 4, "nloc": 49, "token_counts": 196, "n_ast_nodes": 346, "n_identifiers": 35, "random_cut": "def call_cr(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"cr\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-t\",\n \"--type\",\n dest=\"type\",\n type=str,\n help=\"Select interest rate type\",\n default=\"supply\",\n choices=[\"borrow\", \"supply\"],\n )\n parser.add_argument(\n \"-c\",\n \"--cryptocurrrencies\",\n dest=\"cryptos\",\n type=loanscan_model.check_valid_coin,\n help=f,\n default=\"BTC,ETH,USDT,USDC\",\n 
)\n\n parser.add_argument(\n \"-p\",\n \"--platforms\",\n dest=\"platforms\",\n type=loanscan_model.check_valid_platform,\n help=f,\n default=\"BlockFi,Ledn,SwissBorg,Youhodler\",\n )\n\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-t\")\n\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_ON", "d_id": 84626, "documentation": { "docstring": "Process cr commandDisplays crypto {borrow,supply} interest rates for cryptocurrencies across several platforms.\n You can select rate type with --type {borrow,supply}\n You can display only N number of platforms with --limit parameter.Cryptocurrencies to search interest rates for separated by comma.\n Default: BTC,ETH,USDT,USDC. Options: {\",\".join(loanscan_model.CRYPTOS)}Platforms to search interest rates in separated by comma.\n Default: BlockFi,Ledn,SwissBorg,Youhodler. Options: {\",\".join(loanscan_model.PLATFORMS)}", "n_words": 55, "vocab_size": 39, "n_whitespaces": 106, "language": "en" } }, { "id": 111750, "commit_id": "8b2eb425274cdb4537fbce4a315aec12a378d6db", "repo": "nni", "path": "nni/retiarii/oneshot/pytorch/base_lightning.py", "file_name": "base_lightning.py", "fun_name": "export", "commit_message": "Lightning implementation for retiarii oneshot nas (#4479)", "code": "def export(self):\n \n result = {}\n for name, module in self.nas_modules:\n if name not in result:\n result[name] = module.export()\n return result\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 20, "vocab_size": 17, "complexity": 3, "nloc": 6, "token_counts": 37, "n_ast_nodes": 61, "n_identifiers": 6, "random_cut": "def export(self):\n \n result = {}\n for na", "d_id": 24479, "documentation": { "docstring": "\n Export the NAS result, ideally the best choice of each nas_modules.\n You may implement an ``export`` method for your customized nas_module.\n\n Returns\n --------\n result : Dict[str, int]\n Keys are names of nas_modules, and values are the choice indices of them.\n ", "n_words": 40, "vocab_size": 34, "n_whitespaces": 94, "language": "en" } }, { "id": 181840, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/base.py", "file_name": "base.py", "fun_name": "clean_pipeline_string", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def clean_pipeline_string(self, individual):\n \n dirty_string = str(individual)\n # There are many parameter prefixes in the pipeline strings, used solely for\n # making the terminal name unique, eg. LinearSVC__.\n parameter_prefixes = [\n (m.start(), m.end()) for m in re.finditer(\", [\\w]+__\", dirty_string)\n ]\n # We handle them in reverse so we do not mess up indices\n pretty = dirty_string\n for (start, end) in reversed(parameter_prefixes):\n pretty = pretty[: start + 2] + pretty[end:]\n\n return pretty\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 162, "n_words": 70, "vocab_size": 55, "complexity": 3, "nloc": 9, "token_counts": 74, "n_ast_nodes": 120, "n_identifiers": 13, "random_cut": "def clean_pipeline_string(self, individual):\n \n dirty_string = str(individual)\n # There are many parameter prefixes in the pipeline strings, used solely for\n # making the terminal name unique, eg. 
LinearSVC__.\n parameter_prefixes = [\n (m.start(), m.end()) for m in re.finditer(\", [\\w]+__\", dirty_string)\n ]\n # We handle them in reverse so we do not mess up indices\n pretty = dirty_string\n for (start, end) in reversed(parameter_prefixes", "d_id": 43614, "documentation": { "docstring": "Provide a string of the individual without the parameter prefixes.\n\n Parameters\n ----------\n individual: individual\n Individual which should be represented by a pretty string\n\n Returns\n -------\n A string like str(individual), but with parameter prefixes removed.\n\n ", "n_words": 34, "vocab_size": 28, "n_whitespaces": 94, "language": "en" } }, { "id": 199118, "commit_id": "5534ff6796b8d515192576f771af8488a838775c", "repo": "sympy", "path": "sympy/polys/matrices/linsolve.py", "file_name": "linsolve.py", "fun_name": "_lin_eq2dict", "commit_message": "Revert \"solve changes\"", "code": "def _lin_eq2dict(a, symset):\n \n if a in symset:\n return S.Zero, {a: S.One}\n elif a.is_Add:\n terms_list = defaultdict(list)\n coeff_list = []\n for ai in a.args:\n ci, ti = _lin_eq2dict(ai, symset)\n coeff_list.append(ci)\n for mij, cij in ti.items():\n terms_list[mij].append(cij)\n coeff = Add(*coeff_list)\n terms = {sym: Add(*coeffs) for sym, coeffs in terms_list.items()}\n return coeff, terms\n elif a.is_Mul:\n terms = terms_coeff = None\n coeff_list = []\n for ai in a.args:\n ci, ti = _lin_eq2dict(ai, symset)\n if not ti:\n coeff_list.append(ci)\n elif terms is None:\n terms = ti\n terms_coeff = ci\n else:\n raise PolyNonlinearError\n coeff = Mul(*coeff_list)\n if terms is None:\n return coeff, {}\n else:\n terms = {sym: coeff * c for sym, c in terms.items()}\n return coeff * terms_coeff, terms\n elif a.is_Equality:\n return _lin_eq2dict(a.lhs - a.rhs, symset)\n elif not a.has_free(*symset):\n return a, {}\n else:\n raise PolyNonlinearError\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 448, "n_words": 129, "vocab_size": 62, "complexity": 14, "nloc": 38, "token_counts": 252, "n_ast_nodes": 402, "n_identifiers": 33, "random_cut": "def _lin_eq2dict(a, symset):\n \n if a in symset:\n return S.Zero, {a: S.One}\n elif a.is_Add:\n terms_list = defaultdict(list)\n coeff_list = []\n for ai in a.args:\n ci, ti = _lin_eq2dict(ai, symset)\n coeff_list.append(ci)\n for mij, cij in ti.items():\n terms_list[mij].append(cij)\n coeff = Add(*coeff_list)\n terms = {sym: Add(*coeffs) for sym, coeffs in t", "d_id": 49151, "documentation": { "docstring": "Efficiently convert a linear equation to a dict of coefficients", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 130325, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/_azure/node_provider.py", "file_name": "node_provider.py", "fun_name": "internal_ip", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def internal_ip(self, node_id):\n \n ip = (\n self._get_cached_node(node_id=node_id)[\"internal_ip\"]\n or self._get_node(node_id=node_id)[\"internal_ip\"]\n )\n return ip\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 62, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 6, "token_counts": 37, "n_ast_nodes": 63, "n_identifiers": 6, "random_cut": "def internal_ip(self, node_id):\n \n 
ip = (\n self._get_cached_node(node_id=node_id)[\"internal_ip\"]\n or self._get_node(node_id=node_id)[\"internal_ip\"]\n )\n return ip\n", "d_id": 29231, "documentation": { "docstring": "Returns the internal ip (Ray ip) of the given node.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 262793, "commit_id": "9541ad638f73c1442c35ea870ad9c6e4f8cd9b62", "repo": "pyinstaller", "path": "PyInstaller/archive/writers.py", "file_name": "writers.py", "fun_name": "_write_file", "commit_message": "Fix file handle leaks.\n\nThis is mostly a refactoring of CArchiveWriter().add() which has gotten somewhat\ntangled trying to apply various file modifications whilst simultaneously\njuggling file streaming and optional zip compression. Since the modifications\nare all done on Python source/byte code files which are small, split the archive\npacking into two helpers, one which streams big files and a more malleable one\nwhich writes small files from memory without streaming, both of which handle the\nbookkeeping job of updating the TOC. This fixes a file handle leak.\n\nAdditionally take advantage of Python's builtin, but suppressed by default, file\nhandle leakage detection; any such leakages under a pytest run will now fail the\ntest. This requires a few other leakage fixes throughout the test suite to make\nit pass.", "code": "def _write_file(self, source, dest, type, compress=False):\n \n start = self.lib.tell()\n length = os.stat(source).st_size\n with open(source, 'rb') as f:\n if compress:\n buffer = bytearray(16 * 1024)\n compressor = zlib.compressobj(self.LEVEL)\n while 1:\n read = f.readinto(buffer)\n if not read:\n break\n self.lib.write(compressor.compress(buffer[:read]))\n self.lib.write(compressor.flush())\n\n else:\n shutil.copyfileobj(f, self.lib)\n self.toc.add(start, self.lib.tell() - start, length, int(compress), type, dest)\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 261, "n_words": 49, "vocab_size": 42, "complexity": 4, "nloc": 16, "token_counts": 152, "n_ast_nodes": 243, "n_identifiers": 30, "random_cut": "def _write_file(self, source, dest, type, compress=False):\n \n start = self.lib.tell()\n length = os.stat(source).st_size\n with open(source, 'rb') as f:\n ", "d_id": 77374, "documentation": { "docstring": "\n Stream copy a large file into the archive and update the table of contents.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 176189, "commit_id": "5dfd57af2a141a013ae3753e160180b82bec9469", "repo": "networkx", "path": "networkx/linalg/graphmatrix.py", "file_name": "graphmatrix.py", "fun_name": "incidence_matrix", "commit_message": "Use scipy.sparse array datastructure (#5139)\n\n* Step 1: use sparse arrays in nx.to_scipy_sparse_matrix.\r\n\r\nSeems like a reasonable place to start.\r\nnx.to_scipy_sparse_matrix is one of the primary interfaces to\r\nscipy.sparse from within NetworkX.\r\n\r\n* 1: Use np.outer instead of mult col/row vectors\r\n\r\nFix two instances in modularitymatrix where a new 2D array was being\r\ncreated via an outer product of two \\\"vectors\\\".\r\n\r\nIn the matrix case, this was a row vector \\* a column vector. 
In the\r\narray case this can be disambiguated by being explicit with np.outer.\r\n\r\n* Update _transition_matrix in laplacianmatrix module\r\n\r\n - A few instances of matrix multiplication operator\r\n - Add np.newaxis + transpose to get shape right for broadcasting\r\n - Explicitly convert e.g. sp.sparse.spdiags to a csr_array.\r\n\r\n* Update directed_combinitorial_laplacian w/ sparse array.\r\n\r\n - Wrap spdiags in csr_array and update matmul operators.\r\n\r\n* Rm matrix-specific code from lgc and hmn modules\r\n\r\n - Replace .A call with appropriate array semantics\r\n - wrap sparse.diags in csr_array.\r\n\r\n* Change hits to use sparse array semantics.\r\n\r\n - Replace * with @\r\n - Remove superfluous calls to flatten.\r\n\r\n* Update sparse matrix usage in layout module.\r\n - Simplify lil.getrowview call\r\n - Wrap spdiags in csr_array.\r\n\r\n* lil_matrix -> lil_array in graphmatrix.py.\r\n\r\n* WIP: Start working on algebraic connectivity module.\r\n\r\n* Incorporate auth mat varname feedback.\r\n\r\n* Revert 1D slice and comment for 1D sparse future.\r\n\r\n* Add TODOs: rm csr_array wrapper around spdiags etc.\r\n\r\n* WIP: cleanup algebraicconn: tracemin_fiedler.\r\n\r\n* Typo.\r\n\r\n* Finish reviewing algebraicconnectivity.\r\n\r\n* Convert bethe_hessian matrix to use sparse arrays.\r\n\r\n* WIP: update laplacian.\r\n\r\nUpdate undirected laplacian functions.\r\n\r\n* WIP: laplacian - add comment about _transition_matrix return types.\r\n\r\n* Finish laplacianmatrix review.\r\n\r\n* Update attrmatrix.\r\n\r\n* Switch to official laplacian function.\r\n\r\n* Update pagerank to use sparse array.\r\n\r\n* Switch bipartite matrix to sparse arrays.\r\n\r\n* Check from_scipy_sparse_matrix works with arrays.\r\n\r\nModifies test suite.\r\n\r\n* Apply changes from review.\r\n\r\n* Fix failing docstring tests.\r\n\r\n* Fix missing axis for in-place multiplication.\r\n\r\n* Use scipy==1.8rc2\r\n\r\n* Use matrix multiplication\r\n\r\n* Fix PyPy CI\r\n\r\n* [MRG] Create plot_subgraphs.py example (#5165)\r\n\r\n* Create plot_subgraphs.py\r\n\r\nhttps://github.com/networkx/networkx/issues/4220\r\n\r\n* Update plot_subgraphs.py\r\n\r\nblack\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint plus font_size\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded more plots\r\n\r\n* Update plot_subgraphs.py\r\n\r\nremoved plots from the unit test and added comments\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint\r\n\r\n* Update plot_subgraphs.py\r\n\r\ntypos fixed\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded nodes to the plot of the edges removed that was commented out for whatever reason\r\n\r\n* Update plot_subgraphs.py\r\n\r\nrevert the latest commit - the line was commented out for a reason - it's broken\r\n\r\n* Update plot_subgraphs.py\r\n\r\nfixed node color issue\r\n\r\n* Update plot_subgraphs.py\r\n\r\nformat fix\r\n\r\n* Update plot_subgraphs.py\r\n\r\nforgot to draw the nodes... 
now fixed\r\n\r\n* Fix sphinx warnings about heading length.\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult \r\n\r\n* Add traveling salesman problem to example gallery (#4874)\r\n\r\nAdds an example of the using Christofides to solve the TSP problem to the example galery.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037)\r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges()\r\n\r\n* Resolved Requested Changes\r\n\r\n* Revert changes to degree docstrings.\r\n\r\n* Update comments in example.\r\n\r\n* Apply wording to edges method in all graph classes.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Compatibility updates from testing with numpy/scipy/pytest rc's (#5226)\r\n\r\n* Rm deprecated scipy subpkg access.\r\n\r\n* Use recwarn fixture in place of deprecated pytest pattern.\r\n\r\n* Rm unnecessary try/except from tests.\r\n\r\n* Replace internal `close` fn with `math.isclose`. (#5224)\r\n\r\n* Replace internal close fn with math.isclose.\r\n\r\n* Fix lines in docstring examples.\r\n\r\n* Fix Python 3.10 deprecation warning w/ int div. (#5231)\r\n\r\n* Touchups and suggestions for subgraph gallery example (#5225)\r\n\r\n* Simplify construction of G with edges rm'd\r\n\r\n* Rm unused graph attribute.\r\n\r\n* Shorten categorization by node type.\r\n\r\n* Simplify node coloring.\r\n\r\n* Simplify isomorphism check.\r\n\r\n* Rm unit test.\r\n\r\n* Rm redundant plotting of each subgraph.\r\n\r\n* Use new package name (#5234)\r\n\r\n* Allowing None edges in weight function of bidirectional Dijkstra (#5232)\r\n\r\n* added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None.\r\n\r\n* changed syntax for better readability and code duplicate avoidance\r\n\r\nCo-authored-by: Hohmann, Nikolas \r\n\r\n* Add an FAQ about assigning issues. (#5182)\r\n\r\n* Add FAQ about assigning issues.\r\n\r\n* Add note about linking issues from new PRs.\r\n\r\n* Update dev deps (#5243)\r\n\r\n* Update minor doc issues with tex notation (#5244)\r\n\r\n* Add FutureWarnings to fns that return sparse matrices\r\n\r\n - biadjacency_matrix.\r\n - bethe_hessian_matrix.\r\n - incidence_matrix.\r\n - laplacian functions.\r\n - modularity_matrix functions.\r\n - adjacency_matrix.\r\n\r\n* Add to_scipy_sparse_array and use it everywhere.\r\n\r\nAdd a new conversion function to preserve array semantics internally\r\nwhile not altering behavior for users.\r\n\r\nAlso adds FutureWarning to to_scipy_sparse_matrix.\r\n\r\n* Add from_scipy_sparse_array. 
Supercedes from_scipy_sparse_matrix.\r\n\r\n* Handle deprecations in separate PR.\r\n\r\n* Fix docstring examples.\r\n\r\nCo-authored-by: Mridul Seth \r\n\r\nCo-authored-by: Jarrod Millman \r\nCo-authored-by: Andrew Knyazev \r\nCo-authored-by: Dan Schult \r\nCo-authored-by: eskountis <56514439+eskountis@users.noreply.github.com>\r\nCo-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com>\r\nCo-authored-by: NikHoh \r\nCo-authored-by: Hohmann, Nikolas \r\nCo-authored-by: Sultan Orazbayev \r\nCo-authored-by: Mridul Seth ", "code": "def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False, weight=None):\n \n import scipy as sp\n import scipy.sparse # call as sp.sparse\n\n if nodelist is None:\n nodelist = list(G)\n if edgelist is None:\n if G.is_multigraph():\n edgelist = list(G.edges(keys=True))\n else:\n edgelist = list(G.edges())\n A = sp.sparse.lil_array((len(nodelist), len(edgelist)))\n node_index = {node: i for i, node in enumerate(nodelist)}\n for ei, e in enumerate(edgelist):\n (u, v) = e[:2]\n if u == v:\n continue # self loops give zero column\n try:\n ui = node_index[u]\n vi = node_index[v]\n except KeyError as err:\n raise nx.NetworkXError(\n f\"node {u} or {v} in edgelist but not in nodelist\"\n ) from err\n if weight is None:\n wt = 1\n else:\n if G.is_multigraph():\n ekey = e[2]\n wt = G[u][v][ekey].get(weight, 1)\n else:\n wt = G[u][v].get(weight, 1)\n if oriented:\n A[ui, ei] = -wt\n A[vi, ei] = wt\n else:\n A[ui, ei] = wt\n A[vi, ei] = wt\n import warnings\n\n warnings.warn(\n \"incidence_matrix will return a scipy.sparse array instead of a matrix in Networkx 3.0.\",\n FutureWarning,\n stacklevel=2,\n )\n # TODO: Rm sp.sparse.csc_matrix in Networkx 3.0\n return A.asformat(\"csc\")\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 517, "n_words": 164, "vocab_size": 103, "complexity": 11, "nloc": 44, "token_counts": 290, "n_ast_nodes": 463, "n_identifiers": 38, "random_cut": "def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False, weight=None):\n \n import scipy as sp\n import scipy.sparse # call as sp.sparse\n\n if nodelist is None:\n nodelist = list(G)\n if edgelist is None:\n if G.is_multigraph():\n edgelist = list(G.edges(keys=True))\n else:\n edgelist = list(G.edges())\n A = sp.sparse.lil_array((len(nodelist), len(edgelist)))\n node_index = {node: i for i, node in enumerate(nodelist)}\n for ei, e in enumerate(edgelist):\n (u, v) = e[:2]\n if u == v:\n continue # self loops give zero column\n try:\n ui = node_index[u]\n vi = node_index[v]\n except KeyError as err:\n raise nx.NetworkXError(\n f\"node {u} or {v} in edgelist but not in nodelist\"\n ) from err\n if weight is None:\n wt = 1\n else:\n if G.is_multigraph():\n ekey = e[2]\n wt = G[u][v][ekey].get(weight, 1)\n else:\n wt = G[u][v].get(weight, 1)\n if oriented:\n A[ui, ei] = -wt\n A[vi, ei] = wt\n else:\n A[ui, ei] = wt\n A[vi, ei] = wt\n import warnings\n\n warnings.warn(\n \"incidence", "d_id": 41755, "documentation": { "docstring": "Returns incidence matrix of G.\n\n The incidence matrix assigns each row to a node and each column to an edge.\n For a standard incidence matrix a 1 appears wherever a row's node is\n incident on the column's edge. For an oriented incidence matrix each\n edge is assigned an orientation (arbitrarily for undirected and aligning to\n direction for directed). 
A -1 appears for the source (tail) of an edge and\n 1 for the destination (head) of the edge. The elements are zero otherwise.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional (default= all nodes in G)\n The rows are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n edgelist : list, optional (default= all edges in G)\n The columns are ordered according to the edges in edgelist.\n If edgelist is None, then the ordering is produced by G.edges().\n\n oriented: bool, optional (default=False)\n If True, matrix elements are +1 or -1 for the head or tail node\n respectively of each edge. If False, +1 occurs at both nodes.\n\n weight : string or None, optional (default=None)\n The edge data key used to provide each value in the matrix.\n If None, then each edge has weight 1. Edge weights, if used,\n should be positive so that the orientation can provide the sign.\n\n Returns\n -------\n A : SciPy sparse matrix\n The incidence matrix of G.\n\n Notes\n -----\n For MultiGraph/MultiDiGraph, the edges in edgelist should be\n (u,v,key) 3-tuples.\n\n \"Networks are the best discrete model for so many problems in\n applied mathematics\" [1]_.\n\n References\n ----------\n .. [1] Gil Strang, Network applications: A = incidence matrix,\n http://videolectures.net/mit18085f07_strang_lec03/\n ", "n_words": 272, "vocab_size": 140, "n_whitespaces": 428, "language": "en" } }, { "id": 278117, "commit_id": "6fafb567af4e4d9f42974d0b6c55b18bc03e17eb", "repo": "keras", "path": "keras/feature_column/sequence_feature_column.py", "file_name": "sequence_feature_column.py", "fun_name": "call", "commit_message": "resolve line-too-long in feature_column", "code": "def call(self, features, training=None):\n \n if not isinstance(features, dict):\n raise ValueError(\n \"We expected a dictionary here. Instead we got: \", features\n )\n if training is None:\n training = backend.learning_phase()\n transformation_cache = (\n tf.__internal__.feature_column.FeatureTransformationCache(features)\n )\n output_tensors = []\n sequence_lengths = []\n\n for column in self._feature_columns:\n with backend.name_scope(column.name):\n try:\n (\n dense_tensor,\n sequence_length,\n ) = column.get_sequence_dense_tensor(\n transformation_cache,\n self._state_manager,\n training=training,\n )\n except TypeError:\n (\n dense_tensor,\n sequence_length,\n ) = column.get_sequence_dense_tensor(\n transformation_cache, self._state_manager\n )\n # Flattens the final dimension to produce a 3D Tensor.\n output_tensors.append(\n self._process_dense_tensor(column, dense_tensor)\n )\n sequence_lengths.append(sequence_length)\n\n # Check and process sequence lengths.\n kfc._verify_static_batch_size_equality(\n sequence_lengths, self._feature_columns\n )\n sequence_length = _assert_all_equal_and_return(sequence_lengths)\n\n return self._verify_and_concat_tensors(output_tensors), sequence_length\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 677, "n_words": 98, "vocab_size": 73, "complexity": 5, "nloc": 39, "token_counts": 167, "n_ast_nodes": 264, "n_identifiers": 31, "random_cut": "def call(self, features, training=None):\n \n if not isinstance(features, dict):\n raise ValueError(\n \"We expected a dictionary here. 
Instead we got: \", features\n )\n if training is None:\n training = backend.learning_phase()\n transformation_cache = (\n tf.__internal__.feature_column.FeatureTransformationCache(features)\n )\n output_tensors = []\n sequence_lengths = []\n\n for column in self._feature_columns:\n with backend.name_scope(column.name):\n try:\n (\n dense_tensor,\n sequence_length,\n ) = column.get_sequence_dense_tensor(\n transformation_cache,\n self._state_manager,\n training=training,\n )\n except TypeError:\n (\n dense_tensor,\n sequence_length,\n ) = column.get_sequence_dense_tensor(\n transformation_cache, self._state_manager\n )\n # Flattens the final dimension to produce a 3D Tensor.\n output_tensors.append(\n self._process_dense_tensor(column, dense_tensor)\n )\n sequence_lengths.append(sequence_length)\n\n # Check and process sequence lengths.\n kfc._verify", "d_id": 82375, "documentation": { "docstring": "Returns sequence input corresponding to the `feature_columns`.\n\n Args:\n features: A dict mapping keys to tensors.\n training: Python boolean or None, indicating whether to the layer is\n being run in training mode. This argument is passed to the call\n method of any `FeatureColumn` that takes a `training` argument. For\n example, if a `FeatureColumn` performed dropout, the column could\n expose a `training` argument to control whether the dropout should\n be applied. If `None`, defaults to\n `tf.keras.backend.learning_phase()`.\n\n\n Returns:\n An `(input_layer, sequence_length)` tuple where:\n - input_layer: A float `Tensor` of shape `[batch_size, T, D]`.\n `T` is the maximum sequence length for this batch, which could\n differ from batch to batch. `D` is the sum of `num_elements` for\n all `feature_columns`.\n - sequence_length: An int `Tensor` of shape `[batch_size]`. 
The\n sequence length for each example.\n\n Raises:\n ValueError: If features are not a dictionary.\n ", "n_words": 137, "vocab_size": 99, "n_whitespaces": 335, "language": "en" } }, { "id": 220706, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/sslproto.py", "file_name": "sslproto.py", "fun_name": "feed_eof", "commit_message": "add python 3.10.4 for windows", "code": "def feed_eof(self):\n \n self._incoming.write_eof()\n ssldata, appdata = self.feed_ssldata(b'')\n assert appdata == [] or appdata == [b'']\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 43, "n_words": 15, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 36, "n_ast_nodes": 62, "n_identifiers": 7, "random_cut": "def feed_eof(self):\n \n self._incoming.write", "d_id": 56094, "documentation": { "docstring": "Send a potentially \"ragged\" EOF.\n\n This method will raise an SSL_ERROR_EOF exception if the EOF is\n unexpected.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 38, "language": "en" } }, { "id": 184614, "commit_id": "b22436933acc0d7440ec300f971a249bd6105a5b", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "screen", "commit_message": "lots of docstrings", "code": "def screen(self) -> Screen:\n \n try:\n return self._screen_stack[-1]\n except IndexError:\n raise ScreenStackError(\"No screens on stack\") from None\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 13, "token_counts": 28, "n_ast_nodes": 49, "n_identifiers": 6, "random_cut": "def screen(self) -> Screen:\n \n try:\n return ", "d_id": 44714, "documentation": { "docstring": "Get the current screen.\n\n Raises:\n ScreenStackError: If there are no screens on the stack.\n\n Returns:\n Screen: The currently active screen.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 63, "language": "en" } }, { "id": 201102, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/apps/tests.py", "file_name": "tests.py", "fun_name": "test_empty_dunder_path_no_dunder_file", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_empty_dunder_path_no_dunder_file(self):\n \n with self.assertRaises(ImproperlyConfigured):\n AppConfig(\"label\", Stub(__path__=[]))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 31, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 7, "random_cut": "def test_empty_dunder_path_no_dunder_file(self):\n \n with self.assertRaises(ImproperlyConfigured):\n AppConfig(\"label\", Stub(__path__=[]", "d_id": 49872, "documentation": { "docstring": "If the __path__ attr is empty and there is no __file__, raise.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 158287, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/tensorflow.py", "file_name": "tensorflow.py", "fun_name": "train_epoch_ch3", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def train_epoch_ch3(net, train_iter, loss, updater):\n \n # Sum of training loss, sum of training accuracy, no. of examples\n metric = Accumulator(3)\n for X, y in train_iter:\n # Compute gradients and update parameters\n with tf.GradientTape() as tape:\n y_hat = net(X)\n # Keras implementations for loss takes (labels, predictions)\n # instead of (predictions, labels) that users might implement\n # in this book, e.g. 
`cross_entropy` that we implemented above\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n # Keras loss by default returns the average loss in a batch\n l_sum = l * float(tf.size(y)) if isinstance(\n loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n # Return training loss and training accuracy\n return metric[0] / metric[2], metric[1] / metric[2]\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 352, "n_words": 134, "vocab_size": 98, "complexity": 5, "nloc": 19, "token_counts": 207, "n_ast_nodes": 324, "n_identifiers": 33, "random_cut": "def train_epoch_ch3(net, train_iter, loss, updater):\n \n # Sum of training loss, sum of training accuracy, no. of examples\n metric = Accumulator(3)\n for X, y ", "d_id": 37440, "documentation": { "docstring": "The training loop defined in Chapter 3.\n\n Defined in :numref:`sec_softmax_scratch`", "n_words": 10, "vocab_size": 9, "n_whitespaces": 12, "language": "en" } }, { "id": 261169, "commit_id": "b22f7fa552c03aa7f6b9b4d661470d0173f8db5d", "repo": "scikit-learn", "path": "sklearn/utils/discovery.py", "file_name": "discovery.py", "fun_name": "all_estimators", "commit_message": "MNT numpydoc validation for Displays (#21469)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def all_estimators(type_filter=None):\n \n # lazy import to avoid circular imports from sklearn.base\n from . import IS_PYPY\n from ._testing import ignore_warnings\n from ..base import (\n BaseEstimator,\n ClassifierMixin,\n RegressorMixin,\n TransformerMixin,\n ClusterMixin,\n )\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 82, "n_words": 29, "vocab_size": 23, "complexity": 23, "nloc": 67, "token_counts": 361, "n_ast_nodes": 62, "n_identifiers": 11, "random_cut": "def all_estimators(type_filter=None):\n \n # lazy import to avoid circular imports from sklearn.base\n from . import IS_PYPY\n from ._testing import ignore_warnings\n", "d_id": 76674, "documentation": { "docstring": "Get a list of all estimators from `sklearn`.\n\n This function crawls the module and gets all classes that inherit\n from BaseEstimator. Classes that are defined in test-modules are not\n included.\n\n Parameters\n ----------\n type_filter : {\"classifier\", \"regressor\", \"cluster\", \"transformer\"} \\\n or list of such str, default=None\n Which kind of estimators should be returned. If None, no filter is\n applied and all estimators are returned. 
Possible values are\n 'classifier', 'regressor', 'cluster' and 'transformer' to get\n estimators only of these specific types, or a list of these to\n get the estimators that fit at least one of the types.\n\n Returns\n -------\n estimators : list of tuples\n List of (name, class), where ``name`` is the class name as string\n and ``class`` is the actual type of the class.\n ", "n_words": 124, "vocab_size": 83, "n_whitespaces": 215, "language": "en" } }, { "id": 110587, "commit_id": "d8bb1a52316c38434e526412c27d9c4b01960084", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_legend.py", "file_name": "test_legend.py", "fun_name": "test_legend_auto5", "commit_message": "ENH: rely on non-rectangular patch paths rather than bboxes for legend auto-placing (fix #9580) (#9598)\n\n* use path rather than bbox for non rectangular patches\r\n\r\n* Add tests\r\n\r\n* Add a short breadcrumb note in api_changes", "code": "def test_legend_auto5():\n \n fig, axs = plt.subplots(ncols=2, figsize=(9.6, 4.8))\n\n leg_bboxes = []\n for ax, loc in zip(axs.flat, (\"center\", \"best\")):\n # An Ellipse patch at the top, a U-shaped Polygon patch at the\n # bottom and a ring-like Wedge patch: the correct placement of\n # the legend should be in the center.\n for _patch in [\n mpatches.Ellipse(\n xy=(0.5, 0.9), width=0.8, height=0.2, fc=\"C1\"),\n mpatches.Polygon(np.array([\n [0, 1], [0, 0], [1, 0], [1, 1], [0.9, 1.0], [0.9, 0.1],\n [0.1, 0.1], [0.1, 1.0], [0.1, 1.0]]), fc=\"C1\"),\n mpatches.Wedge((0.5, 0.5), 0.5, 0, 360, width=0.05, fc=\"C0\")\n ]:\n ax.add_patch(_patch)\n\n ax.plot([0.1, 0.9], [0.9, 0.9], label=\"A segment\") # sthg to label\n\n leg = ax.legend(loc=loc)\n fig.canvas.draw()\n leg_bboxes.append(\n leg.get_window_extent().transformed(ax.transAxes.inverted()))\n\n assert_allclose(leg_bboxes[1].bounds, leg_bboxes[0].bounds)\n\n\n@image_comparison(['legend_various_labels'], remove_text=True)", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@image_comparison(['legend_various_labels'], remove_text=True)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 319, "n_words": 109, "vocab_size": 82, "complexity": 3, "nloc": 19, "token_counts": 300, "n_ast_nodes": 390, "n_identifiers": 39, "random_cut": "def test_legend_auto5():\n \n fig, axs = plt.subplots(ncols=2, figsize=(9.6, 4.8))\n\n leg_bboxes = []\n for ax, loc in zip(axs.flat, (\"center\", \"best\")):\n # An Ellipse patch at the top, a U-shaped Polygon patch at the\n # bottom and a ring-like Wedge patch: the correct placement of\n # the legend should be in the center.\n for _patch in [\n mpatches.Ellipse(\n xy=(0.5, 0.9), width=0.8, height=0.2, fc=\"C1\"),\n mpatches.Polygon(np.array([\n [0, 1], [0, 0], [1, 0], [1, 1], [0.9, 1.0], [0.9, 0.1],\n [0.1, 0.1], [0.1, 1.0], [0.1, 1.0]]), fc=\"C1\"),\n mpatches.Wedge((0.5, 0.5), 0.5, 0, 360, width=0.05, fc=\"C0\")\n ]:\n ax.add_patch(_patch)\n\n ax.plot([0.1, 0.9], [0.9, 0.9], label=\"A segment\") # sthg to label\n\n leg = ax.legend(loc=loc)\n fig.canvas.draw()\n leg_bboxes.append(\n leg.get_window_extent().transformed(ax.transAxes.inverted()))\n\n assert_allclose(leg_bboxes[1].bounds, leg_bboxes[0].bounds)\n\n\n@image_comparison(['legend_various_labels'], remove_t", "d_id": 24225, "documentation": { "docstring": "\n Check that the automatic placement handle a rather complex\n case with non rectangular patch. 
Related to issue #9580.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 28, "language": "en" } }, { "id": 183454, "commit_id": "d8179c70dc06e06b2f445fdfb47fb7012d4cb2ed", "repo": "textual", "path": "src/textual/widgets/text_input.py", "file_name": "text_input.py", "fun_name": "_toggle_cursor_visible", "commit_message": "Conditional blinking", "code": "def _toggle_cursor_visible(self):\n \n if time.monotonic() - self._last_keypress_time > self.cursor_blink_period:\n self._cursor_blink_visible = not self._cursor_blink_visible\n self.refresh()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 49, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 59, "n_identifiers": 8, "random_cut": "def _toggle_cursor_visible(self):\n \n if time.monotonic() - self._last_keypress_time > self.cursor", "d_id": 44195, "documentation": { "docstring": "Manages the blinking of the cursor - ensuring blinking only starts when the\n user hasn't pressed a key in some time", "n_words": 21, "vocab_size": 18, "n_whitespaces": 27, "language": "en" } }, { "id": 98450, "commit_id": "4ffb52489e662029a08169351cd997d525977e88", "repo": "sentry", "path": "src/sentry/search/events/filter.py", "file_name": "filter.py", "fun_name": "parse_semver", "commit_message": "fix(events-search): Return helpful error message on semver filter (#33785)\n\n'IN' type queries currently raise an unhandled KeyError, raising an\r\nInvalidSearchQuery instead.", "code": "def parse_semver(version, operator) -> Optional[SemverFilter]:\n \n (operator, negated) = handle_operator_negation(operator)\n try:\n operator = OPERATOR_TO_DJANGO[operator]\n except KeyError:\n raise InvalidSearchQuery(\"Invalid operation 'IN' for semantic version filter.\")\n\n version = version if \"@\" in version else f\"{SEMVER_FAKE_PACKAGE}@{version}\"\n parsed = parse_release_relay(version)\n parsed_version = parsed.get(\"version_parsed\")\n if parsed_version:\n # Convert `pre` to always be a string\n prerelease = parsed_version[\"pre\"] if parsed_version[\"pre\"] else \"\"\n semver_filter = SemverFilter(\n operator,\n [\n parsed_version[\"major\"],\n parsed_version[\"minor\"],\n parsed_version[\"patch\"],\n parsed_version[\"revision\"],\n 0 if prerelease else 1,\n prerelease,\n ],\n negated=negated,\n )\n if parsed[\"package\"] and parsed[\"package\"] != SEMVER_FAKE_PACKAGE:\n semver_filter.package = parsed[\"package\"]\n return semver_filter\n else:\n # Try to parse as a wildcard match\n package, version = version.split(\"@\", 1)\n version_parts = []\n if version:\n for part in version.split(\".\", 3):\n if part in SEMVER_WILDCARDS:\n break\n try:\n # We assume all ints for a wildcard match - not handling prerelease as\n # part of these\n version_parts.append(int(part))\n except ValueError:\n raise InvalidSearchQuery(INVALID_SEMVER_MESSAGE)\n\n package = package if package and package != SEMVER_FAKE_PACKAGE else None\n return SemverFilter(\"exact\", version_parts, package, negated)\n\n\nkey_conversion_map: Mapping[\n str,\n Callable[[SearchFilter, str, Mapping[str, Union[int, str, datetime]]], Optional[Sequence[any]]],\n] = {\n \"environment\": _environment_filter_converter,\n \"message\": _message_filter_converter,\n TRANSACTION_STATUS_ALIAS: _transaction_status_filter_converter,\n \"issue.id\": _issue_id_filter_converter,\n USER_DISPLAY_ALIAS: _user_display_filter_converter,\n ERROR_UNHANDLED_ALIAS: 
_error_unhandled_filter_converter,\n \"error.handled\": _error_handled_filter_converter,\n TEAM_KEY_TRANSACTION_ALIAS: _team_key_transaction_filter_converter,\n RELEASE_STAGE_ALIAS: _release_stage_filter_converter,\n SEMVER_ALIAS: _semver_filter_converter,\n SEMVER_PACKAGE_ALIAS: _semver_package_filter_converter,\n SEMVER_BUILD_ALIAS: _semver_build_filter_converter,\n}\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 651, "n_words": 191, "vocab_size": 132, "complexity": 14, "nloc": 50, "token_counts": 224, "n_ast_nodes": 498, "n_identifiers": 55, "random_cut": "def parse_semver(version, operator) -> Optional[SemverFilter]:\n \n (operator, negated) = handle_operator_negation(operator)\n try:\n operator = OPERATOR_TO_DJANGO[operator]\n except KeyError:\n raise InvalidSearchQuery(\"Invalid operation 'IN' for semantic version filter.\")\n\n version = version if \"@\" in version else f\"{SEMVER_FAKE_PACKAGE}@{version}\"\n parsed = parse_release_relay(version)\n parsed_version = parsed.get(\"version_parsed\")\n if parsed_version:\n # Convert `pre` to always be a string\n prerelease = parsed_version[\"pre\"] if parsed_version[\"pre\"] else \"\"\n semver_filter = SemverFilter(\n operator,\n [\n parsed_version[\"major\"],\n parsed_version[\"minor\"],\n parsed_version[\"patch\"],\n parsed_version[\"revision\"],\n 0 if prerelease else 1,\n prerelease,\n ],\n negated=negated,\n )\n if parsed[\"package\"] and parsed[\"package\"] != SEMVER_FAKE_PACKAGE:\n semver_filter.package = parsed[\"package\"]\n return semver_filter\n else:\n # Try to parse as a wildcard match\n package, version = version.split(\"@\", 1)\n version_parts = []\n if version:\n for part in version.split(\".\", 3):\n if part in SEMVER_WILDCARDS:\n break\n try:\n # We assume all ints for a wildcard match - not handling prerelease as\n # part of these\n version_parts.append(int(part))\n except ValueError:\n raise InvalidSearchQuery(INVALID_SEMVER_MESSAGE)\n\n package = package if package and package != SEMVER_FAKE_PACKAGE else None\n return SemverFilter(\"exact\", version_parts, package, negated)\n\n\nkey_convers", "d_id": 19571, "documentation": { "docstring": "\n Attempts to parse a release version using our semver syntax. version should be in\n format `@` or ``, where package_name is a string and\n version is a version string matching semver format (https://semver.org/). We've\n slightly extended this format to allow up to 4 integers. 
EG\n - sentry@1.2.3.4\n - sentry@1.2.3.4-alpha\n - 1.2.3.4\n - 1.2.3.4-alpha\n - 1.*\n ", "n_words": 55, "vocab_size": 39, "n_whitespaces": 91, "language": "en" } }, { "id": 71810, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_account_management.py", "file_name": "test_account_management.py", "fun_name": "test_not_logged_in_gives_403_to_ajax_requests", "commit_message": "Reformat with black", "code": "def test_not_logged_in_gives_403_to_ajax_requests(self):\n \n # Get dashboard\n response = self.client.get(\n reverse(\"wagtailadmin_home\"), HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n\n # AJAX requests should be given a 403 error instead of being redirected\n self.assertEqual(response.status_code, 403)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 79, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 5, "token_counts": 33, "n_ast_nodes": 60, "n_identifiers": 9, "random_cut": "def test_not_logged_in_gives_403_to_ajax_requests(self):\n \n # Get dashboard\n response = self.client.get(\n reverse(\"wagtailadmin_home\"), HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n\n # AJAX requests should be given a 403 error instead of being redirected\n self.assertEqual(response.status_code, 403)\n", "d_id": 15757, "documentation": { "docstring": "\n This tests that a not logged in user is given a 403 error on AJAX requests\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 194784, "commit_id": "81f722d29045a7a5841d0931a082ded1d1f13863", "repo": "ParlAI", "path": "parlai/utils/bpe.py", "file_name": "bpe.py", "fun_name": "bytes_to_unicode", "commit_message": "autoformat (#4378)", "code": "def bytes_to_unicode(self) -> Dict[int, str]:\n \n bs: List[int] = (\n list(range(ord(\"!\"), ord(\"~\") + 1))\n + list(range(ord(\"¡\"), ord(\"¬\") + 1))\n + list(range(ord(\"®\"), ord(\"ÿ\") + 1))\n )\n cs: List[int] = bs[:]\n n = 0\n for b in range(2 ** 8):\n if b not in bs:\n bs.append(b)\n cs.append(2 ** 8 + n)\n n += 1\n str_cs: List[str] = [chr(n) for n in cs]\n return dict(zip(bs, str_cs))\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 207, "n_words": 62, "vocab_size": 43, "complexity": 4, "nloc": 25, "token_counts": 151, "n_ast_nodes": 247, "n_identifiers": 18, "random_cut": "def bytes_to_unicode(self) -> Dict[int, str]:\n \n bs: List[int] = (\n list(range(ord(\"!\"), ord(\"~\") + 1))\n + list(range(ord(\"¡\"), ord(\"¬\") + 1))\n + list(range(ord(\"®\"), ord(\"ÿ\") + 1))\n )\n cs: List[int] = bs[:]\n ", "d_id": 47077, "documentation": { "docstring": "\n Returns list of utf-8 byte and a corresponding list of unicode strings.\n\n The reversible bpe codes work on unicode strings. This means you need a large #\n of unicode characters in your vocab if you want to avoid UNKs. When you're at\n something like a 10B token dataset you end up needing around 5K for decent\n coverage. 
This is a significant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n And avoids mapping to whitespace/control characters the bpe code barfs on.\n ", "n_words": 93, "vocab_size": 69, "n_whitespaces": 150, "language": "en" } }, { "id": 291809, "commit_id": "532ab12a48b6832180599088250fc23446a45d1e", "repo": "core", "path": "tests/components/caldav/test_calendar.py", "file_name": "test_calendar.py", "fun_name": "test_get_events_custom_calendars", "commit_message": "Local calendar integration (#79601)", "code": "async def test_get_events_custom_calendars(hass, calendar, get_api_events):\n \n config = dict(CALDAV_CONFIG)\n config[\"custom_calendars\"] = [\n {\"name\": \"Private\", \"calendar\": \"Private\", \"search\": \"This is a normal event\"}\n ]\n\n assert await async_setup_component(hass, \"calendar\", {\"calendar\": config})\n await hass.async_block_till_done()\n\n events = await get_api_events(\"calendar.private_private\")\n assert events == [\n {\n \"end\": {\"dateTime\": \"2017-11-27T10:00:00-08:00\"},\n \"start\": {\"dateTime\": \"2017-11-27T09:00:00-08:00\"},\n \"summary\": \"This is a normal event\",\n \"location\": \"Hamburg\",\n \"description\": \"Surprisingly rainy\",\n \"uid\": None,\n \"recurrence_id\": None,\n \"rrule\": None,\n }\n ]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 200, "n_words": 64, "vocab_size": 48, "complexity": 1, "nloc": 20, "token_counts": 110, "n_ast_nodes": 212, "n_identifiers": 10, "random_cut": "async def test_get_events_custom_calendars(hass, calendar, get_api_events):\n \n config = dict(CALDAV_CONFIG)\n config[\"custom_calendars\"] = [\n {\"name\": \"Private\", \"calendar\": \"Private\", \"search\": \"This is a normal event\"}\n ]\n\n assert await async_setup_component(hass, \"calendar\", {\"calend", "d_id": 90913, "documentation": { "docstring": "Test that only searched events are returned on API.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 113538, "commit_id": "8f454f3bf29e2c3cd0d359231a46edd8ee768d42", "repo": "nni", "path": "nni/mutable/symbol.py", "file_name": "symbol.py", "fun_name": "leaf_symbols", "commit_message": "Mutable V3 (Stage 2) - Symbolic execution engine (#5195)", "code": "def leaf_symbols(self) -> Iterable[Symbol]:\n \n for arg in self.arguments:\n if isinstance(arg, SymbolicExpression):\n yield from arg.leaf_symbols()\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 14, "vocab_size": 14, "complexity": 3, "nloc": 10, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def leaf_symbols(self) -> Iterable[Symbol]:\n \n for arg in self.arguments:\n if isinstanc", "d_id": 24940, "documentation": { "docstring": "\n Return a generator of all leaf symbols.\n\n Useful for when you want to inspect when the symbols come from.\n No deduplication even if the symbols has duplicates.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 56, "language": "en" } }, { "id": 137777, "commit_id": "8e680c483ce326cefc62e44f68ab1a6948b1c3d2", "repo": "ray", "path": "rllib/evaluate.py", "file_name": "evaluate.py", "fun_name": "append_step", "commit_message": "[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). 
(#28369)", "code": "def append_step(self, obs, action, next_obs, reward, terminated, truncated, info):\n \n if self._outfile:\n if self._save_info:\n self._current_rollout.append(\n [obs, action, next_obs, reward, terminated, truncated, info]\n )\n else:\n self._current_rollout.append(\n [obs, action, next_obs, reward, terminated, truncated]\n )\n self._total_steps += 1\n\n\n@eval_app.command()", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@eval_app.command()", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 175, "n_words": 35, "vocab_size": 22, "complexity": 3, "nloc": 11, "token_counts": 79, "n_ast_nodes": 120, "n_identifiers": 16, "random_cut": "def append_step(self, obs, action, next_obs, reward, terminated, truncated, info):\n \n if self._outfile:\n if self._save_info:\n self._current_rollout.append(\n [obs, action, next_obs, reward, terminated, truncated, info]\n )\n else:\n self._current_rollout.append(\n [obs, action,", "d_id": 31241, "documentation": { "docstring": "Add a step to the current rollout, if we are saving them", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 202956, "commit_id": "f5233dce309543c826224be9dfa9c9f4f855f73c", "repo": "django", "path": "tests/prefetch_related/tests.py", "file_name": "tests.py", "fun_name": "test_nested_prefetch_is_not_overwritten_by_related_object", "commit_message": "Fixed #32511 -- Corrected handling prefetched nested reverse relationships.\n\nWhen prefetching a set of child objects related to a set of parent\nobjects, we usually want to populate the relationship back from the\nchild to the parent to avoid a query when accessing that relationship\nattribute. However, there's an edge case where the child queryset\nitself specifies a prefetch back to the parent. 
In that case, we want\nto use the prefetched relationship rather than populating the reverse\nrelationship from the parent.", "code": "def test_nested_prefetch_is_not_overwritten_by_related_object(self):\n \n queryset = House.objects.only('name').prefetch_related(\n Prefetch('rooms', queryset=Room.objects.prefetch_related(\n Prefetch('house', queryset=House.objects.only('address')),\n )),\n )\n with self.assertNumQueries(3):\n house = queryset.first()\n\n self.assertIs(Room.house.is_cached(self.room), True)\n with self.assertNumQueries(0):\n house.rooms.first().house.address\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 122, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 11, "token_counts": 102, "n_ast_nodes": 175, "n_identifiers": 17, "random_cut": "def test_nested_prefetch_is_not_overwritten_by_related_object(self):\n \n queryset = House.objects.only('name').prefetch_related(\n Prefetch('rooms', queryset=Room.objects.prefetch_related(\n Prefetch('house', queryset=House.objects.only('address')),\n )),\n )\n with self.assertNumQueries(3):\n house = queryset.first()\n\n self.assertIs(", "d_id": 50188, "documentation": { "docstring": "\n The prefetched relationship is used rather than populating the reverse\n relationship from the parent, when prefetching a set of child objects\n related to a set of parent objects and the child queryset itself\n specifies a prefetch back to the parent.\n ", "n_words": 40, "vocab_size": 29, "n_whitespaces": 76, "language": "en" } }, { "id": 200382, "commit_id": "24f1e7730119fe958cc8e28411f790c9a5ec04eb", "repo": "sympy", "path": "sympy/combinatorics/permutations.py", "file_name": "permutations.py", "fun_name": "apply", "commit_message": "Fix various typos\n\nFound via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`", "code": "def apply(self, i):\n r\n i = _sympify(i)\n if i.is_integer is False:\n raise NotImplementedError(\"{} should be an integer.\".format(i))\n\n n = self.size\n if (i < 0) == True or (i >= n) == True:\n raise NotImplementedError(\n \"{} should be an integer between 0 and {}\".format(i, n-1))\n\n if i.is_Integer:\n return Integer(self._array_form[i])\n return AppliedPermutation(self, i)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 147, "n_words": 51, "vocab_size": 41, "complexity": 5, "nloc": 46, "token_counts": 90, "n_ast_nodes": 144, "n_identifiers": 13, "random_cut": "def apply(self, i):\n r\n i = _sympify(i)\n if i.is_integer is False:\n raise NotImplementedError(\"{} should be an integer.\".format(i))\n\n n = self.size\n if (i < 0) == True or (i >= n) == True:\n raise NotImplementedError(\n \"{} should be an integer between 0 and {}\".format(i, n-1))\n\n if i.is_Integer:\n return Integer(self._array_form[i])\n retur", "d_id": 49612, "documentation": { "docstring": "Apply the permutation to an expression.\n\n Parameters\n ==========\n\n i : Expr\n It should be an integer between $0$ and $n-1$ where $n$\n is the size of the permutation.\n\n If it is a symbol or a symbolic expression that can\n have integer values, an ``AppliedPermutation`` object\n will be returned which can represent an 
unevaluated\n function.\n\n Notes\n =====\n\n Any permutation can be defined as a bijective function\n $\\sigma : \\{ 0, 1, \\dots, n-1 \\} \\rightarrow \\{ 0, 1, \\dots, n-1 \\}$\n where $n$ denotes the size of the permutation.\n\n The definition may even be extended for any set with distinctive\n elements, such that the permutation can even be applied for\n real numbers or such, however, it is not implemented for now for\n computational reasons and the integrity with the group theory\n module.\n\n This function is similar to the ``__call__`` magic, however,\n ``__call__`` magic already has some other applications like\n permuting an array or attaching new cycles, which would\n not always be mathematically consistent.\n\n This also guarantees that the return type is a SymPy integer,\n which guarantees the safety to use assumptions.\n ", "n_words": 180, "vocab_size": 116, "n_whitespaces": 386, "language": "en" } }, { "id": 109611, "commit_id": "4a5d09cba5f4a20e14553cebd8f70c1f34d20d35", "repo": "matplotlib", "path": "lib/matplotlib/collections.py", "file_name": "collections.py", "fun_name": "_convert_mesh_to_triangles", "commit_message": "Deprecate draw_gouraud_triangle (#23824)\n\n* Deprecate draw_gouraud_triangle\r\n\r\n* DOC: minor rewording\r\n\r\nCo-authored-by: Elliott Sales de Andrade \r\n\r\nCo-authored-by: Thomas A Caswell \r\nCo-authored-by: Elliott Sales de Andrade ", "code": "def _convert_mesh_to_triangles(self, coordinates):\n \n if isinstance(coordinates, np.ma.MaskedArray):\n p = coordinates.data\n else:\n p = coordinates\n\n p_a = p[:-1, :-1]\n p_b = p[:-1, 1:]\n p_c = p[1:, 1:]\n p_d = p[1:, :-1]\n p_center = (p_a + p_b + p_c + p_d) / 4.0\n triangles = np.concatenate([\n p_a, p_b, p_center,\n p_b, p_c, p_center,\n p_c, p_d, p_center,\n p_d, p_a, p_center,\n ], axis=2).reshape((-1, 3, 2))\n\n c = self.get_facecolor().reshape((*coordinates.shape[:2], 4))\n c_a = c[:-1, :-1]\n c_b = c[:-1, 1:]\n c_c = c[1:, 1:]\n c_d = c[1:, :-1]\n c_center = (c_a + c_b + c_c + c_d) / 4.0\n colors = np.concatenate([\n c_a, c_b, c_center,\n c_b, c_c, c_center,\n c_c, c_d, c_center,\n c_d, c_a, c_center,\n ], axis=2).reshape((-1, 3, 4))\n\n return triangles, colors\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 355, "n_words": 112, "vocab_size": 56, "complexity": 2, "nloc": 29, "token_counts": 273, "n_ast_nodes": 390, "n_identifiers": 27, "random_cut": "def _convert_mesh_to_triangles(self, coordinates):\n \n if isinstance(coordinates, np.ma.MaskedArray):\n p = coordinates.data\n else:\n p = coordinates\n\n p_a = p[:-1, :-1]\n p_b = p[:-1, 1:]\n p_c = p[1:, 1:]\n p_d = p[1:, :-1]\n p_center = (p_a + p_", "d_id": 23670, "documentation": { "docstring": "\n Convert a given mesh into a sequence of triangles, each point\n with its own color. 
The result can be used to construct a call to\n `~.RendererBase.draw_gouraud_triangles`.\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 56, "language": "en" } }, { "id": 163270, "commit_id": "d603d43df2057ecdf74010d9dadc735e37f8f7b5", "repo": "pandas", "path": "pandas/core/series.py", "file_name": "series.py", "fun_name": "count", "commit_message": "TYP: Ignore numpy related issues (#45244)", "code": "def count(self, level=None):\n \n if level is None:\n return notna(self._values).sum().astype(\"int64\")\n else:\n warnings.warn(\n \"Using the level keyword in DataFrame and Series aggregations is \"\n \"deprecated and will be removed in a future version. Use groupby \"\n \"instead. ser.count(level=1) should use ser.groupby(level=1).count().\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n if not isinstance(self.index, MultiIndex):\n raise ValueError(\"Series.count level is only valid with a MultiIndex\")\n\n index = self.index\n assert isinstance(index, MultiIndex) # for mypy\n\n if isinstance(level, str):\n level = index._get_level_number(level)\n\n lev = index.levels[level]\n level_codes = np.array(index.codes[level], subok=False, copy=True)\n\n mask = level_codes == -1\n if mask.any():\n level_codes[mask] = cnt = len(lev)\n lev = lev.insert(cnt, lev._na_value)\n\n obs = level_codes[notna(self._values)]\n # Argument \"minlength\" to \"bincount\" has incompatible type \"Optional[int]\";\n # expected \"SupportsIndex\" [arg-type]\n out = np.bincount(obs, minlength=len(lev) or None) # type: ignore[arg-type]\n return self._constructor(out, index=lev, dtype=\"int64\").__finalize__(\n self, method=\"count\"\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 419, "n_words": 126, "vocab_size": 98, "complexity": 6, "nloc": 28, "token_counts": 211, "n_ast_nodes": 343, "n_identifiers": 40, "random_cut": "def count(self, level=None):\n \n if level is None:\n return notna(self._values).sum().astype(\"int64\")\n else:\n warnings.warn(\n \"Using the level keyword in DataFrame and Series aggregations is \"\n \"deprecated and will be removed in a future version. Use groupby \"\n \"instead. 
ser.count(level=1) should use ser.groupby(level=1).count().\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n if not isinstance(self.index, MultiIndex):\n raise ValueError(\"Series.count level is only valid with a MultiIndex\")\n\n index = self.index\n assert isinstance(index, MultiIndex) # for mypy\n\n if isinstance(level, str):\n level = index._get_level_number(level)\n\n lev = index.levels[level]\n level_codes = np.array(index.codes[level], subok=False, copy=True)\n\n mask = level_codes == -1\n if mask.any():\n level_codes[mask] = cnt = len(lev)\n lev = lev.insert(cnt, lev._na_value)\n\n obs = level_codes[notna(self._values)]", "d_id": 39415, "documentation": { "docstring": "\n Return number of non-NA/null observations in the Series.\n\n Parameters\n ----------\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a smaller Series.\n\n Returns\n -------\n int or Series (if level specified)\n Number of non-null values in the Series.\n\n See Also\n --------\n DataFrame.count : Count non-NA cells for each column or row.\n\n Examples\n --------\n >>> s = pd.Series([0.0, 1.0, np.nan])\n >>> s.count()\n 2\n ", "n_words": 74, "vocab_size": 58, "n_whitespaces": 220, "language": "en" } }, { "id": 219678, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "__format__", "commit_message": "add python 3.10.4 for windows", "code": "def __format__(self, specifier, context=None, _localeconv=None):\n \n\n # Note: PEP 3101 says that if the type is not present then\n # there should be at least one digit after the decimal point.\n # We take the liberty of ignoring this requirement for\n # Decimal---it's presumably there to make sure that\n # format(float, '') behaves similarly to str(float).\n if context is None:\n context = getcontext()\n\n spec = _parse_format_specifier(specifier, _localeconv=_localeconv)\n\n # special values don't care about the type or precision\n if self._is_special:\n sign = _format_sign(self._sign, spec)\n body = str(self.copy_abs())\n if spec['type'] == '%':\n body += '%'\n return _format_align(sign, body, spec)\n\n # a type of None defaults to 'g' or 'G', depending on context\n if spec['type'] is None:\n spec['type'] = ['g', 'G'][context.capitals]\n\n # if type is '%', adjust exponent of self accordingly\n if spec['type'] == '%':\n self = _dec_from_triple(self._sign, self._int, self._exp+2)\n\n # round if necessary, taking rounding mode from the context\n rounding = context.rounding\n precision = spec['precision']\n if precision is not None:\n if spec['type'] in 'eE':\n self = self._round(precision+1, rounding)\n elif spec['type'] in 'fF%':\n self = self._rescale(-precision, rounding)\n elif spec['type'] in 'gG' and len(self._int) > precision:\n self = self._round(precision, rounding)\n # special case: zeros with a positive exponent can't be\n # represented in fixed point; rescale them to 0e0.\n if not self and self._exp > 0 and spec['type'] in 'fF%':\n self = self._rescale(0, rounding)\n\n # figure out placement of the decimal point\n leftdigits = self._exp + len(self._int)\n if spec['type'] in 'eE':\n if not self and precision is not None:\n dotplace = 1 - precision\n else:\n dotplace = 1\n elif spec['type'] in 'fF%':\n dotplace = leftdigits\n elif spec['type'] in 'gG':\n if self._exp <= 0 and leftdigits > -6:\n dotplace = leftdigits\n else:\n dotplace = 1\n\n # find digits before 
and after decimal point, and get exponent\n if dotplace < 0:\n intpart = '0'\n fracpart = '0'*(-dotplace) + self._int\n elif dotplace > len(self._int):\n intpart = self._int + '0'*(dotplace-len(self._int))\n fracpart = ''\n else:\n intpart = self._int[:dotplace] or '0'\n fracpart = self._int[dotplace:]\n exp = leftdigits-dotplace\n\n # done with the decimal-specific stuff; hand over the rest\n # of the formatting to the _format_number function\n return _format_number(self._sign, intpart, fracpart, exp, spec)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 951, "n_words": 350, "vocab_size": 171, "complexity": 24, "nloc": 49, "token_counts": 411, "n_ast_nodes": 704, "n_identifiers": 31, "random_cut": "def __format__(self, specifier, context=None, _localeconv=None):\n \n\n # Note: PEP 3101 says that if the type is not present then\n # there should be at least one digit after the decimal point.\n # We take the liberty of ignoring this requirement for\n # Decimal---it's presumably there to make sure that\n # format(float, '') behaves similarly to str(float).\n if context is None:\n context = getcontext()\n\n spec = _parse_format_specifier(specifier, _localeconv=_localeconv)\n\n # special values don't care about the type or precision\n if self._is_special:\n sign = _format_sign(self._sign, spec)\n body = str(self.copy_abs())\n if spec['type'] == '%':\n body += '%'\n return _format_align(sign, body, spec)\n\n # a type of None defaults to 'g' or 'G', depending on context\n if spec['type'] is None:\n spec['type'] = ['g', 'G'][context.capitals]\n\n # if type is '%', adjust exponent of self accordingly\n if spec['type'] == '%':\n self = _dec_from_triple(self._sign, self._int, self._exp+2)\n\n # round if necessary, taking rounding mode from the context\n rounding = context.rounding\n precision = spec['precision']\n if precision is not None:\n if spec['type'] in 'eE':\n self = self._round(precision+1, rounding)\n elif spec['type'] in 'fF%':\n self = self._rescale(-precision, rounding)\n elif spec['type'] in 'gG' and len(self._int) > precision:\n self = self._round(precision, rounding)\n # special case: zeros with a positive exponent can't be\n # represented in fixed point; rescale them to 0e0.\n if not self and self._exp > 0 and spec['type'] in 'fF%':\n self = self._rescale(0, rounding)\n\n # figure out placement of the decimal point\n leftdigits = self._exp + len(self._int)\n if spec['type'] in 'eE':\n if not self and precision is not None:\n dotplace = 1 - precision\n else:\n dotplace = 1\n elif spec['type'] in 'fF%':\n dotplace = leftdigits\n elif spec['type'] in 'gG':\n if self._exp <= 0 and leftdigits > -6:\n dotplace = leftdigits\n else:\n dotplace =", "d_id": 55704, "documentation": { "docstring": "Format a Decimal instance according to the given specifier.\n\n The specifier should be a standard format specifier, with the\n form described in PEP 3101. Formatting types 'e', 'E', 'f',\n 'F', 'g', 'G', 'n' and '%' are supported. 
If the formatting\n type is omitted it defaults to 'g' or 'G', depending on the\n value of context.capitals.\n ", "n_words": 55, "vocab_size": 49, "n_whitespaces": 99, "language": "en" } }, { "id": 258694, "commit_id": "9f85c9d44965b764f40169ef2917e5f7a798684f", "repo": "scikit-learn", "path": "sklearn/tests/test_base.py", "file_name": "test_base.py", "fun_name": "test_feature_names_in", "commit_message": "TST Better info when checking for no warnings in tests (#22362)", "code": "def test_feature_names_in():\n \n pd = pytest.importorskip(\"pandas\")\n iris = datasets.load_iris()\n X_np = iris.data\n df = pd.DataFrame(X_np, columns=iris.feature_names)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 30, "n_words": 15, "vocab_size": 12, "complexity": 4, "nloc": 47, "token_counts": 339, "n_ast_nodes": 68, "n_identifiers": 13, "random_cut": "def test_feature_names_in():\n \n pd = pytest.importorskip(\"pandas\")\n iris = datasets.load_iris()\n X_np = iris.data\n df = pd.DataFrame(X_np, columns=iris.feature_names)\n", "d_id": 75368, "documentation": { "docstring": "Check that feature_name_in are recorded by `_validate_data`", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 241658, "commit_id": "e15579a4f32ee3c08318a466583f4a0a8517d654", "repo": "lightning", "path": "pytorch_lightning/trainer/connectors/accelerator_connector.py", "file_name": "accelerator_connector.py", "fun_name": "check_interactive_compatibility", "commit_message": "Rename `_distrib_type` to `_strategy_type` (#11328)\n\nCo-authored-by: Jirka Borovec ", "code": "def check_interactive_compatibility(self):\n \n from pytorch_lightning.utilities import _IS_INTERACTIVE\n\n if _IS_INTERACTIVE and self._strategy_type is not None and not self._strategy_type.is_interactive_compatible():\n raise MisconfigurationException(\n f\"`Trainer(strategy={self._strategy_type.value!r})` or\"\n f\" `Trainer(accelerator={self._strategy_type.value!r})` is not compatible with an interactive\"\n \" environment. Run your code as a script, or choose one of the compatible backends:\"\n f\" {', '.join(_StrategyType.interactive_compatible_types())}.\"\n \" In case you are spawning processes yourself, make sure to include the Trainer\"\n \" creation inside the worker function.\"\n )\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 200, "n_words": 67, "vocab_size": 56, "complexity": 4, "nloc": 11, "token_counts": 44, "n_ast_nodes": 121, "n_identifiers": 12, "random_cut": "def check_interactive_compatibility(self):\n \n from pytorch_lightning.utilities import _IS_INTERACTIVE\n\n if _IS_INTERACTIVE and self._strategy_type is not None and not self._strategy_type.is_interactive_compatible():\n raise MisconfigurationException(\n f\"`Trainer(strategy={self._strategy_type.value!r})` or\"\n f\" `Trainer(accelerator={self._strategy_type.value!r})` is not compatible with an interactive\"\n \" environment. 
Run your code as a script, or choose one of the compatible backends:\"\n f\" {', '.join(_StrategyType.interactive_compatible_types())}.\"\n \" In case you are spaw", "d_id": 69642, "documentation": { "docstring": "Raises a `MisconfigurationException` if the accelerator and/or plugin is not compatible with an\n interactive environment.", "n_words": 15, "vocab_size": 15, "n_whitespaces": 21, "language": "en" } }, { "id": 212929, "commit_id": "dfad2e3b7671b7128895c8a0e29fff38d7efe6e9", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "theme_global", "commit_message": "Better error checking/reporting in theme_global. NEW THEME DarkGrey15", "code": "def theme_global(new_theme=None):\n \n if new_theme is not None:\n if new_theme not in theme_list():\n popup_error_with_traceback('Cannot use custom themes with theme_global call',\n 'Your request to use theme {} cannot be performed.'.format(new_theme),\n 'The PySimpleGUI Global User Settings are meant for PySimpleGUI standard items, not user config items',\n 'You can use any of the many built-in themes instead or use your own UserSettings file to store your custom theme')\n return pysimplegui_user_settings.get('-theme-', CURRENT_LOOK_AND_FEEL)\n pysimplegui_user_settings.set('-theme-', new_theme)\n theme(new_theme)\n return new_theme\n else:\n return pysimplegui_user_settings.get('-theme-', CURRENT_LOOK_AND_FEEL)\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 256, "n_words": 76, "vocab_size": 59, "complexity": 3, "nloc": 13, "token_counts": 71, "n_ast_nodes": 125, "n_identifiers": 10, "random_cut": "def theme_global(new_theme=None):\n \n if new_theme is not None:\n if new_theme not in theme_list():\n popup_error_with_traceback('Cannot use custom themes with theme_global call',\n 'Your request to use theme {} cannot be performed.'.format(new_theme),\n 'The PySimpleGUI Global User Se", "d_id": 53522, "documentation": { "docstring": "\n Sets / Gets the global PySimpleGUI Theme. If none is specified then returns the global theme from user settings.\n Note the theme must be a standard, built-in PySimpleGUI theme... 
not a user-created theme.\n\n :param new_theme: the new theme name to use\n :type new_theme: (str)\n :return: the currently selected theme\n :rtype: (str)\n ", "n_words": 51, "vocab_size": 39, "n_whitespaces": 94, "language": "en" } }, { "id": 226116, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/chart-studio/chart_studio/plotly/chunked_requests/chunked_request.py", "file_name": "chunked_request.py", "fun_name": "close", "commit_message": "switch to black .22", "code": "def close(self):\n \n self._reset_retries()\n self._closed = True\n\n # Chunked-encoded posts are terminated with '0\\r\\n\\r\\n'\n # For some reason, either Python or node.js seems to\n # require an extra \\r\\n.\n try:\n self._conn.send(\"\\r\\n0\\r\\n\\r\\n\".encode(\"utf-8\"))\n except http_client.socket.error:\n # In case the socket has already been closed\n return \"\"\n\n return self._getresponse()\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 141, "n_words": 45, "vocab_size": 41, "complexity": 2, "nloc": 8, "token_counts": 46, "n_ast_nodes": 95, "n_identifiers": 11, "random_cut": "def close(self):", "d_id": 57796, "documentation": { "docstring": "Close the connection to server.\n\n If available, return a http_client.HTTPResponse object.\n\n Closing the connection involves sending the\n Transfer-Encoding terminating bytes.\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 48, "language": "en" } }, { "id": 216555, "commit_id": "5bf2904e7ac79d438ce03a673aa9a5c99f4e8e0f", "repo": "salt", "path": "salt/modules/runit.py", "file_name": "runit.py", "fun_name": "_get_svc_path", "commit_message": "fix runit module failing on non-symlinked service", "code": "def _get_svc_path(name=\"*\", status=None):\n \n\n # This is the core routine to work with services, called by many\n # other functions of this module.\n #\n # The name of a service is the \"apparent\" folder's name that contains its\n # \"run\" script. 
If its \"folder\" is a symlink, the service is an \"alias\" of\n # the targeted service.\n\n if not SERVICE_DIR:\n raise CommandExecutionError(\"Could not find service directory.\")\n\n # path list of enabled services as /AVAIL_SVR_DIRS/$service,\n # taking care of any service aliases (do not use os.path.realpath()).\n ena = set()\n for el in glob.glob(os.path.join(SERVICE_DIR, name)):\n if _is_svc(el):\n if os.path.islink(el):\n ena.add(os.readlink(el))\n else:\n ena.add(el)\n log.trace(\"found enabled service path: %s\", el)\n\n if status == \"ENABLED\":\n return sorted(ena)\n\n # path list of available services as /AVAIL_SVR_DIRS/$service\n ava = set()\n for d in AVAIL_SVR_DIRS:\n for el in glob.glob(os.path.join(d, name)):\n if _is_svc(el):\n ava.add(el)\n log.trace(\"found available service path: %s\", el)\n\n if status == \"DISABLED\":\n # service available but not enabled\n ret = ava.difference(ena)\n else:\n # default: return available services\n ret = ava.union(ena)\n\n return sorted(ret)\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 381, "n_words": 164, "vocab_size": 95, "complexity": 10, "nloc": 24, "token_counts": 175, "n_ast_nodes": 304, "n_identifiers": 25, "random_cut": "def _get_svc_path(name=\"*\", status=None):\n \n\n # This is the core routine to work with services, called by many\n # other functions of this module.\n #\n # The name of a service is the \"apparent\" folder's name that contains its\n # \"run\" script. If its \"folder\" is a symlink, the service is an \"alias\" of\n # the targeted service.\n\n if not SERVICE_DIR:\n raise CommandExecutionError(\"Could not find service directory.\")\n\n # path list of enabled services as /AVAIL_SVR_DIRS/$service,\n # taking care of any service aliases (do not use os.path.realpath()).\n", "d_id": 54636, "documentation": { "docstring": "\n Return a list of paths to services with ``name`` that have the specified ``status``\n\n name\n a glob for service name. 
default is '*'\n\n status\n None : all services (no filter, default choice)\n 'DISABLED' : available service(s) that is not enabled\n 'ENABLED' : enabled service (whether started on boot or not)\n ", "n_words": 50, "vocab_size": 41, "n_whitespaces": 98, "language": "en" } }, { "id": 241761, "commit_id": "d2d284fd6e3e8f53e9a44ab233771850af1e4dab", "repo": "lightning", "path": "tests/checkpointing/test_torch_saving.py", "file_name": "test_torch_saving.py", "fun_name": "test_model_torch_save_ddp_cpu", "commit_message": "Update `tests/checkpointing/*.py` to use `devices` instead of `gpus` or `ipus` (#11408)\n\nCo-authored-by: Carlos Mocholí ", "code": "def test_model_torch_save_ddp_cpu(tmpdir):\n \n model = BoringModel()\n num_epochs = 1\n trainer = Trainer(\n default_root_dir=tmpdir, max_epochs=num_epochs, strategy=\"ddp_spawn\", accelerator=\"cpu\", devices=2, logger=False\n )\n temp_path = os.path.join(tmpdir, \"temp.pt\")\n trainer.fit(model)\n\n # Ensure these do not fail\n torch.save(trainer.model, temp_path)\n torch.save(trainer, temp_path)\n\n\n@RunIf(min_gpus=2)", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@RunIf(min_gpus=2)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 70, "n_words": 34, "vocab_size": 30, "complexity": 1, "nloc": 10, "token_counts": 78, "n_ast_nodes": 139, "n_identifiers": 22, "random_cut": "def test_model_torch_save_ddp_cpu(tmpdir):\n \n model = BoringModel()\n num_epochs = 1\n trainer = Trainer(\n default_root_dir=tmpdir, max_epochs=num_epochs, strategy=\"ddp_spawn\", accelerator=\"cpu\", devices=2, logger=False\n )\n temp_path = os.path.jo", "d_id": 69688, "documentation": { "docstring": "Test to ensure torch save does not fail for model and trainer using cpu ddp.", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 36020, "commit_id": "50dd314d939a86f3a81e19af01459f449fbaeeca", "repo": "transformers", "path": "src/transformers/onnx/config.py", "file_name": "config.py", "fun_name": "default_batch_size", "commit_message": "Add ONNX export for ViT (#15658)\n\n* Add ONNX support for ViT\r\n\r\n* Refactor to use generic preprocessor\r\n\r\n* Add vision dep to tests\r\n\r\n* Extend ONNX slow tests to ViT\r\n\r\n* Add dummy image generator\r\n\r\n* Use model_type to determine modality\r\n\r\n* Add deprecation warnings for tokenizer argument\r\n\r\n* Add warning when overwriting the preprocessor\r\n\r\n* Add optional args to docstrings\r\n\r\n* Add minimum PyTorch version to OnnxConfig\r\n\r\n* Refactor OnnxConfig class variables from CONSTANT_NAME to snake_case\r\n\r\n* Add reasonable value for default atol\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def default_batch_size(self) -> int:\n \n # Using 2 avoid ONNX making assumption about single sample batch\n return OnnxConfig.default_fixed_batch\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 38, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 8, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 5, "random_cut": "def default_batch_size(self) -> int:\n \n # Using 2 avoid ONNX making assumption about single sample batch\n return On", "d_id": 6562, "documentation": { "docstring": "\n The default batch size to use if no other indication\n\n Returns:\n Integer > 0\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 47, "language": "en" } }, { "id": 178942, 
"commit_id": "51ca460bd8c382cc165cbb1325e7cb65895d1a0b", "repo": "Nuitka", "path": "nuitka/utils/Signing.py", "file_name": "Signing.py", "fun_name": "addMacOSCodeSignature", "commit_message": "macOS: Add support for specifying signing identity and access to protected resources.", "code": "def addMacOSCodeSignature(filenames):\n \n\n # Weak signing.\n identity = getMacOSSigningIdentity()\n\n command = [\n \"codesign\",\n \"-s\",\n identity,\n \"--force\",\n \"--deep\",\n \"--preserve-metadata=entitlements\",\n ]\n\n assert type(filenames) is not str\n command.extend(filenames)\n\n with withMadeWritableFileMode(filenames):\n executeToolChecked(\n logger=postprocessing_logger,\n command=command,\n absence_message=macos_codesign_usage,\n stderr_filter=_filterSigntoolErrorOutput,\n )\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 156, "n_words": 32, "vocab_size": 31, "complexity": 1, "nloc": 19, "token_counts": 66, "n_ast_nodes": 112, "n_identifiers": 16, "random_cut": "def addMacOSCodeSignature(filenames):\n \n\n # Weak signing.\n identity = getMacOSSigningIdentity()\n\n command = [\n \"codesign\",\n \"-s\",\n identity,\n \"--force\",\n \"--deep\",\n \"--preserve-metadata=entitlements\",\n ]\n\n assert type(filenames) is not str\n command.extend(filenam", "d_id": 42870, "documentation": { "docstring": "Remove the code signature from a filename.\n\n Args:\n filenames - The files to be signed.\n\n Returns:\n None\n\n Notes:\n This is macOS specific.\n ", "n_words": 22, "vocab_size": 22, "n_whitespaces": 55, "language": "en" } }, { "id": 48680, "commit_id": "56946fac8f29aa44ce84391f138d63c4c8a2a285", "repo": "django-rest-framework", "path": "rest_framework/views.py", "file_name": "views.py", "fun_name": "exception_handler", "commit_message": "Preserve exception messages for wrapped Django exceptions (#8051)\n\n* Preserve messages for wrapped Django exceptions\r\n\r\n* Fix the test\r\n\r\n* Update test_generics.py\r\n\r\n* Update test_generics.py\r\n\r\nCo-authored-by: Tom Christie ", "code": "def exception_handler(exc, context):\n \n if isinstance(exc, Http404):\n exc = exceptions.NotFound(*(exc.args))\n elif isinstance(exc, PermissionDenied):\n exc = exceptions.PermissionDenied(*(exc.args))\n\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n if isinstance(exc.detail, (list, dict)):\n data = exc.detail\n else:\n data = {'detail': exc.detail}\n\n set_rollback()\n return Response(data, status=exc.status_code, headers=headers)\n\n return None\n\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 178, "n_words": 56, "vocab_size": 39, "complexity": 7, "nloc": 18, "token_counts": 152, "n_ast_nodes": 246, "n_identifiers": 22, "random_cut": "def exception_handler(exc, context):\n \n if isinstance(exc, Http", "d_id": 9566, "documentation": { "docstring": "\n Returns the response that should be used for any given exception.\n\n By default we handle the REST framework `APIException`, and also\n Django's built-in `Http404` and `PermissionDenied` exceptions.\n\n Any unhandled exceptions may return `None`, which will cause a 500 error\n to be raised.\n ", "n_words": 42, "vocab_size": 39, "n_whitespaces": 61, "language": "en" } }, { 
"id": 39239, "commit_id": "96b5053fa688bec79a729f9ea238e5f916bced01", "repo": "recommenders", "path": "recommenders/models/sar/sar_singlenode.py", "file_name": "sar_singlenode.py", "fun_name": "compute_cooccurrence_matrix", "commit_message": "Remove drop_duplicates() from SAR method fix #1464 (#1588)\n\n* Remove drop_duplicates() from SAR method fix #1464\r\n\r\n* flake is complaining\r\n\r\n* Typos\r\n\r\n* Define self.unity_user_affinity inside __init__()\r\n\r\n* Remove drop_duplicates() from SAR method\r\n\r\n* Remove duplicates in testing data\r\n\r\n* Remove duplicates in test data for recommend_k_items\r\n\r\n* Allow duplicates in score data\r\n\r\nCo-authored-by: miguelgfierro \r\nCo-authored-by: Andreas Argyriou \r\nCo-authored-by: Simon Zhao <43029286+simonzhaoms@users.noreply.github.com>", "code": "def compute_cooccurrence_matrix(self, df):\n \n\n user_item_hits = sparse.coo_matrix(\n (np.repeat(1, df.shape[0]), (df[self.col_user_id], df[self.col_item_id])),\n shape=(self.n_users, self.n_items),\n ).tocsr()\n\n item_cooccurrence = user_item_hits.transpose().dot(user_item_hits)\n item_cooccurrence = item_cooccurrence.multiply(\n item_cooccurrence >= self.threshold\n )\n\n return item_cooccurrence.astype(df[self.col_rating].dtype)\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 107, "n_words": 25, "vocab_size": 21, "complexity": 1, "nloc": 10, "token_counts": 101, "n_ast_nodes": 153, "n_identifiers": 22, "random_cut": "def compute_cooccurrence_matrix(self, df):\n \n\n u", "d_id": 7156, "documentation": { "docstring": "Co-occurrence matrix.\n\n The co-occurrence matrix is defined as :math:`C = U^T * U`\n\n where U is the user_affinity matrix with 1's as values (instead of ratings).\n\n Args:\n df (pandas.DataFrame): DataFrame of users and items\n\n Returns:\n numpy.ndarray: Co-occurrence matrix\n ", "n_words": 38, "vocab_size": 32, "n_whitespaces": 95, "language": "en" } }, { "id": 20609, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/helpers.py", "file_name": "helpers.py", "fun_name": "_makeTags", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _makeTags(tagStr, xml, suppress_LT=Suppress(\"<\"), suppress_GT=Suppress(\">\")):\n \n if isinstance(tagStr, str_type):\n resname = tagStr\n tagStr = Keyword(tagStr, caseless=not xml)\n else:\n resname = tagStr.name\n\n tagAttrName = Word(alphas, alphanums + \"_-:\")\n if xml:\n tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)\n openTag = (\n suppress_LT\n + tagStr(\"tag\")\n + Dict(ZeroOrMore(Group(tagAttrName + Suppress(\"=\") + tagAttrValue)))\n + Opt(\"/\", default=[False])(\"empty\").set_parse_action(\n lambda s, l, t: t[0] == \"/\"\n )\n + suppress_GT\n )\n else:\n tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(\n printables, 
exclude_chars=\">\"\n )\n openTag = (\n suppress_LT\n + tagStr(\"tag\")\n + Dict(\n ZeroOrMore(\n Group(\n tagAttrName.set_parse_action(lambda t: t[0].lower())\n + Opt(Suppress(\"=\") + tagAttrValue)\n )\n )\n )\n + Opt(\"/\", default=[False])(\"empty\").set_parse_action(\n lambda s, l, t: t[0] == \"/\"\n )\n + suppress_GT\n )\n closeTag = Combine(Literal(\"\", adjacent=False)\n\n openTag.set_name(\"<%s>\" % resname)\n # add start results name in parse action now that ungrouped names are not reported at two levels\n openTag.add_parse_action(\n lambda t: t.__setitem__(\n \"start\" + \"\".join(resname.replace(\":\", \" \").title().split()), t.copy()\n )\n )\n closeTag = closeTag(\n \"end\" + \"\".join(resname.replace(\":\", \" \").title().split())\n ).set_name(\"\" % resname)\n openTag.tag = resname\n closeTag.tag = resname\n openTag.tag_body = SkipTo(closeTag())\n return openTag, closeTag\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 26, "n_whitespaces": 615, "n_words": 164, "vocab_size": 96, "complexity": 3, "nloc": 53, "token_counts": 365, "n_ast_nodes": 627, "n_identifiers": 48, "random_cut": "def _makeTags(tagStr, xml, suppress_LT=Suppress(\"<\"), suppress_GT=Suppress(\">\")):\n \n if isinstance(tagStr, str_type):\n resname = tagStr\n tagStr = Keyword(tagStr, caseless=not xml)\n else:\n resname = tagStr.name\n\n tagAttrName = Word(alphas, alphanums + \"_-:\")\n if xml:\n tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)\n openTag = (\n suppress_LT\n + tagStr(\"tag\")\n + Dict(ZeroOrMore(Group(tagAttrName + Suppress(\"=\") + tagAttrValue)))\n + Opt(\"/\", default=[False])(\"empty\").set_parse_action(\n lambda s, l, t: t[0] == \"/\"\n )\n + suppress_GT\n )\n else:\n tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(\n printables, exclude_chars=\">\"\n )\n openTag = (\n suppress_LT\n + tagStr(\"tag\")\n + Dict(\n ZeroOrMore(\n Group(\n tagAttrName.set_parse_action(lambda t: t[0].lower())\n + Opt(Suppress(\"=\") + tagAttrValue)\n )\n )\n )\n + Opt(\"/\", default=[False])(\"empty\").set_parse_action(\n lambda s, l, t: t[0] == \"/\"\n )\n + suppress_GT\n )\n closeTag = Combine(Literal(\"\", adjacent=False)\n\n openTag.set_name(\"<%s>\" % resname)\n # add start results name in parse action now that ungrouped names are not reported at two levels\n openTag.add_parse_action(\n lambda t: t.__setitem__(\n \"start\" + \"\".join(resname.replace(\":\", \" \").title().split()), t.copy()\n )\n ", "d_id": 3452, "documentation": { "docstring": "Internal helper to construct opening and closing tag expressions, given a tag name", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 270135, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/datasets/cifar100.py", "file_name": "cifar100.py", "fun_name": "load_data", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def load_data(label_mode=\"fine\"):\n \n if label_mode not in [\"fine\", \"coarse\"]:\n raise ValueError(\n '`label_mode` must be one of `\"fine\"`, `\"coarse\"`. 
'\n f\"Received: label_mode={label_mode}.\"\n )\n\n dirname = \"cifar-100-python\"\n origin = \"https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz\"\n path = get_file(\n dirname,\n origin=origin,\n untar=True,\n file_hash=\"85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7\",\n )\n\n fpath = os.path.join(path, \"train\")\n x_train, y_train = load_batch(fpath, label_key=label_mode + \"_labels\")\n\n fpath = os.path.join(path, \"test\")\n x_test, y_test = load_batch(fpath, label_key=label_mode + \"_labels\")\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n if backend.image_data_format() == \"channels_last\":\n x_train = x_train.transpose(0, 2, 3, 1)\n x_test = x_test.transpose(0, 2, 3, 1)\n\n return (x_train, y_train), (x_test, y_test)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 208, "n_words": 88, "vocab_size": 64, "complexity": 3, "nloc": 24, "token_counts": 185, "n_ast_nodes": 303, "n_identifiers": 24, "random_cut": "def load_data(label_mode=\"fine\"):\n \n if label_mode not in [\"fine\", \"coarse\"]:\n raise ValueError(\n '`label_mode` must be one of `\"fine\"`, `\"coarse\"`. '\n f\"Received: label_mode=", "d_id": 80398, "documentation": { "docstring": "Loads the CIFAR100 dataset.\n\n This is a dataset of 50,000 32x32 color training images and\n 10,000 test images, labeled over 100 fine-grained classes that are\n grouped into 20 coarse-grained classes. See more info at the\n [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).\n\n Args:\n label_mode: one of \"fine\", \"coarse\". If it is \"fine\" the category labels\n are the fine-grained labels, if it is \"coarse\" the output labels are the\n coarse-grained superclasses.\n\n Returns:\n Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.\n\n **x_train**: uint8 NumPy array of grayscale image data with shapes\n `(50000, 32, 32, 3)`, containing the training data. Pixel values range\n from 0 to 255.\n\n **y_train**: uint8 NumPy array of labels (integers in range 0-99)\n with shape `(50000, 1)` for the training data.\n\n **x_test**: uint8 NumPy array of grayscale image data with shapes\n `(10000, 32, 32, 3)`, containing the test data. 
Pixel values range\n from 0 to 255.\n\n **y_test**: uint8 NumPy array of labels (integers in range 0-99)\n with shape `(10000, 1)` for the test data.\n\n Example:\n\n ```python\n (x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()\n assert x_train.shape == (50000, 32, 32, 3)\n assert x_test.shape == (10000, 32, 32, 3)\n assert y_train.shape == (50000, 1)\n assert y_test.shape == (10000, 1)\n ```\n ", "n_words": 193, "vocab_size": 106, "n_whitespaces": 304, "language": "en" } }, { "id": 46789, "commit_id": "4ffd4f09532fceb67675fce4c1f5cd383eff992e", "repo": "airflow", "path": "dev/breeze/src/airflow_breeze/utils/path_utils.py", "file_name": "path_utils.py", "fun_name": "find_airflow_sources_root", "commit_message": "Prepare Breeze2 for prime time :) (#22713)\n\nThis is a review and clean-up for all the parameters and\r\ncommands for Breeze2 in order to prepare it for being\r\nused by the contribugors.\r\n\r\nThere are various small fixes here and there, removal\r\nof duplicated code, refactoring and moving code around\r\nas well as cleanup and review all the parameters used\r\nfor all implemented commands.\r\n\r\nThe parameters, default values and their behaviours were\r\nupdated to match \"new\" life of Breeze rather than old\r\none.\r\n\r\nSome improvements are made to the autocomplete and\r\nclick help messages printed. Full list of choices is\r\nalways displayed, parameters are groups according to\r\ntheir target audience, and they were sorted according\r\nto importance and frequency of use.\r\n\r\nVarious messages have been colourised according to their\r\nmeaning - warnings as yellow, errors as red and\r\ninformational messages as bright_blue.\r\n\r\nThe `dry-run` option has been added to just show what\r\nwould have been run without actually running some\r\npotentially \"write\" commands (read commands are still\r\nexecuted) so that you can easily verify and manually\r\ncopy and execute the commands with option to modify\r\nthem before. The `dry_run` and `verbose` options are\r\nnow used for all commands.\r\n\r\nThe \"main\" command now runs \"shell\" by default similarly\r\nas the original Breeze.\r\n\r\nAll \"shortcut\" parameters have been standardized - i.e\r\ncommon options (verbose/dry run/help) have one and all\r\ncommon flags that are likely to be used often have an\r\nassigned shortcute.\r\n\r\nThe \"stop\" and \"cleanup\" command have been added\r\nas they are necessary for average user to complete the\r\nregular usage cycle.\r\n\r\nDocumentation for all the important methods have been\r\nupdated.", "code": "def find_airflow_sources_root() -> Path:\n \n default_airflow_sources_root = Path.cwd()\n # Try to find airflow sources in current working dir\n airflow_sources_root = search_upwards_for_airflow_sources_root(Path.cwd())\n if not airflow_sources_root:\n # Or if it fails, find it in parents of the directory where the ./breeze.py is.\n airflow_sources_root = search_upwards_for_airflow_sources_root(Path(__file__).resolve().parent)\n if airflow_sources_root:\n os.chdir(airflow_sources_root)\n return Path(airflow_sources_root)\n else:\n console.print(\n f\"\\n[bright_yellow]Could not find Airflow sources location. 
\"\n f\"Assuming {default_airflow_sources_root}\"\n )\n os.chdir(default_airflow_sources_root)\n return Path(default_airflow_sources_root)\n\n\nAIRFLOW_SOURCES_ROOT = find_airflow_sources_root()\n\nBUILD_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.build'\nFILES_DIR = AIRFLOW_SOURCES_ROOT / 'files'\nMSSQL_DATA_VOLUME = AIRFLOW_SOURCES_ROOT / 'tmp_mssql_volume'\nMYPY_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.mypy_cache'\nLOGS_DIR = AIRFLOW_SOURCES_ROOT / 'logs'\nDIST_DIR = AIRFLOW_SOURCES_ROOT / 'dist'\nSCRIPTS_CI_DIR = AIRFLOW_SOURCES_ROOT / 'scripts' / 'ci'\nDOCKER_CONTEXT_DIR = AIRFLOW_SOURCES_ROOT / 'docker-context-files'\nCACHE_TMP_FILE_DIR = tempfile.TemporaryDirectory()\nOUTPUT_LOG = Path(CACHE_TMP_FILE_DIR.name, 'out.log')\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 194, "n_words": 114, "vocab_size": 71, "complexity": 3, "nloc": 26, "token_counts": 79, "n_ast_nodes": 267, "n_identifiers": 27, "random_cut": "def find_airflow_sources_root() -> Path:\n \n default_airflow_sources_root = Path.cwd()\n # Try to find airflow sources in current working dir\n airflow_sources_root = search_upwards_for_airflow_sources_root(Path.cwd())\n if not airflow_sources_root:\n # Or if it fails, find it in parents of the directory where the ./breeze.py is.\n airflow_sources_root = search_upwards_for_airflow_sources_root(Path(__file__).resolve().parent)\n if airflow_sources_root:\n os.chdir(airflow_sources_root)\n return Path(airflow_sources_root)\n else:\n console.print(\n f\"\\n[bright_yellow]Could not find Airflow sources location. \"\n f\"Assuming {default_airflow_sources_root}\"\n )\n os.chdir(default_airflow_sources_root)\n return Path(default_airflow_sources_root)\n\n\nAIRFLOW_SOURCES_ROOT = find_airflow_sources_root()\n\nBUILD_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.build'\nFILES_DIR = AIRFLOW", "d_id": 8993, "documentation": { "docstring": "\n Find the root of airflow sources. 
When Breeze is run from sources, it is easy, but this one also\n has to handle the case when Breeze is installed via `pipx` so it searches upwards of the current\n directory to find the right root of airflow directory.\n\n If not found, current directory is returned (this handles the case when Breeze is run from the local\n directory.\n\n :return: Path for the found sources.\n\n ", "n_words": 71, "vocab_size": 45, "n_whitespaces": 93, "language": "en" } }, { "id": 321097, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/network/pac.py", "file_name": "pac.py", "fun_name": "_parse_proxy_entry", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def _parse_proxy_entry(proxy_str):\n \n config = [c.strip() for c in proxy_str.split(' ') if c]\n if not config:\n raise ParseProxyError(\"Empty proxy entry\")\n\n if config[0] == \"DIRECT\":\n if len(config) != 1:\n raise ParseProxyError(\"Invalid number of parameters for \" +\n \"DIRECT\")\n return QNetworkProxy(QNetworkProxy.ProxyType.NoProxy)\n elif config[0] == \"PROXY\":\n if len(config) != 2:\n raise ParseProxyError(\"Invalid number of parameters for PROXY\")\n host, port = PACResolver._parse_proxy_host(config[1])\n return QNetworkProxy(QNetworkProxy.ProxyType.HttpProxy, host, port)\n elif config[0] in [\"SOCKS\", \"SOCKS5\"]:\n if len(config) != 2:\n raise ParseProxyError(\"Invalid number of parameters for SOCKS\")\n host, port = PACResolver._parse_proxy_host(config[1])\n return QNetworkProxy(QNetworkProxy.ProxyType.Socks5Proxy, host, port)\n else:\n err = \"Unknown proxy type: {}\"\n raise ParseProxyError(err.format(config[0]))\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 346, "n_words": 94, "vocab_size": 52, "complexity": 10, "nloc": 22, "token_counts": 183, "n_ast_nodes": 307, "n_identifiers": 19, "random_cut": "def _parse_proxy_entry(proxy_str):\n \n config = [c.strip() for c in proxy_str.split(' ') if c]\n if not config:\n raise ParseProxyError(\"Empty proxy entry\")\n\n if config[0] == \"DIRECT\":\n if len(config) != 1:\n raise ParseProxyError(\"Invalid number of parameters for \" +\n \"DIRECT\")\n return QNetworkProxy(QNetworkProxy.ProxyType.NoProxy)\n elif config[0] == \"PROXY\":\n if len(config) != 2:\n raise ParseProxyError(\"Invalid number of parameters for PROXY\")\n host, port = PACResolver._parse_proxy_host(config[1])\n return QNetworkProxy(QNetworkProxy.ProxyType.HttpProxy, host, port)\n elif config[0] in [\"SOCKS\", \"SOCKS5\"]:\n if len(config) != 2:\n raise ParseProxyError(\"Invalid number of parameters for SOCKS\")\n host, port = PACResol", "d_id": 117527, "documentation": { "docstring": "Parse one proxy string entry, as described in PAC specification.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 160133, "commit_id": "729ad4f92420231e2a7009b3223c6c7620b8b808", "repo": "numpy", "path": "numpy/f2py/tests/test_f2py2e.py", "file_name": "test_f2py2e.py", "fun_name": "test_gen_pyf", "commit_message": "TST: Initialize f2py2e tests of the F2PY CLI (#20668)\n\nIncreases F2PY coverage by around 15 percent. 
For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.\r\n\r\nMore importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.", "code": "def test_gen_pyf(capfd, hello_world_f90, monkeypatch):\n \n ipath = Path(hello_world_f90)\n opath = Path(hello_world_f90).stem + \".pyf\"\n monkeypatch.setattr(sys, \"argv\", f'f2py -h {opath} {ipath}'.split())\n\n with util.switchdir(ipath.parent):\n f2pycli() # Generate wrappers\n out, _ = capfd.readouterr()\n assert \"Saving signatures to file\" in out\n assert Path(f'{opath}').exists()\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 81, "n_words": 37, "vocab_size": 34, "complexity": 1, "nloc": 9, "token_counts": 77, "n_ast_nodes": 147, "n_identifiers": 19, "random_cut": "def test_gen_pyf(capfd, hello_world_f90, monkeypatch):\n \n ipath = Path(hello_world_f90)\n opath = Path(hello_world_f90).stem + \".pyf\"\n monkeypatch.setattr(sys, ", "d_id": 38505, "documentation": { "docstring": "Ensures that a signature file is generated via the CLI\n CLI :: -h\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 60671, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/configuration.py", "file_name": "configuration.py", "fun_name": "_dictionary", "commit_message": "upd; format", "code": "def _dictionary(self):\n # type: () -> Dict[str, Any]\n \n # NOTE: Dictionaries are not populated if not loaded. So, conditionals\n # are not needed here.\n retval = {}\n\n for variant in OVERRIDE_ORDER:\n retval.update(self._config[variant])\n\n return retval\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 100, "n_words": 34, "vocab_size": 28, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "def _dictionary(self):\n # type: () -> Dict[str, Any]\n \n # NOTE: Dictionaries are not populated if not loaded. 
So, conditionals\n # are not needed here.\n retval = {}\n\n for variant in OVERRIDE_ORDER:", "d_id": 12237, "documentation": { "docstring": "A dictionary representing the loaded configuration.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 13, "language": "en" } }, { "id": 205770, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/query.py", "file_name": "query.py", "fun_name": "using", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def using(self, alias):\n \n return RawQuerySet(\n self.raw_query,\n model=self.model,\n query=self.query.chain(using=alias),\n params=self.params,\n translations=self.translations,\n using=alias,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 99, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 9, "token_counts": 51, "n_ast_nodes": 75, "n_identifiers": 10, "random_cut": "def using(self, alias):\n \n return RawQuerySet(\n self.raw_query,\n model=self.model,\n query=self.query.chain(using=alias),\n params=self.params,\n translations=self.translations,\n using=alias,\n )\n", "d_id": 51204, "documentation": { "docstring": "Select the database this RawQuerySet should execute against.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 275492, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/optimizer_v2.py", "file_name": "optimizer_v2.py", "fun_name": "get_weights", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_weights(self):\n \n params = self.weights\n return backend.batch_get_value(params)\n\n # TODO(tanzheny): Maybe share this logic with base_layer.", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 39, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 33, "n_identifiers": 6, "random_cut": "def get_weights(self):\n \n params = self.we", "d_id": 81394, "documentation": { "docstring": "Returns the current weights of the optimizer.\n\n The weights of an optimizer are its state (ie, variables).\n This function returns the weight values associated with this\n optimizer as a list of Numpy arrays. The first value is always the\n iterations count of the optimizer, followed by the optimizer's state\n variables in the order they were created. 
The returned list can in turn\n be used to load state into similarly parameterized optimizers.\n\n For example, the RMSprop optimizer for this simple model returns a list of\n three values-- the iteration count, followed by the root-mean-square value\n of the kernel and bias of the single Dense layer:\n\n >>> opt = tf.keras.optimizers.RMSprop()\n >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])\n >>> m.compile(opt, loss='mse')\n >>> data = np.arange(100).reshape(5, 20)\n >>> labels = np.zeros(5)\n >>> results = m.fit(data, labels) # Training.\n >>> len(opt.get_weights())\n 3\n\n Returns:\n Weights values as a list of numpy arrays.\n ", "n_words": 143, "vocab_size": 94, "n_whitespaces": 288, "language": "en" } }, { "id": 244030, "commit_id": "cac356380d505bf15587f07c0529218cc36b9652", "repo": "mmdetection", "path": "mmdet/core/bbox/match_costs/match_cost.py", "file_name": "match_cost.py", "fun_name": "_focal_loss_cost", "commit_message": "[Feature] Add Maskformer to mmdet (#7212)\n\n* first commit\r\n\r\n* add README\r\n\r\n* move model description from config to readme\r\n\r\nadd description for binary_input\r\n\r\nadd description for dice loss\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nremove compatibility of pretrain in maskformer\r\n\r\n* update comments in maskformer_head\r\n\r\n* update docs format", "code": "def _focal_loss_cost(self, cls_pred, gt_labels):\n \n cls_pred = cls_pred.sigmoid()\n neg_cost = -(1 - cls_pred + self.eps).log() * (\n 1 - self.alpha) * cls_pred.pow(self.gamma)\n pos_cost = -(cls_pred + self.eps).log() * self.alpha * (\n 1 - cls_pred).pow(self.gamma)\n\n cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]\n return cls_cost * self.weight\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 108, "n_words": 44, "vocab_size": 27, "complexity": 1, "nloc": 8, "token_counts": 102, "n_ast_nodes": 161, "n_identifiers": 14, "random_cut": "def _focal_loss_cost(self, cls_pred, gt_labels):\n \n cls_pred = cls_pred.sigmoid()\n neg_cost = -(1 - cls_pred + self.eps).log() * (\n 1 - self.alpha) * cls_pred.pow(self.gamma)\n pos_cost = -(cls_pred + sel", "d_id": 70202, "documentation": { "docstring": "\n Args:\n cls_pred (Tensor): Predicted classification logits, shape\n (num_query, num_class).\n gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n\n Returns:\n torch.Tensor: cls_cost value with weight\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 92, "language": "en" } }, { "id": 204887, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/operations.py", "file_name": "operations.py", "fun_name": "date_extract_sql", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def date_extract_sql(self, lookup_type, field_name):\n \n raise NotImplementedError(\n \"subclasses of BaseDatabaseOperations may require a date_extract_sql() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 5, "random_cut": "def date_extract_sql(self, lookup_type, field_name):\n \n raise NotImplementedError(\n \"subclasses of BaseDatabaseOperations may require a date_extract_sq", "d_id": 50958, 
"documentation": { "docstring": "\n Given a lookup_type of 'year', 'month', or 'day', return the SQL that\n extracts a value from the given date field field_name.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 43, "language": "en" } }, { "id": 248557, "commit_id": "a164a46038b0e51142781619db0e6dec8e0c2aaa", "repo": "synapse", "path": "tests/rest/client/test_rooms.py", "file_name": "test_rooms.py", "fun_name": "test_threepid_invite_spamcheck", "commit_message": "Uniformize spam-checker API, part 4: port other spam-checker callbacks to return `Union[Allow, Codes]`. (#12857)\n\nCo-authored-by: Brendan Abolivier ", "code": "def test_threepid_invite_spamcheck(self) -> None:\n \n # Mock a few functions to prevent the test from failing due to failing to talk to\n # a remote IS. We keep the mock for make_and_store_3pid_invite around so we\n # can check its call_count later on during the test.\n make_invite_mock = Mock(return_value=make_awaitable(0))\n self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock\n self.hs.get_identity_handler().lookup_3pid = Mock(\n return_value=make_awaitable(None),\n )\n\n # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it\n # allow everything for now.\n # `spec` argument is needed for this function mock to have `__qualname__`, which\n # is needed for `Measure` metrics buried in SpamChecker.\n mock = Mock(\n return_value=make_awaitable(synapse.module_api.NOT_SPAM),\n spec=lambda *x: None,\n )\n self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock)\n\n # Send a 3PID invite into the room and check that it succeeded.\n email_to_invite = \"teresa@example.com\"\n channel = self.make_request(\n method=\"POST\",\n path=\"/rooms/\" + self.room_id + \"/invite\",\n content={\n \"id_server\": \"example.com\",\n \"id_access_token\": \"sometoken\",\n \"medium\": \"email\",\n \"address\": email_to_invite,\n },\n access_token=self.tok,\n )\n self.assertEqual(channel.code, 200)\n\n # Check that the callback was called with the right params.\n mock.assert_called_with(self.user_id, \"email\", email_to_invite, self.room_id)\n\n # Check that the call to send the invite was made.\n make_invite_mock.assert_called_once()\n\n # Now change the return value of the callback to deny any invite and test that\n # we can't send the invite.\n mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN)\n channel = self.make_request(\n method=\"POST\",\n path=\"/rooms/\" + self.room_id + \"/invite\",\n content={\n \"id_server\": \"example.com\",\n \"id_access_token\": \"sometoken\",\n \"medium\": \"email\",\n \"address\": email_to_invite,\n },\n access_token=self.tok,\n )\n self.assertEqual(channel.code, 403)\n\n # Also check that it stopped before calling _make_and_store_3pid_invite.\n make_invite_mock.assert_called_once()\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 714, "n_words": 227, "vocab_size": 131, "complexity": 1, "nloc": 44, "token_counts": 243, "n_ast_nodes": 424, "n_identifiers": 36, "random_cut": "def test_threepid_invite_spamcheck(self) -> None:\n \n # Mock a few functions to prevent the test from failing due to failing to talk to\n # a remote IS. 
We keep the mock for make_and_store_3pid_invite around so we\n # can check its call_count later on during the test.\n make_invite_mock = Mock(return_value=make_awaitable(0))\n self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock\n self.hs.get_identity_handler().lookup_3pid = Mock(\n return_value=make_awaitable(None),\n )\n\n # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it\n # allow everything for now.\n # `spec` argument is needed for this function mock to have `__qualname__`, which\n # is needed for `Measure` metrics buried in SpamChecker.\n mock = Mock(\n return_value=make_awaitable(synapse.module_api.NOT_SPAM),\n spec=lambda *x: None,\n )\n self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock)\n\n # Send a 3PID invite into the room and check that it succeeded.\n email_to_invite = \"teresa@example.com\"\n channel = self.make_request(\n method=\"POST\",\n path=\"/rooms/\" + self.room_id + \"/invite\",\n content={\n \"id_server\": \"example.com\",\n \"id_access_token\": \"sometoken\",\n \"medium\": \"email\",\n \"address\":", "d_id": 72349, "documentation": { "docstring": "\n Test allowing/blocking threepid invites with a spam-check module.\n\n In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal[\"NOT_SPAM\"]]`.", "n_words": 24, "vocab_size": 23, "n_whitespaces": 38, "language": "en" } }, { "id": 260155, "commit_id": "02cbe01e67165d7d38e5e441cfccd6b57b2207b6", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_param_validation.py", "file_name": "test_param_validation.py", "fun_name": "test_generate_invalid_param_val_all_valid", "commit_message": "FIX Param validation: fix generating invalid param when 2 interval constraints (#23513)\n\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_generate_invalid_param_val_all_valid(constraints):\n \n with pytest.raises(NotImplementedError):\n generate_invalid_param_val(constraints[0], constraints=constraints)\n\n\n@pytest.mark.parametrize(\n \"constraint\",\n [\n _ArrayLikes,\n _Callables,\n _InstancesOf,\n _NoneConstraint,\n _RandomStates,\n _SparseMatrices,\n ],\n)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"constraint\",\n [\n _ArrayLikes,\n _Callables,\n _InstancesOf,\n _NoneConstraint,\n _RandomStates,\n _SparseMatrices,\n ],\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 79, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 78, "n_identifiers": 14, "random_cut": "def test_generate_invalid_param_val_all_valid(constraints):\n \n with pytest.raises(NotImplementedError):\n generate_invalid_param_val(constraints[0], constraints=constraints)\n\n\n@pytest.mark.parametrize(\n \"constraint\",\n [\n _ArrayLikes,\n _Callables,\n _InstancesOf,\n _NoneConstraint,\n _RandomStates,\n ", "d_id": 76095, "documentation": { "docstring": "Check that the function raises NotImplementedError when there's no invalid value\n for the constraint.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 20, "language": "en" } }, { "id": 149953, "commit_id": "16b4a5b71ff140f5de31e5d5572f1f193457cf6b", "repo": "freqtrade", "path": "freqtrade/freqai/data_drawer.py", "file_name": "data_drawer.py", "fun_name": "load_drawer_from_disk", "commit_message": "rehaul of backend data management - increasing performance by holding history in memory, reducing load on the 
ratelimit by only pinging exchange once per candle. Improve code readability.", "code": "def load_drawer_from_disk(self):\n \n exists = Path(self.full_path / str('pair_dictionary.json')).resolve().exists()\n if exists:\n with open(self.full_path / str('pair_dictionary.json'), \"r\") as fp:\n self.pair_dict = json.load(fp)\n elif not self.follow_mode:\n logger.info(\"Could not find existing datadrawer, starting from scratch\")\n else:\n logger.warning(f'Follower could not find pair_dictionary at {self.full_path} '\n 'sending null values back to strategy')\n\n return exists\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 163, "n_words": 47, "vocab_size": 41, "complexity": 3, "nloc": 11, "token_counts": 81, "n_ast_nodes": 156, "n_identifiers": 16, "random_cut": "def load_drawer_from_disk(self):\n \n exists = Path(se", "d_id": 34607, "documentation": { "docstring": "\n Locate and load a previously saved data drawer full of all pair model metadata in\n present model folder.\n :returns:\n exists: bool = whether or not the drawer was located\n ", "n_words": 29, "vocab_size": 27, "n_whitespaces": 65, "language": "en" } }, { "id": 218510, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "address_exclude", "commit_message": "add python 3.10.4 for windows", "code": "def address_exclude(self, other):\n \n if not self._version == other._version:\n raise TypeError(\"%s and %s are not of the same version\" % (\n self, other))\n\n if not isinstance(other, _BaseNetwork):\n raise TypeError(\"%s is not a network object\" % other)\n\n if not other.subnet_of(self):\n raise ValueError('%s not contained in %s' % (other, self))\n if other == self:\n return\n\n # Make sure we're comparing the network of other.\n other = other.__class__('%s/%s' % (other.network_address,\n other.prefixlen))\n\n s1, s2 = self.subnets()\n while s1 != other and s2 != other:\n if other.subnet_of(s1):\n yield s2\n s1, s2 = s1.subnets()\n elif other.subnet_of(s2):\n yield s1\n s1, s2 = s2.subnets()\n else:\n # If we got here, there's a bug somewhere.\n raise AssertionError('Error performing exclusion: '\n 's1: %s s2: %s other: %s' %\n (s1, s2, other))\n if s1 == other:\n yield s2\n elif s2 == other:\n yield s1\n else:\n # If we got here, there's a bug somewhere.\n raise AssertionError('Error performing exclusion: '\n 's1: %s s2: %s other: %s' %\n (s1, s2, other))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 658, "n_words": 157, "vocab_size": 77, "complexity": 11, "nloc": 32, "token_counts": 191, "n_ast_nodes": 324, "n_identifiers": 16, "random_cut": "def address_exclude(self, other):\n \n if not self._version == other._version:\n raise TypeError(\"%s and %s are not of the same version\" % (\n self, other))\n\n if not isinstance(other, _BaseNetwork):\n", "d_id": 55356, "documentation": { "docstring": "Remove an address from a larger block.\n\n For example:\n\n addr1 = ip_network('192.0.2.0/28')\n addr2 = ip_network('192.0.2.1/32')\n list(addr1.address_exclude(addr2)) =\n [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),\n IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]\n\n or IPv6:\n\n addr1 = ip_network('2001:db8::1/32')\n addr2 = ip_network('2001:db8::1/128')\n list(addr1.address_exclude(addr2)) =\n 
[ip_network('2001:db8::1/128'),\n ip_network('2001:db8::2/127'),\n ip_network('2001:db8::4/126'),\n ip_network('2001:db8::8/125'),\n ...\n ip_network('2001:db8:8000::/33')]\n\n Args:\n other: An IPv4Network or IPv6Network object of the same type.\n\n Returns:\n An iterator of the IPv(4|6)Network objects which is self\n minus other.\n\n Raises:\n TypeError: If self and other are of differing address\n versions, or if other is not a network object.\n ValueError: If other is not completely contained by self.\n\n ", "n_words": 88, "vocab_size": 65, "n_whitespaces": 390, "language": "en" } }, { "id": 39440, "commit_id": "1d7341e93d1f03387699fb3c6ae0b6c0e464296f", "repo": "recommenders", "path": "recommenders/utils/python_utils.py", "file_name": "python_utils.py", "fun_name": "mutual_information", "commit_message": "Add new item similarity metrics for SAR (#1754)\n\n* Add mutual information similarity in SAR\r\n\r\n* Add lexicographers mutual information similarity for SAR\r\n\r\n* Add cosine similarity for SAR\r\n\r\n* Add inclusion index for SAR\r\n\r\n* Typos\r\n\r\n* Change SARSingleNode to SAR\r\n\r\n* Convert item similarity matrix to np.array\r\n\r\n* Update\r\n\r\n* Update SAR tests\r\n\r\n* Remove unused imports\r\n\r\n* Add explanations for new similarity metrics", "code": "def mutual_information(cooccurrence):\n \n\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n result = np.log2(cooccurrence.shape[0] * lift(cooccurrence))\n\n return np.array(result)\n\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 28, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 45, "n_ast_nodes": 79, "n_identifiers": 11, "random_cut": "def mutual_information(cooccurrence):\n \n\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n result = np.log2(cooccurrence.shape[0] * lift(cooccurrence))\n\n return np.array(result)\n\n", "d_id": 7232, "documentation": { "docstring": "Helper method to calculate the Mutual Information of a matrix of\n co-occurrences.\n\n Mutual information is a measurement of the amount of information\n explained by the i-th j-th item column vector.\n\n Args:\n cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items.\n\n Returns:\n numpy.ndarray: The matrix of mutual information between any two items.\n\n ", "n_words": 51, "vocab_size": 35, "n_whitespaces": 83, "language": "en" } }, { "id": 20908, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/typing_extensions.py", "file_name": "typing_extensions.py", "fun_name": "_is_dunder", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _is_dunder(name):\n \n return len(name) > 4 and name.startswith('__') and name.endswith('__')\n\n # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality\n # checks, argument expansion etc. 
are done on the _subs_tre. As a result we\n # can't provide a get_type_hints function that strips out annotations.\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 72, "n_words": 49, "vocab_size": 44, "complexity": 3, "nloc": 2, "token_counts": 27, "n_ast_nodes": 53, "n_identifiers": 5, "random_cut": "def _is_dunder(name):\n \n return len(name) > 4 and name.startswith('__') and name.endswith('__", "d_id": 3612, "documentation": { "docstring": "Returns True if name is a __dunder_variable_name__.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 259851, "commit_id": "dedaa8f25f136e954941d15151bbbc88150789fc", "repo": "scikit-learn", "path": "sklearn/neighbors/_kde.py", "file_name": "_kde.py", "fun_name": "fit", "commit_message": "FEA Added Kernel Density bandwidth estimation and test (#22993)\n\nCo-authored-by: STOJANOVIC Jovan \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def fit(self, X, y=None, sample_weight=None):\n \n\n algorithm = self._choose_algorithm(self.algorithm, self.metric)\n\n if isinstance(self.bandwidth, str):\n methods_supported = (\"scott\", \"silvermann\")\n if self.bandwidth not in methods_supported:\n raise ValueError(\n \"When `bandwidth` is a string, it should be one of: \"\n f\"{', '.join(methods_supported)}. Got {self.bandwidth!r} instead.\"\n )\n if self.bandwidth == \"scott\":\n self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))\n elif self.bandwidth == \"silvermann\":\n self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (\n -1 / (X.shape[1] + 4)\n )\n else:\n check_scalar(\n self.bandwidth,\n \"bandwidth\",\n target_type=numbers.Real,\n min_val=0,\n include_boundaries=\"neither\",\n )\n self.bandwidth_ = self.bandwidth\n if self.kernel not in VALID_KERNELS:\n raise ValueError(\"invalid kernel: '{0}'\".format(self.kernel))\n\n X = self._validate_data(X, order=\"C\", dtype=DTYPE)\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(\n sample_weight, X, DTYPE, only_non_negative=True\n )\n\n kwargs = self.metric_params\n if kwargs is None:\n kwargs = {}\n self.tree_ = TREE_DICT[algorithm](\n X,\n metric=self.metric,\n leaf_size=self.leaf_size,\n sample_weight=sample_weight,\n **kwargs,\n )\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 615, "n_words": 133, "vocab_size": 89, "complexity": 8, "nloc": 42, "token_counts": 278, "n_ast_nodes": 454, "n_identifiers": 36, "random_cut": "def fit(self, X, y=None, sample_weight=None):\n \n\n algorithm = self._choose_algorithm(self.algorithm, self.metric)\n\n if isinstance(self.bandwidth, str):\n methods_supported = (\"scott\", \"silvermann\")\n if self.bandwidth not in methods_supported:\n raise ValueError(\n \"When `bandwidth` is a string, it should be one of: \"\n f\"{', '.join(methods_supported)}. 
Got {self.bandwidth!r} instead.\"\n )\n if self.bandwidth == \"scott\":\n self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))\n elif self.bandwidth == \"silvermann\":\n self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (\n -1 / (X.shape[1] + 4)\n )\n else:\n check_scalar(\n self.bandwidth,\n \"bandwidth\",\n target_type=numbers.Real,\n min_val=0,\n include_boundaries=\"neither\",\n )\n self.bandwidth_ = self.bandwidth\n if self.kernel not in VALID_KERNELS:\n raise ValueError(\"invalid kernel: '{0}'\".format(self.kernel))\n\n X = self._validate_data(X, order=\"C\", dtype=DTYP", "d_id": 75949, "documentation": { "docstring": "Fit the Kernel Density model on the data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : None\n Ignored. This parameter exists only for compatibility with\n :class:`~sklearn.pipeline.Pipeline`.\n\n sample_weight : array-like of shape (n_samples,), default=None\n List of sample weights attached to the data X.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n self : object\n Returns the instance itself.\n ", "n_words": 70, "vocab_size": 54, "n_whitespaces": 211, "language": "en" } }, { "id": 243767, "commit_id": "2ae55ccbdad9c842929fb238ea1eb81d1f999024", "repo": "Pillow", "path": "src/PIL/ImageMorph.py", "file_name": "ImageMorph.py", "fun_name": "get_on_pixels", "commit_message": "Improve exception traceback readability", "code": "def get_on_pixels(self, image):\n \n\n if image.mode != \"L\":\n msg = \"Image mode must be L\"\n raise ValueError(msg)\n return _imagingmorph.get_on_pixels(image.im.id)\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 61, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 5, "token_counts": 34, "n_ast_nodes": 60, "n_identifiers": 9, "random_cut": "def get_on_pixels(self, image):\n ", "d_id": 70117, "documentation": { "docstring": "Get a list of all turned on pixels in a binary image\n\n Returns a list of tuples of (x,y) coordinates\n of all matching pixels. 
See :ref:`coordinate-system`.", "n_words": 26, "vocab_size": 19, "n_whitespaces": 39, "language": "en" } }, { "id": 197273, "commit_id": "e95d725680aab772037848628471a31f03a13901", "repo": "sympy", "path": "sympy/parsing/ast_parser.py", "file_name": "ast_parser.py", "fun_name": "visit_Num", "commit_message": "Inserted the `visit_Num` function back in.\n\nThis was required to keep SymPy compatible with Python 3.7.", "code": "def visit_Num(self, node):\n \n if isinstance(node.n, int):\n return fix_missing_locations(Call(func=Name('Integer', Load()),\n args=[node], keywords=[]))\n elif isinstance(node.n, float):\n return fix_missing_locations(Call(func=Name('Float', Load()),\n args=[node], keywords=[]))\n return node\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 109, "n_words": 21, "vocab_size": 15, "complexity": 3, "nloc": 8, "token_counts": 86, "n_ast_nodes": 136, "n_identifiers": 14, "random_cut": "def visit_Num(self, node):\n \n if isinstance(node.n, int):\n return fix_missing_locations(Call(func=Name('Integer', Load(", "d_id": 48420, "documentation": { "docstring": "This function exists for backwards compatibility with Python 3.7.\n It should be removed when SymPy removes support for Python 3.7.", "n_words": 20, "vocab_size": 17, "n_whitespaces": 29, "language": "en" } }, { "id": 221318, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/cgitb.py", "file_name": "cgitb.py", "fun_name": "enable", "commit_message": "add python 3.10.4 for windows", "code": "def enable(display=1, logdir=None, context=5, format=\"html\"):\n \n sys.excepthook = Hook(display=display, logdir=logdir,\n context=context, format=format)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 42, "n_ast_nodes": 64, "n_identifiers": 8, "random_cut": "def enable(display=1, logdir=None, context=5, format=\"html\"):\n \n sys.excepthook = Hook(display=display, logdir=logdir,\n context=context, format=format", "d_id": 56343, "documentation": { "docstring": "Install an exception handler that formats tracebacks as HTML.\n\n The optional argument 'display' can be set to 0 to suppress sending the\n traceback to the browser, and 'logdir' can be set to a directory to cause\n tracebacks to be written to files there.", "n_words": 43, "vocab_size": 31, "n_whitespaces": 51, "language": "en" } }, { "id": 63660, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "parse_list_header", "commit_message": "upd; format", "code": "def parse_list_header(value):\n \n result = []\n for item in _parse_list_header(value):\n if item[:1] == item[-1:] == '\"':\n item = unquote_header_value(item[1:-1])\n result.append(item)\n return result\n\n\n# From mitsuhiko/werkzeug (used with permission).", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 63, "n_words": 27, "vocab_size": 23, "complexity": 3, "nloc": 7, "token_counts": 54, "n_ast_nodes": 91, "n_identifiers": 7, "random_cut": "def parse_list_header(value):\n \n result = []\n for item in", "d_id": 13455, "documentation": { "docstring": 
"Parse lists as described by RFC 2068 Section 2.\n\n In particular, parse comma-separated lists where the elements of\n the list may include quoted-strings. A quoted-string could\n contain a comma. A non-quoted string could have quotes in the\n middle. Quotes are removed automatically after parsing.\n\n It basically works like :func:`parse_set_header` just that items\n may appear multiple times and case sensitivity is preserved.\n\n The return value is a standard :class:`list`:\n\n >>> parse_list_header('token, \"quoted value\"')\n ['token', 'quoted value']\n\n To create a header from the :class:`list` again, use the\n :func:`dump_header` function.\n\n :param value: a string with a list header.\n :return: :class:`list`\n :rtype: list\n ", "n_words": 99, "vocab_size": 82, "n_whitespaces": 147, "language": "en" } }, { "id": 306845, "commit_id": "5276d849ec497ccd0cecf3cb6a8dacae4fa6f845", "repo": "core", "path": "homeassistant/components/apple_tv/media_player.py", "file_name": "media_player.py", "fun_name": "media_series_title", "commit_message": "Improve type hints in apple_tv media player (#77940)", "code": "def media_series_title(self) -> str | None:\n \n if self._playing and self._is_feature_available(FeatureName.SeriesName):\n return self._playing.series_name\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 14, "vocab_size": 13, "complexity": 3, "nloc": 5, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 8, "random_cut": "def media_series_title(self) -> str | None:\n \n if self._playing and self._is_feature_available(FeatureName.SeriesName):\n return self", "d_id": 105628, "documentation": { "docstring": "Title of series of current playing media, TV show only.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 224209, "commit_id": "dca7cbb43fcd6ea7c677c98ba585395b070d387b", "repo": "mkdocs", "path": "mkdocs/commands/build.py", "file_name": "build.py", "fun_name": "_build_template", "commit_message": "Format code with `black -l100 --skip-string-normalization`", "code": "def _build_template(name, template, files, config, nav):\n \n\n # Run `pre_template` plugin events.\n template = config['plugins'].run_event(\n 'pre_template', template, template_name=name, config=config\n )\n\n if utils.is_error_template(name):\n # Force absolute URLs in the nav of error pages and account for the\n # possibility that the docs root might be different than the server root.\n # See https://github.com/mkdocs/mkdocs/issues/77.\n # However, if site_url is not set, assume the docs root and server root\n # are the same. 
See https://github.com/mkdocs/mkdocs/issues/1598.\n base_url = urlsplit(config['site_url'] or '/').path\n else:\n base_url = utils.get_relative_url('.', name)\n\n context = get_context(nav, files, config, base_url=base_url)\n\n # Run `template_context` plugin events.\n context = config['plugins'].run_event(\n 'template_context', context, template_name=name, config=config\n )\n\n output = template.render(context)\n\n # Run `post_template` plugin events.\n output = config['plugins'].run_event('post_template', output, template_name=name, config=config)\n\n return output\n\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 221, "n_words": 116, "vocab_size": 73, "complexity": 3, "nloc": 15, "token_counts": 134, "n_ast_nodes": 221, "n_identifiers": 18, "random_cut": "def _build_template(name, template, files, config, nav):\n \n\n # Run `pre_template` plugin events.\n template = config['plugins'].run_event(\n 'pre_template', template, template_name=name, config=config\n )\n\n if utils.is_error_template(name):\n # Force absolute URLs in the nav of error pages and account for the\n # possibility that the docs root might be different than the server root.\n # See https://github.com/mkdocs/mkdocs/issues/77.\n # However, if site_url is not set, assume the docs root and server root\n # are the same. See https://github.com/mkdocs/mkdocs/issues/1598.\n base_url = urlsplit(config['site_url'] or '/').path\n else:\n base_url = utils.get_relative_url('.', name)\n\n context = get_contex", "d_id": 57244, "documentation": { "docstring": "\n Return rendered output for given template as a string.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 198467, "commit_id": "9d58006fc0a23afcba38f641c9472917c436428a", "repo": "sympy", "path": "sympy/core/basic.py", "file_name": "basic.py", "fun_name": "matches", "commit_message": "Code cleanup", "code": "def matches(self, expr, repl_dict=None, old=False):\n \n expr = sympify(expr)\n if not isinstance(expr, self.__class__):\n return None\n\n if repl_dict is None:\n repl_dict = {}\n else:\n repl_dict = repl_dict.copy()\n\n if self == expr:\n return repl_dict\n\n if len(self.args) != len(expr.args):\n return None\n\n d = repl_dict # already a copy\n for arg, other_arg in zip(self.args, expr.args):\n if arg == other_arg:\n continue\n if arg.is_Relational:\n try:\n d = arg.xreplace(d).matches(other_arg, d, old=old)\n except TypeError: # Should be InvalidComparisonError when introduced\n d = None\n else:\n d = arg.xreplace(d).matches(other_arg, d, old=old)\n if d is None:\n return None\n return d\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 375, "n_words": 88, "vocab_size": 52, "complexity": 10, "nloc": 26, "token_counts": 164, "n_ast_nodes": 260, "n_identifiers": 18, "random_cut": "def matches(self, expr, repl_dict=None, old=False):\n \n expr = sympify(expr)\n if not isinstance(expr, self.__class__):\n return None\n\n if repl_dict is None:\n repl_dict = {}\n else:\n repl_dict = repl_dict.copy()\n\n if self == expr:\n ", "d_id": 48951, "documentation": { "docstring": "\n Helper method for match() that looks for a match between Wild symbols\n in self and expressions in expr.\n\n Examples\n ========\n\n >>> from sympy import symbols, Wild, Basic\n >>> a, b, c = symbols('a b c')\n >>> x = Wild('x')\n >>> Basic(a + x, x).matches(Basic(a + b, c)) is None\n 
True\n >>> Basic(a + x, x).matches(Basic(a + b + c, b + c))\n {x_: b + c}\n ", "n_words": 66, "vocab_size": 45, "n_whitespaces": 151, "language": "en" } }, { "id": 133393, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/torch/worker_group.py", "file_name": "worker_group.py", "fun_name": "_create_placement_group", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _create_placement_group(self, num_workers):\n \n pg = get_current_placement_group()\n if pg is None:\n bundle = {\"CPU\": self._num_cpus_per_worker, \"GPU\": int(self._use_gpu)}\n bundles = [bundle] * num_workers\n pg = ray.util.placement_group(bundles, strategy=\"SPREAD\")\n logger.debug(\"Waiting for placement group to start.\")\n ready, _ = ray.wait([pg.ready()], timeout=SGD_PLACEMENT_GROUP_TIMEOUT_S)\n if ready:\n logger.debug(\"Placement group has started.\")\n else:\n raise TimeoutError(\n \"Placement group creation timed out. Make sure \"\n \"your cluster either has enough resources or use \"\n \"an autoscaling cluster. Current resources \"\n \"available: {}, resources requested by the \"\n \"placement group: {}\".format(\n ray.available_resources(), pg.bundle_specs\n )\n )\n self._worker_placement_group = pg\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 376, "n_words": 85, "vocab_size": 67, "complexity": 3, "nloc": 21, "token_counts": 121, "n_ast_nodes": 212, "n_identifiers": 26, "random_cut": "def _create_placement_group(self, num_workers):\n \n pg = get_current_placement_group()\n if pg is None:\n bundle = {\"CPU\": self._num_cpus_per_worker, \"GPU\": int(self._use_gpu)}\n bundles = [bundle] * num_workers\n pg = ray.util.placement_group(bundles, strategy=\"SPREAD\")\n logger.debug(\"Waiting for placement group to start.\")\n ready, _ = ray.wait([pg.ready()], timeout=SGD_PLACEMENT_GROUP_TIMEOUT_S)\n if ready:\n logger.debug(\"Placement group has started.\")\n ", "d_id": 30006, "documentation": { "docstring": "Creates a placement group for the workers.\n\n If this worker is already in a placement group then a new one will\n not be created. This is primarily for when Tune is the upstream and\n will allocate resources for SGD workers.\n\n If this worker is not in a placement group, a new one will be created\n and set. 
The placement group will have a single bundle for each worker\n and use the SPREAD strategy for an even distribution.\n ", "n_words": 77, "vocab_size": 43, "n_whitespaces": 126, "language": "en" } }, { "id": 67953, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/report/warehouse_wise_item_balance_age_and_value/warehouse_wise_item_balance_age_and_value.py", "file_name": "warehouse_wise_item_balance_age_and_value.py", "fun_name": "get_warehouse_list", "commit_message": "style: format code with black", "code": "def get_warehouse_list(filters):\n\tfrom frappe.core.doctype.user_permission.user_permission import get_permitted_documents\n\n\tcondition = \"\"\n\tuser_permitted_warehouse = get_permitted_documents(\"Warehouse\")\n\tvalue = ()\n\tif user_permitted_warehouse:\n\t\tcondition = \"and name in %s\"\n\t\tvalue = set(user_permitted_warehouse)\n\telif not user_permitted_warehouse and filters.get(\"warehouse\"):\n\t\tcondition = \"and name = %s\"\n\t\tvalue = filters.get(\"warehouse\")\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tcondition=condition\n\t\t),\n\t\tvalue,\n\t\tas_dict=1,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 30, "n_words": 48, "vocab_size": 33, "complexity": 4, "nloc": 20, "token_counts": 87, "n_ast_nodes": 149, "n_identifiers": 16, "random_cut": "def get_warehouse_list(filters):\n\tfrom frappe.core.doctype.user_permission.user_permission import get_permitted_documents\n\n\tcondition = \"\"\n\tuser_permitted_warehouse = get_permitted_documents(\"Warehouse\")\n\tvalue = ()\n\tif user_permitted_warehouse:\n\t\tcondition = \"and name in %s\"\n\t\tvalue = set(", "d_id": 14679, "documentation": { "docstring": "select name\n\t\tfrom `tabWarehouse` where is_group = 0\n\t\t{condition}", "n_words": 9, "vocab_size": 9, "n_whitespaces": 6, "language": "en" } }, { "id": 45743, "commit_id": "b65e52205a7045eb08d471289b85abda587442b7", "repo": "airflow", "path": "airflow/models/mappedoperator.py", "file_name": "mappedoperator.py", "fun_name": "unmap", "commit_message": "More explicit mapped argument validation (#21933)\n\n* More explicit mapped argument validation\r\n\r\nInstead of always using MagicMock to validate mapped arguments, this\r\nimplements a more sophisticated protocol that allows an operator to\r\nimplement a 'validate_mapped_arguments' to provide custom validation\r\nlogic. If an operator just wants to use __init__ for validation,\r\nhowever, they can set a flag 'mapped_arguments_validated_by_init' to get\r\nthe behavior easily. (This does *not* use MagicMock, however, since any\r\ncustom validation logic should be able to handle those on its own).\r\n\r\nThe 'validate_mapped_arguments' flag is currently only set on\r\nPythonOperator. It can likely be used on a lot more operators down the\r\nroad.\r\n\r\n* Add flag to distinguish a validation-only init\r\n\r\nThere's just too much magic during a task's initialization that tries to\r\nadd it into the dependency graph. 
This flag is needed to work around all\r\nthat, I think.", "code": "def unmap(self) -> \"BaseOperator\":\n \n dag = self.dag\n if not dag:\n raise RuntimeError(\"Cannot unmap a task without a DAG\")\n dag._remove_task(self.task_id)\n if isinstance(self.operator_class, str):\n raise RuntimeError(\"Cannot unmap a deserialized operator\")\n return self.operator_class(**self._get_unmap_kwargs())\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 94, "n_words": 30, "vocab_size": 24, "complexity": 3, "nloc": 9, "token_counts": 57, "n_ast_nodes": 101, "n_identifiers": 10, "random_cut": "def unmap(self) -> \"BaseOperator\":\n \n dag = self.dag\n if not dag:\n ", "d_id": 8684, "documentation": { "docstring": "Get the \"normal\" Operator after applying the current mapping.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 337338, "commit_id": "5668270de74a09e5bff15891054f73ddbb1176ac", "repo": "accelerate", "path": "src/accelerate/test_utils/testing.py", "file_name": "testing.py", "fun_name": "require_tensorflow", "commit_message": "Add logging capabilities (#293)\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n- Added experiment tracking API, and support for Weights and Biases, TensorBoard, and CometML + Tests\r\n- Added `tensorflow` to a new dependency list to be used during tests\r\n- Added three new functions in `Accelerator` to interact with the API", "code": "def require_tensorflow(test_case):\n \n if not is_tensorflow_available():\n return unittest.skip(\"test requires TensorFlow\")(test_case)\n else:\n return test_case\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 5, "random_cut": "def require_tensorflow(test_case):\n \n if not is_tensorflow_available():\n return unittest.skip(", "d_id": 121034, "documentation": { "docstring": "\n Decorator marking a test that requires TensorFlow installed. 
These tests are skipped when TensorFlow isn't\n installed\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 26, "language": "en" } }, { "id": 221571, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/concurrent/futures/_base.py", "file_name": "_base.py", "fun_name": "as_completed", "commit_message": "add python 3.10.4 for windows", "code": "def as_completed(fs, timeout=None):\n \n if timeout is not None:\n end_time = timeout + time.monotonic()\n\n fs = set(fs)\n total_futures = len(fs)\n with _AcquireFutures(fs):\n finished = set(\n f for f in fs\n if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])\n pending = fs - finished\n waiter = _create_and_install_waiters(fs, _AS_COMPLETED)\n finished = list(finished)\n try:\n yield from _yield_finished_futures(finished, waiter,\n ref_collect=(fs,))\n\n while pending:\n if timeout is None:\n wait_timeout = None\n else:\n wait_timeout = end_time - time.monotonic()\n if wait_timeout < 0:\n raise TimeoutError(\n '%d (of %d) futures unfinished' % (\n len(pending), total_futures))\n\n waiter.event.wait(wait_timeout)\n\n with waiter.lock:\n finished = waiter.finished_futures\n waiter.finished_futures = []\n waiter.event.clear()\n\n # reverse to keep finishing order\n finished.reverse()\n yield from _yield_finished_futures(finished, waiter,\n ref_collect=(fs, pending))\n\n finally:\n # Remove waiter from unfinished futures\n for f in fs:\n with f._condition:\n f._waiters.remove(waiter)\n\nDoneAndNotDoneFutures = collections.namedtuple(\n 'DoneAndNotDoneFutures', 'done not_done')", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 595, "n_words": 125, "vocab_size": 81, "complexity": 9, "nloc": 36, "token_counts": 212, "n_ast_nodes": 365, "n_identifiers": 36, "random_cut": "def as_completed(fs, timeout=None):\n \n if timeout is not None:\n end_time = timeout + time.monotonic()\n\n fs = set(fs)\n total_futures = len(fs)\n with _AcquireFutures(fs):\n finished = set(\n f for f in fs\n if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])\n pending = fs - finished\n waiter = _create_and_install_waiters(fs, _AS_COMPLETED)\n finished = list(finished)\n try:\n yield from _yield_finished_futures(finished, waiter,\n ref_collect=(fs,))\n\n while pending:\n if timeout is None:\n wait_timeout = None\n else:\n wait_timeout = end_time - time.monotonic()\n if wait_timeout < 0:\n raise TimeoutError(\n '%d (of %d) futures unfinished' % (\n len(pending), total_futures))\n\n waiter.event.wait(wait_timeout)\n\n with waiter.lock:\n finished = waiter.finished_futures\n waiter.finished_futures = []\n wai", "d_id": 56433, "documentation": { "docstring": "An iterator over the given futures that yields each as it completes.\n\n Args:\n fs: The sequence of Futures (possibly created by different Executors) to\n iterate over.\n timeout: The maximum number of seconds to wait. If None, then there\n is no limit on the wait time.\n\n Returns:\n An iterator that yields the given Futures as they complete (finished or\n cancelled). 
If any given Futures are duplicated, they will be returned\n once.\n\n Raises:\n TimeoutError: If the entire result iterator could not be generated\n before the given timeout.\n ", "n_words": 85, "vocab_size": 63, "n_whitespaces": 172, "language": "en" } }, { "id": 151842, "commit_id": "7b4abd5ef50f3c6f84c6604fc1f79ff4b92c2575", "repo": "freqtrade", "path": "freqtrade/freqai/RL/BaseReinforcementLearningModel.py", "file_name": "BaseReinforcementLearningModel.py", "fun_name": "pack_env_dict", "commit_message": "use a dictionary to make code more readable", "code": "def pack_env_dict(self) -> Dict[str, Any]:\n \n env_info = {\"window_size\": self.CONV_WIDTH,\n \"reward_kwargs\": self.reward_params,\n \"config\": self.config,\n \"live\": self.live}\n if self.data_provider:\n env_info[\"fee\"] = self.data_provider._exchange \\\n .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore\n\n return env_info\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 139, "n_words": 27, "vocab_size": 25, "complexity": 2, "nloc": 12, "token_counts": 74, "n_ast_nodes": 122, "n_identifiers": 15, "random_cut": "def pack_env_dict(self) -> Dict[str, Any]:\n \n env_info = {\"window_size\": self.CONV_WIDTH,\n \"reward_kwargs\": self.reward_params,\n \"config\": self.config,\n \"live\": self.live}\n if self.data_provider:\n env_info[\"fee\"] = self.data_pr", "d_id": 35150, "documentation": { "docstring": "\n Create dictionary of environment arguments\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, { "id": 139820, "commit_id": "3815e52a61b6afe44b883d7d745fa00b599f66ca", "repo": "ray", "path": "rllib/evaluation/rollout_worker.py", "file_name": "rollout_worker.py", "fun_name": "sample", "commit_message": "[RLlib] Agents to algos: DQN w/o Apex and R2D2, DDPG/TD3, SAC, SlateQ, QMIX, PG, Bandits (#24896)", "code": "def sample(self) -> SampleBatchType:\n \n\n if self.fake_sampler and self.last_batch is not None:\n return self.last_batch\n elif self.input_reader is None:\n raise ValueError(\n \"RolloutWorker has no `input_reader` object! \"\n \"Cannot call `sample()`. 
You can try setting \"\n \"`create_env_on_driver` to True.\"\n )\n\n if log_once(\"sample_start\"):\n logger.info(\n \"Generating sample batch of size {}\".format(\n self.rollout_fragment_length\n )\n )\n\n batches = [self.input_reader.next()]\n steps_so_far = (\n batches[0].count\n if self.count_steps_by == \"env_steps\"\n else batches[0].agent_steps()\n )\n\n # In truncate_episodes mode, never pull more than 1 batch per env.\n # This avoids over-running the target batch size.\n if self.batch_mode == \"truncate_episodes\":\n max_batches = self.num_envs\n else:\n max_batches = float(\"inf\")\n\n while (\n steps_so_far < self.rollout_fragment_length and len(batches) < max_batches\n ):\n batch = self.input_reader.next()\n steps_so_far += (\n batch.count\n if self.count_steps_by == \"env_steps\"\n else batch.agent_steps()\n )\n batches.append(batch)\n batch = batches[0].concat_samples(batches) if len(batches) > 1 else batches[0]\n\n self.callbacks.on_sample_end(worker=self, samples=batch)\n\n # Always do writes prior to compression for consistency and to allow\n # for better compression inside the writer.\n self.output_writer.write(batch)\n\n # Do off-policy estimation, if needed.\n if self.reward_estimators:\n for sub_batch in batch.split_by_episode():\n for estimator in self.reward_estimators:\n estimator.process(sub_batch)\n\n if log_once(\"sample_end\"):\n logger.info(\"Completed sample batch:\\n\\n{}\\n\".format(summarize(batch)))\n\n if self.compress_observations:\n batch.compress(bulk=self.compress_observations == \"bulk\")\n\n if self.fake_sampler:\n self.last_batch = batch\n return batch\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 734, "n_words": 184, "vocab_size": 119, "complexity": 17, "nloc": 66, "token_counts": 284, "n_ast_nodes": 481, "n_identifiers": 41, "random_cut": "def sample(self) -> SampleBatchType:\n \n\n if self.fake_sampler and self.last_batch is not None:\n return self.last_batch\n elif self.input_reader is None:\n raise ValueError(\n \"RolloutWorker has no `input_reader` object! \"\n \"Cannot call `sample()`. 
You can try setting \"\n \"`create_env_on_driver` to True.\"\n )\n\n if log_once(\"sample_start\"):\n logger.info(\n \"Generating sample batch of size {}\".format(\n self.rollout_fragment_length\n )\n )\n\n batches = [self.input_reader.next()]\n steps_so_far = (\n batches[0].count\n if self.count_steps_by == \"env_steps\"\n else batches[0].agent_steps()\n )\n\n # In truncate_episodes mode, never pull more than 1 batch per env.\n # This avoids over-running the target batch size.\n if self.batch_mode == \"truncate_episodes\":\n max_batches = self.num_envs\n else:\n max_batches = float(\"inf\")\n\n while (\n steps_so_far < self.rollout_fragment_length and len(batches) < max_batches\n ):\n batch = self.input_reader.next()\n steps_so_far += (\n batch.count\n if self.count_steps_by == \"env_steps\"\n else batch.agent_steps()\n )\n batches.append(batch)\n batch = batches[0].concat_samples(batches) if len(batches) > 1 else batches[0]\n\n self.callbacks.on_sample_end(worker=self, samples=batch)\n\n # Always do writes prior to compression for consistency and to allow\n # for better compression inside the writer.\n self.output_writer.write(batch)\n\n # Do off-policy estimation, if needed.\n if self.reward_estimators:\n for sub_batch in batch.split_by_episode():\n for estimator in self.reward_estimators:\n estimator.process(sub_batch)\n\n if log_once(\"sample_end\"):\n logger.info(\"Completed sample batch:\\n\\n{}\\n\".format(summarize(batch)))\n\n if self.compress_observations:\n batch.compress(bulk=self.compress_observations == \"bulk\")\n\n if self.fake_sampler:\n self.last_batch = batch\n ", "d_id": 31780, "documentation": { "docstring": "Returns a batch of experience sampled from this worker.\n\n This method must be implemented by subclasses.\n\n Returns:\n A columnar batch of experiences (e.g., tensors).\n\n Examples:\n >>> import gym\n >>> from ray.rllib.evaluation.rollout_worker import RolloutWorker\n >>> from ray.rllib.algorithms.pg.pg_tf_policy import PGTFPolicy\n >>> worker = RolloutWorker( # doctest: +SKIP\n ... env_creator=lambda _: gym.make(\"CartPole-v0\"), # doctest: +SKIP\n ... 
policy_spec=PGTFPolicy) # doctest: +SKIP\n >>> print(worker.sample()) # doctest: +SKIP\n SampleBatch({\"obs\": [...], \"action\": [...], ...})\n ", "n_words": 67, "vocab_size": 46, "n_whitespaces": 198, "language": "en" } }, { "id": 206994, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_changelist/tests.py", "file_name": "tests.py", "fun_name": "test_no_duplicates_for_non_unique_related_object_in_list_filter", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_no_duplicates_for_non_unique_related_object_in_list_filter(self):\n \n parent = Parent.objects.create(name=\"Mary\")\n # Two children with the same name\n Child.objects.create(parent=parent, name=\"Daniel\")\n Child.objects.create(parent=parent, name=\"Daniel\")\n\n m = ParentAdmin(Parent, custom_site)\n request = self.factory.get(\"/parent/\", data={\"child__name\": \"Daniel\"})\n request.user = self.superuser\n\n cl = m.get_changelist_instance(request)\n # Exists() is applied.\n self.assertEqual(cl.queryset.count(), 1)\n # Queryset must be deletable.\n self.assertIs(cl.queryset.query.distinct, False)\n cl.queryset.delete()\n self.assertEqual(cl.queryset.count(), 0)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 152, "n_words": 47, "vocab_size": 38, "complexity": 1, "nloc": 12, "token_counts": 136, "n_ast_nodes": 229, "n_identifiers": 26, "random_cut": "def test_no_duplicates_for_non_unique_related_object_in_list_filter(self):\n \n parent = Parent.objects.create(name=\"Mary\")\n # Two children with the same name\n Child.objects.create(parent=parent, name=\"Daniel\")\n Child.objects.create(parent=parent, name=\"Daniel\")\n\n m = ParentAdmin(Parent, custom_site)\n ", "d_id": 51828, "documentation": { "docstring": "\n Regressions tests for #15819: If a field listed in list_filters is a\n non-unique related object, results shouldn't appear more than once.\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 43, "language": "en" } }, { "id": 28140, "commit_id": "3e06a6462559498c6ad09c0591e648a7943ac0c6", "repo": "saleor", "path": "saleor/plugins/webhook/utils.py", "file_name": "utils.py", "fun_name": "get_current_tax_app", "commit_message": "Add support for calculating taxes in Saleor Apps (#9526)\n\n* Squash previouse PR in taxes by Sync webhooks\r\n\r\n* Adjust incoming communication form tax app in order calculation\r\n\r\n* Change return type for base_checkout_total to Money\r\n\r\n* Fix cratign order lines for checkout lines\r\n\r\n* Remove not needed args\r\n\r\n* Fix order discount recalculation\r\n\r\n* Fix order discounts events amount calculation\r\n\r\n* Fix order calculation tests\r\n\r\n* Use base price in checkout line serializer\r\n\r\n* Use base shipping price in checkout tax payload\r\n\r\n* Use base total in checkout tax payload\r\n\r\n* Tax app interface should recive tax rate as decimal\r\n\r\n* Tax app interface should recive tax rate as decimal\r\n\r\n* Clear migrations\r\n\r\n* Add descriptions to webhook events enums\r\n\r\n* Update changelog\r\n\r\n* Drop not required changes from plugin interface\r\n\r\n* Fix review remarks", "code": "def get_current_tax_app() -> Optional[App]:\n \n return (\n App.objects.order_by(\"pk\")\n .for_event_type(WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES)\n .for_event_type(WebhookEventSyncType.ORDER_CALCULATE_TAXES)\n .last()\n )\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 48, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 8, "token_counts": 39, "n_ast_nodes": 67, "n_identifiers": 10, "random_cut": "def get_current_tax_app() -> Optional[App]:\n \n return (\n App.objects.order_by(\"pk\")\n .for_event_type(WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES)\n ", "d_id": 5158, "documentation": { "docstring": "Return currently used tax app or None, if there aren't any.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 249857, "commit_id": "4ae967cf6308e80b03da749f0cbaed36988e235e", "repo": "synapse", "path": "tests/util/caches/test_deferred_cache.py", "file_name": "test_deferred_cache.py", "fun_name": "test_callbacks", "commit_message": "Add missing type hints to test.util.caches (#14529)", "code": "def test_callbacks(self) -> None:\n \n cache: DeferredCache[str, int] = DeferredCache(\"test\")\n callbacks = set()\n\n # start with an entry, with a callback\n cache.prefill(\"k1\", 10, callback=lambda: callbacks.add(\"prefill\"))\n\n # now replace that entry with a pending result\n origin_d: \"defer.Deferred[int]\" = defer.Deferred()\n set_d = cache.set(\"k1\", origin_d, callback=lambda: callbacks.add(\"set\"))\n\n # ... and also make a get request\n get_d = cache.get(\"k1\", callback=lambda: callbacks.add(\"get\"))\n\n # we don't expect the invalidation callback for the original value to have\n # been called yet, even though get() will now return a different result.\n # I'm not sure if that is by design or not.\n self.assertEqual(callbacks, set())\n\n # now fire off all the deferreds\n origin_d.callback(20)\n self.assertEqual(self.successResultOf(set_d), 20)\n self.assertEqual(self.successResultOf(get_d), 20)\n\n # now the original invalidation callback should have been called, but none of\n # the others\n self.assertEqual(callbacks, {\"prefill\"})\n callbacks.clear()\n\n # another update should invalidate both the previous results\n cache.prefill(\"k1\", 30)\n self.assertEqual(callbacks, {\"set\", \"get\"})\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 315, "n_words": 140, "vocab_size": 100, "complexity": 1, "nloc": 16, "token_counts": 171, "n_ast_nodes": 300, "n_identifiers": 20, "random_cut": "def test_callbacks(self) -> None:\n \n cache: DeferredCache[str, int] = DeferredCache(\"test\")\n callbacks = set()\n\n # start with an entry, with a callba", "d_id": 73173, "documentation": { "docstring": "Invalidation callbacks are called at the right time", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 204710, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/sql.py", "file_name": "sql.py", "fun_name": "sql_flush", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def sql_flush(style, connection, reset_sequences=True, allow_cascade=False):\n \n tables = connection.introspection.django_table_names(\n only_existing=True, include_views=False\n )\n return connection.ops.sql_flush(\n style,\n tables,\n reset_sequences=reset_sequences,\n allow_cascade=allow_cascade,\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 68, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 10, "token_counts": 52, "n_ast_nodes": 76, "n_identifiers": 11, 
"random_cut": "def sql_flush(style, connection, reset_sequences=True, allow_cascade=False):\n \n tables = connection.in", "d_id": 50845, "documentation": { "docstring": "\n Return a list of the SQL statements used to flush the database.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 19, "language": "en" } }, { "id": 242777, "commit_id": "d3c9a6504e84f87379554b6b671a1fb6c66a449e", "repo": "Pillow", "path": "src/PIL/ImageFilter.py", "file_name": "ImageFilter.py", "fun_name": "generate", "commit_message": "Variable in function should be snake_case", "code": "def generate(cls, size, callback, channels=3, target_mode=None):\n \n size_1d, size_2d, size_3d = cls._check_size(size)\n if channels not in (3, 4):\n raise ValueError(\"Only 3 or 4 output channels are supported\")\n\n table = [0] * (size_1d * size_2d * size_3d * channels)\n idx_out = 0\n for b in range(size_3d):\n for g in range(size_2d):\n for r in range(size_1d):\n table[idx_out : idx_out + channels] = callback(\n r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1)\n )\n idx_out += channels\n\n return cls(\n (size_1d, size_2d, size_3d),\n table,\n channels=channels,\n target_mode=target_mode,\n _copy_table=False,\n )\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 316, "n_words": 88, "vocab_size": 61, "complexity": 5, "nloc": 20, "token_counts": 151, "n_ast_nodes": 222, "n_identifiers": 18, "random_cut": "def generate(cls, size, callback, channels=3, target_mode=None):\n \n size_1d, size_2d, size_3d = cls._check_size(size)\n if channels not in (3, 4):\n raise ValueError(\"Only 3 or 4 output channels are supported\")\n\n table = [0] * (size_1d * size_2d * size_3d * channels)\n idx_out = 0\n for b in range(size_3d)", "d_id": 69920, "documentation": { "docstring": "Generates new LUT using provided callback.\n\n :param size: Size of the table. Passed to the constructor.\n :param callback: Function with three parameters which correspond\n three color channels. Will be called ``size**3``\n times with values from 0.0 to 1.0 and should return\n a tuple with ``channels`` elements.\n :param channels: The number of channels which should return callback.\n :param target_mode: Passed to the constructor of the resulting\n lookup table.\n ", "n_words": 67, "vocab_size": 48, "n_whitespaces": 201, "language": "en" } }, { "id": 204878, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/operations.py", "file_name": "operations.py", "fun_name": "regex_lookup", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def regex_lookup(self, lookup_type):\n \n raise NotImplementedError(\n \"subclasses of BaseDatabaseOperations may require a regex_lookup() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 4, "random_cut": "def regex_lookup(self, lookup_type):\n \n raise NotImplementedError(\n \"subclasses of BaseDatabaseOperations may require a regex_lookup() method\"\n ", "d_id": 50951, "documentation": { "docstring": "\n Return the string to use in a query when performing regular expression\n lookups (using \"regex\" or \"iregex\"). 
It should contain a '%s'\n placeholder for the column being searched against.\n\n If the feature is not supported (or part of it is not supported), raise\n NotImplementedError.\n ", "n_words": 44, "vocab_size": 39, "n_whitespaces": 87, "language": "en" } }, { "id": 68062, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/telephony/doctype/call_log/call_log.py", "file_name": "call_log.py", "fun_name": "link_existing_conversations", "commit_message": "style: format code with black", "code": "def link_existing_conversations(doc, state):\n\t\n\tif doc.doctype != \"Contact\":\n\t\treturn\n\ttry:\n\t\tnumbers = [d.phone for d in doc.phone_nos]\n\n\t\tfor number in numbers:\n\t\t\tnumber = strip_number(number)\n\t\t\tif not number:\n\t\t\t\tcontinue\n\t\t\tlogs = frappe.db.sql_list(\n\t\t\t\t,\n\t\t\t\tdict(phone_number=\"%{}\".format(number), docname=doc.name, doctype=doc.doctype),\n\t\t\t)\n\n\t\t\tfor log in logs:\n\t\t\t\tcall_log = frappe.get_doc(\"Call Log\", log)\n\t\t\t\tcall_log.add_link(link_type=doc.doctype, link_name=doc.name)\n\t\t\t\tcall_log.save(ignore_permissions=True)\n\t\t\tfrappe.db.commit()\n\texcept Exception:\n\t\tfrappe.log_error(title=_(\"Error during caller information update\"))\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 35, "n_words": 55, "vocab_size": 46, "complexity": 7, "nloc": 33, "token_counts": 142, "n_ast_nodes": 232, "n_identifiers": 32, "random_cut": "def link_existing_conversations(doc, state):\n\t\n\tif doc.doctype != \"Contact\":\n\t\treturn\n\ttry:\n\t\tnumbers = [d.phone for d in doc.phone_nos]\n\n\t\tfor number in numbers:\n\t\t\tnumber = strip_number(number)\n\t\t\tif not number:\n\t\t\t\tcontinue\n\t\t\tlogs = frappe.db.sql_list(\n\t\t\t\t,\n\t\t\t\tdict(phone_number=\"%{}\".format(number), docname=doc.name, doctype=doc.doctype),\n\t\t\t)\n\n\t\t\tfor log in logs:\n\t\t\t\tcall_log = frappe.get_doc(\"Call Log\", log)\n\t\t\t\tcall_log.add_link(link_", "d_id": 14712, "documentation": { "docstring": "\n\tCalled from hooks on creation of Contact or Lead to link all the existing conversations.\n\t\n\t\t\t\tSELECT cl.name FROM `tabCall Log` cl\n\t\t\t\tLEFT JOIN `tabDynamic Link` dl\n\t\t\t\tON cl.name = dl.parent\n\t\t\t\tWHERE (cl.`from` like %(phone_number)s or cl.`to` like %(phone_number)s)\n\t\t\t\tGROUP BY cl.name\n\t\t\t\tHAVING SUM(\n\t\t\t\t\tCASE\n\t\t\t\t\t\tWHEN dl.link_doctype = %(doctype)s AND dl.link_name = %(docname)s\n\t\t\t\t\t\tTHEN 1\n\t\t\t\t\t\tELSE 0\n\t\t\t\t\tEND\n\t\t\t\t)=0\n\t\t\t", "n_words": 58, "vocab_size": 52, "n_whitespaces": 45, "language": "en" } }, { "id": 290401, "commit_id": "0c8eeaa6436b04ba6da46bccab8b11523f314d9b", "repo": "core", "path": "homeassistant/components/media_player/__init__.py", "file_name": "__init__.py", "fun_name": "async_volume_up", "commit_message": "Update mypy to 0.990 (#81783)\n\n* Update mypy to 0.990\r\n\r\n* Remove type ignore - overriding attr with property (13475)\r\n\r\n* Remove type ignores - hasattr (13544)\r\n\r\n* Adjust type ignore - assignment (13549)\r\n\r\n* New error code - type-abstract (13785)\r\n\r\n* Disable annotation-unchecked (13851)", "code": "async def async_volume_up(self) -> None:\n \n if hasattr(self, \"volume_up\"):\n await self.hass.async_add_executor_job(self.volume_up)\n return\n\n if (\n self.volume_level is not None\n and self.volume_level < 1\n and self.supported_features & MediaPlayerEntityFeature.VOLUME_SET\n ):\n 
await self.async_set_volume_level(min(1, self.volume_level + 0.1))\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 125, "n_words": 31, "vocab_size": 26, "complexity": 5, "nloc": 14, "token_counts": 70, "n_ast_nodes": 112, "n_identifiers": 12, "random_cut": "async def async_volume_up(self) -> None:\n \n if hasattr(self, \"volume_up\"):\n await ", "d_id": 89517, "documentation": { "docstring": "Turn volume up for media player.\n\n This method is a coroutine.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 25, "language": "en" } }, { "id": 167374, "commit_id": "7d2f9b8d59908fbf57c6453bc41891efbfe981a6", "repo": "pandas", "path": "pandas/io/pytables.py", "file_name": "pytables.py", "fun_name": "infer_axes", "commit_message": "TYP: some return annotations in pytables.py (#47512)", "code": "def infer_axes(self) -> bool:\n \n s = self.storable\n if s is None:\n return False\n self.get_attrs()\n return True\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 62, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 10, "token_counts": 27, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def infer_axes(self) -> bool:\n \n s = self.storable\n if s is None:\n return False\n self.get_attrs()\n return True\n", "d_id": 39981, "documentation": { "docstring": "\n infer the axes of my storer\n return a boolean indicating if we have a valid storer or not\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 153296, "commit_id": "e5e9634357e60925a5a70e56a1d4882d269f533a", "repo": "modin", "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_validate_set_axis", "commit_message": "REFACTOR-#3900: add flake8-no-implicit-concat plugin and refactor flake8 error codes (#3901)\n\nSigned-off-by: jeffreykennethli ", "code": "def _validate_set_axis(self, new_labels, old_labels):\n \n new_labels = ensure_index(new_labels)\n old_len = len(old_labels)\n new_len = len(new_labels)\n if old_len != new_len:\n raise ValueError(\n f\"Length mismatch: Expected axis has {old_len} elements, \"\n + \"new values have {new_len} elements\"\n )\n return new_labels\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 130, "n_words": 36, "vocab_size": 32, "complexity": 2, "nloc": 10, "token_counts": 43, "n_ast_nodes": 77, "n_identifiers": 9, "random_cut": "def _validate_set_axis(self, new_labels, old_labels):\n \n new_labels = ensure_index(new_labels)\n old_len = len(old_labels)\n new_len = len(new_labels)\n if old_len != new_len:\n raise ValueError(\n f\"Length mismatch: Expected axis has {old_len} elements, \"\n + \"new values have {new_len} elements\"\n )\n return new_labels\n", "d_id": 35351, "documentation": { "docstring": "\n Validate the possibility of replacement of old labels with the new labels.\n\n Parameters\n ----------\n new_labels : list-like\n The labels to replace with.\n old_labels : list-like\n The labels to replace.\n\n Returns\n -------\n list-like\n The validated labels.\n ", "n_words": 35, "vocab_size": 24, "n_whitespaces": 132, "language": "en" } }, { "id": 87117, "commit_id": "bf416f7ad23d7537a84c9727cfe1c0a7effd27bb", "repo": "sentry", "path": "src/sentry/snuba/discover.py", "file_name": "discover.py", "fun_name": 
"transform_data", "commit_message": "feat(discover): Only transform when ordering project (#39468)\n\n- This updates the querybuilder with a orderby resolver so we can\r\nimplement more custom orderbys(orderbies?) in the future\r\n- This changes the project field to just select the project_id only,\r\nwhich results in needing a new post-processing capability to the\r\nquerybuilder\r\n- This is done via the `value_resolver_map` and the `meta_resolver_map`\r\n- Removed the snuba_filter param from transform_results since we no\r\nlonger use it\r\n- Removes the old discover 1 query since it shouldn't be supported and\r\nno longer is being used\r\n- Needed to update ds code too since it relied on the old project\r\nbehaviour but doesn't use `discover.query`", "code": "def transform_data(result, translated_columns, query_builder) -> EventsResponse:\n \n final_result: EventsResponse = {\"data\": result[\"data\"], \"meta\": result[\"meta\"]}\n for col in final_result[\"meta\"]:\n # Translate back column names that were converted to snuba format\n col[\"name\"] = translated_columns.get(col[\"name\"], col[\"name\"])\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 55, "n_words": 32, "vocab_size": 31, "complexity": 3, "nloc": 13, "token_counts": 80, "n_ast_nodes": 102, "n_identifiers": 8, "random_cut": "def transform_data(result, translated_columns, query_builder) -> EventsResponse:\n \n final_result: EventsResponse = {\"data\": result[\"data\"], \"meta\": result[\"meta\"]}\n for col in final_result[\"meta\"]:\n # Translate back column names that were converted to snuba format\n col[\"name\"] = translated_columns.get(col[\"name\"], col[\"name\"])\n", "d_id": 18225, "documentation": { "docstring": "\n Transform internal names back to the public schema ones.\n\n When getting timeseries results via rollup, this function will\n zerofill the output results.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 35, "language": "en" } }, { "id": 197550, "commit_id": "eb20cbe9b89917786a10d50b785b4f21230f04be", "repo": "sympy", "path": "sympy/plotting/plot.py", "file_name": "plot.py", "fun_name": "plot_contour", "commit_message": "Improve documentation", "code": "def plot_contour(*args, show=True, **kwargs):\n \n\n args = list(map(sympify, args))\n plot_expr = check_arguments(args, 1, 2)\n series = [ContourSeries(*arg) for arg in plot_expr]\n plot_contours = Plot(*series, **kwargs)\n if len(plot_expr[0].free_symbols) > 2:\n raise ValueError('Contour Plot cannot Plot for more than two variables.')\n if show:\n plot_contours.show()\n return plot_contours\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 81, "n_words": 43, "vocab_size": 36, "complexity": 4, "nloc": 10, "token_counts": 86, "n_ast_nodes": 138, "n_identifiers": 17, "random_cut": "def plot_contour(*args, show=True, **kwargs):\n \n\n args = list(map(sympify, args))\n plot_expr = check_arguments(args, 1, 2)\n series = [ContourSeries(*arg) for arg ", "d_id": 48627, "documentation": { "docstring": "\n Draws contour plot of a function\n\n Usage\n =====\n\n Single plot\n\n ``plot_contour(expr, range_x, range_y, **kwargs)``\n\n If the ranges are not specified, then a default range of (-10, 10) is used.\n\n Multiple plot with the same range.\n\n ``plot_contour(expr1, expr2, range_x, range_y, **kwargs)``\n\n If the ranges are not specified, then a default range of 
(-10, 10) is used.\n\n Multiple plots with different ranges.\n\n ``plot_contour((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``\n\n Ranges have to be specified for every expression.\n\n Default range may change in the future if a more advanced default range\n detection algorithm is implemented.\n\n Arguments\n =========\n\n expr : Expression representing the function along x.\n\n range_x : (:class:`Symbol`, float, float)\n A 3-tuple denoting the range of the x variable, e.g. (x, 0, 5).\n\n range_y : (:class:`Symbol`, float, float)\n A 3-tuple denoting the range of the y variable, e.g. (y, 0, 5).\n\n Keyword Arguments\n =================\n\n Arguments for ``ContourSeries`` class:\n\n nb_of_points_x : int\n The x range is sampled uniformly at ``nb_of_points_x`` of points.\n\n nb_of_points_y : int\n The y range is sampled uniformly at ``nb_of_points_y`` of points.\n\n Aesthetics:\n\n surface_color : Function which returns a float\n Specifies the color for the surface of the plot. See\n :class:`sympy.plotting.Plot` for more details.\n\n If there are multiple plots, then the same series arguments are applied to\n all the plots. If you want to set these options separately, you can index\n the returned ``Plot`` object and set it.\n\n Arguments for ``Plot`` class:\n\n title : str\n Title of the plot.\n\n size : (float, float), optional\n A tuple in the form (width, height) in inches to specify the size of\n the overall figure. The default value is set to ``None``, meaning\n the size will be set by the default backend.\n\n See Also\n ========\n\n Plot, ContourSeries\n\n ", "n_words": 283, "vocab_size": 155, "n_whitespaces": 462, "language": "en" } }, { "id": 221193, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bz2.py", "file_name": "bz2.py", "fun_name": "peek", "commit_message": "add python 3.10.4 for windows", "code": "def peek(self, n=0):\n \n self._check_can_read()\n # Relies on the undocumented fact that BufferedReader.peek()\n # always returns at least one byte (except at EOF), independent\n # of the value of n\n return self._buffer.peek(n)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 73, "n_words": 31, "vocab_size": 26, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 44, "n_identifiers": 5, "random_cut": "def peek(self, n=0):\n \n self._check_can_read()\n # Relies on the u", "d_id": 56263, "documentation": { "docstring": "Return buffered data without advancing the file position.\n\n Always returns at least one byte of data, unless at EOF.\n The exact number of bytes returned is unspecified.\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 48, "language": "en" } }, { "id": 101282, "commit_id": "2beceffad9b15c1fd78f06b9b272563321c5a41e", "repo": "faceswap", "path": "lib/training/cache.py", "file_name": "cache.py", "fun_name": "cache_full", "commit_message": "Data Augmentation update (#1263)\n\n- lib.detected_face\r\n - Subclass Masks for Landmark based masks\r\n - Add training mask propery + methods to DetectedFace\r\n - lib.training_training\r\n - subclass TrainingDataGenerator for training and preview data\r\n - Split cache into own module\r\n - Reduce thread count to 1 to prevent image corruption + data re-use\r\n - Process on largest model input/output size rather than stored image size\r\n - Size and crop masks during caching stage\r\n - Implement ring buffer for data flow\r\n - Fix preview 
reload bug\r\n - augmentation\r\n - typing\r\n - switch color aug order\r\n - better initialization\r\n - Fix warp + landmark warp to correctly apply at different image scales\r\n - Slightly improved warp caching\r\n - Don't store whether image is_preview. Handle all data as training images implicitly\r\n - plugins.trainer: Typing and fixes to work with trainingdata refactor", "code": "def cache_full(self) -> bool:\n \n if self._cache_info[\"cache_full\"]:\n return self._cache_info[\"cache_full\"]\n with self._lock:\n return self._cache_info[\"cache_full\"]\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 12, "vocab_size": 10, "complexity": 2, "nloc": 7, "token_counts": 35, "n_ast_nodes": 64, "n_identifiers": 5, "random_cut": "def cache_full(self) -> bool:\n \n if self._cache_", "d_id": 20701, "documentation": { "docstring": "bool: ``True`` if the cache has been fully populated. ``False`` if there are items still\n to be cached. ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 25, "language": "en" } }, { "id": 265914, "commit_id": "b2e2e3be35f3922ecee945b97279c50725c0b7fa", "repo": "netbox", "path": "netbox/netbox/views/generic/base.py", "file_name": "base.py", "fun_name": "get_queryset", "commit_message": "Closes #10739: Introduce get_queryset() method on generic views", "code": "def get_queryset(self, request):\n \n if self.queryset is None:\n raise ImproperlyConfigured(\n f\"{self.__class__.__name__} does not define a queryset. Set queryset on the class or \"\n f\"override its get_queryset() method.\"\n )\n return self.queryset.all()\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 102, "n_words": 29, "vocab_size": 29, "complexity": 2, "nloc": 7, "token_counts": 31, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "def get_queryset(self, request):\n \n if self.queryset is None:\n raise ImproperlyConfigured(\n f\"{self.__class__.__name__} does not define a queryset. Set queryset on the class or \"\n f\"override its get_queryset() method.\"\n ", "d_id": 78245, "documentation": { "docstring": "\n Return the base queryset for the view. 
By default, this returns self.queryset.all().\n\n Args:\n request: The current request\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 50, "language": "en" } }, { "id": 322190, "commit_id": "621357338437ee420eabbbf5ab19065bc85e73a5", "repo": "PaddleNLP", "path": "paddlenlp/taskflow/knowledge_mining.py", "file_name": "knowledge_mining.py", "fun_name": "_preprocess", "commit_message": "Update neural search readme and Add Paddle Serving Support (#1558)\n\n* add recall inference similarity\r\n\r\n* update examples\r\n\r\n* updatea readme\r\n\r\n* update dir name\r\n\r\n* update neural search readme\r\n\r\n* update milvus readme\r\n\r\n* update domain adaptive pretraining readme\r\n\r\n* fix the mistakes\r\n\r\n* update readme\r\n\r\n* add recall Paddle Serving Support\r\n\r\n* update readme\r\n\r\n* update readme and format the code\r\n\r\n* reformat the files\r\n\r\n* move the files\r\n\r\n* reformat the code\r\n\r\n* remove redundant code\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: tianxin ", "code": "def _preprocess(self, inputs):\n \n inputs = self._check_input_text(inputs)\n self._max_cls_len = 5\n num_workers = self.kwargs[\n 'num_workers'] if 'num_workers' in self.kwargs else 0\n lazy_load = self.kwargs[\n 'lazy_load'] if 'lazy_load' in self.kwargs else False\n\n # Prompt template: input_text + \"是\" + \"[MASK]\" * cls_seq_length\n prompt_template = [\"是\"] + [\"[MASK]\"] * self._max_cls_len\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 117, "n_words": 46, "vocab_size": 33, "complexity": 3, "nloc": 26, "token_counts": 168, "n_ast_nodes": 115, "n_identifiers": 9, "random_cut": "def _preprocess(self, inputs):\n \n inputs = self._check_input_text(inputs)\n self._max_cls_len = 5\n num_workers = self.kwargs[\n 'num_workers'] if 'num_workers' in self.kwargs else 0\n lazy_load = self.kwargs[\n 'lazy_load'] if 'lazy_load' in self.kwargs else False\n\n # Prompt template: input_text + \"是\" + \"[MASK]\" * cls_seq_length\n prompt_template = [\"是\"] + [\"[MASK]\"] * self._max_cls_len\n", "d_id": 118085, "documentation": { "docstring": "\n Create the dataset and dataloader for the predict.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 23, "language": "en" } }, { "id": 215757, "commit_id": "fb825aa760fa0585a2c8fdafc6e62be8aec8cecf", "repo": "salt", "path": "salt/modules/consul.py", "file_name": "consul.py", "fun_name": "session_destroy", "commit_message": "[merge jam] Master port 49261 - consul modules (#58101)\n\n* add consul states and acl function present/absent\r\n\r\n* add consul to states doc index\r\n\r\n* refact/fix consul states\r\n\r\n* fix doc, fix states\r\n\r\n* fix name parameter for acl_changes\r\n\r\n* fixing pylint errors\r\n\r\n* small changes after review by @rallytime\r\n\r\n* fix header count\r\n\r\n* Update consul.py\r\n\r\n* fix acl_exists description, fix when both id and name are missing\r\n\r\n* Adding some tests for consul module and consul state module. Some additional fixes in the consul module.\r\n\r\n* Fixing tests.\r\n\r\n* Fixing failing tests on Windows.\r\n\r\n* Adding changelog.\r\n\r\n* Adding some tests for consul module and consul state module. Some additional fixes in the consul module.\r\n\r\n* moving tests to pytest.\r\n\r\n* manual black changes.\r\n\r\n* One more manual black change.\r\n\r\n* fixing formatting. 
Adding versionadded for state module.\r\n\r\nCo-authored-by: Rémi Jouannet \r\nCo-authored-by: Mike Place \r\nCo-authored-by: Daniel Wozniak \r\nCo-authored-by: Wayne Werner ", "code": "def session_destroy(consul_url=None, token=None, session=None, **kwargs):\n \n ret = {}\n if not consul_url:\n consul_url = _get_config()\n if not consul_url:\n log.error(\"No Consul URL found.\")\n ret[\"message\"] = \"No Consul URL found.\"\n ret[\"res\"] = False\n return ret\n\n if not session:\n raise SaltInvocationError('Required argument \"session\" is missing.')\n\n query_params = {}\n\n if \"dc\" in kwargs:\n query_params[\"dc\"] = kwargs[\"dc\"]\n\n function = \"session/destroy/{}\".format(session)\n res = _query(\n consul_url=consul_url,\n function=function,\n token=token,\n method=\"PUT\",\n query_params=query_params,\n )\n if res[\"res\"]:\n ret[\"res\"] = True\n ret[\"message\"] = \"Destroyed Session {}.\".format(session)\n else:\n ret[\"res\"] = False\n ret[\"message\"] = \"Unable to destroy session {}.\".format(session)\n return ret\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 257, "n_words": 86, "vocab_size": 56, "complexity": 6, "nloc": 29, "token_counts": 160, "n_ast_nodes": 283, "n_identifiers": 16, "random_cut": "def session_destroy(consul_url=None, token=None, session=None, **kwargs):\n \n ret = {}\n if not consul_url:\n consul_url = _get_config()\n if not consul_url:\n log.error(\"No Consul URL found.\")\n ret[\"me", "d_id": 54151, "documentation": { "docstring": "\n Destroy session\n\n :param consul_url: The Consul server URL.\n :param session: The ID of the session to destroy.\n :param dc: By default, the datacenter of the agent is queried;\n however, the dc can be provided using the \"dc\" parameter.\n :return: Boolean & message of success or failure.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716'\n\n ", "n_words": 55, "vocab_size": 45, "n_whitespaces": 101, "language": "en" } }, { "id": 29623, "commit_id": "74d1c8d8504dbdd339865ff97ca4ac9bd30a8faf", "repo": "saleor", "path": "saleor/graphql/product/mutations/collection/collection_update.py", "file_name": "collection_update.py", "fun_name": "post_save_action", "commit_message": "Split product types and mutations (#11259)\n\n* Split product types file\r\n\r\n* Split product/mutations/products.py file", "code": "def post_save_action(cls, info, instance, cleaned_input):\n \n manager = load_plugin_manager(info.context)\n cls.call_event(manager.collection_updated, instance)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 47, "n_identifiers": 10, "random_cut": "def post_save_action(cls, info, instance, cleaned_input):\n \n manager = load_plugin_manager(info.context", "d_id": 5242, "documentation": { "docstring": "Override this method with `pass` to avoid triggering product webhook.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 276242, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saving_utils.py", "file_name": "saving_utils.py", "fun_name": "_deserialize_metric", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _deserialize_metric(metric_config):\n \n from keras import (\n metrics as metrics_module,\n ) # pylint:disable=g-import-not-at-top\n\n if metric_config in [\"accuracy\", \"acc\", \"crossentropy\", \"ce\"]:\n # Do not deserialize accuracy and cross-entropy strings as we have special\n # case handling for these in compile, based on model output shape.\n return metric_config\n return metrics_module.deserialize(metric_config)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 91, "n_words": 47, "vocab_size": 41, "complexity": 2, "nloc": 7, "token_counts": 37, "n_ast_nodes": 68, "n_identifiers": 6, "random_cut": "def _deserialize_metric(metric_config):\n \n from keras import (\n metrics as metrics_module,\n ) # pylint:disable=g-import-not-at-top\n\n if metric_config in [\"accuracy\", \"acc\", \"crossentropy\", \"ce\"]:\n # Do not deserialize accuracy and cross-entropy strings as we have special\n # case handling for these", "d_id": 81600, "documentation": { "docstring": "Deserialize metrics, leaving special strings untouched.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 269312, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/activations.py", "file_name": "activations.py", "fun_name": "softsign", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def softsign(x):\n \n return tf.math.softsign(x)\n\n\n@keras_export(\"keras.activations.swish\")\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.activations.swish\")\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 10, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, 
"token_counts": 15, "n_ast_nodes": 50, "n_identifiers": 8, "random_cut": "def softsign(x):\n \n return tf.math.softsign(x)\n\n\n@keras_export(\"keras.activations.swish\")\n@tf.__internal__.dispatch.add_dispatch_support", "d_id": 80026, "documentation": { "docstring": "Softsign activation function, `softsign(x) = x / (abs(x) + 1)`.\n\n Example Usage:\n\n >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)\n >>> b = tf.keras.activations.softsign(a)\n >>> b.numpy()\n array([-0.5, 0. , 0.5], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The softsign activation: `x / (abs(x) + 1)`.\n ", "n_words": 45, "vocab_size": 36, "n_whitespaces": 85, "language": "en" } }, { "id": 150952, "commit_id": "d3cb211283ced68d082cfdbdac12f3d2ab90d63b", "repo": "freqtrade", "path": "freqtrade/freqai/data_kitchen.py", "file_name": "data_kitchen.py", "fun_name": "compute_inlier_metric", "commit_message": "Add inlier metric computation", "code": "def compute_inlier_metric(self) -> None:\n \n\n import scipy.stats as ss\n \n nmb_previous_points = self.data['InlierMetric_nmb_points']\n weibull_percentile = self.data['InlierMetric_weib_perc']\n\n train_ft_df = self.data_dictionary['train_features']\n train_ft_df_reindexed = train_ft_df.reindex(\n index=np.flip(train_ft_df.index) \n )\n\n pairwise = pd.DataFrame(\n np.triu(\n pairwise_distances(train_ft_df_reindexed, n_jobs=self.thread_count)\n ),\n columns=train_ft_df_reindexed.index,\n index=train_ft_df_reindexed.index\n )\n pairwise = pairwise.round(5)\n\n column_labels = [\n '{}{}'.format('d', i) for i in range(1, nmb_previous_points+1)\n ]\n distances = pd.DataFrame(\n columns=column_labels, index=train_ft_df.index\n )\n for index in train_ft_df.index[nmb_previous_points]:\n current_row = pairwise.loc[[index]]\n current_row_no_zeros = current_row.loc[\n :, (current_row!=0).any(axis=0)\n ]\n distances.loc[[index]] = current_row_no_zeros.iloc[\n :, :nmb_previous_points\n ]\n distances = distances.replace([np.inf, -np.inf], np.nan)\n drop_index = pd.isnull(distances).any(1)\n distances = distances[drop_index==0]\n\n inliers = pd.DataFrame(index=distances.index)\n for key in distances.keys():\n current_distances = distances[key].dropna()\n fit_params = ss.weibull_min.fit(current_distances)\n cutoff = ss.weibull_min.ppf(weibull_percentile, *fit_params)\n is_inlier = np.where(\n current_distances<=cutoff, 1, 0\n )\n df_inlier = pd.DataFrame(\n {key+'_IsInlier':is_inlier}, index=distances.index\n )\n inliers = pd.concat(\n [inliers, df_inlier], axis=1\n )\n\n self.data_dictionary['train_features'] = pd.DataFrame(\n data=inliers.sum(axis=1)/nmb_previous_points,\n columns=['inlier_metric'],\n index = train_ft_df.index\n )\n\n percent_outliers = np.round(\n 100*(1-self.data_dictionary['iniler_metric'].sum()/\n len(train_ft_df.index)), 2\n )\n logger.info('{percent_outliers}%% of data points were identified as outliers')\n\n return None\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 708, "n_words": 145, "vocab_size": 98, "complexity": 4, "nloc": 64, "token_counts": 417, "n_ast_nodes": 653, "n_identifiers": 59, "random_cut": "def compute_inlier_metric(self) -> None:\n \n\n import scipy.stats as ss\n \n nmb_previous_points = self.data['InlierMetric_nmb_points']\n weibull_percentile = self.data['InlierMetric_weib_perc']\n\n train_ft_df = self.data_dictionary['train_features']\n train_ft_df_reindexed = train_ft_df.reindex(\n index=np.flip(train_ft_df.index) \n 
)\n\n pairwise = pd.DataFrame(\n np.triu(\n pairwise_distances(train_ft_df_reindexed, n_jobs=self.thread_count)\n ),\n columns=train_ft_df_reindexed.index,\n index=train_ft_df_reindexed.index\n )\n pairwise = pairwise.round(5)\n\n column_labels = [\n '{}{}'.format('d', i) for i in range(1, nmb_previous_points+1)\n ]\n distances = pd.DataFrame(\n columns=column_labels, index=train_ft_df.index\n )\n for index in train_ft_df.index[nmb_previous_points]:\n current_row = pairwise.loc[[index]]\n current_row_no_zeros = current_row.loc[\n :, (current_row!=0).any(axis=0)\n ]\n distances.loc[[index]] = current_row_no_zeros.iloc[\n :, :nmb_previous_points\n ]\n distances = distances.replace([np.inf, -np.inf], np.nan)\n drop_index = pd.isnull(distances).any(1)\n distances = distances[drop_index==0]\n\n inliers = pd.DataFrame(index=distances.index)\n for key in distances.keys():\n current_distances = distances[key].dropna()\n fit_params = ss.weibull_min.fit(current_distances)\n cutoff = ss.weibull_min.ppf(weibull_percentile, *fit_params)\n is_inlier = np.where(\n current_distances<=cutoff, 1, 0\n )\n df_inlier = pd.DataFrame(\n {key+'_IsInlier':is_inlier}, index=distances.index\n )\n inliers = pd.concat(\n [inliers, df_inlier], axis=1\n )\n\n self.data_dictionary['train_features'] = pd.DataFrame(\n data=inliers.sum(axis=1)/nmb_previous_points,\n col", "d_id": 34894, "documentation": { "docstring": "\n \n Compute inlier metric from backwards distance distributions. \n This metric defines how well features from a timepoint fit \n into previous timepoints.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 59, "language": "en" } }, { "id": 220224, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ast.py", "file_name": "ast.py", "fun_name": "items_view", "commit_message": "add python 3.10.4 for windows", "code": "def items_view(self, traverser, items):\n \n if len(items) == 1:\n traverser(items[0])\n self.write(\",\")\n else:\n self.interleave(lambda: self.write(\", \"), traverser, items)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 70, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 50, "n_ast_nodes": 84, "n_identifiers": 7, "random_cut": "def items_view(self, traverser, items):\n \n if len(items) == 1:\n traverser(items[0])\n self.write(\",\")\n else:\n self.interleave(lambda: self.write(\", \"), tra", "d_id": 55942, "documentation": { "docstring": "Traverse and separate the given *items* with a comma and append it to\n the buffer. 
If *items* is a single item sequence, a trailing comma\n will be added.", "n_words": 28, "vocab_size": 22, "n_whitespaces": 41, "language": "en" } }, { "id": 248296, "commit_id": "cde8af9a495cbc7f3d0207e3f17c37eddaee34e1", "repo": "synapse", "path": "synapse/metrics/jemalloc.py", "file_name": "jemalloc.py", "fun_name": "refresh_stats", "commit_message": "Add config flags to allow for cache auto-tuning (#12701)", "code": "def refresh_stats(self) -> None:\n \n try:\n self._mallctl(\"epoch\", read=False, write=1)\n except Exception as e:\n logger.warning(\"Failed to reload jemalloc stats: %s\", e)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 62, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 9, "token_counts": 37, "n_ast_nodes": 65, "n_identifiers": 9, "random_cut": "def refresh_stats(self) -> None:\n \n try:\n self._mallctl(\"epoch\", read=False, write=1)\n except Exception as e:\n logger.warning(\"Failed to reload jemalloc stats: %s", "d_id": 72195, "documentation": { "docstring": "Request that jemalloc updates its internal statistics. This needs to\n be called before querying for stats, otherwise it will return stale\n values.\n ", "n_words": 22, "vocab_size": 22, "n_whitespaces": 43, "language": "en" } }, { "id": 176247, "commit_id": "0cc70051fa0a979b1f1eab4af5b6587a6ebf8334", "repo": "networkx", "path": "networkx/tests/test_convert_numpy.py", "file_name": "test_convert_numpy.py", "fun_name": "test_to_numpy_array_multiweight_reduction", "commit_message": "Refactor `to_numpy_array` with advanced indexing (#5250)\n\n* WIP: try approach based on advanced indexing.\r\n\r\n* WIP: Fix some tests and support multigraphs.\r\n\r\n* Rm test for limiting reductions to nanfunctions.\r\n\r\n* Catch edgeless graph cornercase.\r\n\r\n* Cleanups.\r\n\r\n* Update networkx/convert_matrix.py\r\n\r\nComments from review\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Only subgraph if necessary\r\n\r\nand copy if so, for performance reasons\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Split multigraph and graph cases for performance.\r\n\r\n* Add tests for to_numpy_array with complex dtype.\r\n\r\nCo-authored-by: Andras Deak \r\n\r\n* Add test for object weights.\r\n\r\n* Add test for more multiweight reduction functions.\r\n\r\nInclude arbitrary functions beyond the original set of\r\nnanmin, nanmax, and nansum.\r\n\r\n* Update docstring.\r\n\r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Andras Deak ", "code": "def test_to_numpy_array_multiweight_reduction(func, expected):\n \n G = nx.MultiDiGraph()\n weights = [-1, 2, 10.0]\n for w in weights:\n G.add_edge(0, 1, weight=w)\n A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float)\n assert np.allclose(A, [[0, expected], [0, 0]])\n\n # Undirected case\n A = nx.to_numpy_array(G.to_undirected(), multigraph_weight=func, dtype=float)\n assert np.allclose(A, [[0, expected], [expected, 0]])\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 77, "n_words": 43, "vocab_size": 32, "complexity": 2, "nloc": 9, "token_counts": 122, "n_ast_nodes": 175, "n_identifiers": 18, "random_cut": "def test_to_numpy_array_multiweight_reduction(func, expected):\n \n G = nx.MultiDiGraph()\n weights = [-1, 2, 10.0]\n for w in weights:\n G.add_edge(0, 1, weight=w)\n A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float)\n", "d_id": 41791, "documentation": { 
"docstring": "Test various functions for reducing multiedge weights.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 125896, "commit_id": "8ddcf89096e5631c6b6e0d04dc094b458a15c9f9", "repo": "ray", "path": "rllib/connectors/tests/test_agent.py", "file_name": "test_agent.py", "fun_name": "test_vr_connector_causal_slice", "commit_message": "[RLlib] Implemented ViewRequirementConnector (#26998)", "code": "def test_vr_connector_causal_slice(self):\n \n view_rq_dict = {\n \"state\": ViewRequirement(\"obs\"),\n # shift array should be [-2, -1, 0]\n \"prev_states\": ViewRequirement(\"obs\", shift=\"-2:0\"),\n # shift array should be [-4, -2, 0]\n \"prev_strided_states_even\": ViewRequirement(\"obs\", shift=\"-4:0:2\"),\n # shift array should be [-3, -1]\n \"prev_strided_states_odd\": ViewRequirement(\"obs\", shift=\"-3:0:2\"),\n }\n\n obs_arrs = np.arange(10)[:, None] + 1\n config = PPOConfig().to_dict()\n ctx = ConnectorContext(\n view_requirements=view_rq_dict, config=config, is_policy_recurrent=True\n )\n c = ViewRequirementAgentConnector(ctx)\n\n # keep a queue of observations\n obs_list = []\n for t, obs in enumerate(obs_arrs):\n # t=0 is the next state of t=-1\n data = AgentConnectorDataType(\n 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1}\n )\n processed = c([data])\n for_action = processed[0].data.for_action\n\n if t == 0:\n obs_list.extend([obs for _ in range(5)])\n else:\n # remove the first obs and add the current obs to the end\n obs_list.pop(0)\n obs_list.append(obs)\n\n # check state\n check(for_action[\"state\"], obs[None])\n\n # check prev_states\n check(\n for_action[\"prev_states\"],\n np.stack(obs_list)[np.array([-3, -2, -1])][None],\n )\n\n # check prev_strided_states_even\n check(\n for_action[\"prev_strided_states_even\"],\n np.stack(obs_list)[np.array([-5, -3, -1])][None],\n )\n\n check(\n for_action[\"prev_strided_states_odd\"],\n np.stack(obs_list)[np.array([-4, -2])][None],\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 669, "n_words": 152, "vocab_size": 105, "complexity": 4, "nloc": 38, "token_counts": 300, "n_ast_nodes": 491, "n_identifiers": 36, "random_cut": "def test_vr_connector_causal_slice(self):\n \n view_rq_dict = {\n \"state\": ViewRequirement(\"obs\"),\n # shift array should be [-2, -1, 0]\n \"prev_states\": ViewRequirement(\"obs\", shift=\"-2:0\"),\n # shift array should be [-4, -2, 0]\n \"prev_strided_states_even\": ViewRequirement(\"obs\", shift=\"-4:0:2\"),\n # shift array should be [-3, -1]\n \"prev_strided_states_odd\": ViewRequirement(\"obs\", shift=\"-3:0:2\"),\n }\n\n obs_arrs = np.arange(10)[:, None] + 1\n config = PPOConfig().to_dict()\n ctx = ConnectorContext(\n view_requirements=view_rq_dict, config=config, is_policy_recurrent=True\n )\n c = ViewRequirementAgentConnector(ctx)\n\n # keep a queue of observations\n obs_list = []\n for t, obs in enumerate(obs_arrs):\n # t=0 is the next state of t=-1\n data = AgentConnectorDataType(\n 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1}\n )\n processed = c([data])\n for_action = processed[0].data.for_action\n\n if t ==", "d_id": 28018, "documentation": { "docstring": "Test that the ViewRequirementConnector can handle slice shifts correctly.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 183607, "commit_id": "6bfc26c1ec37262b9cd4bbab35d15907dc6742bf", "repo": "textual", "path": "examples/calculator.py", "file_name": 
"calculator.py", "fun_name": "render", "commit_message": "more docs", "code": "def render(self) -> RenderableType:\n \n return Padding(\n Align.right(FigletText(self.value), vertical=\"middle\"),\n (0, 1),\n style=\"white on rgb(51,51,51)\",\n )\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 68, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 38, "n_ast_nodes": 62, "n_identifiers": 10, "random_cut": "def render(self) -> RenderableType:\n \n return Padding(\n Align.right(FigletText(self.value), vertical=\"middle\"),\n (0, 1),\n style=\"white o", "d_id": 44269, "documentation": { "docstring": "Build a Rich renderable to render the calculator display.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 278255, "commit_id": "f0fc6f798937a7a5fdab469c0f16bdde7cfc4ccd", "repo": "keras", "path": "keras/models/cloning.py", "file_name": "cloning.py", "fun_name": "in_place_subclassed_model_state_restoration", "commit_message": "resolve line-too-long in models", "code": "def in_place_subclassed_model_state_restoration(model):\n \n assert not model._is_graph_network\n # Restore layers and build attributes\n if (\n hasattr(model, \"_original_attributes_cache\")\n and model._original_attributes_cache is not None\n ):\n # Models have sticky attribute assignment, so we want to be careful to\n # add back the previous attributes and track Layers by their original\n # names without adding dependencies on \"utility\" attributes which Models\n # exempt when they're constructed.\n setattr_tracking = model._setattr_tracking\n model._setattr_tracking = False\n model._self_tracked_trackables = []\n for name, value in model._original_attributes_cache.items():\n setattr(model, name, value)\n if isinstance(value, Layer):\n model._self_tracked_trackables.append(value)\n model._original_attributes_cache = None\n model._setattr_tracking = setattr_tracking\n else:\n # Restore to the state of a never-called model.\n _reset_build_compile_trackers(model)\n\n\n@keras_export(\"keras.__internal__.models.clone_and_build_model\", v1=[])", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.__internal__.models.clone_and_build_model\", v1=[])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 253, "n_words": 101, "vocab_size": 75, "complexity": 5, "nloc": 17, "token_counts": 97, "n_ast_nodes": 181, "n_identifiers": 18, "random_cut": "def in_place_subclassed_model_state_restoration(model):\n \n assert not model._is_graph_network\n # Restore layers and build attributes\n if (\n hasattr(model, \"_original_attributes_cache\")\n and model._original_attributes_cache is not None\n ):\n # Models have sticky attribute assignment, so we want to be careful to\n # add back the previous attributes and track Layers by their original\n # names without adding dependencies on \"utility\" attributes which Models\n # exempt when they're constructed.\n setattr_tracking = model._setattr_tracking\n model._setattr_tracking = False\n model._self_tracked_trackables = []\n for name, value in model._original_attributes_cache.items():\n setattr(model, name, value)\n if isinstance(value, Layer):\n model._self_tracked_trackables.append(value)\n model._original_attributes_cache = None\n model._setattr_tracking = setattr_tracking\n else:\n # Res", "d_id": 82432, "documentation": { "docstring": "Restores the original state of a model after 
it was \"reset\".\n\n This undoes this action of `_in_place_subclassed_model_reset`, which is\n called in `clone_and_build_model` if `in_place_reset` is set to True.\n\n Args:\n model: Instance of a Keras model created via subclassing, on which\n `_in_place_subclassed_model_reset` was previously called.\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 68, "language": "en" } }, { "id": 198089, "commit_id": "bad8e3c1d614a05a0b1c6a05c21720f8751f0f2b", "repo": "sympy", "path": "sympy/core/expr.py", "file_name": "expr.py", "fun_name": "_imaginary_unit_as_coefficient", "commit_message": "move _imaginary_unit_as_coefficient to sympy.core.expr", "code": "def _imaginary_unit_as_coefficient(arg):\n \n if getattr(arg, 'is_real', True):\n return None\n else:\n return arg.as_coefficient(S.ImaginaryUnit)\n\n\n@sympify_method_args", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "@sympify_method_args", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 34, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 29, "n_ast_nodes": 54, "n_identifiers": 7, "random_cut": "def _imaginary_unit_as_coefficient(arg):\n \n if getattr(arg, 'is_real', True):\n return None\n else:\n return arg.as_coefficient(", "d_id": 48788, "documentation": { "docstring": " Helper to extract symbolic coefficient for imaginary unit ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 279959, "commit_id": "51a6050b936ec87cd684fc1a052f79785ec9aaec", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/optimizer.py", "file_name": "optimizer.py", "fun_name": "from_config", "commit_message": "Some changes on the new optimizer:\n1. Include `custom_objects` in `from_config` for deserializing custom learning rate.\n2. 
Handle the error of seeing unrecognized variable with a better error message.\n\nPiperOrigin-RevId: 476505974", "code": "def from_config(cls, config, custom_objects=None):\n \n if \"learning_rate\" in config:\n if isinstance(config[\"learning_rate\"], dict):\n config[\"learning_rate\"] = learning_rate_schedule.deserialize(\n config[\"learning_rate\"], custom_objects=custom_objects\n )\n return cls(**config)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 100, "n_words": 19, "vocab_size": 18, "complexity": 3, "nloc": 7, "token_counts": 52, "n_ast_nodes": 88, "n_identifiers": 8, "random_cut": "def from_config(cls, config, custom_objects=None):\n \n if \"learning_rate\" in config:\n if isinstance(config[\"learning_rate\"], dict):\n config[\"learning_rate\"] = learning_rate_schedule.deserialize(\n config[\"learning_rate\"], custom_objects=custom_objects\n", "d_id": 83198, "documentation": { "docstring": "Creates an optimizer from its config.\n\n This method is the reverse of `get_config`, capable of instantiating the\n same optimizer from the config dictionary.\n\n Args:\n config: A Python dictionary, typically the output of get_config.\n custom_objects: A Python dictionary mapping names to additional\n user-defined Python objects needed to recreate this optimizer.\n\n Returns:\n An optimizer instance.\n ", "n_words": 53, "vocab_size": 41, "n_whitespaces": 134, "language": "en" } }, { "id": 320283, "commit_id": "4aa318598fd0dc6c5d4e08dd2a13e7bf614511ec", "repo": "paperless-ngx", "path": "src/paperless_mail/tests/test_parsers_live.py", "file_name": "test_parsers_live.py", "fun_name": "test_generate_pdf_from_mail", "commit_message": "add test comments", "code": "def test_generate_pdf_from_mail(self):\n \n mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, \"html.eml\"))\n\n pdf_path = os.path.join(self.parser.tempdir, \"html.eml.pdf\")\n\n with open(pdf_path, \"wb\") as file:\n file.write(self.parser.generate_pdf_from_mail(mail))\n\n converted = os.path.join(\n self.parser.tempdir,\n \"html.eml.pdf.webp\",\n )\n run_convert(\n density=300,\n scale=\"500x5000>\",\n alpha=\"remove\",\n strip=True,\n trim=False,\n auto_orient=True,\n input_file=f\"{pdf_path}\", # Do net define an index to convert all pages.\n output_file=converted,\n logging_group=None,\n )\n self.assertTrue(os.path.isfile(converted))\n thumb_hash = self.imagehash(converted)\n\n # The created pdf is not reproducible. But the converted image should always look the same.\n expected_hash = self.imagehash(\n os.path.join(self.SAMPLE_FILES, \"html.eml.pdf.webp\"),\n )\n self.assertEqual(\n thumb_hash,\n expected_hash,\n f\"PDF looks different. 
Check if {converted} looks weird.\",\n )\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 363, "n_words": 81, "vocab_size": 70, "complexity": 1, "nloc": 30, "token_counts": 176, "n_ast_nodes": 291, "n_identifiers": 32, "random_cut": "def test_generate_pdf_from_mail(self):\n \n mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, \"html.eml\"))\n\n pdf_path = os.path.join(self.parser.tempdir, \"html.eml.pdf\")\n\n with open(pdf_path, \"wb\") as file:\n file.write(self.parser.generate_pdf_from_mail(mail))\n\n converted = os.path.join(\n self.parser.tempdir,\n \"html.eml.pdf.webp\",\n )\n run_convert(\n density=300,\n scale=\"500x5000>\",\n alpha=\"remove\",", "d_id": 117113, "documentation": { "docstring": "\n GIVEN:\n - Fresh start\n WHEN:\n - pdf generation from simple eml file is requested\n THEN:\n - gotenberg is called and the resulting file is returned and look as expected.\n ", "n_words": 29, "vocab_size": 23, "n_whitespaces": 91, "language": "en" } }, { "id": 212750, "commit_id": "430d1bc77fcdc0969a66ff370ec5e7e590423c83", "repo": "PySimpleGUI", "path": "DemoPrograms/Demo_Desktop_Widget_Drive_Usage_Gauges.py", "file_name": "Demo_Desktop_Widget_Drive_Usage_Gauges.py", "fun_name": "new", "commit_message": "More demo programs updates 🤦‍♂️ wow.....I thought for sure these were checked in....", "code": "def new(self, degree=0, color=None):\n \n (center_x, center_y, angle, inner_radius, outer_radius,\n outer_color, pointer_color, origin_color, line_width) = self.all\n pointer_color = color or pointer_color\n if self.figure != []:\n for figure in self.figure:\n self.graph_elem.DeleteFigure(figure)\n self.figure = []\n d = degree - 90\n self.all[2] = degree\n dx1 = int(2 * inner_radius * math.sin(d / 180 * math.pi))\n dy1 = int(2 * inner_radius * math.cos(d / 180 * math.pi))\n dx2 = int(outer_radius * math.sin(d / 180 * math.pi))\n dy2 = int(outer_radius * math.cos(d / 180 * math.pi))\n self.figure.append(self.graph_elem.DrawLine((center_x - dx1, center_y - dy1),\n (center_x + dx2, center_y + dy2),\n color=pointer_color, width=line_width))\n self.figure.append(self.graph_elem.DrawCircle((center_x, center_y), inner_radius,\n fill_color=origin_color, line_color=outer_color, line_width=line_width))\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 460, "n_words": 100, "vocab_size": 61, "complexity": 4, "nloc": 19, "token_counts": 238, "n_ast_nodes": 354, "n_identifiers": 33, "random_cut": "def new(self, degree=0, color=None):\n \n (center_x, center_y, angle, inner_radius, outer_radius,\n outer_color, pointer_color, origin_color, line_width) = self.all\n pointer_color = color or pointer_color\n if self.figure != []:\n for figure in self.figure:\n self.graph_elem.DeleteFigure(figure)\n self.figure = []\n d = degree - 90\n self.all[2] = degree\n dx1 = int(2 * inner_radius * math.sin(d / 180 * math.pi))\n dy1 = int(2 * inner_radius * math.cos(d / 180 * math.pi))\n dx2 = int(outer_radius * math.sin(d / 180 * math.pi))\n dy2 = int(outer_radius * math.cos(d / 180 * math.pi))\n self.figure.app", "d_id": 53380, "documentation": { "docstring": "\n Draw new pointer by angle, erase old pointer if exist\n degree defined as clockwise from negative x-axis.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 51, "language": "en" } }, { "id": 50918, "commit_id": 
"7a847a39b1da6e6867031f52f713d92391b9729d", "repo": "PaddleHub", "path": "modules/image/object_detection/yolov3_darknet53_vehicles/processor.py", "file_name": "processor.py", "fun_name": "postprocess", "commit_message": "update yolov3_darknet53_vehicles (#1957)\n\n* update yolov3_darknet53_vehicles\r\n\r\n* update gpu config\r\n\r\n* update\r\n\r\n* add clean func\r\n\r\n* update save inference model", "code": "def postprocess(paths, images, data_out, score_thresh, label_names, output_dir, handle_id, visualization=True):\n \n results = data_out.copy_to_cpu()\n lod = data_out.lod()[0]\n\n check_dir(output_dir)\n\n if paths:\n assert type(paths) is list, \"type(paths) is not list.\"\n if handle_id < len(paths):\n unhandled_paths = paths[handle_id:]\n unhandled_paths_num = len(unhandled_paths)\n else:\n unhandled_paths_num = 0\n if images is not None:\n if handle_id < len(images):\n unhandled_paths = None\n unhandled_paths_num = len(images) - handle_id\n else:\n unhandled_paths_num = 0\n\n output = list()\n for index in range(len(lod) - 1):\n output_i = {'data': []}\n if unhandled_paths and index < unhandled_paths_num:\n org_img_path = unhandled_paths[index]\n org_img = Image.open(org_img_path)\n else:\n org_img = images[index - unhandled_paths_num]\n org_img = org_img.astype(np.uint8)\n org_img = Image.fromarray(org_img[:, :, ::-1])\n if visualization:\n org_img_path = get_save_image_name(org_img, output_dir, 'image_numpy_{}'.format((handle_id + index)))\n org_img.save(org_img_path)\n org_img_height = org_img.height\n org_img_width = org_img.width\n result_i = results[lod[index]:lod[index + 1]]\n for row in result_i:\n if len(row) != 6:\n continue\n if row[1] < score_thresh:\n continue\n category_id = int(row[0])\n confidence = row[1]\n bbox = row[2:]\n dt = {}\n dt['label'] = label_names[category_id]\n dt['confidence'] = float(confidence)\n dt['left'], dt['top'], dt['right'], dt['bottom'] = clip_bbox(bbox, org_img_width, org_img_height)\n output_i['data'].append(dt)\n\n output.append(output_i)\n if visualization:\n output_i['save_path'] = draw_bounding_box_on_image(org_img_path, output_i['data'], output_dir)\n\n return output\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 610, "n_words": 172, "vocab_size": 107, "complexity": 13, "nloc": 50, "token_counts": 380, "n_ast_nodes": 611, "n_identifiers": 48, "random_cut": "def postprocess(paths, images, data_out, score_thresh, label_names, output_dir, handle_id, visualization=True):\n \n results = data_out.copy_to_cpu()\n lod = data_out.lod()[0]\n\n check_dir(output_dir)\n\n if paths:\n assert type(paths) is list, \"type(paths) is not list.\"\n if handle_id < len(paths):\n unhandled_paths = paths[handle_id:]\n unhandled_paths_num = len(unhandled_paths)\n else:\n unhandled_paths_num = 0\n if images is not None:\n if handle_id < len(images):\n unhandled_paths = None\n unhandled_paths_num = len(images) - handle_id\n else:\n unhandled_paths_num = 0\n\n output = list()\n for index in range(len(lod) - 1):\n output_i = {'data': []}\n if unhandled_paths and index < unhandled_paths_num:\n org_img_path = unhandled_paths[index]\n org_img = Image.open(org_img_path)\n else:\n org_img = images[index - unhandled_paths_num]\n org_img = org_img.astype(np.uint8)\n org_img = Image.fromarray(org_img[:, :, ::-1])\n if visualization:\n org_img_path = get_save_image_name(org_img, output_dir, 'image_numpy_{}'.format((handle_id + index)))\n 
org_img.save(org_img_path)\n org_img_height = org_img.height\n org_img_width = org_img.width\n result_i = results[lod[index]:lod[index + 1]]\n for row in result_i:\n if len(row) != 6:\n continue\n if row[1] < score_thresh:\n continue\n category_id = int(row[0])\n confidence = row[1]\n bbox = row[2:]\n dt = {}\n dt['label'] = label_names[category_id]\n dt['confidence'] = float(confidence)\n dt['left'], dt['top'], dt['right'], dt['bottom'] = clip_bbox(bbox, org_img_width, org_img_height)\n output_i['data'].append(dt)\n\n output.append(output_i)\n if visualization:\n output_i['save_path'] = draw", "d_id": 10241, "documentation": { "docstring": "\n postprocess the lod_tensor produced by Executor.run\n\n Args:\n paths (list[str]): The paths of images.\n images (list(numpy.ndarray)): images data, shape of each is [H, W, C]\n data_out (lod_tensor): data output of predictor.\n output_dir (str): The path to store output images.\n visualization (bool): Whether to save image or not.\n score_thresh (float): the low limit of bounding box.\n label_names (list[str]): label names.\n handle_id (int): The number of images that have been handled.\n\n Returns:\n res (list[dict]): The result of vehicles detecion. keys include 'data', 'save_path', the corresponding value is:\n data (dict): the result of object detection, keys include 'left', 'top', 'right', 'bottom', 'label', 'confidence', the corresponding value is:\n left (float): The X coordinate of the upper left corner of the bounding box;\n top (float): The Y coordinate of the upper left corner of the bounding box;\n right (float): The X coordinate of the lower right corner of the bounding box;\n bottom (float): The Y coordinate of the lower right corner of the bounding box;\n label (str): The label of detection result;\n confidence (float): The confidence of detection result.\n save_path (str): The path to save output images.\n ", "n_words": 181, "vocab_size": 92, "n_whitespaces": 369, "language": "en" } }, { "id": 167616, "commit_id": "f538568afc2c76c2d738d32e3544cf9fe6742960", "repo": "pandas", "path": "pandas/conftest.py", "file_name": "conftest.py", "fun_name": "series_with_multilevel_index", "commit_message": "TYP: misc return type annotations (#47558)", "code": "def series_with_multilevel_index() -> Series:\n \n arrays = [\n [\"bar\", \"bar\", \"baz\", \"baz\", \"qux\", \"qux\", \"foo\", \"foo\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ]\n tuples = zip(*arrays)\n index = MultiIndex.from_tuples(tuples)\n data = np.random.randn(8)\n ser = Series(data, index=index)\n ser[3] = np.NaN\n return ser\n\n\n_narrow_series = {\n f\"{dtype.__name__}-series\": tm.make_rand_series(name=\"a\", dtype=dtype)\n for dtype in tm.NARROW_NP_DTYPES\n}\n\n\n_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}\n\n\n@pytest.fixture(params=_index_or_series_objs.keys())", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture(params=_index_or_series_objs.keys())", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 102, "n_words": 59, "vocab_size": 45, "complexity": 1, "nloc": 14, "token_counts": 92, "n_ast_nodes": 249, "n_identifiers": 28, "random_cut": "def series_with_multilevel_index() -> Series:\n \n arrays = [\n [\"bar\", \"bar\", \"baz\", \"baz\", \"qux\", \"qux\", \"foo\", \"foo\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ]\n tuples = zip(*arrays)\n index = MultiIndex.from_tuples(tuples)\n da", "d_id": 40068, "documentation": 
{ "docstring": "\n Fixture with a Series with a 2-level MultiIndex.\n ", "n_words": 8, "vocab_size": 6, "n_whitespaces": 15, "language": "en" } }, { "id": 307668, "commit_id": "5cccb248307d138a33c353544c57dc997b4fe917", "repo": "core", "path": "homeassistant/components/group/__init__.py", "file_name": "__init__.py", "fun_name": "_async_stop", "commit_message": "Improve type hints in group (#78350)", "code": "def _async_stop(self) -> None:\n \n if self._async_unsub_state_changed:\n self._async_unsub_state_changed()\n self._async_unsub_state_changed = None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 8, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 3, "random_cut": "def _async_stop(self) -> None:\n \n if self._async_unsub_state_changed:\n self._async_unsub_state_changed()\n self._async_unsub_state_ch", "d_id": 106436, "documentation": { "docstring": "Unregister the group from Home Assistant.\n\n This method must be run in the event loop.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 56814, "commit_id": "ef032ee4a8f5d357a6e8dadf4021177ccc71f767", "repo": "prefect", "path": "tests/conftest.py", "file_name": "conftest.py", "fun_name": "test_database_connection_url", "commit_message": ":facepalm: I got bitten by the async fixture context issue. Fixed and added comments to help future developers.", "code": "def test_database_connection_url(generate_test_database_connection_url):\n \n url = generate_test_database_connection_url\n if url is None:\n yield None\n else:\n # TODO: https://github.com/PrefectHQ/orion/issues/2045\n # Also temporarily override the environment variable, so that child\n # subprocesses that we spin off are correctly configured as well\n original_envvar = os.environ.get(\"PREFECT_ORION_DATABASE_CONNECTION_URL\")\n os.environ[\"PREFECT_ORION_DATABASE_CONNECTION_URL\"] = url\n\n with temporary_settings({PREFECT_ORION_DATABASE_CONNECTION_URL: url}):\n yield url\n\n os.environ[\"PREFECT_ORION_DATABASE_CONNECTION_URL\"] = original_envvar\n\n\n@pytest.fixture(scope=\"session\")", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@pytest.fixture(scope=\"session\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 129, "n_words": 51, "vocab_size": 39, "complexity": 2, "nloc": 10, "token_counts": 56, "n_ast_nodes": 122, "n_identifiers": 12, "random_cut": "def test_database_connection_url(generate_test_database_connection_url):\n \n url = generate_test_database_connection_url\n if url is None:\n yield None\n else:\n # TODO: https://github.com/PrefectHQ/orion/issues/2045\n # Also temporarily override the environment variable, so that child\n # subprocesses that we spin off are correctly configured as well\n original_envvar = os.environ.get(\"PREFECT_ORION_DATABASE_CONNECTION_URL\")\n os.environ[\"PREFECT_ORION_DATABASE_CONNECTION_URL\"] = url\n\n with temporary_settings({PREFECT_ORION_DATABASE_CONNECTION_URL: url}):\n yield url\n\n os.environ[\"PREFECT_ORION_DATABASE_CONNECTION_URL\"] = original_envvar\n\n\n@pytest.", "d_id": 11567, "documentation": { "docstring": "\n Update the setting for the database connection url to the generated value from\n `generate_test_database_connection_url`\n\n This _must_ be separate from the generation of the test url because async fixtures\n are run in a separate context from the test suite.\n ", 
"n_words": 38, "vocab_size": 28, "n_whitespaces": 54, "language": "en" } }, { "id": 125665, "commit_id": "8d7b865614f3635def12c42b653f8acd8b4ae56a", "repo": "ray", "path": "python/ray/tune/examples/optuna_define_by_run_example.py", "file_name": "optuna_define_by_run_example.py", "fun_name": "define_by_run_func", "commit_message": "[air/tuner/docs] Update docs for Tuner() API 2a: Tune examples (non-docs) (#26931)\n\nSplitting up #26884: This PR includes changes to use Tuner() instead of tune.run() for all examples included in python/ray/tune/examples\r\n\r\nSigned-off-by: xwjiang2010 \r\nSigned-off-by: Kai Fricke \r\n\r\nCo-authored-by: xwjiang2010 \r\nCo-authored-by: Richard Liaw ", "code": "def define_by_run_func(trial) -> Optional[Dict[str, Any]]:\n \n # This param is not used in the objective function.\n activation = trial.suggest_categorical(\"activation\", [\"relu\", \"tanh\"])\n trial.suggest_float(\"width\", 0, 20)\n trial.suggest_float(\"height\", -100, 100)\n\n # Define-by-run allows for conditional search spaces.\n if activation == \"relu\":\n trial.suggest_float(\"mult\", 1, 2)\n\n # Return all constants in a dictionary.\n return {\"steps\": 100}\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 84, "n_words": 50, "vocab_size": 46, "complexity": 2, "nloc": 18, "token_counts": 72, "n_ast_nodes": 127, "n_identifiers": 9, "random_cut": "def define_by_run_func(trial) -> Optional[Dict[str, Any]]:\n \n # This param is not used in the objective function.\n activation = trial.suggest_categorical(\"activation\", [\"relu\", \"tanh\"])\n trial.suggest_float(\"width\", 0, 20)\n trial.suggest_float(\"height\", -100, 100)\n\n # Define-by-run allows for conditional search spaces.\n if activation == \"relu\":\n trial.suggest_float(\"", "d_id": 27941, "documentation": { "docstring": "Define-by-run function to create the search space.\n\n Ensure no actual computation takes place here. 
That should go into\n the trainable passed to ``Tuner`` (in this example, that's\n ``easy_objective``).\n\n For more information, see https://optuna.readthedocs.io/en/stable\\\n/tutorial/10_key_features/002_configurations.html\n\n This function should either return None or a dict with constant values.\n ", "n_words": 46, "vocab_size": 42, "n_whitespaces": 63, "language": "en" } }, { "id": 181843, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/base.py", "file_name": "base.py", "fun_name": "_compile_to_sklearn", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def _compile_to_sklearn(self, expr):\n \n sklearn_pipeline_str = generate_pipeline_code(\n expr_to_tree(expr, self._pset), self.operators\n )\n sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context)\n sklearn_pipeline.memory = self._memory\n if self.random_state:\n # Fix random state when the operator allows\n set_param_recursive(\n sklearn_pipeline.steps, \"random_state\", self.random_state\n )\n return sklearn_pipeline\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 142, "n_words": 34, "vocab_size": 30, "complexity": 2, "nloc": 11, "token_counts": 61, "n_ast_nodes": 97, "n_identifiers": 16, "random_cut": "def _compile_to_sklearn(self, expr):\n \n sklearn_pipeline_str = generate_pipeline_code(\n expr_to_tree(expr, self._pset), self.operators\n )\n sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context)\n sklearn_pipeline.memory = self._memory\n if self.random_state:\n # Fix random sta", "d_id": 43616, "documentation": { "docstring": "Compile a DEAP pipeline into a sklearn pipeline.\n\n Parameters\n ----------\n expr: DEAP individual\n The DEAP pipeline to be compiled\n\n Returns\n -------\n sklearn_pipeline: sklearn.pipeline.Pipeline\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 83, "language": "en" } }, { "id": 196402, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/repmatrix.py", "file_name": "repmatrix.py", "fun_name": "equals", "commit_message": "Moved imports to higher level", "code": "def equals(self, other, failing_expression=False):\n \n if self.shape != getattr(other, 'shape', None):\n return False\n\n rv = True\n for i in range(self.rows):\n for j in range(self.cols):\n ans = self[i, j].equals(other[i, j], failing_expression)\n if ans is False:\n return False\n elif ans is not True and rv is True:\n rv = ans\n return rv\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 189, "n_words": 49, "vocab_size": 32, "complexity": 7, "nloc": 12, "token_counts": 93, "n_ast_nodes": 141, "n_identifiers": 13, "random_cut": "def equals(self, other, failing_expression=False):\n \n if self.shape != getattr(other, 'shape', None):\n retu", "d_id": 47902, "documentation": { "docstring": "Applies ``equals`` to corresponding elements of the matrices,\n trying to prove that the elements are equivalent, returning True\n if they are, False if any pair is not, and None (or the first\n failing expression if failing_expression is True) if it cannot\n be decided if the expressions are equivalent or not. 
This is, in\n general, an expensive operation.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> from sympy.abc import x\n >>> A = Matrix([x*(x - 1), 0])\n >>> B = Matrix([x**2 - x, 0])\n >>> A == B\n False\n >>> A.simplify() == B.simplify()\n True\n >>> A.equals(B)\n True\n >>> A.equals(2)\n False\n\n See Also\n ========\n sympy.core.expr.Expr.equals\n ", "n_words": 103, "vocab_size": 72, "n_whitespaces": 264, "language": "en" } }, { "id": 251177, "commit_id": "fdde9ba3b3caaa2654048cec0af07bfcc3a6a3f8", "repo": "mitmproxy", "path": "mitmproxy/addons/blocklist.py", "file_name": "blocklist.py", "fun_name": "load", "commit_message": "use Python 3.9+ typing", "code": "def load(self, loader):\n loader.add_option(\n \"block_list\", Sequence[str], [],\n \n )\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 44, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 11, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 6, "random_cut": "def load(self, loader):\n loader.add_option(\n \"b", "d_id": 73623, "documentation": { "docstring": "\n Block matching requests and return an empty response with the specified HTTP status.\n Option syntax is \"/flow-filter/status-code\", where flow-filter describes\n which requests this rule should be applied to and status-code is the HTTP status code to return for\n blocked requests. The separator (\"/\" in the example) can be any character.\n Setting a non-standard status code of 444 will close the connection without sending a response.\n ", "n_words": 65, "vocab_size": 52, "n_whitespaces": 132, "language": "en" } }, { "id": 9778, "commit_id": "77c3a7ff5254346146d0e9eedf8e84fb6d577878", "repo": "gensim", "path": "gensim/models/translation_matrix.py", "file_name": "translation_matrix.py", "fun_name": "train", "commit_message": "Replace np.multiply with np.square and copyedit in translation_matrix.py (#3374)\n\n* Replace np.multiply with np.square and copyedit\r\n\r\n* Copyedit translation_matrix.py\r\n\r\nCo-authored-by: Michael Penkov ", "code": "def train(self, tagged_docs):\n \n m1 = [self.source_lang_vec.dv[item.tags].flatten() for item in tagged_docs]\n m2 = [self.target_lang_vec.dv[item.tags].flatten() for item in tagged_docs]\n\n self.translation_matrix = np.linalg.lstsq(m2, m1, -1)[0]\n return self.translation_matrix\n", "url": "https://github.com/RaRe-Technologies/gensim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 59, "n_words": 24, "vocab_size": 17, "complexity": 3, "nloc": 5, "token_counts": 76, "n_ast_nodes": 116, "n_identifiers": 15, "random_cut": "def train(self, tagged_docs):\n \n m1 = [self.source_lang_vec.dv[item.tags].flatten() for item in tagged_docs]\n m2 = [self.ta", "d_id": 1691, "documentation": { "docstring": "Build the translation matrix to map from the source model's vectors to target model's vectors\n\n Parameters\n ----------\n tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`, Documents\n that will be used for training, both the source language document vector and\n target language document vector trained on those tagged documents.\n\n Returns\n -------\n numpy.ndarray\n Translation matrix that maps from the source model's vectors to target model's vectors.\n\n ", "n_words": 61, "vocab_size": 41, "n_whitespaces": 143, "language": "en" } }, { "id": 62568, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", 
"path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/serializer.py", "file_name": "serializer.py", "fun_name": "serialize", "commit_message": "upd; format", "code": "def serialize(input, tree=\"etree\", encoding=None, **serializer_opts):\n \n # XXX: Should we cache this?\n walker = treewalkers.getTreeWalker(tree)\n s = HTMLSerializer(**serializer_opts)\n return s.render(walker(input), encoding)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 4, "token_counts": 44, "n_ast_nodes": 73, "n_identifiers": 11, "random_cut": "def serialize(input, tree=\"etree\", encoding=None, **serializer_opts):\n \n # XXX: Should we cache this?\n walker = treewalkers.getTreeWalker(tree)\n s = HTMLSerializer(**serializer_opts)\n return s.render(walker(input), encoding)\n\n", "d_id": 12993, "documentation": { "docstring": "Serializes the input token stream using the specified treewalker\n\n :arg input: the token stream to serialize\n\n :arg tree: the treewalker to use\n\n :arg encoding: the encoding to use\n\n :arg serializer_opts: any options to pass to the\n :py:class:`html5lib.serializer.HTMLSerializer` that gets created\n\n :returns: the tree serialized as a string\n\n Example:\n\n >>> from html5lib.html5parser import parse\n >>> from html5lib.serializer import serialize\n >>> token_stream = parse('
<html><body><p>Hi!</p></body></html>')\n >>> serialize(token_stream, omit_optional_tags=False)\n '<html><head></head><body><p>Hi!</p></body></html>
    '\n\n ", "n_words": 66, "vocab_size": 43, "n_whitespaces": 109, "language": "en" } }, { "id": 10563, "commit_id": "cea300655ed8be70d74c390ca12e8b09fb741665", "repo": "jina", "path": "jina/parsers/__init__.py", "file_name": "__init__.py", "fun_name": "set_client_cli_parser", "commit_message": "refactor: use absolute imports (#4167)", "code": "def set_client_cli_parser(parser=None):\n \n if not parser:\n from jina.parsers.base import set_base_parser\n\n parser = set_base_parser()\n\n from jina.parsers.peapods.runtimes.remote import mixin_client_gateway_parser\n from jina.parsers.client import (\n mixin_client_features_parser,\n mixin_comm_protocol_parser,\n )\n\n mixin_client_gateway_parser(parser)\n mixin_client_features_parser(parser)\n mixin_comm_protocol_parser(parser)\n\n return parser\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 83, "n_words": 28, "vocab_size": 23, "complexity": 2, "nloc": 13, "token_counts": 64, "n_ast_nodes": 99, "n_identifiers": 13, "random_cut": "def set_client_cli_parser(parser=None):\n \n if not parser:\n from jina.parsers.base import set_base_parser\n\n parser = set_base_parser()\n\n from jina.parsers.peapods.runtimes.remote import mixin_client_gateway_parser\n from jina.parsers.client import (\n mixin_client_features_parser,\n mixin_comm_protocol_parser,\n )\n\n mixin_client_gateway_parser(parser)\n mixin_client_features_parser(parser)\n mixin_comm_protocol_parser(parser)\n\n ", "d_id": 1853, "documentation": { "docstring": "Set the parser for the cli client\n\n :param parser: an optional existing parser to build upon\n :return: the parser\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 28, "language": "en" } }, { "id": 27924, "commit_id": "67492396aa41d068cac82e8fa328f218b5951d13", "repo": "saleor", "path": "saleor/graphql/discount/mutations/sale_create.py", "file_name": "sale_create.py", "fun_name": "send_sale_toggle_notification", "commit_message": "New event for starting and ending sales (#10110)\n\n* Add sale started and sale ended webhooks\r\n\r\n* Add started_notification_sent and ended_notification_sent flags to Sale model\r\n\r\n* Add sale_webhook_schedule\r\n\r\n* Add send_sale_started_and_sale_ended_notifications discount task\r\n\r\n* Add tests for discount tasks\r\n\r\n* Move sale task celery beat schedule to settings\r\n\r\n* Add tests for sale_webhook_schedule\r\n\r\n* Add sale_started and sale_ended methods to PluginSample\r\n\r\n* Update send_sale_started_and_sale_ended_notifications logging\r\n\r\n* Update SaleUpdate mutation - ensure the notification is sent and the flag is changed if needed\r\n\r\n* Update SaleCreate mutation - send sale_creatd and sale_ended notifications\r\n\r\n* Optimize fetch_catalogue_info\r\n\r\n* Clean up\r\n\r\n* Apply code review suggestions\r\n\r\n* Add SALE_TOGGLE webhook\r\n\r\n* Use sale_toggle webhook instead of sale_started and sale_ended\r\n\r\n* Delete sale_started and sale_eded wbhooks\r\n\r\n* Drop notification flags from Sale model\r\n\r\n* Add missing docstrings and comments\r\n\r\n* Fix failing tests\r\n\r\n* Update changelog\r\n\r\n* Add description for SaleToggle event type\r\n\r\n* Update discount task and webhook schedule\r\n\r\n* Set notification_sent_datetime to current date by default\r\n\r\n* Fix typo in comment", "code": "def send_sale_toggle_notification(info, instance, catalogue):\n \n manager = info.context.plugins\n now = datetime.now(pytz.utc)\n\n start_date = 
instance.start_date\n end_date = instance.end_date\n\n if (start_date and start_date <= now) and (not end_date or not end_date <= now):\n manager.sale_toggle(instance, catalogue)\n instance.notification_sent_datetime = now\n instance.save(update_fields=[\"notification_sent_datetime\"])\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 111, "n_words": 36, "vocab_size": 26, "complexity": 5, "nloc": 9, "token_counts": 79, "n_ast_nodes": 128, "n_identifiers": 17, "random_cut": "def send_sale_toggle_notification(info, instance, catalogue):\n \n manager = info.context.plugins\n now = datetime.now(pytz.utc)\n\n start_date = instance.start_date\n end_date = instance.end_date\n\n if (start_date and start_date <= now) and (not end_date or not end_date <= now):\n manager.sale_toggle(instance, catalogue)\n ", "d_id": 5141, "documentation": { "docstring": "Send a notification about starting or ending sale if it hasn't been sent yet.\n\n Send the notification when the start date is before the current date and the\n sale is not already finished.\n ", "n_words": 33, "vocab_size": 25, "n_whitespaces": 54, "language": "en" } }, { "id": 119028, "commit_id": "2c20d82776fea482aaf52e18ebad4f7fce5c3a81", "repo": "jax", "path": "jax/experimental/sparse/bcoo.py", "file_name": "bcoo.py", "fun_name": "bcoo_todense", "commit_message": "[sparse] generalize metadata argument in BCOO primitives", "code": "def bcoo_todense(data, indices, *, spinfo):\n \n return bcoo_todense_p.bind(jnp.asarray(data), jnp.asarray(indices), spinfo=spinfo)\n\n@bcoo_todense_p.def_impl", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@bcoo_todense_p.def_impl", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 11, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 35, "n_ast_nodes": 61, "n_identifiers": 9, "random_cut": "def bcoo_todense(data, indices, *, spinfo):\n \n return bcoo_todense_p.bind(jnp.asarray(data), jnp.asarray(indices), spinfo=spin", "d_id": 26536, "documentation": { "docstring": "Convert batched sparse matrix to a dense matrix.\n\n Args:\n data : array of shape ``batch_dims + (nse,) + block_dims``.\n indices : array of shape ``batch_dims + (n_sparse, nse)``\n spinfo : BCOOInfo. In particular, this includes the shape\n of the matrix, which is equal to ``batch_dims + sparse_dims + block_dims``\n where ``len(sparse_dims) == n_sparse``\n\n Returns:\n mat : array with specified shape and dtype matching ``data``\n ", "n_words": 64, "vocab_size": 46, "n_whitespaces": 89, "language": "en" } }, { "id": 135648, "commit_id": "b84dac2609bd587c43ed17bb6fa18fb7241a41de", "repo": "ray", "path": "rllib/utils/actor_manager.py", "file_name": "actor_manager.py", "fun_name": "ignore_ray_errors", "commit_message": "Refactor ActorManager to store underlying remote actors in dict. 
(#29953)\n\nSigned-off-by: Jun Gong ", "code": "def ignore_ray_errors(self) -> Iterator[ResultOrError]:\n \n return self._Iterator(\n [r for r in self.result_or_errors if not isinstance(r.get(), RayError)]\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 48, "n_words": 16, "vocab_size": 16, "complexity": 3, "nloc": 10, "token_counts": 38, "n_ast_nodes": 61, "n_identifiers": 10, "random_cut": "def ignore_ray_errors(self) -> Iterator[ResultOrError]:\n \n return self._Iterator(\n [r for r in self.result_or_errors if not isinstance(r.ge", "d_id": 30685, "documentation": { "docstring": "Return an iterator over the results, skipping only Ray errors.\n\n Similar to ignore_errors, but only skips Errors raised from the\n Ray framework. This is useful for application that wants to handle\n errors from user code differently.\n ", "n_words": 36, "vocab_size": 31, "n_whitespaces": 64, "language": "en" } }, { "id": 321782, "commit_id": "ec8eebf99640d5a73072d05e73c6ea9b2ebea556", "repo": "qutebrowser", "path": "qutebrowser/utils/usertypes.py", "file_name": "usertypes.py", "fun_name": "certificate_was_accepted", "commit_message": "lint: Fix remaining pylint issues", "code": "def certificate_was_accepted(self) -> None:\n \n if not self.is_overridable():\n return False\n if self._certificate_accepted is None:\n raise ValueError(\"No decision taken yet\")\n return self._certificate_accepted\n\n\n@dataclasses.dataclass", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "@dataclasses.dataclass", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 70, "n_words": 21, "vocab_size": 17, "complexity": 3, "nloc": 7, "token_counts": 34, "n_ast_nodes": 67, "n_identifiers": 7, "random_cut": "def certificate_was_accepted(self) -> None:\n \n if not self.is_overridable():\n return False\n if self._certificate_accepted is None:\n raise ValueError(\"No decision taken yet\")\n return self._certificate_accepted\n\n\n", "d_id": 117906, "documentation": { "docstring": "Check whether the certificate was accepted by the user.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 153191, "commit_id": "8d1004fdbdaa05700613c8e6287641a732acf606", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py", "file_name": "virtual_partition.py", "fun_name": "mask", "commit_message": "FIX-#3675: Expand virtual partitioning utility (#3886)\n\nCo-authored-by: mvashishtha \r\nCo-authored-by: jeffreykennethli \r\nCo-authored-by: Anatoly Myachev \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Mahesh Vashishtha \r\nCo-authored-by: Naren Krishna <92325366+naren-ponder@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Dmitry Chigarev <62142979+dchigarev@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Doris Lee \r\nCo-authored-by: Aditya Parameswaran \r\nCo-authored-by: Rehan Sohail Durrani \r\nCo-authored-by: Susmit Vengurlekar \r\nSigned-off-by: Devin Petersohn ", "code": "def mask(self, row_indices, col_indices):\n \n return (\n self.force_materialization()\n .list_of_partitions_to_combine[0]\n .mask(row_indices, col_indices)\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 65, "n_words": 11, 
"vocab_size": 11, "complexity": 1, "nloc": 6, "token_counts": 30, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def mask(self, row_indices, col_indices):\n ", "d_id": 35292, "documentation": { "docstring": "\n Create (synchronously) a mask that extracts the indices provided.\n\n Parameters\n ----------\n row_indices : list-like, slice or label\n The row labels for the rows to extract.\n col_indices : list-like, slice or label\n The column labels for the columns to extract.\n\n Returns\n -------\n PandasOnRayDataframeVirtualPartition\n A new ``PandasOnRayDataframeVirtualPartition`` object,\n materialized.\n ", "n_words": 47, "vocab_size": 35, "n_whitespaces": 155, "language": "en" } }, { "id": 198386, "commit_id": "7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c", "repo": "sympy", "path": "sympy/integrals/intpoly.py", "file_name": "intpoly.py", "fun_name": "left_integral3D", "commit_message": "Cleanup loops and ranges", "code": "def left_integral3D(facets, index, expr, vertices, hp_param, degree):\n \n value = S.Zero\n facet = facets[index]\n x0 = vertices[facet[0]]\n facet_len = len(facet)\n for i, fac in enumerate(facet):\n side = (vertices[fac], vertices[facet[(i + 1) % facet_len]])\n value += distance_to_side(x0, side, hp_param[0]) * \\\n lineseg_integrate(facet, i, side, expr, degree)\n return value\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 92, "n_words": 46, "vocab_size": 37, "complexity": 2, "nloc": 10, "token_counts": 103, "n_ast_nodes": 149, "n_identifiers": 20, "random_cut": "def left_integral3D(facets, index, expr, vertices, hp_param, degree):\n \n value = S.Zero\n facet = facets[index]\n x0 = vertices[fac", "d_id": 48898, "documentation": { "docstring": "Computes the left integral of Eq 10 in Chin et al.\n\n Explanation\n ===========\n\n For the 3D case, this is the sum of the integral values over constituting\n line segments of the face (which is accessed by facets[index]) multiplied\n by the distance between the first point of facet and that line segment.\n\n Parameters\n ==========\n\n facets :\n List of faces of the 3-Polytope.\n index :\n Index of face over which integral is to be calculated.\n expr :\n Input polynomial.\n vertices :\n List of vertices that constitute the 3-Polytope.\n hp_param :\n The hyperplane parameters of the face.\n degree :\n Degree of the ``expr``.\n\n Examples\n ========\n\n >>> from sympy.integrals.intpoly import left_integral3D\n >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\\\n (5, 0, 5), (5, 5, 0), (5, 5, 5)],\\\n [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\\\n [3, 1, 0, 2], [0, 4, 6, 2]]\n >>> facets = cube[1:]\n >>> vertices = cube[0]\n >>> left_integral3D(facets, 3, 1, vertices, ([0, -1, 0], -5), 0)\n -50\n ", "n_words": 177, "vocab_size": 108, "n_whitespaces": 333, "language": "en" } }, { "id": 32887, "commit_id": "34aad0dac000508015d09ed7cf7c88adb5a0e308", "repo": "transformers", "path": "src/transformers/models/deberta/modeling_tf_deberta.py", "file_name": "modeling_tf_deberta.py", "fun_name": "xdropout", "commit_message": "TF: XLA-trainable DeBERTa v2 (#18546)\n\n* fix deberta issues\r\n\r\n* add different code paths for gpu and tpu\r\n\r\n* shorter gpu take along axis\r\n\r\n* Stable Dropout without tf cond\r\n\r\n* variable must be float", "code": "def xdropout(self, inputs):\n \n mask = tf.cast(\n 1\n - tf.compat.v1.distributions.Bernoulli(probs=1.0 - 
self.drop_prob).sample(sample_shape=shape_list(inputs)),\n tf.bool,\n )\n scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32)\n if self.drop_prob > 0:\n inputs = tf.where(mask, 0.0, inputs) * scale\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 111, "n_words": 32, "vocab_size": 27, "complexity": 2, "nloc": 11, "token_counts": 105, "n_ast_nodes": 143, "n_identifiers": 21, "random_cut": "def xdropout(self, inputs):\n \n mask = tf.cast(\n 1\n - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),\n tf.bool,\n ", "d_id": 6017, "documentation": { "docstring": "\n Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 32, "language": "en" } }, { "id": 118955, "commit_id": "0f76064dbb9b9405173effe7f872aa8a8dba69cc", "repo": "streamlit", "path": "lib/tests/streamlit/legacy_add_rows_test.py", "file_name": "legacy_add_rows_test.py", "fun_name": "test_deltas_that_melt_dataframes", "commit_message": "Remove legacy \"`add_rows` coalescing\" from ForwardMsgQueue (#4485)\n\nRemoves the `add_rows` legacy DataFrame concatenation implementation _from Python_. (To be clear: `add_rows` still works for legacy DataFrames, but the logic is frontend-only now. This is already how Arrow DataFrame concatenation is implemented.)\r\n\r\n### Background\r\n\r\n- An app's script sends `Delta` messages to the frontend by sticking them in the `ForwardMsgQueue`.\r\n- Generally, `ForwardMsgQueue` has either 0 or 1 messages in it, because the main thread is constantly emptying the queue and sending its contents to the frontend, so messages don't have time to back up. However, sometimes 2+ messages will accumulate before the queue is emptied.\r\n- `ForwardMsgQueue` has a couple optimizations where it tries to avoid sending out redundant messages to the frontend\r\n- One of those optimizations relates to `add_rows`: for legacy DataFrame serialization *only*, if the queue has a DataFrame message *and* an add_rows message that appends to that same DataFrame, it will perform the add_rows *operation* on the original message and skip sending the add_rows message. (Again, this only applies to legacy DataFrame serialization - by default, apps use Arrow for DataFrame serialization, and this add_rows optimization doesn't exist for that code path: add_rows is *always* applied on the frontend for Arrow.)\r\n- `add_rows` will raise an error if the two DataFrames being concatenated are incompatible (for example, if they have different shapes).\r\n- Because `ForwardMsgQueue` currently does its enqueuing on the script thread, this isn't an issue: the script will catch the `add_rows` error and show an exception message on the frontend.\r\n\r\n### The issue\r\n\r\n- We're moving to a world where `ScriptRunner` and the main thread are kept as separate as possible.\r\n- This means that `ScriptRunner` will no longer enqueue directly into the `ForwardMsgQueue`. 
Instead, the queue (which is owned by `AppSession`) will be filled only by `AppSession`, and only on the main thread.\r\n- This means that **`add_rows` errors will no longer be caught by the script thread**, and so the script won't be able to generate an exception message to send to the frontend.\r\n- As things currently stands, this would mean that add_rows errors will just be silently swallowed by the main thread, which we don't want.\r\n\r\n### The solution + PR notes\r\n\r\n- We're just ripping out `add_rows` server-side handling for legacy DataFrames. This brings the `legacy` code path a bit closer to the `Arrow` code path and eliminates a lot of code.\r\n- The bulk of this PR is concerned with updating legacy DataFrame tests, many of which relied on `add_rows` happening on the server.\r\n- Notably, our e2e tests (with one exception) do *not* change, because there's no observable behavior changes on the frontend.\r\n- The one exception is that we did not (and still don't) have e2e tests for handling `add_rows` errors that happen on the frontend. And in fact, we do a terrible job with these errors - we just show an exception modal overlay with a bad error message and no indication that it's related to an add_rows operation. I'll create a ticket to address this (it'll require some product input.)", "code": "def test_deltas_that_melt_dataframes(self):\n \n deltas = self._get_deltas_that_melt_dataframes()\n\n for delta in deltas:\n el = delta(DATAFRAME)\n el._legacy_add_rows(NEW_ROWS)\n\n df_proto = _get_data_frame(self.get_delta_from_queue())\n\n # Test that the add_rows delta is properly melted\n rows = df_proto.data.cols[0].int64s.data\n self.assertEqual([2, 3, 4, 2, 3, 4], rows)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 122, "n_words": 35, "vocab_size": 30, "complexity": 2, "nloc": 8, "token_counts": 74, "n_ast_nodes": 116, "n_identifiers": 17, "random_cut": "def test_deltas_that_melt_dataframes(self):\n \n deltas = self._get_deltas_that_melt_dataframe", "d_id": 26500, "documentation": { "docstring": "Some element types require that their dataframes are\n 'melted' (https://pandas.pydata.org/docs/reference/api/pandas.melt.html)\n before being sent to the frontend. 
Test that the melting occurs.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 43, "language": "en" } }, { "id": 188973, "commit_id": "46cb6c212a870b36bd0af17c48dd29f53468734b", "repo": "psutil", "path": "psutil/_pslinux.py", "file_name": "_pslinux.py", "fun_name": "sensors_temperatures", "commit_message": "[Linux] cat/bcat utils refactoring (#2053)", "code": "def sensors_temperatures():\n \n ret = collections.defaultdict(list)\n basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*')\n # CentOS has an intermediate /device directory:\n # https://github.com/giampaolo/psutil/issues/971\n # https://github.com/nicolargo/glances/issues/1060\n basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*'))\n basenames = sorted(set([x.split('_')[0] for x in basenames]))\n\n # Only add the coretemp hwmon entries if they're not already in\n # /sys/class/hwmon/\n # https://github.com/giampaolo/psutil/issues/1708\n # https://github.com/giampaolo/psutil/pull/1648\n basenames2 = glob.glob(\n '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*')\n repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/')\n for name in basenames2:\n altname = repl.sub('/sys/class/hwmon/', name)\n if altname not in basenames:\n basenames.append(name)\n\n for base in basenames:\n try:\n path = base + '_input'\n current = float(bcat(path)) / 1000.0\n path = os.path.join(os.path.dirname(base), 'name')\n unit_name = cat(path).strip()\n except (IOError, OSError, ValueError):\n # A lot of things can go wrong here, so let's just skip the\n # whole entry. Sure thing is Linux's /sys/class/hwmon really\n # is a stinky broken mess.\n # https://github.com/giampaolo/psutil/issues/1009\n # https://github.com/giampaolo/psutil/issues/1101\n # https://github.com/giampaolo/psutil/issues/1129\n # https://github.com/giampaolo/psutil/issues/1245\n # https://github.com/giampaolo/psutil/issues/1323\n continue\n\n high = bcat(base + '_max', fallback=None)\n critical = bcat(base + '_crit', fallback=None)\n label = cat(base + '_label', fallback='').strip()\n\n if high is not None:\n try:\n high = float(high) / 1000.0\n except ValueError:\n high = None\n if critical is not None:\n try:\n critical = float(critical) / 1000.0\n except ValueError:\n critical = None\n\n ret[unit_name].append((label, current, high, critical))\n\n # Indication that no sensors were detected in /sys/class/hwmon/\n if not basenames:\n basenames = glob.glob('/sys/class/thermal/thermal_zone*')\n basenames = sorted(set(basenames))\n\n for base in basenames:\n try:\n path = os.path.join(base, 'temp')\n current = float(bcat(path)) / 1000.0\n path = os.path.join(base, 'type')\n unit_name = cat(path).strip()\n except (IOError, OSError, ValueError) as err:\n debug(err)\n continue\n\n trip_paths = glob.glob(base + '/trip_point*')\n trip_points = set(['_'.join(\n os.path.basename(p).split('_')[0:3]) for p in trip_paths])\n critical = None\n high = None\n for trip_point in trip_points:\n path = os.path.join(base, trip_point + \"_type\")\n trip_type = cat(path, fallback='').strip()\n if trip_type == 'critical':\n critical = bcat(os.path.join(base, trip_point + \"_temp\"),\n fallback=None)\n elif trip_type == 'high':\n high = bcat(os.path.join(base, trip_point + \"_temp\"),\n fallback=None)\n\n if high is not None:\n try:\n high = float(high) / 1000.0\n except ValueError:\n high = None\n if critical is not None:\n try:\n critical = float(critical) / 1000.0\n except ValueError:\n critical = None\n\n ret[unit_name].append(('', current, high, critical))\n\n return 
dict(ret)\n\n", "url": "https://github.com/giampaolo/psutil.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1294, "n_words": 326, "vocab_size": 150, "complexity": 21, "nloc": 72, "token_counts": 563, "n_ast_nodes": 928, "n_identifiers": 47, "random_cut": "def sensors_temperatures():\n \n ret = collections.defaultdict(list)\n basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*')\n # CentOS has an intermediate /device directory:\n # https://github.com/giampaolo/psutil/issues/971\n # https://github.com/nicolargo/glances/issues/1060\n basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*'))\n basenames = sorted(set([x.split('_')[0] for x in basenames]))\n\n # Only add the coretemp hwmon entries if they're not already in\n # /sys/class/hwmon/\n # https://github.com/giampaolo/psutil/issues/1708\n # https://github.com/giampaolo/psutil/pull/1648\n basenames2 = glob.glob(\n '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*')\n repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/')\n for name in basenames2:\n altname = repl.sub('/sys/class/hwmon/', name)\n if altname not in basenames:\n basenames.append(name)\n\n for base in basenames:\n try:\n path = base + '_input'\n current = float(bcat(path)) / 1000.0\n path = os.path.join(os.path.dirname(base), 'name')\n unit_name = cat(path).strip()\n except (IOError, OSError, ValueError):\n # A lot of things can go wrong here, so let's just skip the\n # whole entry. Sure thing is Linux's /sys/class/hwmon really\n # is a stinky broken mess.\n # https://github.com/giampaolo/psutil/issues/1009\n # https://github.com/giampaolo/psutil/issues/1101\n # https://github.com/giampaolo/psutil/issues/1129\n # https://github.com/giampaolo/psutil/issues/1245\n # https://github.com/giampaolo/psutil/issues/1323\n continue\n\n high = bcat(base + '_max', fallback=None)\n critical = bcat(base + '_crit', fallback=None)\n label = cat(base + '_label', fallback='').strip()\n\n if high is not None:\n try:\n high = float(high) / 1000.0\n except ValueError:\n high = None\n if critical is not None:\n try:\n critical = float(critical) / 1000.0\n except ValueError:\n critical = None\n\n ret[unit_name].append((label, current, high, critical))\n\n # Indication that no sensors were detected in /sys/class/hwmon/\n if not basenames:\n basenames = glob.glob('/sys/class/thermal/thermal_zone*')\n basenames = sorted(set(basenames))\n\n for base in basenames:\n try:\n path = os.path.join(base, 'temp')\n current = float(bcat(path)) / 1000.0\n path = os.path.join(base, 'type')\n unit_name = cat(path).strip()\n except (IOError, OSError, ValueError) as err:\n debug(err)\n continue\n\n trip_paths = glob.glob(base + '/trip_point*')\n trip_points = set(['_'.join(\n os.path.basename(p).split('_')[0:3]) for p in trip_paths])\n critical = None\n hi", "d_id": 45948, "documentation": { "docstring": "Return hardware (CPU and others) temperatures as a dict\n including hardware name, label, current, max and critical\n temperatures.\n\n Implementation notes:\n - /sys/class/hwmon looks like the most recent interface to\n retrieve this info, and this implementation relies on it\n only (old distros will probably use something else)\n - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon\n - /sys/class/thermal/thermal_zone* is another one but it's more\n difficult to parse\n ", "n_words": 65, "vocab_size": 54, "n_whitespaces": 101, "language": "en" } }, { "id": 72541, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": 
"wagtail", "path": "wagtail/admin/views/pages/workflow.py", "file_name": "workflow.py", "fun_name": "preview_revision_for_task", "commit_message": "Reformat with black", "code": "def preview_revision_for_task(request, page_id, task_id):\n \n\n page = get_object_or_404(Page, id=page_id)\n task = get_object_or_404(Task, id=task_id).specific\n try:\n task_state = TaskState.objects.get(\n page_revision__page=page, task=task, status=TaskState.STATUS_IN_PROGRESS\n )\n except TaskState.DoesNotExist:\n messages.error(\n request,\n _(\n \"The page '{0}' is not currently awaiting moderation in task '{1}'.\"\n ).format(page.get_admin_display_title(), task.name),\n )\n return redirect(\"wagtailadmin_home\")\n\n revision = task_state.page_revision\n\n if not task.get_actions(page, request.user):\n raise PermissionDenied\n\n page_to_view = revision.as_page_object()\n\n # TODO: provide workflow actions within this view\n\n return page_to_view.make_preview_request(\n request,\n page.default_preview_mode,\n extra_request_attrs={\"revision_id\": revision.id},\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 223, "n_words": 68, "vocab_size": 57, "complexity": 3, "nloc": 24, "token_counts": 140, "n_ast_nodes": 221, "n_identifiers": 36, "random_cut": "def preview_revision_for_task(request, page_id, task_id):\n ", "d_id": 15901, "documentation": { "docstring": "Preview the revision linked to the in-progress TaskState of a specified Task. This enables pages in moderation\n to be edited and new TaskStates linked to the new revisions created, with preview links remaining valid", "n_words": 34, "vocab_size": 28, "n_whitespaces": 36, "language": "en" } }, { "id": 221726, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/contextlib.py", "file_name": "contextlib.py", "fun_name": "push_async_callback", "commit_message": "add python 3.10.4 for windows", "code": "def push_async_callback(self, callback, /, *args, **kwds):\n \n _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds)\n\n # We changed the signature, so using @wraps is not appropriate, but\n # setting __wrapped__ may still help with introspection.\n _exit_wrapper.__wrapped__ = callback\n self._push_exit_callback(_exit_wrapper, False)\n return callback # Allow use as a decorator\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 94, "n_words": 44, "vocab_size": 39, "complexity": 1, "nloc": 5, "token_counts": 45, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def push_async_callback(self, callback, /, *args, **kwds):\n \n _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds)\n\n # We changed the signature, so using @wraps is not appropriate, but\n # setti", "d_id": 56492, "documentation": { "docstring": "Registers an arbitrary coroutine function and arguments.\n\n Cannot suppress exceptions.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 24, "language": "en" } }, { "id": 63394, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "scanString", "commit_message": "upd; format", "code": "def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):\n \n if not self.streamlined:\n self.streamline()\n for e in self.ignoreExprs:\n 
e.streamline()\n\n if not self.keepTabs:\n instring = _ustr(instring).expandtabs()\n instrlen = len(instring)\n loc = 0\n preparseFn = self.preParse\n parseFn = self._parse\n ParserElement.resetCache()\n matches = 0\n try:\n while loc <= instrlen and matches < maxMatches:\n try:\n preloc = preparseFn(instring, loc)\n nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)\n except ParseException:\n loc = preloc + 1\n else:\n if nextLoc > loc:\n matches += 1\n yield tokens, preloc, nextLoc\n if overlap:\n nextloc = preparseFn(instring, loc)\n if nextloc > loc:\n loc = nextLoc\n else:\n loc += 1\n else:\n loc = nextLoc\n else:\n loc = preloc + 1\n except ParseBaseException as exc:\n if ParserElement.verbose_stacktrace:\n raise\n else:\n # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n if getattr(exc, '__traceback__', None) is not None:\n exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n raise exc\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 789, "n_words": 135, "vocab_size": 80, "complexity": 13, "nloc": 41, "token_counts": 217, "n_ast_nodes": 354, "n_identifiers": 35, "random_cut": "def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):\n \n if not self.streamlined:\n self.streamline()\n for e in self.ignoreExprs:\n e.streamline()\n\n if not self.keepTabs:\n instring = _ustr(instring).expandtabs()\n instrlen = len(instring)\n loc = 0\n preparseFn = self.preParse\n parseFn = self._parse\n ParserEleme", "d_id": 13285, "documentation": { "docstring": "\n Scan the input string for expression matches. Each match will return the\n matching tokens, start location, and end location. May be called with optional\n ``maxMatches`` argument, to clip scanning after 'n' matches are found. If\n ``overlap`` is specified, then overlapping matches will be reported.\n\n Note that the start and end locations are reported relative to the string\n being parsed. 
See :class:`parseString` for more information on parsing\n strings with embedded tabs.\n\n Example::\n\n source = \"sldjf123lsdjjkf345sldkjf879lkjsfd987\"\n print(source)\n for tokens, start, end in Word(alphas).scanString(source):\n print(' '*start + '^'*(end-start))\n print(' '*start + tokens[0])\n\n prints::\n\n sldjf123lsdjjkf345sldkjf879lkjsfd987\n ^^^^^\n sldjf\n ^^^^^^^\n lsdjjkf\n ^^^^^^\n sldkjf\n ^^^^^^\n lkjsfd\n ", "n_words": 99, "vocab_size": 78, "n_whitespaces": 442, "language": "en" } }, { "id": 154339, "commit_id": "39b36eb2a2e3bf3d612933e1c78545a8bb28cde4", "repo": "modin", "path": "modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py", "file_name": "partition.py", "fun_name": "add_to_apply_calls", "commit_message": "PERF-#4794: Compute caches in `_propagate_index_objs` (#4888)\n\nCo-authored-by: Mahesh Vashishtha \r\nSigned-off-by: Myachev ", "code": "def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs):\n \n return PandasOnDaskDataframePartition(\n self._data,\n call_queue=self.call_queue + [[func, args, kwargs]],\n length=length,\n width=width,\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 83, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 54, "n_ast_nodes": 76, "n_identifiers": 10, "random_cut": "def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs):\n \n return PandasOnDaskDataframePartition(\n self._data,\n call_queue=self.call_queue + [[func, args, kwargs]],\n length=length,\n width=width,\n )\n", "d_id": 35932, "documentation": { "docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n length : distributed.Future or int, optional\n Length, or reference to length, of wrapped ``pandas.DataFrame``.\n width : distributed.Future or int, optional\n Width, or reference to width, of wrapped ``pandas.DataFrame``.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnDaskDataframePartition\n A new ``PandasOnDaskDataframePartition`` object.\n\n Notes\n -----\n The keyword arguments are sent as a dictionary.\n ", "n_words": 87, "vocab_size": 54, "n_whitespaces": 259, "language": "en" } }, { "id": 34178, "commit_id": "497346d07ec39da3a7f38a7e0a67a4906c141ea3", "repo": "transformers", "path": "src/transformers/feature_extraction_utils.py", "file_name": "feature_extraction_utils.py", "fun_name": "to_json_string", "commit_message": "[ASR pipeline] correct with lm pipeline (#15200)\n\n* [ASR pipeline] correct with lm pipeline\r\n\r\n* improve error", "code": "def to_json_string(self) -> str:\n \n dictionary = self.to_dict()\n\n for key, value in dictionary.items():\n if isinstance(value, np.ndarray):\n dictionary[key] = value.tolist()\n\n # make sure private name \"_processor_class\" is correctly\n # saved as \"processor_class\"\n _processor_class = dictionary.pop(\"_processor_class\", None)\n if _processor_class is not None:\n dictionary[\"processor_class\"] = _processor_class\n\n return json.dumps(dictionary, indent=2, sort_keys=True) + \"\\n\"\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 141, "n_words": 48, "vocab_size": 40, "complexity": 4, "nloc": 
15, "token_counts": 85, "n_ast_nodes": 142, "n_identifiers": 18, "random_cut": "def to_json_string(self) -> str:\n \n dictionary = self.to_dict()\n\n for key, value in dictionary.items():\n if isinstance(value, np.ndarray):\n dictionary[key] = value.tolist()\n\n # make sure private name \"_processor_class\" is correctly\n # saved as \"processor_class\"\n _processor_class = dictionary.pop(\"_processor_class\", None)\n if _processor_class is not None:\n dictionary[\"processor_class\"] = _processor_class\n\n return json.dumps(", "d_id": 6208, "documentation": { "docstring": "\n Serializes this instance to a JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 56, "language": "en" } }, { "id": 26810, "commit_id": "f0a988e798dd86befbbf7a0eda1bc1a8cc94dda2", "repo": "saleor", "path": "saleor/core/permissions/__init__.py", "file_name": "__init__.py", "fun_name": "one_of_permissions_or_auth_filter_required", "commit_message": "Include permissions in schema descriptions of protected fields (#9428)\n\n* POC Generate permission description for queries\r\n\r\n* Use PermissionField for app queries\r\n\r\n* Rename functions checking object ownership\r\n\r\n* Switch to PermissionsField in more fields\r\n\r\n* CR fixes\r\n\r\n* Add missing descriptions", "code": "def one_of_permissions_or_auth_filter_required(context, permissions):\n \n if not permissions:\n return True\n\n authorization_filters = [\n p for p in permissions if isinstance(p, AuthorizationFilters)\n ]\n permissions = [p for p in permissions if not isinstance(p, AuthorizationFilters)]\n\n granted_by_permissions = False\n granted_by_authorization_filters = False\n\n # TODO: move this function from graphql to core\n from saleor.graphql.utils import get_user_or_app_from_context\n\n is_app = bool(getattr(context, \"app\", None))\n requestor = get_user_or_app_from_context(context)\n\n if permissions:\n perm_checks_results = []\n for permission in permissions:\n if is_app and permission == AccountPermissions.MANAGE_STAFF:\n # `MANAGE_STAFF` permission for apps is not supported, as apps using it\n # could create a staff user with full access.\n perm_checks_results.append(False)\n else:\n perm_checks_results.append(requestor.has_perm(permission))\n granted_by_permissions = any(perm_checks_results)\n\n if authorization_filters:\n auth_filters_results = []\n for p in authorization_filters:\n perm_fn = resolve_authorization_filter_fn(p)\n if perm_fn:\n res = perm_fn(context)\n auth_filters_results.append(bool(res))\n granted_by_authorization_filters = any(auth_filters_results)\n\n return granted_by_permissions or granted_by_authorization_filters\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 357, "n_words": 125, "vocab_size": 76, "complexity": 14, "nloc": 29, "token_counts": 172, "n_ast_nodes": 278, "n_identifiers": 28, "random_cut": "def one_of_permissions_or_auth_filter_required(context, permissions):\n \n if not permissions:\n return True\n\n authorization_filters = [\n p for p in permissions if isinstance(p, AuthorizationFilters)\n ]\n permissions = [p for p in permissions if not isinstance(p, AuthorizationFilters)]\n\n granted_by_permissions = False\n granted_by_authorization_filters = False\n\n # TODO: move this function from graphql to core\n from saleor.graphql.utils import get_user_or_app_from_context\n\n is_app 
= bool(getattr(context, \"app\", None))\n requestor = get_user_or_app_from_context(context)\n\n if permissions:\n perm_checks_results = []\n for permission in permissions:\n if is_app and permission == AccountPermissions.MANAGE_STAFF:\n # `MANAGE_STAFF` permission for apps is not supported, as apps using it\n # could create a staff user with full access.\n perm_ch", "d_id": 5070, "documentation": { "docstring": "Determine whether user or app has rights to perform an action.\n\n The `context` parameter is the Context instance associated with the request.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 28, "language": "en" } }, { "id": 144928, "commit_id": "4c73560b313821fbfbb8c943e02c8b298b7c1731", "repo": "ray", "path": "python/ray/_private/runtime_env/_clonevirtualenv.py", "file_name": "_clonevirtualenv.py", "fun_name": "_dirmatch", "commit_message": "[runtime env] Support clone `virtualenv` from an existing `virtualenv` (#22309)\n\nBefore this PR, we can't run ray in virtualenv, cause `runtime_env` does not support create a new virtualenv from an existing virtualenv.\r\n\r\nMore details:https://github.com/ray-project/ray/pull/21801#discussion_r796848499\r\n\r\nCo-authored-by: 捕牛 ", "code": "def _dirmatch(path, matchwith):\n \n matchlen = len(matchwith)\n if (path.startswith(matchwith)\n and path[matchlen:matchlen + 1] in [os.sep, '']):\n return True\n return False\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 19, "vocab_size": 18, "complexity": 3, "nloc": 6, "token_counts": 45, "n_ast_nodes": 73, "n_identifiers": 8, "random_cut": "def _dirmatch(path, matchwith):\n \n matchlen = len(matchwith)\n if (path.startswith(matchwith)\n ", "d_id": 33344, "documentation": { "docstring": "Check if path is within matchwith's tree.\n >>> _dirmatch('/home/foo/bar', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar/', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar2', '/home/foo/bar')\n False\n >>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar')\n False\n ", "n_words": 27, "vocab_size": 16, "n_whitespaces": 60, "language": "en" } }, { "id": 244173, "commit_id": "b403751bd409795cf63fcc6aa7ee280326358bac", "repo": "mmdetection", "path": "mmdet/models/dense_heads/tood_head.py", "file_name": "tood_head.py", "fun_name": "deform_sampling", "commit_message": "[Fix] Avoid invalid bbox after deform_sampling (#7567)\n\n* Avoid invalid bbox after deform_sampling\r\n\r\n* replace in-place opertion with torch.where, update docstring\r\n\r\n* Update", "code": "def deform_sampling(self, feat, offset):\n \n # it is an equivalent implementation of bilinear interpolation\n b, c, h, w = feat.shape\n weight = feat.new_ones(c, 1, 1, 1)\n y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c)\n return y\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 79, "n_words": 37, "vocab_size": 30, "complexity": 1, "nloc": 5, "token_counts": 57, "n_ast_nodes": 79, "n_identifiers": 13, "random_cut": "def deform_sampling(self, feat, offset):\n \n # it is an equivalent implementation of bilinear interpolation\n b, c, h, w = feat.shape\n weight = feat.new_ones(c, 1, 1, 1)\n y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c)\n return y\n", "d_id": 70270, "documentation": { "docstring": "Sampling the feature x according to offset.\n\n 
Args:\n feat (Tensor): Feature\n offset (Tensor): Spatial offset for feature sampling\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 54, "language": "en" } }, { "id": 81965, "commit_id": "68a44529b6b77d2d43d7099b654560bfd8bbf518", "repo": "awx", "path": "awxkit/awxkit/api/pages/page.py", "file_name": "page.py", "fun_name": "page_identity", "commit_message": "Register pages for the Instance peers and install bundle endpoints\n\nThis includes exposing a new interface for Page objects, Page.bytes,\nto return the full bytestring contents of the response.", "code": "def page_identity(self, response, request_json=None):\n \n request_path = response.request.path_url\n if request_path == '/migrations_notran/':\n raise exc.IsMigrating('You have been redirected to the migration-in-progress page.')\n request_method = response.request.method.lower()\n\n self.last_elapsed = response.elapsed\n\n if isinstance(request_json, dict) and 'ds' in request_json:\n ds = request_json.ds\n else:\n ds = None\n\n data = self.extract_data(response)\n exc_str = \"%s (%s) received\" % (http.responses[response.status_code], response.status_code)\n\n exception = exception_from_status_code(response.status_code)\n if exception:\n raise exception(exc_str, data)\n\n if response.status_code in (http.OK, http.CREATED, http.ACCEPTED):\n\n # Not all JSON responses include a URL. Grab it from the request\n # object, if needed.\n if 'url' in data:\n endpoint = data['url']\n else:\n endpoint = request_path\n\n data = objectify_response_json(response)\n\n if request_method in ('get', 'patch', 'put'):\n # Update existing resource and return it\n if are_same_endpoint(self.endpoint, request_path):\n self.json = data\n self.r = response\n return self\n\n registered_type = get_registered_page(request_path, request_method)\n return registered_type(self.connection, endpoint=endpoint, json=data, last_elapsed=response.elapsed, r=response, ds=ds)\n\n elif response.status_code == http.FORBIDDEN:\n if is_license_invalid(response):\n raise exc.LicenseInvalid(exc_str, data)\n elif is_license_exceeded(response):\n raise exc.LicenseExceeded(exc_str, data)\n else:\n raise exc.Forbidden(exc_str, data)\n\n elif response.status_code == http.BAD_REQUEST:\n if is_license_invalid(response):\n raise exc.LicenseInvalid(exc_str, data)\n if is_duplicate_error(response):\n raise exc.Duplicate(exc_str, data)\n else:\n raise exc.BadRequest(exc_str, data)\n else:\n raise exc.Unknown(exc_str, data)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 694, "n_words": 171, "vocab_size": 104, "complexity": 15, "nloc": 44, "token_counts": 337, "n_ast_nodes": 536, "n_identifiers": 47, "random_cut": "def page_identity(self, response, request_json=None):\n \n request_path = response.request.path_url\n if request_path == '/migrations_notran/':\n raise exc.IsMigrating('You have been redirected to the migration-in-progress page.')\n request_method = response.request.method.lower()\n\n self.last_elapsed = response.elapsed\n\n if isinstance(request_json, dict) and 'ds' in request_json:\n ds = request_json.ds\n else:\n ds = None\n\n data = self.extract_data(response)\n exc_str = \"%s (%s) received\" % (http.responses[response.status_code], response.status_code)\n\n exception = exception_from_status_code(response.status_code)\n if exception:\n raise exception(exc_str, data)\n\n if response.status_code in (http.OK, http.CREATED, http.ACCEPTED):\n\n # Not all JSON responses include a 
URL. Grab it from the request\n # object, if needed.\n if 'url' in data:\n endpoint = data['url']\n ", "d_id": 17285, "documentation": { "docstring": "Takes a `requests.Response` and\n returns a new __item_class__ instance if the request method is not a get, or returns\n a __class__ instance if the request path is different than the caller's `endpoint`.\n ", "n_words": 32, "vocab_size": 22, "n_whitespaces": 56, "language": "en" } }, { "id": 102077, "commit_id": "48c886b3dce3d3117ad16edaf35c8abd28dc51f5", "repo": "faceswap", "path": "lib/sysinfo.py", "file_name": "sysinfo.py", "fun_name": "_installed_conda", "commit_message": "Allow decoding errors", "code": "def _installed_conda(self):\n \n if not self._is_conda:\n return None\n with Popen(\"conda list\", shell=True, stdout=PIPE, stderr=PIPE) as conda:\n stdout, stderr = conda.communicate()\n if stderr:\n return \"Could not get package list\"\n installed = stdout.decode(self._encoding, errors=\"replace\").splitlines()\n return \"\\n\".join(installed)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 108, "n_words": 33, "vocab_size": 28, "complexity": 3, "nloc": 9, "token_counts": 73, "n_ast_nodes": 128, "n_identifiers": 16, "random_cut": "def _installed_conda(self):\n \n if not self._is_conda:\n return None\n with Popen(\"conda list\", shell=True, stdout=PIPE, stderr=PIPE) as conda:\n stdout, stderr = conda.communicate()\n if stderr:\n return \"Could not get package list\"\n installed = stdout.decode(self._encoding, errors=\"replace\").splitlines()\n return \"\\n\".join(installed)\n", "d_id": 21442, "documentation": { "docstring": " str: The list of installed Conda packages within Faceswap's scope. ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 39296, "commit_id": "d38dffc30c18e9e3280863b32dcc71d01757b181", "repo": "recommenders", "path": "recommenders/models/sasrec/model.py", "file_name": "model.py", "fun_name": "embedding", "commit_message": "doc", "code": "def embedding(self, input_seq):\n \n\n seq_embeddings = self.item_embedding_layer(input_seq)\n seq_embeddings = seq_embeddings * (self.embedding_dim ** 0.5)\n\n # FIXME\n positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0)\n positional_seq = tf.tile(positional_seq, [tf.shape(input_seq)[0], 1])\n positional_embeddings = self.positional_embedding_layer(positional_seq)\n\n return seq_embeddings, positional_embeddings\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 86, "n_words": 30, "vocab_size": 22, "complexity": 1, "nloc": 7, "token_counts": 86, "n_ast_nodes": 132, "n_identifiers": 14, "random_cut": "def embedding(self, input_seq):\n \n\n seq_embeddings = self.item_embedding_layer(input_seq)\n seq_embeddings = seq_embeddings * (self.embedding_dim ** 0.5)\n\n # FIXME\n positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0)\n ", "d_id": 7193, "documentation": { "docstring": "Compute the sequence and positional embeddings.\n\n Args:\n input_seq (tf.Tensor): Input sequence\n \n Returns:\n tf.Tensor, tf.Tensor:\n - Sequence embeddings.\n - Positional embeddings.\n \n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 101, "language": "en" } }, { "id": 266902, "commit_id": "6f445ca6e5c9c8b85ccc5062e00508c69ca26fde", "repo": "ansible", "path": "lib/ansible/utils/display.py", "file_name": "display.py", "fun_name": 
"display", "commit_message": "Remove obsolete Python 2.x controller code.", "code": "def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):\n \n\n nocolor = msg\n\n if not log_only:\n\n has_newline = msg.endswith(u'\\n')\n if has_newline:\n msg2 = msg[:-1]\n else:\n msg2 = msg\n\n if color:\n msg2 = stringc(msg2, color)\n\n if has_newline or newline:\n msg2 = msg2 + u'\\n'\n\n msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))\n # Convert back to text string\n # We first convert to a byte string so that we get rid of\n # characters that are invalid in the user's locale\n msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')\n\n # Note: After Display() class is refactored need to update the log capture\n # code in 'bin/ansible-connection' (and other relevant places).\n if not stderr:\n fileobj = sys.stdout\n else:\n fileobj = sys.stderr\n\n fileobj.write(msg2)\n\n try:\n fileobj.flush()\n except IOError as e:\n # Ignore EPIPE in case fileobj has been prematurely closed, eg.\n # when piping to \"head -n1\"\n if e.errno != errno.EPIPE:\n raise\n\n if logger and not screen_only:\n # We first convert to a byte string so that we get rid of\n # color and characters that are invalid in the user's locale\n msg2 = to_bytes(nocolor.lstrip(u'\\n'))\n\n # Convert back to text string\n msg2 = to_text(msg2, self._output_encoding(stderr=stderr))\n\n lvl = logging.INFO\n if color:\n # set logger level based on color (not great)\n try:\n lvl = color_to_log_level[color]\n except KeyError:\n # this should not happen, but JIC\n raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)\n # actually log\n logger.log(lvl, msg2)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 808, "n_words": 223, "vocab_size": 128, "complexity": 13, "nloc": 34, "token_counts": 229, "n_ast_nodes": 385, "n_identifiers": 36, "random_cut": "def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):\n \n\n nocolor = msg\n\n if not log_only:\n\n has_newline = msg.endswith(u'\\n')\n if has_newline:\n msg2 = msg[:-1]\n else:\n msg2 = msg\n\n if color:\n msg2 = stringc(msg2, color)\n\n if has_newline or newline:\n msg2 = msg2 + u'\\n'\n\n msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))\n # Convert back to text string\n # We first convert to a byte string so that we get rid of\n # characters that are invalid in the user's locale\n msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')\n\n # Note: After Display() class is refactored need to update the log capture\n # code in 'bin/ansible-connection' (and other relevant places).\n if not stderr:\n fileobj = sys.stdout\n else:\n fileobj = sys.stderr\n\n fileobj.write(msg2)\n\n try:\n fileobj.flush()\n except IOError as e:\n # Ignore EPIPE in case fileobj has been prematurely closed, eg.\n # when piping to \"head -n1\"\n if e.errno != errno.EPIPE:\n raise\n\n if logger and not screen_only:\n # We first convert to a byte string so that we get rid of\n # color and characters that are invalid in the u", "d_id": 78648, "documentation": { "docstring": " Display a message to the user\n\n Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 32, "language": "en" } }, { "id": 265983, "commit_id": "484efdaf75f267a43f9321b938fda1bc967b9e53", "repo": 
"netbox", "path": "netbox/extras/views.py", "file_name": "views.py", "fun_name": "get_queryset", "commit_message": "Closes #9623: Implement saved filters (#10801)\n\n* Initial work on saved filters\r\n\r\n* Return only enabled/shared filters\r\n\r\n* Add tests\r\n\r\n* Clean up filtering of usable SavedFilters", "code": "def get_queryset(self, request):\n \n queryset = SavedFilter.objects.all()\n user = request.user\n if user.is_superuser:\n return queryset\n if user.is_anonymous:\n return queryset.filter(shared=True)\n return queryset.filter(\n Q(shared=True) | Q(user=user)\n )\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 105, "n_words": 23, "vocab_size": 18, "complexity": 3, "nloc": 10, "token_counts": 62, "n_ast_nodes": 101, "n_identifiers": 13, "random_cut": "def get_queryset(self, request):\n \n queryset = SavedFilter.objects.all()\n user = request.user\n if user.is_superuser:\n return queryset\n if user.is_anonymous:\n return queryset.filter(shared=True)\n ", "d_id": 78254, "documentation": { "docstring": "\n Return only shared SavedFilters, or those owned by the current user, unless\n this is a superuser.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 264666, "commit_id": "f13a00b2dd33bffc3048c861b494096df457f212", "repo": "netbox", "path": "netbox/extras/api/views.py", "file_name": "views.py", "fun_name": "list", "commit_message": "Save old JobResults", "code": "def list(self, request):\n \n report_list = []\n report_content_type = ContentType.objects.get(app_label='extras', model='report')\n results = {\n r.name: r\n for r in JobResult.objects.filter(\n obj_type=report_content_type,\n status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES\n ).order_by('name', '-created').distinct('name').defer('data')\n }\n\n # Iterate through all available Reports.\n for module_name, reports in get_reports():\n for report in reports:\n\n # Attach the relevant JobResult (if any) to each Report.\n report.result = results.get(report.full_name, None)\n report_list.append(report)\n\n serializer = serializers.ReportSerializer(report_list, many=True, context={\n 'request': request,\n })\n\n return Response(serializer.data)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 264, "n_words": 64, "vocab_size": 54, "complexity": 4, "nloc": 18, "token_counts": 135, "n_ast_nodes": 222, "n_identifiers": 36, "random_cut": "def list(self, request):\n ", "d_id": 77779, "documentation": { "docstring": "\n Compile all reports and their related results (if any). 
Result data is deferred in the list view.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 32, "language": "en" } }, { "id": 265967, "commit_id": "484efdaf75f267a43f9321b938fda1bc967b9e53", "repo": "netbox", "path": "netbox/extras/filtersets.py", "file_name": "filtersets.py", "fun_name": "_usable", "commit_message": "Closes #9623: Implement saved filters (#10801)\n\n* Initial work on saved filters\r\n\r\n* Return only enabled/shared filters\r\n\r\n* Add tests\r\n\r\n* Clean up filtering of usable SavedFilters", "code": "def _usable(self, queryset, name, value):\n \n user = self.request.user if self.request else None\n if not user or user.is_anonymous:\n if value:\n return queryset.filter(enabled=True, shared=True)\n return queryset.filter(Q(enabled=False) | Q(shared=False))\n if value:\n return queryset.filter(enabled=True).filter(Q(shared=True) | Q(user=user))\n return queryset.filter(Q(enabled=False) | Q(Q(shared=False) & ~Q(user=user)))\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 121, "n_words": 38, "vocab_size": 27, "complexity": 6, "nloc": 9, "token_counts": 127, "n_ast_nodes": 199, "n_identifiers": 12, "random_cut": "def _usable(self, queryset, name, value):\n \n user = self.request.user if self.request else None\n if not user or user.is_anonymous:\n if value:\n return queryset.filter(enabled=True, shared=True)\n return queryset.filter(Q(enabled=False) | Q(shared=False))\n ", "d_id": 78253, "documentation": { "docstring": "\n Return only SavedFilters that are both enabled and are shared (or belong to the current user).\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 65853, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/education/api.py", "file_name": "api.py", "fun_name": "get_current_enrollment", "commit_message": "style: format code with black", "code": "def get_current_enrollment(student, academic_year=None):\n\tcurrent_academic_year = academic_year or frappe.defaults.get_defaults().academic_year\n\tprogram_enrollment_list = frappe.db.sql(\n\t\t,\n\t\t(student, current_academic_year),\n\t\tas_dict=1,\n\t)\n\n\tif program_enrollment_list:\n\t\treturn program_enrollment_list[0]\n\telse:\n\t\treturn None\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 23, "vocab_size": 21, "complexity": 3, "nloc": 19, "token_counts": 55, "n_ast_nodes": 85, "n_identifiers": 11, "random_cut": "def get_current_enrollment(student, academic_year=None):\n\tcurrent_academic_year = academic_year or frappe.defaul", "d_id": 14040, "documentation": { "docstring": "\n\t\tselect\n\t\t\tname as program_enrollment, student_name, program, student_batch_name as student_batch,\n\t\t\tstudent_category, academic_term, academic_year\n\t\tfrom\n\t\t\t`tabProgram Enrollment`\n\t\twhere\n\t\t\tstudent = %s and academic_year = %s\n\t\torder by creation", "n_words": 26, "vocab_size": 22, "n_whitespaces": 18, "language": "en" } }, { "id": 280749, "commit_id": "e52c89c7d1bd52d1f0db0da86a72322ba72c1dc1", "repo": "keras", "path": "keras/applications/inception_resnet_v2.py", "file_name": "inception_resnet_v2.py", "fun_name": "inception_resnet_block", "commit_message": "Removes the serialization of lambdas Keras tests where necessary and adds SafeModeScope all other lambda-based serialization 
tests.\n\nPiperOrigin-RevId: 495432774", "code": "def inception_resnet_block(x, scale, block_type, block_idx, activation=\"relu\"):\n \n if block_type == \"block35\":\n branch_0 = conv2d_bn(x, 32, 1)\n branch_1 = conv2d_bn(x, 32, 1)\n branch_1 = conv2d_bn(branch_1, 32, 3)\n branch_2 = conv2d_bn(x, 32, 1)\n branch_2 = conv2d_bn(branch_2, 48, 3)\n branch_2 = conv2d_bn(branch_2, 64, 3)\n branches = [branch_0, branch_1, branch_2]\n elif block_type == \"block17\":\n branch_0 = conv2d_bn(x, 192, 1)\n branch_1 = conv2d_bn(x, 128, 1)\n branch_1 = conv2d_bn(branch_1, 160, [1, 7])\n branch_1 = conv2d_bn(branch_1, 192, [7, 1])\n branches = [branch_0, branch_1]\n elif block_type == \"block8\":\n branch_0 = conv2d_bn(x, 192, 1)\n branch_1 = conv2d_bn(x, 192, 1)\n branch_1 = conv2d_bn(branch_1, 224, [1, 3])\n branch_1 = conv2d_bn(branch_1, 256, [3, 1])\n branches = [branch_0, branch_1]\n else:\n raise ValueError(\n \"Unknown Inception-ResNet block type. \"\n 'Expects \"block35\", \"block17\" or \"block8\", '\n \"but got: \" + str(block_type)\n )\n\n block_name = block_type + \"_\" + str(block_idx)\n channel_axis = 1 if backend.image_data_format() == \"channels_first\" else 3\n mixed = layers.Concatenate(axis=channel_axis, name=block_name + \"_mixed\")(\n branches\n )\n up = conv2d_bn(\n mixed,\n backend.int_shape(x)[channel_axis],\n 1,\n activation=None,\n use_bias=True,\n name=block_name + \"_conv\",\n )\n\n x = CustomScaleLayer()(x, up, scale)\n if activation is not None:\n x = layers.Activation(activation, name=block_name + \"_ac\")(x)\n return x\n\n\n@keras_export(\"keras.applications.inception_resnet_v2.preprocess_input\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.applications.inception_resnet_v2.preprocess_input\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 443, "n_words": 180, "vocab_size": 93, "complexity": 6, "nloc": 44, "token_counts": 336, "n_ast_nodes": 520, "n_identifiers": 28, "random_cut": "def inception_resnet_block(x, scale, block_type, block_idx, activation=\"relu\"):\n \n if block_type == \"block35\":\n branch_0 = conv2d_bn(x, 32, 1)\n branch_1 = conv2d_bn(x, 32, 1)\n branch_1 = conv2d_bn(branch_1, 32, 3)\n branch_2 = conv2d_bn(x, 32, 1)\n branch_2 = conv2d_bn(branch_2, 48, 3)\n branch_2 = conv2d_bn(branch_2, 64, 3)\n branches = [branch_0, branch_1, branch_2]\n elif block_type == \"block17\":\n branch_0 = conv2d_bn(x, 192, 1)\n branch_1 = conv2d_bn(x, 128, 1)\n branch_1 = conv2d_bn(branch_1, 160, [1, 7])\n branch_1 = conv2d_bn(branch_1, 192, [7, 1])\n branches = [branch_0, branch_1]\n elif block_type == \"block8\":\n branch_0 = conv2d_bn(x, 192, 1)\n branch_1 = conv2d_bn(x, 192, 1)\n branch_1 = conv2d_bn(branch_1, 224, [1, 3])\n branch_1 = conv2d_bn(branch_1, 256, [3, 1])\n branches = [branch_0, branch_1]\n else:\n raise ValueError(\n \"Unknown Inception-ResNet block type. 
\"\n 'Expects \"block35\", \"block17\" or \"block8\", '\n \"but got: \" + str(block_type)\n )\n\n block_name = block_type + \"_\" + str(block_idx)\n channel_axis = 1 if backend.image_data_format() == \"channels_first\" else 3\n mixed = layers.Concatenate(axis=channel_axis, name=block_name + \"_mixed\")(\n branch", "d_id": 83430, "documentation": { "docstring": "Adds an Inception-ResNet block.\n\n This function builds 3 types of Inception-ResNet blocks mentioned\n in the paper, controlled by the `block_type` argument (which is the\n block name used in the official TF-slim implementation):\n - Inception-ResNet-A: `block_type='block35'`\n - Inception-ResNet-B: `block_type='block17'`\n - Inception-ResNet-C: `block_type='block8'`\n\n Args:\n x: input tensor.\n scale: scaling factor to scale the residuals (i.e., the output of passing\n `x` through an inception module) before adding them to the shortcut\n branch. Let `r` be the output from the residual branch, the output of\n this block will be `x + scale * r`.\n block_type: `'block35'`, `'block17'` or `'block8'`, determines the network\n structure in the residual branch.\n block_idx: an `int` used for generating layer names. The Inception-ResNet\n blocks are repeated many times in this network. We use `block_idx` to\n identify each of the repetitions. For example, the first\n Inception-ResNet-A block will have `block_type='block35', block_idx=0`,\n and the layer names will have a common prefix `'block35_0'`.\n activation: activation function to use at the end of the block (see\n [activations](../activations.md)). When `activation=None`, no activation\n is applied\n (i.e., \"linear\" activation: `a(x) = x`).\n\n Returns:\n Output tensor for the block.\n\n Raises:\n ValueError: if `block_type` is not one of `'block35'`,\n `'block17'` or `'block8'`.\n ", "n_words": 193, "vocab_size": 130, "n_whitespaces": 344, "language": "en" } }, { "id": 100317, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/gui/analysis/stats.py", "file_name": "stats.py", "fun_name": "_get_calculations", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. 
Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _get_calculations(self):\n \n for selection in self._selections:\n if selection == \"raw\":\n continue\n logger.debug(\"Calculating: %s\", selection)\n method = getattr(self, f\"_calc_{selection}\")\n raw_keys = [key for key in self._stats if key.startswith(\"raw_\")]\n for key in raw_keys:\n selected_key = f\"{selection}_{key.replace('raw_', '')}\"\n self._stats[selected_key] = method(self._stats[key])\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 152, "n_words": 38, "vocab_size": 28, "complexity": 6, "nloc": 10, "token_counts": 79, "n_ast_nodes": 156, "n_identifiers": 14, "random_cut": "def _get_calculations(self):\n \n for selection in self._selections:\n if selection == \"raw\":\n continue\n logger.debug(\"Calculating: %s\", selection)\n method = getattr(self, f\"_calc_{selection}\")\n raw_keys = [key for key in self._stats if key.startswith(\"raw_\")]\n for key in raw_keys:\n selected_key = f\"{selection}_{key.replace('raw_', '')}\"\n self._stats[selected_key] = method(self._sta", "d_id": 19814, "documentation": { "docstring": " Perform the required calculations and populate :attr:`stats`. ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 8, "language": "en" } }, { "id": 157377, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "file_name": "dpm_solver.py", "fun_name": "multistep_dpm_solver_second_update", "commit_message": "release more models", "code": "def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type=\"dpm_solver\"):\n \n if solver_type not in ['dpm_solver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpm_solver' or 'taylor', got {}\".format(solver_type))\n ns = self.noise_schedule\n dims = x.dim()\n model_prev_1, model_prev_0 = model_prev_list\n t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(\n t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)\n if self.predict_x0:\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n + expand_dims(alpha_t * ((torch.exp(-h) - 1.) 
/ h + 1.), dims) * D1_0\n )\n else:\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x\n - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0\n - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x\n - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0\n - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0\n )\n return x_t\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 25, "n_whitespaces": 809, "n_words": 228, "vocab_size": 88, "complexity": 7, "nloc": 43, "token_counts": 449, "n_ast_nodes": 681, "n_identifiers": 37, "random_cut": "def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type=\"dpm_solver\"):\n \n if solver_type not in ['dpm_solver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpm_solver' or 'taylor', got {}\".format(solver_type))\n ns = self.noise_schedule\n dims = x.dim()\n model_prev_1, model_prev_0 = model_prev_list\n t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(\n t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)\n if self.predict_x0:\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0\n )\n else:\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x\n - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * mo", "d_id": 36917, "documentation": { "docstring": "\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. 
The approximated solution at time `t`.\n ", "n_words": 91, "vocab_size": 57, "n_whitespaces": 201, "language": "en" } }, { "id": 175317, "commit_id": "acf7403f9baea3ae1119fc6b4a3298522188bf96", "repo": "cpython", "path": "Lib/enum.py", "file_name": "enum.py", "fun_name": "global_enum", "commit_message": "bpo-40066: [Enum] update str() and format() output (GH-30582)\n\nUndo rejected PEP-663 changes:\r\n\r\n- restore `repr()` to its 3.10 status\r\n- restore `str()` to its 3.10 status\r\n\r\nNew changes:\r\n\r\n- `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result\r\n- zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == ''`\r\n- update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type\r\n- added `_numeric_repr_` to `Flag` to control display of unnamed values\r\n- enums without doc strings have a more comprehensive doc string added\r\n- `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`", "code": "def global_enum(cls, update_str=False):\n \n if issubclass(cls, Flag):\n cls.__repr__ = global_flag_repr\n else:\n cls.__repr__ = global_enum_repr\n if not issubclass(cls, ReprEnum) or update_str:\n cls.__str__ = global_str\n sys.modules[cls.__module__].__dict__.update(cls.__members__)\n return cls\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 64, "n_words": 25, "vocab_size": 20, "complexity": 4, "nloc": 9, "token_counts": 65, "n_ast_nodes": 104, "n_identifiers": 17, "random_cut": "def global_enum(cls, update_str=False):\n \n if issubclass(cls, Flag):\n cls.__repr__ = global_flag_repr\n else:\n cls.__repr__ = global_enum_repr\n if not issubclass(cls, ReprEnum) or update_str:\n cls.__str__ = global_str\n sys.modules[cls.__module__].__dict__.updat", "d_id": 41599, "documentation": { "docstring": "\n decorator that makes the repr() of an enum member reference its module\n instead of its class; also exports all members to the enum's module's\n global namespace\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 39, "language": "en" } }, { "id": 272022, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_v1.py", "file_name": "training_v1.py", "fun_name": "create_training_target", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def create_training_target(self, target, run_eagerly=False):\n \n if self.has_training_target():\n raise ValueError(\n \"The training_target field for the _TrainingEndpoint \"\n \"instance has already been populated\"\n )\n if run_eagerly:\n # When run_eagerly, the target tensor is ignored, and the None placeholder\n # is created instead.\n self.training_target = _TrainingTarget(\n None, feedable=True, skip_target_weights=False\n )\n return\n\n if self.should_skip_target():\n self.training_target = _TrainingTarget(None)\n else:\n if target is not None and not backend.is_placeholder(target):\n feedable = False\n skip_target_weights = True\n else:\n feedable = True\n skip_target_weights = False\n\n if target is None:\n target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get(\n self.loss_fn, backend.dtype(self.output)\n )\n\n target = backend.placeholder(\n ndim=len(self.shape),\n name=self.output_name + 
\"_target\",\n sparse=backend.is_sparse(self.output),\n dtype=target_dtype,\n )\n\n self.training_target = _TrainingTarget(\n target,\n feedable=feedable,\n skip_target_weights=skip_target_weights,\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 584, "n_words": 101, "vocab_size": 64, "complexity": 7, "nloc": 35, "token_counts": 172, "n_ast_nodes": 276, "n_identifiers": 28, "random_cut": "def create_training_target(self, target, run_eagerly=False):\n \n if self.has_training_target():\n raise ValueError(\n \"The training_target field for the _TrainingEndpoint \"\n \"instance has already been populated\"\n )\n if run_eagerly:\n # When run_eagerly, the target tensor is ignored, and the None placeholder\n # is created instead.\n self.training_target = ", "d_id": 80939, "documentation": { "docstring": "Create training_target instance and update the self.training_target.\n\n Note that the input target should just be a tensor or None, and\n corresponding training target will be created based on the output and\n loss_fn.\n\n Args:\n target: the target tensor for the current output. Could be None.\n run_eagerly: boolean, whether the model is in run_eagerly mode.\n\n Raises:\n ValueError if the training_target field for the current instance has\n already been populated.\n ", "n_words": 67, "vocab_size": 49, "n_whitespaces": 145, "language": "en" } }, { "id": 43999, "commit_id": "2b4bf7fe67fc656ceb7bdaad36453b7a5b83ef04", "repo": "airflow", "path": "tests/models/test_dag.py", "file_name": "test_dag.py", "fun_name": "test_set_task_instance_state", "commit_message": "Use `DagRun.run_id` instead of `execution_date` when updating state of TIs(UI & REST API) (#18724)\n\nWe can now use run_id as well as execution_date to update states\r\nof task instances\r\n\r\nCo-authored-by: Tzu-ping Chung \r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def test_set_task_instance_state(run_id, execution_date, session, dag_maker):\n \n\n start_date = datetime_tz(2020, 1, 1)\n with dag_maker(\"test_set_task_instance_state\", start_date=start_date, session=session) as dag:\n task_1 = DummyOperator(task_id=\"task_1\")\n task_2 = DummyOperator(task_id=\"task_2\")\n task_3 = DummyOperator(task_id=\"task_3\")\n task_4 = DummyOperator(task_id=\"task_4\")\n task_5 = DummyOperator(task_id=\"task_5\")\n\n task_1 >> [task_2, task_3, task_4, task_5]\n\n dagrun = dag_maker.create_dagrun(\n run_id=run_id,\n execution_date=execution_date,\n state=State.FAILED,\n run_type=DagRunType.SCHEDULED,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 130, "n_words": 45, "vocab_size": 38, "complexity": 2, "nloc": 39, "token_counts": 321, "n_ast_nodes": 188, "n_identifiers": 23, "random_cut": "def test_set_task_instance_state(run_id, execution_date, session, dag_maker):\n \n\n start_date = datetime_tz(2020, 1, 1)\n with dag_maker(\"test_set_task_instance_state\", start_date=start_date, session=session) as dag:\n task_1 = DummyOperator(task_id=\"task_1\")\n task_2 = DummyOperator(task_id=\"ta", "d_id": 8117, "documentation": { "docstring": "Test that set_task_instance_state updates the TaskInstance state and clear downstream failed", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 296453, "commit_id": "23264c8fd4a3f8bcff5961ed11cab6388d3c67a4", "repo": "core", "path": 
"homeassistant/components/roon/config_flow.py", "file_name": "config_flow.py", "fun_name": "async_step_link", "commit_message": "Improve roon integraton (#66000)\n\n* Update to new library, revise discovery to work with new library, specify port to work with new library.\r\n\r\n* Move user gui to fallback.\r\n\r\n* Revise tests.\r\n\r\n* Handle old config.\r\n\r\n* Improve debugging, refresh faster on load.\r\n\r\n* Remove duplicate.\r\n\r\n* Bump library version.\r\n\r\n* Fix docstring per review.\r\n\r\n* Review suggestion\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Review suggestion\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Add check for duplicate host.\r\n\r\n* Add error message to strings.\r\n\r\n* Tidy.\r\n\r\n* Review changes.\r\n\r\n* Remove default.\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "async def async_step_link(self, user_input=None):\n \n errors = {}\n if user_input is not None:\n # Do not authenticate if the host is already configured\n self._async_abort_entries_match({CONF_HOST: self._host})\n\n try:\n info = await authenticate(\n self.hass, self._host, self._port, self._servers\n )\n\n except InvalidAuth:\n errors[\"base\"] = \"invalid_auth\"\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Unexpected exception\")\n errors[\"base\"] = \"unknown\"\n else:\n return self.async_create_entry(title=DEFAULT_NAME, data=info)\n\n return self.async_show_form(step_id=\"link\", errors=errors)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 260, "n_words": 56, "vocab_size": 46, "complexity": 4, "nloc": 16, "token_counts": 107, "n_ast_nodes": 182, "n_identifiers": 22, "random_cut": "async def async_step_link(self, user_input=None):\n \n errors = {}\n if user_input is not None:\n # Do not authenticate if the host is already configured\n self._async_abort_entries_match({CONF_HOST: self._host})\n\n try:\n info = await authenticate(\n self.hass, self._host, self._port, self._servers\n )\n\n except InvalidAuth:\n errors[\"base\"] = \"invalid_auth\"\n except Exception: # pylint: dis", "d_id": 95433, "documentation": { "docstring": "Handle linking and authenticting with the roon server.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 215744, "commit_id": "3bb43882e727b1d36abe2e501759c9c5e9048ecf", "repo": "salt", "path": "tests/pytests/unit/utils/win_dacl/test_get_sid_string.py", "file_name": "test_get_sid_string.py", "fun_name": "test_get_sid_string_none", "commit_message": "Add tests, migrate some tests to pytest", "code": "def test_get_sid_string_none():\n \n sid_obj = salt.utils.win_dacl.get_sid(None)\n assert isinstance(sid_obj, pywintypes.SIDType)\n assert salt.utils.win_dacl.get_sid_string(sid_obj) == \"S-1-0-0\"\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 66, "n_identifiers": 10, "random_cut": "def test_get_sid_string_none():\n \n sid_obj = sa", "d_id": 54138, "documentation": { "docstring": "\n Validate getting a null sid (S-1-0-0) when a null sid is passed\n ", "n_words": 12, "vocab_size": 9, "n_whitespaces": 19, "language": "en" } }, { "id": 322461, "commit_id": "ba3ea1cffa14d8fddb4d61239d691eba1d711a1d", "repo": "PaddleNLP", "path": "paddlenlp/datasets/dataset.py", "file_name": "dataset.py", 
"fun_name": "read", "commit_message": "[cblue] support converting labels of multi-tasks", "code": "def read(self, filename, split='train'):\n \n\n label_list = self.get_labels()\n vocab_info = self.get_vocab()\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 10, "vocab_size": 9, "complexity": 12, "nloc": 40, "token_counts": 260, "n_ast_nodes": 46, "n_identifiers": 8, "random_cut": "def read(self, filename, split='train'):\n \n\n label_list = self.get_labels()\n vocab_info = self.get_vocab()\n", "d_id": 118175, "documentation": { "docstring": "\n Returns a dataset containing all the examples that can be read from the file path.\n\n If `self.lazy` is False, this eagerly reads all instances from `self._read()`\n and returns a `MapDataset`.\n\n If `self.lazy` is True, this returns an `IterDataset`, which internally\n relies on the generator created from `self._read()` to lazily produce examples.\n In this case your implementation of `_read()` must also be lazy\n (that is, not load all examples into memory at once).\n\n Args:\n filename (str): Path of data file to read, usually provided by `_get_data` \n function.\n split (str, optional): The split name of selected dataset. This only makes\n a different when data files of different splits have different structures.\n \n Returns:\n A `MapDataset|IterDataset`.\n ", "n_words": 112, "vocab_size": 86, "n_whitespaces": 255, "language": "en" } }, { "id": 74629, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_whitelist.py", "file_name": "test_whitelist.py", "fun_name": "test_no_rule_for_attr", "commit_message": "Reformat with black", "code": "def test_no_rule_for_attr(self):\n \n tag = self.soup.b\n fn = attribute_rule({\"snowman\": \"barbecue\"})\n fn(tag)\n self.assertEqual(str(tag), \"baz\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 47, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 38, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "def test_no_rule_for_attr(self):\n \n tag = self.soup.b\n fn = attribute_rule({\"snowman\": \"barbec", "d_id": 16279, "documentation": { "docstring": "\n Test that attribute_rule() drops attributes for\n which no rule has been defined.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 132902, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/check_serialize.py", "file_name": "check_serialize.py", "fun_name": "_inspect_generic_serialization", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _inspect_generic_serialization(base_obj, depth, parent, failure_set):\n \n assert not inspect.isfunction(base_obj)\n functions = inspect.getmembers(base_obj, predicate=inspect.isfunction)\n found = False\n with _printer.indent():\n for name, obj in functions:\n serializable, _ = inspect_serializability(\n obj,\n name=name,\n depth=depth - 1,\n _parent=parent,\n _failure_set=failure_set,\n )\n found = found or not serializable\n if found:\n break\n\n with _printer.indent():\n members = inspect.getmembers(base_obj)\n for name, obj in members:\n if name.startswith(\"__\") and name.endswith(\"__\") or inspect.isbuiltin(obj):\n continue\n 
serializable, _ = inspect_serializability(\n obj,\n name=name,\n depth=depth - 1,\n _parent=parent,\n _failure_set=failure_set,\n )\n found = found or not serializable\n if found:\n break\n if not found:\n _printer.print(\n f\"WARNING: Did not find non-serializable object in {base_obj}. \"\n \"This may be an oversight.\"\n )\n return found\n\n\n@DeveloperAPI", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@DeveloperAPI", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 477, "n_words": 103, "vocab_size": 60, "complexity": 11, "nloc": 37, "token_counts": 184, "n_ast_nodes": 302, "n_identifiers": 26, "random_cut": "def _inspect_generic_serialization(base_obj, depth, parent, failure_set):\n \n assert not inspect.isfunction(base_obj)\n functions = inspect.getmembers(base_obj, predicate=inspect.isfunction)\n found = False\n with _printer.indent():\n for name, obj in functions:\n serializable, _ = inspect_serializability(\n obj,\n name=name,\n depth=depth - 1,\n _parent=parent,\n _failure_set=failure_set,\n )\n found = found or not serializable\n ", "d_id": 29861, "documentation": { "docstring": "Adds the first-found non-serializable element to the failure_set.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 200643, "commit_id": "624e6f073d5d20e78484f5a0b477469f83678b88", "repo": "sympy", "path": "sympy/combinatorics/perm_groups.py", "file_name": "perm_groups.py", "fun_name": "is_dihedral", "commit_message": "Add a `PermutationGroup.is_dihedral` property", "code": "def is_dihedral(self):\n r\n if self._is_dihedral is not None:\n return self._is_dihedral\n\n order = self.order()\n\n if order % 2 == 1:\n self._is_dihedral = False\n return False\n if order == 2:\n self._is_dihedral = True\n return True\n if order == 4:\n # The dihedral group of order 4 is the Klein 4-group.\n self._is_dihedral = not self.is_cyclic\n return self._is_dihedral\n if self.is_abelian:\n # The only abelian dihedral groups are the ones of orders 2 and 4.\n self._is_dihedral = False\n return False\n\n # Now we know the group is of even order >= 6, and nonabelian.\n n = order // 2\n\n # Handle special cases where there are exactly two generators.\n gens = self.generators\n if len(gens) == 2:\n x, y = gens\n a, b = x.order(), y.order()\n # Make a >= b\n if a < b:\n x, y, a, b = y, x, b, a\n # Using Theorem 2.1 of [3]:\n if {a, b} == {2}:\n self._is_dihedral = True\n return True\n # Using Theorem 1.1 of [3]:\n if (a, b) == (n, 2) and y*x*y == ~x:\n self._is_dihedral = True\n return True\n\n # Procede with algorithm of [1]\n # Find elements of orders 2 and n\n order_2, order_n = [], []\n for p in self.elements:\n k = p.order()\n if k == 2:\n order_2.append(p)\n elif k == n:\n order_n.append(p)\n\n if len(order_2) != n + 1 - (n % 2):\n self._is_dihedral = False\n return False\n\n if not order_n:\n self._is_dihedral = False\n return False\n\n x = order_n[0]\n # Want an element y of order 2 that is not a power of x\n # (i.e. 
that is not the 180-deg rotation, when n is even).\n y = order_2[0]\n if n % 2 == 0 and y == x**(n//2):\n y = order_2[1]\n\n self._is_dihedral = (y*x*y == ~x)\n return self._is_dihedral\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 863, "n_words": 287, "vocab_size": 131, "complexity": 18, "nloc": 75, "token_counts": 314, "n_ast_nodes": 508, "n_identifiers": 20, "random_cut": "def is_dihedral(self):\n r\n if self._is_dihedral is not None:\n return self._is_dihedral\n\n order = self.order()\n\n if order % 2 == 1:\n self._is_dihedral = False\n return False\n if or", "d_id": 49752, "documentation": { "docstring": "\n Return ``True`` if the group is dihedral.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.perm_groups import PermutationGroup\n >>> from sympy.combinatorics.permutations import Permutation\n >>> from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup\n >>> G = PermutationGroup(Permutation(1, 6)(2, 5)(3, 4), Permutation(0, 1, 2, 3, 4, 5, 6))\n >>> G.is_dihedral\n True\n >>> G = SymmetricGroup(3)\n >>> G.is_dihedral\n True\n >>> G = CyclicGroup(6)\n >>> G.is_dihedral\n False\n\n References\n ==========\n\n .. [1] https://math.stackexchange.com/a/827273\n .. [2] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral.pdf\n .. [3] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral2.pdf\n .. [4] https://en.wikipedia.org/wiki/Dihedral_group\n ", "n_words": 70, "vocab_size": 48, "n_whitespaces": 225, "language": "en" } }, { "id": 266230, "commit_id": "4e27e8d3dd2cbfe3279bda3631ca92a7facdd334", "repo": "netbox", "path": "netbox/dcim/signals.py", "file_name": "signals.py", "fun_name": "extend_rearport_cable_paths", "commit_message": "Fixes #10969: Update cable paths ending at associated rear port when creating new front ports", "code": "def extend_rearport_cable_paths(instance, created, **kwargs):\n \n if created:\n rearport = instance.rear_port\n for cablepath in CablePath.objects.filter(_nodes__contains=rearport):\n cablepath.retrace()\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 14, "vocab_size": 14, "complexity": 3, "nloc": 5, "token_counts": 38, "n_ast_nodes": 62, "n_identifiers": 12, "random_cut": "def extend_rearport_cable_paths(instance, created, **kwargs):\n \n if created:\n rearport = instance.rear_port\n for cablepath in CablePath.objects.filter(_nodes__contains=rearport):\n ", "d_id": 78343, "documentation": { "docstring": "\n When a new FrontPort is created, add it to any CablePaths which end at its corresponding RearPort.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 24, "language": "en" } }, { "id": 100345, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/gui/utils.py", "file_name": "utils.py", "fun_name": "_load_images_to_cache", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump 
requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _load_images_to_cache(self, image_files, frame_dims, thumbnail_size):\n \n logger.debug(\"Number image_files: %s, frame_dims: %s, thumbnail_size: %s\",\n len(image_files), frame_dims, thumbnail_size)\n num_images = (frame_dims[0] // thumbnail_size) * (frame_dims[1] // thumbnail_size)\n logger.debug(\"num_images: %s\", num_images)\n if num_images == 0:\n return False\n samples = []\n start_idx = len(image_files) - num_images if len(image_files) > num_images else 0\n show_files = sorted(image_files, key=os.path.getctime)[start_idx:]\n dropped_files = []\n for fname in show_files:\n try:\n img = Image.open(fname)\n except PermissionError as err:\n logger.debug(\"Permission error opening preview file: '%s'. Original error: %s\",\n fname, str(err))\n dropped_files.append(fname)\n continue\n except Exception as err: # pylint:disable=broad-except\n # Swallow any issues with opening an image rather than spamming console\n # Can happen when trying to read partially saved images\n logger.debug(\"Error opening preview file: '%s'. Original error: %s\",\n fname, str(err))\n dropped_files.append(fname)\n continue\n\n width, height = img.size\n scaling = thumbnail_size / max(width, height)\n logger.debug(\"image width: %s, height: %s, scaling: %s\", width, height, scaling)\n\n try:\n img = img.resize((int(width * scaling), int(height * scaling)))\n except OSError as err:\n # Image only gets loaded when we call a method, so may error on partial loads\n logger.debug(\"OS Error resizing preview image: '%s'. 
Original error: %s\",\n fname, err)\n dropped_files.append(fname)\n continue\n\n if img.size[0] != img.size[1]:\n # Pad to square\n new_img = Image.new(\"RGB\", (thumbnail_size, thumbnail_size))\n new_img.paste(img, ((thumbnail_size - img.size[0])//2,\n (thumbnail_size - img.size[1])//2))\n img = new_img\n draw = ImageDraw.Draw(img)\n draw.rectangle(((0, 0), (thumbnail_size, thumbnail_size)), outline=\"#E5E5E5\", width=1)\n samples.append(np.array(img))\n\n samples = np.array(samples)\n if not np.any(samples):\n logger.debug(\"No preview images collected.\")\n return False\n\n if dropped_files:\n logger.debug(\"Removing dropped files: %s\", dropped_files)\n show_files = [fname for fname in show_files if fname not in dropped_files]\n\n self._previewcache[\"filenames\"] = (self._previewcache[\"filenames\"] +\n show_files)[-num_images:]\n cache = self._previewcache[\"images\"]\n if cache is None:\n logger.debug(\"Creating new cache\")\n cache = samples[-num_images:]\n else:\n logger.debug(\"Appending to existing cache\")\n cache = np.concatenate((cache, samples))[-num_images:]\n self._previewcache[\"images\"] = cache\n logger.debug(\"Cache shape: %s\", self._previewcache[\"images\"].shape)\n return True\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1104, "n_words": 281, "vocab_size": 176, "complexity": 13, "nloc": 61, "token_counts": 488, "n_ast_nodes": 811, "n_identifiers": 50, "random_cut": "def _load_images_to_cache(self, image_files, frame_dims, thumbnail_size):\n \n logger.debug(\"Number image_files: %s, frame_dims: %s, thumbnail_size: %s\",\n len(image_files), frame_dims, thumbnail_size)\n num_images = (frame_dims[0] // thumbnail_size) * (frame_dims[1] // thumbnail_size)\n logger.debug(\"num_images: %s\", num_images)\n if num_images == 0:\n return False\n samples = []\n start_idx = len(image_files) - num_images if len(image_files) > num_images else 0\n show_files = sorted(image_files, key=os.path.getctime)[start_idx:]\n dropped_files = []\n for fname in show_files:\n try:\n img = Image.open(fname)\n except PermissionError as err:\n logger.debug(\"Permission error opening preview file: '%s'. Original error: %s\",\n fname, str(err))\n dropped_files.append(fname)\n continue\n except Exception as err: # pylint:disable=broad-except\n ", "d_id": 19840, "documentation": { "docstring": " Load preview images to the image cache.\n\n Load new images and append to cache, filtering the cache the number of thumbnails that will\n fit inside the display panel.\n\n Parameters\n ----------\n image_files: list\n A list of new image files that have been modified since the last check\n frame_dims: tuple\n The (width (`int`), height (`int`)) of the display panel that will display the preview\n thumbnail_size: int\n The size of each thumbnail that should be created\n\n Returns\n -------\n bool\n ``True`` if images were successfully loaded to cache otherwise ``False``\n ", "n_words": 86, "vocab_size": 60, "n_whitespaces": 209, "language": "en" } }, { "id": 267897, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/integration/coverage.py", "file_name": "coverage.py", "fun_name": "target_profile", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def target_profile(self) -> t.Optional[PosixProfile]:\n \n return t.cast(PosixProfile, self.profiles[0]) if self.profiles else None\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 33, "n_ast_nodes": 51, "n_identifiers": 7, "random_cut": "def target_profile(self) -> t.Optional[PosixProfile]:\n \n retur", "d_id": 79173, "documentation": { "docstring": "The POSIX target profile, if it uses a different Python interpreter than the controller, otherwise None.", "n_words": 16, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 286444, "commit_id": "f9086d6f38cf5de4bf3e44be0b4ccd332dbaca46", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/portfolio_model.py", "file_name": "portfolio_model.py", "fun_name": "preprocess_transactions", "commit_message": "Portfolio menu bug fixes (#3204)\n\n* normalized way moving average is treated and prevent huge exception prompt\r\n\r\n* changed descriptions on docs\r\n\r\n* change to check_positive_float\r\n\r\n* add integration tests\r\n\r\n* fix linting\r\n\r\n* add more integration tests\r\n\r\n* add more integration tests\r\n\r\n* fix linting\r\n\r\n* add some po integration tests\r\n\r\n* fix flag without prompt\r\n\r\n* change orderbook to transactions\r\n\r\n* limit warning to portfolio\r\n\r\n* change print help\r\n\r\n* format portfolio var\r\n\r\n* reformat controller\r\n\r\n* reformat es\r\n\r\n* remove autocompletion\r\n\r\n* change print help\r\n\r\n* add percentage symbol to summary\r\n\r\n* fix holp holv\r\n\r\n* fix scripts\r\n\r\n* update website doc\r\n\r\n* add tqdm progress bars\r\n\r\n* fix table spacing\r\n\r\n* identify mret tables\r\n\r\n* remove positive float from rfr\r\n\r\n* flake8\r\n\r\n* pylint\r\n\r\n* fix reports\r\n\r\n* revert to old menu help\r\n\r\n* revert to old menu help\r\n\r\n* Update test_portfolio.openbb\r\n\r\n* quick change on empty lines\r\n\r\nCo-authored-by: hjoaquim \r\nCo-authored-by: James Maslek ", "code": "def preprocess_transactions(self):\n \n\n p_bar = tqdm(range(14), desc=\"Preprocessing transactions\")\n\n try:\n\n # 0. If optional fields not in the transactions add missing\n optional_fields = [\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n \"Fees\",\n \"Premium\",\n \"ISIN\",\n ]\n if not set(optional_fields).issubset(set(self.__transactions.columns)):\n for field in optional_fields:\n if field not in self.__transactions.columns:\n self.__transactions[field] = np.nan\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 1. Convert Date to datetime\n self.__transactions[\"Date\"] = pd.to_datetime(self.__transactions[\"Date\"])\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 2. 
Sort transactions by date\n self.__transactions = self.__transactions.sort_values(by=\"Date\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 3. Capitalize Ticker and Type [of instrument...]\n self.__transactions[\"Ticker\"] = self.__transactions[\"Ticker\"].map(\n lambda x: x.upper()\n )\n self.__transactions[\"Type\"] = self.__transactions[\"Type\"].map(\n lambda x: x.upper()\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n self.__transactions[\"Signal\"] = self.__transactions[\"Side\"].map(\n lambda x: 1\n if x.lower() in [\"deposit\", \"buy\"]\n else (-1 if x.lower() in [\"withdrawal\", \"sell\"] else 0)\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 5. Convert quantity to signed integer\n self.__transactions[\"Quantity\"] = (\n abs(self.__transactions[\"Quantity\"]) * self.__transactions[\"Signal\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 6. Determining the investment/divestment value\n self.__transactions[\"Investment\"] = (\n self.__transactions[\"Quantity\"] * self.__transactions[\"Price\"]\n + self.__transactions[\"Fees\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n crypto_trades = self.__transactions[self.__transactions.Type == \"CRYPTO\"]\n self.__transactions.loc[\n (self.__transactions.Type == \"CRYPTO\"), \"Ticker\"\n ] = [\n f\"{crypto}-{currency}\"\n for crypto, currency in zip(\n crypto_trades.Ticker, crypto_trades.Currency\n )\n ]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided.\n\n # If isin not valid ticker is empty\n self.__transactions[\"yf_Ticker\"] = self.__transactions[\"ISIN\"].apply(\n lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan\n )\n\n empty_tickers = list(\n self.__transactions[\n (self.__transactions[\"yf_Ticker\"] == \"\")\n | (self.__transactions[\"yf_Ticker\"].isna())\n ][\"Ticker\"].unique()\n )\n\n # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported\n removed_tickers = []\n for item in empty_tickers:\n with contextlib.redirect_stdout(None):\n # Suppress yfinance failed download message if occurs\n valid_ticker = not (\n yf.download(\n item,\n start=datetime.datetime.now() + datetime.timedelta(days=-5),\n progress=False,\n ).empty\n )\n if valid_ticker:\n # Invalid ISIN but valid ticker\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = np.nan\n else:\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = \"\"\n removed_tickers.append(item)\n\n # Merge reformated tickers into Ticker\n self.__transactions[\"Ticker\"] = self.__transactions[\"yf_Ticker\"].fillna(\n self.__transactions[\"Ticker\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 9. Remove unsupported ISINs that came out empty\n self.__transactions.drop(\n self.__transactions[self.__transactions[\"Ticker\"] == \"\"].index,\n inplace=True,\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 10. Create tickers dictionary with structure {'Type': [Ticker]}\n for ticker_type in set(self.__transactions[\"Type\"]):\n self.tickers[ticker_type] = list(\n set(\n self.__transactions[\n self.__transactions[\"Type\"].isin([ticker_type])\n ][\"Ticker\"]\n )\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 11. Create list with tickers except cash\n self.tickers_list = list(set(self.__transactions[\"Ticker\"]))\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 12. 
Save transactions inception date\n self.inception_date = self.__transactions[\"Date\"][0]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 13. Populate fields Sector, Industry and Country\n if (\n self.__transactions.loc[\n self.__transactions[\"Type\"] == \"STOCK\",\n optional_fields,\n ]\n .isnull()\n .values.any()\n ):\n # If any fields is empty for stocks (overwrites any info there)\n self.load_company_data()\n\n p_bar.n += 1\n p_bar.refresh()\n\n # Warn user of removed ISINs\n if removed_tickers:\n console.print(\n f\"\\n\\n[red]The following tickers are not supported and were removed: {removed_tickers}.\"\n f\"\\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN.\"\n f\"\\nSuffix info on 'Yahoo Finance market coverage':\"\n \" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html\"\n f\"\\nE.g. IWDA -> IWDA.AS[/red]\"\n )\n except Exception:\n console.print(\"\\nCould not preprocess transactions.\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 2754, "n_words": 512, "vocab_size": 267, "complexity": 14, "nloc": 137, "token_counts": 838, "n_ast_nodes": 1446, "n_identifiers": 71, "random_cut": "def preprocess_transactions(self):\n \n\n p_bar = tqdm(range(14), desc=\"Preprocessing transactions\")\n\n try:\n\n # 0. If optional fields not in the transactions add missing\n optional_fields = [\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n \"Fees\",\n \"Premium\",\n \"ISIN\",\n ]\n if not set(optional_fields).issubset(set(self.__transactions.columns)):\n for field in optional_fields:\n if field not in self.__transactions.columns:\n self.__transactions[field] = np.nan\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 1. Convert Date to datetime\n self.__transactions[\"Date\"] = pd.to_datetime(self.__transactions[\"Date\"])\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 2. Sort transactions by date\n self.__transactions = self.__transactions.sort_values(by=\"Date\")\n\n p_bar.n += 1\n p_bar.refre", "d_id": 85817, "documentation": { "docstring": "Method to preprocess, format and compute auxiliary fields.\n\n Preprocessing steps:\n 0. If optional fields not in the transactions add missing\n 1. Convert Date to datetime\n 2. Sort transactions by date\n 3. Capitalize Ticker and Type [of instrument...]\n 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n 5. Convert quantity to signed integer\n 6. Determining the investment/divestment value\n 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided\n 9. Remove unsupported ISINs that came out empty\n 10. Create tickers dictionary with structure {'Type': [Ticker]}\n 11. Create list with tickers except cash\n 12. Save transactions inception date\n 13. 
Populate fields Sector, Industry and Country\n ", "n_words": 116, "vocab_size": 92, "n_whitespaces": 284, "language": "en" } }, { "id": 269013, "commit_id": "75d70a610dffe927d89ceb400d79bb7f9027b26e", "repo": "keras", "path": "keras/optimizers/optimizer_v2/optimizer_v2.py", "file_name": "optimizer_v2.py", "fun_name": "_var_key", "commit_message": "Support checkpointing ShardedVariables in optimizer slot variables.\n\nPiperOrigin-RevId: 429577423", "code": "def _var_key(var):\n \n\n # pylint: disable=protected-access\n # Get the distributed variable if it exists.\n if hasattr(var, \"_distributed_container\"):\n var = var._distributed_container()\n if getattr(var, \"_in_graph_mode\", False):\n return var._shared_name\n return var._unique_id\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 39, "n_words": 27, "vocab_size": 23, "complexity": 3, "nloc": 6, "token_counts": 39, "n_ast_nodes": 69, "n_identifiers": 7, "random_cut": "def _var_key(var):\n \n\n # pylint: disable=protected-access\n # Get the distributed variable if it exists.\n if hasattr(var, \"_distributed_container\"):\n var = var._distributed_container()\n if getattr(var, \"_in_g", "d_id": 79831, "documentation": { "docstring": "Key for representing a primary variable, for looking up slots.\n\n In graph mode the name is derived from the var shared name.\n In eager mode the name is derived from the var unique id.\n If distribution strategy exists, get the primary variable first.\n\n Args:\n var: the variable.\n\n Returns:\n the unique name of the variable.\n ", "n_words": 54, "vocab_size": 35, "n_whitespaces": 66, "language": "en" } }, { "id": 261492, "commit_id": "b1807ff8ead319a08294beeaae90c3f03b2bb8ac", "repo": "scikit-learn", "path": "sklearn/ensemble/tests/test_stacking.py", "file_name": "test_stacking.py", "fun_name": "test_stacking_classifier_base_regressor", "commit_message": "ENH StackingClassifier allows regressors in its first layer (#24538)\n\nCo-authored-by: Tom Dupré la Tour \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_stacking_classifier_base_regressor():\n \n X_train, X_test, y_train, y_test = train_test_split(\n scale(X_iris), y_iris, stratify=y_iris, random_state=42\n )\n clf = StackingClassifier(estimators=[(\"ridge\", Ridge())])\n clf.fit(X_train, y_train)\n clf.predict(X_test)\n clf.predict_proba(X_test)\n assert clf.score(X_test, y_test) > 0.8\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 57, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 9, "token_counts": 79, "n_ast_nodes": 121, "n_identifiers": 19, "random_cut": "def test_stacking_classifier_base_regressor():\n \n X_train, X_test, y_train, y_test = train_test_split(\n scale(X_iris), y_iris, stratify=y_iris, random_state=42\n )\n clf = StackingClassifier(estimators=[(\"ridge\", Ridge())])\n clf.fit(X_train, y_train)\n clf.predict(X_test)\n clf.predict_proba(X_test)\n asser", "d_id": 76836, "documentation": { "docstring": "Check that a regressor can be used as the first layer in `StackingClassifier`.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 108151, "commit_id": "ec410abbb3a721e31f3aaa61e9e4f941467e35e1", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_svg.py", "file_name": "backend_svg.py", "fun_name": "_get_style_dict", "commit_message": "Deprecate functions in 
backends", "code": "def _get_style_dict(self, gc, rgbFace):\n \n attrib = {}\n\n forced_alpha = gc.get_forced_alpha()\n\n if gc.get_hatch() is not None:\n attrib['fill'] = \"url(#%s)\" % self._get_hatch(gc, rgbFace)\n if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0\n and not forced_alpha):\n attrib['fill-opacity'] = _short_float_fmt(rgbFace[3])\n else:\n if rgbFace is None:\n attrib['fill'] = 'none'\n else:\n if tuple(rgbFace[:3]) != (0, 0, 0):\n attrib['fill'] = rgb2hex(rgbFace)\n if (len(rgbFace) == 4 and rgbFace[3] != 1.0\n and not forced_alpha):\n attrib['fill-opacity'] = _short_float_fmt(rgbFace[3])\n\n if forced_alpha and gc.get_alpha() != 1.0:\n attrib['opacity'] = _short_float_fmt(gc.get_alpha())\n\n offset, seq = gc.get_dashes()\n if seq is not None:\n attrib['stroke-dasharray'] = ','.join(\n _short_float_fmt(val) for val in seq)\n attrib['stroke-dashoffset'] = _short_float_fmt(float(offset))\n\n linewidth = gc.get_linewidth()\n if linewidth:\n rgb = gc.get_rgb()\n attrib['stroke'] = rgb2hex(rgb)\n if not forced_alpha and rgb[3] != 1.0:\n attrib['stroke-opacity'] = _short_float_fmt(rgb[3])\n if linewidth != 1.0:\n attrib['stroke-width'] = _short_float_fmt(linewidth)\n if gc.get_joinstyle() != 'round':\n attrib['stroke-linejoin'] = gc.get_joinstyle()\n if gc.get_capstyle() != 'butt':\n attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]\n\n return attrib\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 580, "n_words": 145, "vocab_size": 76, "complexity": 21, "nloc": 37, "token_counts": 342, "n_ast_nodes": 558, "n_identifiers": 27, "random_cut": "def _get_style_dict(self, gc, rgbFace):\n \n attrib = {}\n\n forced_alpha = gc.get_forced_alpha()\n\n if gc.get_hatch() is not None:\n attrib['fill'] = \"url(#%s)\" % self._get_hatch(gc, rgbFace)\n if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0\n and not forced_alpha):\n attrib['fill-opacity'] = _short_float_fmt(rgbFace[3])\n else:\n if rgbFace is None:\n attrib['fill'] = 'none'\n else:\n if tuple(rgbFace[:3]) != (0, 0, 0):\n attrib['fill'] = rgb2hex(rgbFace)\n if (len(rgbFace) == 4 and rgbFace[3] != 1.0\n and not forced_alpha):\n attrib['fill-opacity'] = _short_float_fmt(rgbFace[3])\n\n if forced_alpha and gc.get_alpha() != 1.0:\n attrib['opacity'] = _short_float_fmt(gc.get_alpha())\n\n offset, seq = gc.get_dashes()\n if seq is not None:\n attrib['stroke-dasharray'] = ','.join(\n _short_float_fmt(val) for val in seq)\n attrib['stroke-dashoffset'] = _short_float_fmt(float(offset))\n\n linewidth = gc.get_linewidth()\n if linewidth:\n ", "d_id": 23079, "documentation": { "docstring": "Generate a style string from the GraphicsContext and rgbFace.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 116362, "commit_id": "61f6f6c3c8154fa0629df8a016d449ceded99879", "repo": "mindsdb", "path": "tests/unit/test_executor.py", "file_name": "test_executor.py", "fun_name": "test_union", "commit_message": "union command\n#2852", "code": "def test_union(self, mock_handler):\n\n self.set_handler(mock_handler, name='pg', tables={'tasks': self.df})\n\n # --- use predictor ---\n predictor = {\n 'name': 'task_model',\n 'predict': 'p',\n 'dtypes': {\n 'p': dtype.float,\n 'a': dtype.integer,\n 'b': dtype.categorical,\n 'c': dtype.datetime\n },\n 'predicted_value': 'ccc'\n }\n self.set_predictor(predictor)\n sql = \n # union all\n ret = self.command_executor.execute_command(\n 
parse_sql(sql.format(union='ALL'), dialect='mindsdb'))\n assert ret.error_code is None\n\n ret_df = self.ret_to_df(ret)\n assert list(ret_df.columns) == ['a1', 'target']\n assert ret_df.shape[0] == 3 + 2\n\n # union\n ret = self.command_executor.execute_command(\n parse_sql(sql.format(union=''), dialect='mindsdb'))\n assert ret.error_code is None\n\n ret_df = self.ret_to_df(ret)\n assert list(ret_df.columns) == ['a1', 'target']\n assert ret_df.shape[0] == 3\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 348, "n_words": 85, "vocab_size": 53, "complexity": 1, "nloc": 35, "token_counts": 201, "n_ast_nodes": 346, "n_identifiers": 28, "random_cut": "def test_union(self, mock_handler):\n\n self.set_handler(mock_handler, name='pg', tables={'tasks': self.df})\n\n # --- use predictor ---\n predictor = {\n 'name': 'task_model',\n 'predict': 'p',\n 'dtypes': {\n 'p': dtype.float,\n 'a': dtype.integer,\n 'b': dtype.categorical,\n 'c': dtype.datetime\n },\n 'predicted_value': 'ccc'\n }\n self.set_predictor(predictor)\n sql = \n # union all\n ret = self.command_executor.execute_command(\n parse_sql(sql.format(union='ALL'), dialect='mindsdb'))\n assert ret.error_code is None\n\n ret_df = self.ret_to_df(ret)\n assert list(ret_df.columns) =", "d_id": 25734, "documentation": { "docstring": "\n SELECT a as a1, b as target\n FROM pg.tasks\n UNION {union}\n SELECT model.a as a2, model.p as target2\n FROM pg.tasks as t\n JOIN mindsdb.task_model as model\n WHERE t.a=1 \n ", "n_words": 28, "vocab_size": 20, "n_whitespaces": 131, "language": "en" } }, { "id": 45252, "commit_id": "c75774d3a31efe749f55ba16e782737df9f53af4", "repo": "airflow", "path": "tests/utils/test_db_cleanup.py", "file_name": "test_db_cleanup.py", "fun_name": "test_run_cleanup_tables", "commit_message": "Add `db clean` CLI command for purging old data (#20838)\n\nCLI command to delete old rows from airflow metadata database.\r\nNotes:\r\n* Must supply \"purge before date\".\r\n* Can optionally provide table list.\r\n* Dry run will only print the number of rows meeting criteria.\r\n* If not dry run, will require the user to confirm before deleting.", "code": "def test_run_cleanup_tables(self, clean_table_mock, table_names):\n \n base_kwargs = dict(\n clean_before_timestamp=None,\n dry_run=None,\n verbose=None,\n )\n run_cleanup(**base_kwargs, table_names=table_names)\n assert clean_table_mock.call_count == len(table_names) if table_names else len(config_dict)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 89, "n_words": 21, "vocab_size": 21, "complexity": 2, "nloc": 8, "token_counts": 52, "n_ast_nodes": 79, "n_identifiers": 13, "random_cut": "def test_run_cleanup_tables(self, clean_table_mock, table_names):\n \n base_kwargs = dict(\n ", "d_id": 8522, "documentation": { "docstring": "\n ``_cleanup_table`` should be called for each table in subset if one\n is provided else should be called for all tables.\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 42, "language": "en" } }, { "id": 45040, "commit_id": "56285eee04285d8b6fac90911248d7e9dd5504d8", "repo": "airflow", "path": "tests/models/test_xcom.py", "file_name": "test_xcom.py", "fun_name": "test_set_serialize_call_old_signature", "commit_message": "Add params dag_id, task_id etc to XCom.serialize_value (#19505)\n\nWhen implementing a custom XCom backend, in order to store XCom objects 
organized by dag_id, run_id etc, we need to pass those params to `serialize_value`.", "code": "def test_set_serialize_call_old_signature(self, get_import, session):\n \n serialize_watcher = MagicMock()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 16, "token_counts": 82, "n_ast_nodes": 26, "n_identifiers": 6, "random_cut": "def test_set_serialize_call_old_signature(self, get_import, session):\n \n serialize_watcher =", "d_id": 8455, "documentation": { "docstring": "\n When XCom.serialize_value takes only param ``value``, other kwargs should be ignored.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 241856, "commit_id": "465da5496a8dda099646e9d5947f24dfc0ec44e9", "repo": "scipy", "path": "scipy/stats/_stats_py.py", "file_name": "_stats_py.py", "fun_name": "gmean", "commit_message": "ENH: stats: add `axis` tuple and `nan_policy` to `gmean` (#14657)\n\n* ENH: stats: add `axis` tuple and `nan_policy` to `gmean`\r\n\r\nCo-authored-by: Pamphile ROY ", "code": "def gmean(a, axis=0, dtype=None, weights=None):\n \n if not isinstance(a, np.ndarray):\n # if not an ndarray object attempt to convert it\n log_a = np.log(np.array(a, dtype=dtype))\n elif dtype:\n # Must change the default dtype allowing array type\n if isinstance(a, np.ma.MaskedArray):\n log_a = np.log(np.ma.asarray(a, dtype=dtype))\n else:\n log_a = np.log(np.asarray(a, dtype=dtype))\n else:\n log_a = np.log(a)\n\n if weights is not None:\n weights = np.asanyarray(weights, dtype=dtype)\n\n return np.exp(np.average(log_a, axis=axis, weights=weights))\n\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 152, "n_words": 63, "vocab_size": 45, "complexity": 5, "nloc": 13, "token_counts": 147, "n_ast_nodes": 228, "n_identifiers": 17, "random_cut": "def gmean(a, axis=0, dtype=None, weights=None):\n \n if not isinstance(a, np.ndarray):\n # if not an ndarray object attempt to convert it\n log_a = np.log(np.array(a, dtype=dtype))\n elif dtype:\n # Must change the default dtype allowing array type\n if isinstance(a, np.ma.MaskedArray):\n ", "d_id": 69715, "documentation": { "docstring": "Compute the geometric mean along the specified axis.\n\n Return the geometric average of the array elements.\n That is: n-th root of (x1 * x2 * ... * xn)\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the geometric mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If dtype is not specified, it defaults to the\n dtype of a, unless a has an integer dtype with a precision less than\n that of the default platform integer. 
In that case, the default\n platform integer is used.\n weights : array_like, optional\n The `weights` array must be broadcastable to the same shape as `a`.\n Default is None, which gives each value a weight of 1.0.\n\n Returns\n -------\n gmean : ndarray\n See `dtype` parameter above.\n\n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n hmean : Harmonic mean\n\n Notes\n -----\n The geometric average is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n Beginning in SciPy 1.9, ``np.matrix`` inputs are converted to\n ``np.ndarray``s before the calculation is performed. In this case, the\n output will be a scalar or ``np.ndarray`` of appropriate shape rather than\n a 2D ``np.matrix``. Similarly, while masked elements of masked arrays\n are still ignored, the output will be a scalar or ``np.ndarray`` rather\n than a masked array with ``mask=False``.\n\n References\n ----------\n .. [1] \"Weighted Geometric Mean\", *Wikipedia*, https://en.wikipedia.org/wiki/Weighted_geometric_mean.\n\n Examples\n --------\n >>> from scipy.stats import gmean\n >>> gmean([1, 4])\n 2.0\n >>> gmean([1, 2, 3, 4, 5, 6, 7])\n 3.3800151591412964\n\n ", "n_words": 301, "vocab_size": 177, "n_whitespaces": 493, "language": "en" } }, { "id": 204741, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/serializers/base.py", "file_name": "base.py", "fun_name": "handle_m2m_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def handle_m2m_field(self, obj, field):\n \n raise NotImplementedError(\n \"subclasses of Serializer must provide a handle_m2m_field() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 5, "random_cut": "def handle_m2m_field(self, obj, field):\n \n raise NotImplementedError(\n \"subclasses of Serializer must provide a handle_m2m_field() method\"\n )\n", "d_id": 50868, "documentation": { "docstring": "\n Called to handle a ManyToManyField.\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, { "id": 116724, "commit_id": "db6291bc6a2cbea0154bd41c3abff3f6cfb7bc8a", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/hana_handler/hana_handler.py", "file_name": "hana_handler.py", "fun_name": "check_connection", "commit_message": "feat: add sap hana integration", "code": "def check_connection(self) -> StatusResponse:\n \n\n response = StatusResponse(False)\n need_to_close = self.is_connected is False\n\n try:\n connection = self.connect()\n with connection.cursor() as cur:\n cur.execute('SELECT * FROM SYS.M_DATABASE')\n response.success = True\n except dbapi.Error as e:\n log.error(f'Error connecting to SAP HANA {self.address}, {e}!')\n response.error_message = e\n\n if response.success is True and need_to_close:\n self.disconnect()\n if response.success is False and self.is_connected is True:\n self.is_connected = False\n\n return response\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 209, "n_words": 61, "vocab_size": 42, "complexity": 6, "nloc": 20, "token_counts": 103, "n_ast_nodes": 188, "n_identifiers": 20, 
"random_cut": "def check_connection(self) -> StatusResponse:\n \n\n response = StatusResponse(False)\n need_to_close = self.is_connected is False\n\n try:\n connection = self.connect()\n with connection.cursor() as cur:\n cur.execute('SELECT * FROM SYS.M_DATABASE')\n response.success = True\n except dbapi.Error as e:\n log.error(f'Error connecting to SAP HANA {self.address}, {e}!')\n response.error_message = e\n\n if response.su", "d_id": 25818, "documentation": { "docstring": "\n Check the connection of the SAP HANA database\n :return: success status and error message if error occurs\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 39, "language": "en" } }, { "id": 195867, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/matrices/common.py", "file_name": "common.py", "fun_name": "extract", "commit_message": "Improved documentation formatting", "code": "def extract(self, rowsList, colsList):\n r\n\n if not is_sequence(rowsList) or not is_sequence(colsList):\n raise TypeError(\"rowsList and colsList must be iterable\")\n # ensure rowsList and colsList are lists of integers\n if rowsList and all(isinstance(i, bool) for i in rowsList):\n rowsList = [index for index, item in enumerate(rowsList) if item]\n if colsList and all(isinstance(i, bool) for i in colsList):\n colsList = [index for index, item in enumerate(colsList) if item]\n\n # ensure everything is in range\n rowsList = [a2idx(k, self.rows) for k in rowsList]\n colsList = [a2idx(k, self.cols) for k in colsList]\n\n return self._eval_extract(rowsList, colsList)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 185, "n_words": 90, "vocab_size": 49, "complexity": 15, "nloc": 56, "token_counts": 136, "n_ast_nodes": 208, "n_identifiers": 18, "random_cut": "def extract(self, rowsList, colsList):\n r\n\n if not is_sequence(rowsList) or not is_sequence(colsList):\n rais", "d_id": 47454, "documentation": { "docstring": "Return a submatrix by specifying a list of rows and columns.\n Negative indices can be given. 
All indices must be in the range\n $-n \\le i < n$ where $n$ is the number of rows or columns.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> m = Matrix(4, 3, range(12))\n >>> m\n Matrix([\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [9, 10, 11]])\n >>> m.extract([0, 1, 3], [0, 1])\n Matrix([\n [0, 1],\n [3, 4],\n [9, 10]])\n\n Rows or columns can be repeated:\n\n >>> m.extract([0, 0, 1], [-1])\n Matrix([\n [2],\n [2],\n [5]])\n\n Every other row can be taken by using range to provide the indices:\n\n >>> m.extract(range(0, m.rows, 2), [-1])\n Matrix([\n [2],\n [8]])\n\n RowsList or colsList can also be a list of booleans, in which case\n the rows or columns corresponding to the True values will be selected:\n\n >>> m.extract([0, 1, 2, 3], [True, False, True])\n Matrix([\n [0, 2],\n [3, 5],\n [6, 8],\n [9, 11]])\n ", "n_words": 156, "vocab_size": 95, "n_whitespaces": 426, "language": "en" } }, { "id": 196850, "commit_id": "1eeb01e15f06c6692a5bfd6fd2d2a3002d864a07", "repo": "sympy", "path": "sympy/integrals/integrals.py", "file_name": "integrals.py", "fun_name": "integrate", "commit_message": "Fix a few docstring formatting issues", "code": "def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs):\n \n doit_flags = {\n 'deep': False,\n 'meijerg': meijerg,\n 'conds': conds,\n 'risch': risch,\n 'heurisch': heurisch,\n 'manual': manual\n }\n integral = Integral(*args, **kwargs)\n\n if isinstance(integral, Integral):\n return integral.doit(**doit_flags)\n else:\n new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a\n for a in integral.args]\n return integral.func(*new_args)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 144, "n_words": 48, "vocab_size": 43, "complexity": 4, "nloc": 16, "token_counts": 119, "n_ast_nodes": 190, "n_identifiers": 16, "random_cut": "def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs):\n \n doit_flags = {\n 'deep': False,\n 'meijerg': meijerg,\n 'conds': conds,\n 'risch': risch,\n 'heurisch': heurisch,\n 'manual': manual\n }\n integral = Integral(*args, **kwargs)\n\n if isinstance(integral, Integral):\n return integral.doit(**doit_flags)\n else:\n new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a\n for a in integral.args]\n return integral.func(*new_args)\n\n", "d_id": 48217, "documentation": { "docstring": "integrate(f, var, ...)\n\n Explanation\n ===========\n\n Compute definite or indefinite integral of one or more variables\n using Risch-Norman algorithm and table lookup. This procedure is\n able to handle elementary algebraic and transcendental functions\n and also a huge class of special functions, including Airy,\n Bessel, Whittaker and Lambert.\n\n var can be:\n\n - a symbol -- indefinite integration\n - a tuple (symbol, a) -- indefinite integration with result\n given with ``a`` replacing ``symbol``\n - a tuple (symbol, a, b) -- definite integration\n\n Several variables can be specified, in which case the result is\n multiple integration. (If var is omitted and the integrand is\n univariate, the indefinite integral in that variable will be performed.)\n\n Indefinite integrals are returned without terms that are independent\n of the integration variables. (see examples)\n\n Definite improper integrals often entail delicate convergence\n conditions. 
Pass conds='piecewise', 'separate' or 'none' to have\n these returned, respectively, as a Piecewise function, as a separate\n result (i.e. result will be a tuple), or not at all (default is\n 'piecewise').\n\n **Strategy**\n\n SymPy uses various approaches to definite integration. One method is to\n find an antiderivative for the integrand, and then use the fundamental\n theorem of calculus. Various functions are implemented to integrate\n polynomial, rational and trigonometric functions, and integrands\n containing DiracDelta terms.\n\n SymPy also implements the part of the Risch algorithm, which is a decision\n procedure for integrating elementary functions, i.e., the algorithm can\n either find an elementary antiderivative, or prove that one does not\n exist. There is also a (very successful, albeit somewhat slow) general\n implementation of the heuristic Risch algorithm. This algorithm will\n eventually be phased out as more of the full Risch algorithm is\n implemented. See the docstring of Integral._eval_integral() for more\n details on computing the antiderivative using algebraic methods.\n\n The option risch=True can be used to use only the (full) Risch algorithm.\n This is useful if you want to know if an elementary function has an\n elementary antiderivative. If the indefinite Integral returned by this\n function is an instance of NonElementaryIntegral, that means that the\n Risch algorithm has proven that integral to be non-elementary. Note that\n by default, additional methods (such as the Meijer G method outlined\n below) are tried on these integrals, as they may be expressible in terms\n of special functions, so if you only care about elementary answers, use\n risch=True. Also note that an unevaluated Integral returned by this\n function is not necessarily a NonElementaryIntegral, even with risch=True,\n as it may just be an indication that the particular part of the Risch\n algorithm needed to integrate that function is not yet implemented.\n\n Another family of strategies comes from re-writing the integrand in\n terms of so-called Meijer G-functions. Indefinite integrals of a\n single G-function can always be computed, and the definite integral\n of a product of two G-functions can be computed from zero to\n infinity. Various strategies are implemented to rewrite integrands\n as G-functions, and use this information to compute integrals (see\n the ``meijerint`` module).\n\n The option manual=True can be used to use only an algorithm that tries\n to mimic integration by hand. This algorithm does not handle as many\n integrands as the other algorithms implemented but may return results in\n a more familiar form. The ``manualintegrate`` module has functions that\n return the steps used (see the module docstring for more information).\n\n In general, the algebraic methods work best for computing\n antiderivatives of (possibly complicated) combinations of elementary\n functions. 
The G-function methods work best for computing definite\n integrals from zero to infinity of moderately complicated\n combinations of special functions, or indefinite integrals of very\n simple combinations of special functions.\n\n The strategy employed by the integration code is as follows:\n\n - If computing a definite integral, and both limits are real,\n and at least one limit is +- oo, try the G-function method of\n definite integration first.\n\n - Try to find an antiderivative, using all available methods, ordered\n by performance (that is try fastest method first, slowest last; in\n particular polynomial integration is tried first, Meijer\n G-functions second to last, and heuristic Risch last).\n\n - If still not successful, try G-functions irrespective of the\n limits.\n\n The option meijerg=True, False, None can be used to, respectively:\n always use G-function methods and no others, never use G-function\n methods, or use all available methods (in order as described above).\n It defaults to None.\n\n Examples\n ========\n\n >>> from sympy import integrate, log, exp, oo\n >>> from sympy.abc import a, x, y\n\n >>> integrate(x*y, x)\n x**2*y/2\n\n >>> integrate(log(x), x)\n x*log(x) - x\n\n >>> integrate(log(x), (x, 1, a))\n a*log(a) - a + 1\n\n >>> integrate(x)\n x**2/2\n\n Terms that are independent of x are dropped by indefinite integration:\n\n >>> from sympy import sqrt\n >>> integrate(sqrt(1 + x), (x, 0, x))\n 2*(x + 1)**(3/2)/3 - 2/3\n >>> integrate(sqrt(1 + x), x)\n 2*(x + 1)**(3/2)/3\n\n >>> integrate(x*y)\n Traceback (most recent call last):\n ...\n ValueError: specify integration variables to integrate x*y\n\n Note that ``integrate(x)`` syntax is meant only for convenience\n in interactive sessions and should be avoided in library code.\n\n >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'\n Piecewise((gamma(a + 1), re(a) > -1),\n (Integral(x**a*exp(-x), (x, 0, oo)), True))\n\n >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')\n gamma(a + 1)\n\n >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')\n (gamma(a + 1), re(a) > -1)\n\n See Also\n ========\n\n Integral, Integral.doit\n\n ", "n_words": 865, "vocab_size": 406, "n_whitespaces": 1292, "language": "en" } }, { "id": 186624, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/augeasparser.py", "file_name": "augeasparser.py", "fun_name": "parsed_paths", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def parsed_paths(self) -> List[str]:\n \n\n res_paths: List[str] = []\n\n paths = self.parser.existing_paths\n for directory in paths:\n for filename in paths[directory]:\n res_paths.append(os.path.join(directory, filename))\n\n return res_paths\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 84, "n_words": 23, "vocab_size": 20, "complexity": 3, "nloc": 16, "token_counts": 57, "n_ast_nodes": 89, "n_identifiers": 14, "random_cut": "def parsed_paths(self) -> List[str]:\n \n\n res_paths: List[str] = []\n\n paths = self.parser.existing_paths\n for directory ", "d_id": 45535, "documentation": { "docstring": "\n Returns a list of file paths that have currently been parsed into the parser\n tree. 
The returned list may include paths with wildcard characters, for\n example: ['/etc/apache2/conf.d/*.load']\n\n This is typically called on the root node of the ParserNode tree.\n\n :returns: list of file paths of files that have been parsed\n ", "n_words": 50, "vocab_size": 35, "n_whitespaces": 93, "language": "en" } }, { "id": 264082, "commit_id": "21655572a6af55cefb05d0b0afbeb0b0db39ea19", "repo": "pyinstaller", "path": "PyInstaller/building/utils.py", "file_name": "utils.py", "fun_name": "_check_guts_toc_mtime", "commit_message": "building: clean up the _check_guts_* helpers", "code": "def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build, pyc=False):\n \n for dest_name, src_name, typecode in old_toc:\n if misc.mtime(src_name) > last_build:\n logger.info(\"Building because %s changed\", src_name)\n return True\n elif pyc and typecode == 'PYMODULE':\n py_filename = src_name[:-1]\n if misc.mtime(py_filename) > last_build:\n logger.info(\"Building because %s changed\", py_filename)\n return True\n return False\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 142, "n_words": 45, "vocab_size": 34, "complexity": 6, "nloc": 11, "token_counts": 82, "n_ast_nodes": 131, "n_identifiers": 14, "random_cut": "def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build, pyc=False):\n \n for dest_name, src_name, typecode in old_toc:\n if misc.mtime(src_name) > last_build:\n ", "d_id": 77592, "documentation": { "docstring": "\n Rebuild is required if mtimes of files listed in old TOC are newer than last_build.\n\n If pyc=True, check for .py files as well.\n\n Use this for calculated/analysed values read from cache.\n ", "n_words": 31, "vocab_size": 29, "n_whitespaces": 44, "language": "en" } }, { "id": 116753, "commit_id": "47c5e0ac2d89807f8ff7239d423a3d346bd39a1e", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/teradata_handler/teradata_handler.py", "file_name": "teradata_handler.py", "fun_name": "check_connection", "commit_message": "feat: add teradata integration", "code": "def check_connection(self) -> StatusResponse:\n \n\n response = StatusResponse(False)\n need_to_close = self.is_connected is False\n\n try:\n connection = self.connect()\n with connection.cursor() as cur:\n cur.execute('SELECT 1 FROM (SELECT 1 AS \"dual\") AS \"dual\"')\n response.success = True\n except teradatasql.Error as e:\n log.error(f'Error connecting to Teradata {self.host}, {e}!')\n response.error_message = e\n\n if response.success is True and need_to_close:\n self.disconnect()\n if response.success is False and self.is_connected is True:\n self.is_connected = False\n\n return response\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 213, "n_words": 65, "vocab_size": 44, "complexity": 6, "nloc": 20, "token_counts": 103, "n_ast_nodes": 188, "n_identifiers": 20, "random_cut": "def check_connection(self) -> StatusResponse:\n \n\n response = StatusResponse(False)\n need_to_close = self.is_co", "d_id": 25826, "documentation": { "docstring": "\n Check the connection of the Teradata database\n :return: success status and error message if error occurs\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 38, "language": "en" } }, { "id": 32666, "commit_id": "bd6d1b430080aaf7d9a15f908b95631242da3fb0", "repo": "transformers", "path": "utils/prepare_for_doc_test.py", "file_name": 
"prepare_for_doc_test.py", "fun_name": "process_doc_file", "commit_message": "Add a check regarding the number of occurrences of ``` (#18389)\n\nCo-authored-by: ydshieh ", "code": "def process_doc_file(code_file, add_new_line=True):\n \n with open(code_file, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n code = f.read()\n\n # fmt: off\n splits = code.split(\"```\")\n if len(splits) % 2 != 1:\n raise ValueError(\"The number of occurrences of ``` should be an even number.\")\n\n splits = [s if i % 2 == 0 else process_code_block(s, add_new_line=add_new_line) for i, s in enumerate(splits)]\n clean_code = \"```\".join(splits)\n # fmt: on\n\n diff = clean_code != code\n if diff:\n print(f\"Overwriting content of {code_file}.\")\n with open(code_file, \"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n f.write(clean_code)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 148, "n_words": 79, "vocab_size": 57, "complexity": 5, "nloc": 13, "token_counts": 132, "n_ast_nodes": 236, "n_identifiers": 22, "random_cut": "def process_doc_file(code_file, add_new_line=True):\n \n with open(code_file, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n code = f.read()\n\n # fmt: off\n splits = code.split(\"```\")\n if len(splits) % 2 != 1:\n raise ValueError(\"The number of occurrences of ``` should be an even number.\")\n\n splits = [s if i % 2 == 0 else process_code_block(s, add_new_line=add_new_line) for i, s in enumerate(splits)]\n clean_code = \"```\".join(splits)\n # fmt: ", "d_id": 5969, "documentation": { "docstring": "\n Process given file.\n\n Args:\n code_file (`str` or `os.PathLike`): The file in which we want to style the docstring.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 101241, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "plugins/extract/align/_base.py", "file_name": "_base.py", "fun_name": "finalize", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def finalize(self, batch):\n \n\n for face, landmarks in zip(batch[\"detected_faces\"], batch[\"landmarks\"]):\n if not isinstance(landmarks, np.ndarray):\n landmarks = np.array(landmarks)\n face._landmarks_xy = landmarks\n\n logger.trace(\"Item out: %s\", {key: val.shape if isinstance(val, np.ndarray) else val\n for key, val in batch.items()})\n\n for filename, face in zip(batch[\"filename\"], batch[\"detected_faces\"]):\n self._output_faces.append(face)\n if len(self._output_faces) != self._faces_per_filename[filename]:\n continue\n\n output = self._extract_media.pop(filename)\n output.add_detected_faces(self._output_faces)\n self._output_faces = []\n\n logger.trace(\"Final Output: (filename: '%s', image shape: %s, detected_faces: %s, \"\n \"item: %s)\",\n output.filename, output.image_shape, output.detected_faces, output)\n yield output\n\n # <<< PROTECTED METHODS >>> #\n\n # << PROCESS_INPUT WRAPPER >>", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 330, "n_words": 82, "vocab_size": 66, "complexity": 7, "nloc": 18, "token_counts": 174, "n_ast_nodes": 280, "n_identifiers": 28, 
"random_cut": "def finalize(self, batch):\n \n\n for face, landmarks in zip(batch[\"detected_faces\"], batch[\"landmarks\"]):\n if not isinstance(landmarks, np.ndarray):\n landmarks = np.array(landmarks)\n face._landmarks_xy = landmarks\n\n logger.trace(\"Item out: %s\", {key: val.shape if isinstance(val, np.ndarray) else val\n for key, val in batch.items()})\n\n for filename, face in zip(batch[\"filename\"], batch[\"detected_faces\"]):\n self._output_faces.append(face)", "d_id": 20661, "documentation": { "docstring": " Finalize the output from Aligner\n\n This should be called as the final task of each `plugin`.\n\n Pairs the detected faces back up with their original frame before yielding each frame.\n\n Parameters\n ----------\n batch : dict\n The final ``dict`` from the `plugin` process. It must contain the `keys`:\n ``detected_faces``, ``landmarks``, ``filename``\n\n Yields\n ------\n :class:`~plugins.extract.pipeline.ExtractMedia`\n The :attr:`DetectedFaces` list will be populated for this class with the bounding boxes\n and landmarks for the detected faces found in the frame.\n ", "n_words": 76, "vocab_size": 59, "n_whitespaces": 184, "language": "en" } }, { "id": 209670, "commit_id": "ca10c5cf00425d0178998ec0b006cbb65ddbfb54", "repo": "scapy", "path": "scapy/layers/dcerpc.py", "file_name": "dcerpc.py", "fun_name": "find_dcerpc_interface", "commit_message": "[MS-RPCE] and [MS-SMB] major update (#3683)\n\n* Various fixes regarding DCE/RPC build\r\n\r\n* DCE/RPC sessions\r\n\r\n* Cleanup unused code\r\n\r\n* Add missing GSS_WRAP algo names\r\n\r\n* Add find_dcerpc_interface\r\n\r\n* Split SMB client and server\r\n\r\n* Missing StrFixedLenFieldUtf16\r\n\r\n* Remove unfinished smbserver feature\r\n\r\n* Friendlier getter for SMB2\r\n\r\n* DceRpcNak\r\n\r\n* Improve NDR parsing (a lot)\r\n\r\n* Minor SMB2 improvements\r\n\r\n* BIG NDR refactor + Dissect pointer deferal\r\n\r\n* Build with pointer deferral\r\n\r\n* Small build bugs\r\n\r\n* SMB2 logoff, fix rawToken in SMB standalone\r\n\r\n* Add security providers from MS-RPCE to DCERPC\r\n\r\n* Cleanup ptr_pack of NDRPacketListField\r\n\r\n* Clearer exception in find_dcerpc_interface\r\n\r\n* Add minor_version attribute\r\n\r\n* Fix computation of auth_pad in sec_trailer\r\n\r\n* Fix a WTF bug\r\n\r\n* Compute length for NDR arrays\r\n\r\n* Pass enum to EnumField\r\n\r\n* Match union attributes from response with request\r\n\r\n* Improve SMB server\r\n\r\n* Small bug in pointer deferal dissection\r\n\r\n* Add user-friendly utils\r\n\r\n* Add a few NDR tests\r\n\r\n* More user-friendly improvements\r\n\r\n* Bug: parent not copied in clone_with\r\n\r\n* Build: propagate NDR64 and bug fix\r\n\r\n* Default close response parameters\r\n\r\n* Fix Python 2.7\r\n\r\n* Fix SMB2_Create_Context offset\r\n\r\n* Fix SMB2 create context\r\n\r\n* SMB2: support chain, improvements\r\n\r\n* Fix ioctl error\r\n\r\n* SMB: check computeNTProofStr\r\n\r\n* Fix UTCField default\r\n\r\n* Improve FileId capabilities\r\n\r\n* SMB2: contexts\r\n\r\n* Typos\r\n\r\n* Minor NDRUnion fixes\r\n\r\n* Py2 fixes", "code": "def find_dcerpc_interface(name):\n \n try:\n return next(x for x in DCE_RPC_INTERFACES.values() if x.name == name)\n except StopIteration:\n raise AttributeError(\"Unknown interface !\")\n\n\n# --- NDR fields - [C706] chap 14\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 49, "n_words": 27, "vocab_size": 27, "complexity": 4, "nloc": 5, "token_counts": 35, 
"n_ast_nodes": 62, "n_identifiers": 8, "random_cut": "def find_dcerpc_interface(name):\n \n try:\n return next(x for x in DCE_RPC_INTERFACES.values() if x.name == name)\n except StopItera", "d_id": 52763, "documentation": { "docstring": "\n Find an interface object through the name in the IDL\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 17, "language": "en" } }, { "id": 182367, "commit_id": "8be6ea91f6e8a8d24d385975f1a5a7714cf27894", "repo": "textual", "path": "tests/test_animator.py", "file_name": "test_animator.py", "fun_name": "test_animatable", "commit_message": "fix and test for animator", "code": "def test_animatable():\n \n\n animatable = AnimateTest()\n\n # Fake wall-clock time\n time = 100.0\n\n # Object that does the animation\n animation = SimpleAnimation(\n animatable,\n \"bar\",\n time,\n 3.0,\n start_value=Animatable(20.0),\n end_value=Animatable(50.0),\n final_value=Animatable(50.0),\n easing=lambda x: x,\n )\n\n assert animation(time) is False\n assert animatable.bar.value == 20.0\n\n assert animation(time + 1.0) is False\n assert animatable.bar.value == 30.0\n\n assert animation(time + 2.0) is False\n assert animatable.bar.value == 40.0\n\n assert animation(time + 2.9) is False\n assert pytest.approx(animatable.bar.value, 49.0)\n\n assert animation(time + 3.0) is True # True to indicate animation is complete\n assert animatable.bar.value == 50.0\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 194, "n_words": 86, "vocab_size": 49, "complexity": 1, "nloc": 23, "token_counts": 170, "n_ast_nodes": 222, "n_identifiers": 16, "random_cut": "def test_animatable():\n \n\n animatable = AnimateTest()\n\n # Fake wall-clock time\n time = 100.0\n\n # Object that does the animation\n animation = SimpleAnimation(\n animatable,\n \"bar\",\n time,\n 3.0,\n start_value=Animatable(20.0),\n end_value=Animatable(50.0),\n final_value=Animatable(50.0),\n easing=lambda x: x,\n )\n\n assert ", "d_id": 43806, "documentation": { "docstring": "Test SimpleAnimation works with the Animatable protocol", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 211340, "commit_id": "e55e41945d42db787a0f7c557d53d06a6b24536b", "repo": "PaddleDetection", "path": "ppdet/metrics/map_utils.py", "file_name": "map_utils.py", "fun_name": "update", "commit_message": "Refactor rbox (#6704)\n\n* refactor rbox\r\n\r\n* modify the code of save results\r\n\r\n* fix some problem\r\n\r\n* add .gitignore in dataset/dota\r\n\r\n* fix test anno path", "code": "def update(self, bbox, score, label, gt_box, gt_label, difficult=None):\n \n if difficult is None:\n difficult = np.zeros_like(gt_label)\n\n # record class gt count\n for gtl, diff in zip(gt_label, difficult):\n if self.evaluate_difficult or int(diff) == 0:\n self.class_gt_counts[int(np.array(gtl))] += 1\n\n # record class score positive\n visited = [False] * len(gt_label)\n for b, s, l in zip(bbox, score, label):\n pred = b.tolist() if isinstance(b, np.ndarray) else b\n max_idx = -1\n max_overlap = -1.0\n for i, gl in enumerate(gt_label):\n if int(gl) == int(l):\n if len(gt_box[i]) == 8:\n overlap = calc_rbox_iou(pred, gt_box[i])\n else:\n overlap = jaccard_overlap(pred, gt_box[i],\n self.is_bbox_normalized)\n if overlap > max_overlap:\n max_overlap = overlap\n max_idx = i\n\n if max_overlap > self.overlap_thresh:\n if self.evaluate_difficult or \\\n int(np.array(difficult[max_idx])) == 0:\n if not visited[max_idx]:\n 
self.class_score_poss[int(l)].append([s, 1.0])\n visited[max_idx] = True\n else:\n self.class_score_poss[int(l)].append([s, 0.0])\n else:\n self.class_score_poss[int(l)].append([s, 0.0])\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 650, "n_words": 125, "vocab_size": 81, "complexity": 15, "nloc": 31, "token_counts": 303, "n_ast_nodes": 450, "n_identifiers": 38, "random_cut": "def update(self, bbox, score, label, gt_box, gt_label, difficult=None):\n \n if difficult is None:\n difficult = np.zeros_like(gt_label)\n\n # record class gt count\n for gtl, diff in zip(gt_label, difficult):\n if self.evaluate_difficult or int(diff) == 0:\n self.class_gt_counts[int(np.array(gtl))] += 1\n\n # record class score positive\n visited = [False] * len(gt_label)\n for b, s, l in zip(bbox, score, label):\n pred = b.tolist() if isinstance(b, np.ndarray) else b\n max_idx = -1\n max_overlap = -1.0\n for i, gl in enumerate(gt_label):\n if int(gl) == int(l):\n if len(gt_box[i]) == 8:\n overlap = calc_rbox_iou(pred, gt_box[i])\n else:\n ", "d_id": 53068, "documentation": { "docstring": "\n Update metric statics from given prediction and ground\n truth infomations.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 32, "language": "en" } }, { "id": 73936, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/permission_policies/collections.py", "file_name": "collections.py", "fun_name": "_get_permission_objects_for_actions", "commit_message": "Reformat with black", "code": "def _get_permission_objects_for_actions(self, actions):\n \n permission_codenames = [\n \"%s_%s\" % (action, self.model_name) for action in actions\n ]\n return Permission.objects.filter(\n content_type=self._content_type, codename__in=permission_codenames\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 77, "n_words": 20, "vocab_size": 20, "complexity": 2, "nloc": 7, "token_counts": 42, "n_ast_nodes": 66, "n_identifiers": 12, "random_cut": "def _get_permission_objects_for_actions(self, actions):\n \n permission_codenames = [\n \"%s_%s\" % (action, self.model_name) for action in actions\n ]\n return Permission.objects.filter(\n content_ty", "d_id": 16182, "documentation": { "docstring": "\n Get a queryset of the Permission objects for the given actions\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 321163, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/webengine/webview.py", "file_name": "webview.py", "fun_name": "createWindow", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def createWindow(self, wintype):\n \n debug_type = debug.qenum_key(QWebEnginePage, wintype)\n background = config.val.tabs.background\n\n log.webview.debug(\"createWindow with type {}, background {}\".format(\n debug_type, background))\n\n if wintype == QWebEnginePage.WebWindowType.WebBrowserWindow:\n # Shift-Alt-Click\n target = usertypes.ClickTarget.window\n elif wintype == QWebEnginePage.WebWindowType.WebDialog:\n log.webview.warning(\"{} requested, but we don't support \"\n \"that!\".format(debug_type))\n target = usertypes.ClickTarget.tab\n elif wintype == QWebEnginePage.WebWindowType.WebBrowserTab:\n # Middle-click / Ctrl-Click with Shift\n # FIXME:qtwebengine this also affects target=_blank 
links...\n if background:\n target = usertypes.ClickTarget.tab\n else:\n target = usertypes.ClickTarget.tab_bg\n elif wintype == QWebEnginePage.WebWindowType.WebBrowserBackgroundTab:\n # Middle-click / Ctrl-Click\n if background:\n target = usertypes.ClickTarget.tab_bg\n else:\n target = usertypes.ClickTarget.tab\n else:\n raise ValueError(\"Invalid wintype {}\".format(debug_type))\n\n tab = shared.get_tab(self._win_id, target)\n return tab._widget # pylint: disable=protected-access\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 411, "n_words": 99, "vocab_size": 60, "complexity": 7, "nloc": 25, "token_counts": 172, "n_ast_nodes": 287, "n_identifiers": 31, "random_cut": "def createWindow(self, wintype):\n \n debug_type = debug.qenum_key(QWebEnginePage, wintype)\n background = config.val.tabs.background\n\n log.webview.debug(\"createWindow with type {}, background {}\".format(\n debug_type, background))\n\n if wintype == QWebEnginePage.WebWindowType.WebBrowserWindow:\n # Shift-Alt-Click\n target = usertypes.ClickTarget.window\n elif wintype == QWebEnginePage.WebWindowType.WebDialog:\n log.webview.warning(\"{} requested, but we don't support \"\n \"that!\".format(debug_type))\n target = usertypes.ClickTarget.tab\n elif wintype == QWebEnginePage.WebWindowType.WebBrowserTab:\n # Middle-click / Ctrl-Click with Shift\n # FIXME:qtwebengine this also affects target=_blank links...\n if background:\n target = usertypes.Click", "d_id": 117569, "documentation": { "docstring": "Called by Qt when a page wants to create a new window.\n\n This function is called from the createWindow() method of the\n associated QWebEnginePage, each time the page wants to create a new\n window of the given type. This might be the result, for example, of a\n JavaScript request to open a document in a new window.\n\n Args:\n wintype: This enum describes the types of window that can be\n created by the createWindow() function.\n\n QWebEnginePage::WebBrowserWindow:\n A complete web browser window.\n QWebEnginePage::WebBrowserTab:\n A web browser tab.\n QWebEnginePage::WebDialog:\n A window without decoration.\n QWebEnginePage::WebBrowserBackgroundTab:\n A web browser tab without hiding the current visible\n WebEngineView.\n\n Return:\n The new QWebEngineView object.\n ", "n_words": 106, "vocab_size": 66, "n_whitespaces": 397, "language": "en" } }, { "id": 259662, "commit_id": "0d669dc419524eff7f45032f4c18253e627a055b", "repo": "scikit-learn", "path": "sklearn/ensemble/_gb.py", "file_name": "_gb.py", "fun_name": "predict", "commit_message": "DEP loss_ attribute in gradient boosting (#23079)", "code": "def predict(self, X):\n \n raw_predictions = self.decision_function(X)\n encoded_labels = self._loss._raw_prediction_to_decision(raw_predictions)\n return self.classes_.take(encoded_labels, axis=0)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 63, "n_identifiers": 11, "random_cut": "def predict(self, X):\n \n raw_predictions = self.decision_function(X)\n encode", "d_id": 75856, "documentation": { "docstring": "Predict class for X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. 
Internally, it will be converted to\n ``dtype=np.float32`` and if a sparse matrix is provided\n to a sparse ``csr_matrix``.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n The predicted values.\n ", "n_words": 47, "vocab_size": 39, "n_whitespaces": 140, "language": "en" } }, { "id": 177255, "commit_id": "50ff08de69c6e9541cd6c029bede5dabf56cfe73", "repo": "networkx", "path": "networkx/algorithms/operators/all.py", "file_name": "all.py", "fun_name": "union_all", "commit_message": "Make all.py generator friendly (#5984)\n\n* Make compose_all generator friendly\r\n\r\n* Make disjoint_union_all and intersection_all generator friendly\r\n\r\n* Refactor disjoint_union_all to yield relabeled graphs\r\n\r\n* Make union_all generator friendly\r\n\r\n* Fix intersection_all\r\n\r\n* Fix union_all signature\r\n\r\n* Allow passing an infinite rename generator to union_all\r\n\r\n* Copy over generalizations to binary.py\r\n\r\n* Clean up rename\r\n\r\n* Simplify first_label in disjoint_union_all\r\n\r\n* Simplify disjoint_union_all\r\n\r\n* Add missing R.graph.update in intersection_all", "code": "def union_all(graphs, rename=()):\n \n R = None\n seen_nodes = set()\n\n # rename graph to obtain disjoint node labels", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 17, "vocab_size": 16, "complexity": 8, "nloc": 66, "token_counts": 194, "n_ast_nodes": 35, "n_identifiers": 6, "random_cut": "def union_all(graphs, rename=()):\n \n R = None\n ", "d_id": 42319, "documentation": { "docstring": "Returns the union of all graphs.\n\n The graphs must be disjoint, otherwise an exception is raised.\n\n Parameters\n ----------\n graphs : iterable\n Iterable of NetworkX graphs\n\n rename : iterable , optional\n Node names of graphs can be changed by specifying the tuple\n rename=('G-','H-') (for example). Node \"u\" in G is then renamed\n \"G-u\" and \"v\" in H is renamed \"H-v\". 
Infinite generators (like itertools.count)\n are also supported.\n\n Returns\n -------\n U : a graph with the same type as the first graph in list\n\n Raises\n ------\n ValueError\n If `graphs` is an empty list.\n\n Notes\n -----\n To force a disjoint union with node relabeling, use\n disjoint_union_all(G,H) or convert_node_labels_to integers().\n\n Graph, edge, and node attributes are propagated to the union graph.\n If a graph attribute is present in multiple graphs, then the value\n from the last graph in the list with that attribute is used.\n\n See Also\n --------\n union\n disjoint_union_all\n ", "n_words": 146, "vocab_size": 100, "n_whitespaces": 252, "language": "en" } }, { "id": 261192, "commit_id": "072b481600c48662fd4893fdce461113becd207a", "repo": "scikit-learn", "path": "sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py", "file_name": "test_gradient_boosting.py", "fun_name": "test_unknown_category_that_are_negative", "commit_message": "FIX Treat gradient boosting categoricals outside the bounds as unknown during predict (#24283)", "code": "def test_unknown_category_that_are_negative():\n \n rng = np.random.RandomState(42)\n n_samples = 1000\n X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)]\n y = np.zeros(shape=n_samples)\n y[X[:, 1] % 2 == 0] = 1\n\n hist = HistGradientBoostingRegressor(\n random_state=0,\n categorical_features=[False, True],\n max_iter=10,\n ).fit(X, y)\n\n # Check that negative values from the second column are treated like a\n # missing category\n X_test_neg = np.asarray([[1, -2], [3, -4]])\n X_test_nan = np.asarray([[1, np.nan], [3, np.nan]])\n\n assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan))\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 123, "n_words": 63, "vocab_size": 53, "complexity": 1, "nloc": 14, "token_counts": 157, "n_ast_nodes": 238, "n_identifiers": 26, "random_cut": "def test_unknown_category_that_are_negative():\n \n rng = np.random.RandomState(42)\n ", "d_id": 76685, "documentation": { "docstring": "Check that unknown categories that are negative does not error.\n\n Non-regression test for #24274.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 20, "language": "en" } }, { "id": 161098, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg_extractor/encoder/encoder_layer.py", "file_name": "encoder_layer.py", "fun_name": "forward", "commit_message": "Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update __init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def forward(self, x_input, mask, cache=None):\n \n if isinstance(x_input, tuple):\n x, pos_emb = x_input[0], x_input[1]\n else:\n x, pos_emb = x_input, None\n\n # whether to use macaron style\n if self.feed_forward_macaron is not None:\n residual = x\n if self.normalize_before:\n x = self.norm_ff_macaron(x)\n x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x))\n if not self.normalize_before:\n x = self.norm_ff_macaron(x)\n\n # multi-headed self-attention module\n residual = x\n if self.normalize_before:\n x = self.norm_mha(x)\n\n if cache is None:\n x_q = x\n else:\n assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)\n x_q = x[:, -1:, :]\n 
residual = residual[:, -1:, :]\n mask = None if mask is None else mask[:, -1:, :]\n\n if pos_emb is not None:\n x_att = self.self_attn(x_q, x, x, pos_emb, mask)\n else:\n x_att = self.self_attn(x_q, x, x, mask)\n\n if self.concat_after:\n x_concat = torch.cat((x, x_att), dim=-1)\n x = residual + self.concat_linear(x_concat)\n else:\n x = residual + self.dropout(x_att)\n if not self.normalize_before:\n x = self.norm_mha(x)\n\n # convolution module\n if self.conv_module is not None:\n residual = x\n if self.normalize_before:\n x = self.norm_conv(x)\n x = residual + self.dropout(self.conv_module(x))\n if not self.normalize_before:\n x = self.norm_conv(x)\n\n # feed forward module\n residual = x\n if self.normalize_before:\n x = self.norm_ff(x)\n x = residual + self.ff_scale * self.dropout(self.feed_forward(x))\n if not self.normalize_before:\n x = self.norm_ff(x)\n\n if self.conv_module is not None:\n x = self.norm_final(x)\n\n if cache is not None:\n x = torch.cat([cache, x], dim=1)\n\n if pos_emb is not None:\n return (x, pos_emb), mask\n\n return x, mask\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 764, "n_words": 225, "vocab_size": 80, "complexity": 19, "nloc": 53, "token_counts": 449, "n_ast_nodes": 699, "n_identifiers": 32, "random_cut": "def forward(self, x_input, mask, cache=None):\n \n if isinstance(x_input, tuple):\n x, pos_emb = x_input[0], x_input[1]\n else:\n x, pos_emb = x_input, None\n\n # whether to use macaron style\n if self.feed_forward_macaron is not None:\n", "d_id": 38909, "documentation": { "docstring": "Compute encoded features.\n\n :param torch.Tensor x_input: encoded source features, w/o pos_emb\n tuple((batch, max_time_in, size), (1, max_time_in, size))\n or (batch, max_time_in, size)\n :param torch.Tensor mask: mask for x (batch, max_time_in)\n :param torch.Tensor cache: cache for x (batch, max_time_in - 1, size)\n :rtype: Tuple[torch.Tensor, torch.Tensor]\n ", "n_words": 43, "vocab_size": 31, "n_whitespaces": 92, "language": "en" } }, { "id": 191364, "commit_id": "18aeb720126a68201c7e3b5a617139c27c779496", "repo": "langchain", "path": "tests/unit_tests/test_prompt.py", "file_name": "test_prompt.py", "fun_name": "test_prompt_invalid_template_format", "commit_message": "initial commit", "code": "def test_prompt_invalid_template_format() -> None:\n \n template = \"This is a {foo} test.\"\n input_variables = [\"foo\"]\n with pytest.raises(ValueError):\n Prompt(\n input_variables=input_variables, template=template, template_format=\"bar\"\n )\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 8, "token_counts": 37, "n_ast_nodes": 69, "n_identifiers": 8, "random_cut": "def test_prompt_invalid_template_format() -> None:\n \n template = \"This is a {foo} test.\"\n input_variables = [\"foo\"]\n with pytest.raises(ValueError):\n ", "d_id": 46502, "documentation": { "docstring": "Test initializing a prompt with invalid template format.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 249805, "commit_id": "a3623af74e0af0d2f6cbd37b47dc54a1acd314d5", "repo": "synapse", "path": "tests/rest/admin/test_user.py", "file_name": "test_user.py", "fun_name": "test_medium_does_not_exist", "commit_message": "Add an Admin API endpoint for looking up users based on 3PID (#14405)", "code": "def 
test_medium_does_not_exist(self) -> None:\n \n # test for unknown medium\n url = \"/_synapse/admin/v1/threepid/publickey/users/unknown-key\"\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n\n # test for unknown user with a known medium\n url = \"/_synapse/admin/v1/threepid/email/users/unknown\"\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 205, "n_words": 48, "vocab_size": 28, "complexity": 1, "nloc": 19, "token_counts": 110, "n_ast_nodes": 178, "n_identifiers": 13, "random_cut": "def test_medium_does_not_exist(self) -> None:\n \n # test for unknown medium\n url = \"/_synapse/admin/v1/threepid/publickey/users/unknown-key\"\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n\n # test for unknown user with a known medium\n url = \"/_synapse/admin/v1/threepid/email/users/unknown\"\n\n channel = self.make_request(\n \"GET\",\n url,\n acce", "d_id": 73140, "documentation": { "docstring": "Tests that both a lookup for a medium that does not exist and a user that\n doesn't exist with that third party ID returns a 404", "n_words": 26, "vocab_size": 19, "n_whitespaces": 32, "language": "en" } }, { "id": 337646, "commit_id": "1703b79a797dab765996764707186def7533d8fd", "repo": "accelerate", "path": "src/accelerate/utils/dataclasses.py", "file_name": "dataclasses.py", "fun_name": "deepspeed_config_process", "commit_message": "DeepSpeed Revamp (#405)\n\n* deepspeed revamp\n\n* Update dataclasses.py\n\n* Update deepspeed.py\n\n* quality\n\n* fixing code\n\n* quality\n\n* FIx imports\n\n* saving 16bit model in zero stage 3\n\n1. Saving 16bit model in zero stage 3\n2. zero init in stage 3 support using HFDeepSpeedConfig\n\n* quality\n\n* adding test and fixing bugs\n\n* update makefile for deepspeed tests\n\n* Update test.yml\n\n* adding `deepspeed` as requirement for tests\n\n* Apply suggestions from code review\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\n\n* quality\n\n* addressing comments\n\n* add example and minor updates\n\n1. Add example to show the usage of config file with revamped deepspeed support.\n2. update required deepspeed version to 0.6.5\n2. reverting `reinit` change as it is not required,\n3. raising Exception when using `clip_grad_value` with DeepSpeed/FSDP.\n\n* Documentation and Zero-3 Inference Support\n\n1. Changes to support ZeRo Stage-3 Inference support.\n2. minor bug fixes.\n3. Documentation.\n\n* doc fix\n\n* Apply suggestions from code review\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\n\n* addressing comments\n\n* update doc to address comments and bug fixes\n\n1. update tests and add new one testing autofill functionality of `prepare` method.\n2. fix bug related to zero-3 init related to HFDeepSpeedConfig\n3. 
Update documentation addressing comments.\n\n* removing image and hosting it on `documentation-images` dataset\n\n* check for hidden_size for zero_opt heurisitics\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def deepspeed_config_process(self, prefix=\"\", mismatches=None, config=None, must_match=True, **kwargs):\n \n mismatches = [] if mismatches is None else mismatches\n if config is None:\n config = self.deepspeed_config\n for key, value in config.items():\n if isinstance(value, dict):\n self.deepspeed_config_process(\n prefix=prefix + key + \".\", mismatches=mismatches, config=value, must_match=must_match, **kwargs\n )\n else:\n self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs)\n if len(mismatches) > 0 and prefix == \"\":\n mismatches_msg = \"\\n\".join(mismatches)\n raise ValueError(\n \"Please correct the following DeepSpeed config values that mismatch kwargs \"\n f\" values:\\n{mismatches_msg}\\nThe easiest method is to set these DeepSpeed config values to 'auto'.\"\n )\n\n\n@dataclass", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "@dataclass", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 282, "n_words": 88, "vocab_size": 68, "complexity": 7, "nloc": 17, "token_counts": 137, "n_ast_nodes": 228, "n_identifiers": 19, "random_cut": "def deepspeed_config_process(self, prefix=\"\", mismatches=None, config=None, must_match=True, **kwargs):\n \n mismatches = [] if mismatches is None else mismatches\n if config is None:\n config = self.deepspeed_config\n for key, value in config.items():\n if isinstance(value, dict):\n self.deepspeed_config_process(\n prefix=prefix + key + \".\", mismatches=mismatches, config=value, must_match=must", "d_id": 121103, "documentation": { "docstring": "Process the DeepSpeed config with the values from the kwargs.", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 279889, "commit_id": "8cf91871ce167d63069c99120f8580a4976a59d0", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "get_metrics_result", "commit_message": "Expose Model get_metrics_result on Keras Model as a public API\n\nPiperOrigin-RevId: 475681912", "code": "def get_metrics_result(self):\n \n # Collect metrics to return\n return_metrics = {}\n for metric in self.metrics:\n result = metric.result()\n if isinstance(result, dict):\n return_metrics.update(result)\n else:\n return_metrics[metric.name] = result\n return return_metrics\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 125, "n_words": 27, "vocab_size": 22, "complexity": 3, "nloc": 9, "token_counts": 50, "n_ast_nodes": 84, "n_identifiers": 10, "random_cut": "def get_metrics_result(self):\n \n # Collect metrics to return\n return_metrics = {}\n for metric in self.metrics:\n result = metric.r", "d_id": 83169, "documentation": { "docstring": "Returns the model's metrics values as a dict.\n\n If any of the metric result is a dict (containing multiple metrics),\n each of them gets added to the top level returned dict of this method.\n\n Returns:\n A `dict` containing values of the metrics listed in `self.metrics`.\n Example:\n `{'loss': 0.2, 'accuracy': 0.7}`.\n ", "n_words": 50, "vocab_size": 40, "n_whitespaces": 105, "language": "en" } }, { "id": 218899, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": 
"python3.10.4/Lib/lib2to3/refactor.py", "file_name": "refactor.py", "fun_name": "refactor_docstring", "commit_message": "add python 3.10.4 for windows", "code": "def refactor_docstring(self, input, filename):\n \n result = []\n block = None\n block_lineno = None\n indent = None\n lineno = 0\n for line in input.splitlines(keepends=True):\n lineno += 1\n if line.lstrip().startswith(self.PS1):\n if block is not None:\n result.extend(self.refactor_doctest(block, block_lineno,\n indent, filename))\n block_lineno = lineno\n block = [line]\n i = line.find(self.PS1)\n indent = line[:i]\n elif (indent is not None and\n (line.startswith(indent + self.PS2) or\n line == indent + self.PS2.rstrip() + \"\\n\")):\n block.append(line)\n else:\n if block is not None:\n result.extend(self.refactor_doctest(block, block_lineno,\n indent, filename))\n block = None\n indent = None\n result.append(line)\n if block is not None:\n result.extend(self.refactor_doctest(block, block_lineno,\n indent, filename))\n return \"\".join(result)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 593, "n_words": 95, "vocab_size": 47, "complexity": 9, "nloc": 31, "token_counts": 211, "n_ast_nodes": 333, "n_identifiers": 23, "random_cut": "def refactor_docstring(self, input, filename):\n \n result = []\n block = None\n block_lineno = None\n indent = None\n lineno = 0\n for line in input.splitlines(keepends=True):\n lineno += 1\n if line.lstrip().startswith(self.PS1):\n if block is not None:\n result.extend(self.refactor_doctest(block, block_lineno,\n indent, filename))\n block_lineno = lineno\n block = [line", "d_id": 55539, "documentation": { "docstring": "Refactors a docstring, looking for doctests.\n\n This returns a modified version of the input string. 
It looks\n for doctests, which start with a \">>>\" prompt, and may be\n continued with \"...\" prompts, as long as the \"...\" is indented\n the same as the \">>>\".\n\n (Unfortunately we can't use the doctest module's parser,\n since, like most parsers, it is not geared towards preserving\n the original source.)\n ", "n_words": 65, "vocab_size": 52, "n_whitespaces": 122, "language": "en" } }, { "id": 45557, "commit_id": "ace8c6e942ff5554639801468b971915b7c0e9b9", "repo": "airflow", "path": "tests/utils/test_edgemodifier.py", "file_name": "test_edgemodifier.py", "fun_name": "test_complex_reversed_dag", "commit_message": "EdgeModifier refactoring (#21404)", "code": "def test_complex_reversed_dag(self, test_complex_taskgroup_dag, complex_dag_expected_edges):\n \n (\n dag,\n group,\n (\n group_dm1,\n group_dm2,\n group_dm3,\n dm_in1,\n dm_in2,\n dm_in3,\n dm_in4,\n dm_out1,\n dm_out2,\n dm_out3,\n dm_out4,\n op_in1,\n op_out1,\n ),\n ) = test_complex_taskgroup_dag\n\n group_dm1 << [group_dm2, group_dm3]\n\n group << dm_in1\n group << Label('label dm_in2 <=> group') << dm_in2\n group << Label('label dm_in3/dm_in4 <=> group') << [dm_in3, dm_in4]\n group << Label('label op_in1 <=> group') << XComArg(op_in1, 'test_key')\n\n dm_out1 << group\n dm_out2 << Label('label group <=> dm_out2') << group\n [dm_out3, dm_out4] << Label('label group <=> dm_out3/dm_out4') << group\n XComArg(op_out1, 'test_key') << Label('label group <=> op_out1') << group\n\n compare_dag_edges(dag_edges(dag), complex_dag_expected_edges)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 419, "n_words": 89, "vocab_size": 50, "complexity": 1, "nloc": 30, "token_counts": 150, "n_ast_nodes": 210, "n_identifiers": 23, "random_cut": "def test_complex_reversed_dag(self, test_complex_taskgroup_dag, complex_dag_expected_edges):\n \n (\n dag,\n group,\n (\n group_dm1,\n group_dm2,\n group_dm3,\n dm_in1,\n dm_in2,\n dm_in3,\n dm_in4,\n dm_out1,\n dm_out2,\n dm_out3,\n dm_out4,\n op_in1,\n op_out1,\n ),\n ) = test_complex_taskgroup_dag\n\n group_dm1 << [group_dm2, group_dm3]\n\n group << dm_in1\n group << Label('label dm_in2 <=> group') << dm_in2\n group << Label('label dm_in3/dm_in4 <=> group') << [dm_in3, dm_in4]\n group << Label('label op_in1 <=> group') << XComArg(op_in1, 'test_key')\n\n dm_out1 << group\n dm_out2 << Label('label group <=> dm_out2') << group\n [dm_out3, dm_out4] << Label('label group <=> dm_out3/dm_out4') << group\n XComArg(op_out1, 'test_key') << Label('label group <=> op_out1') << group\n\n compare_dag_edges(dag_edges(dag), complex_dag_expected_edges)\n", "d_id": 8643, "documentation": { "docstring": "Tests the complex reversed dag with a TaskGroup and a Label", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 322323, "commit_id": "4c36ef9e41ea6b0e43935bdf6b2f1b4a1f8de809", "repo": "PaddleNLP", "path": "paddlenlp/ops/faster_transformer/sample/plato_inference.py", "file_name": "plato_inference.py", "fun_name": "postprocess_response", "commit_message": "FasterUnifiedTransformer/PLATO support dy2sta (#1717)\n\n* support ut dy2sta\r\n\r\n* use jit load", "code": "def postprocess_response(token_ids, tokenizer):\n \n eos_pos = len(token_ids)\n for i, tok_id in enumerate(token_ids):\n if tok_id == tokenizer.sep_token_id:\n eos_pos = i\n break\n token_ids = token_ids[:eos_pos]\n tokens = tokenizer.convert_ids_to_tokens(token_ids)\n tokens = tokenizer.merge_subword(tokens)\n return tokens\n\n", 
"url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 80, "n_words": 30, "vocab_size": 22, "complexity": 3, "nloc": 10, "token_counts": 60, "n_ast_nodes": 98, "n_identifiers": 12, "random_cut": "def postprocess_response(token_ids, tokenizer):\n \n eos_pos = len(token_ids)\n for i, tok_id in enumerate(token_ids):\n if tok_id == tokenizer.sep_token_id:\n eos_pos = i\n break\n token_ids = token_ids[:eos_pos]\n tokens = tokenizer.convert_ids_to_tokens(token_ids)\n tokens = tokenizer.merge_subword(tokens)\n return tokens\n\n", "d_id": 118121, "documentation": { "docstring": "Post-process the decoded sequence. Truncate from the first .", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 338219, "commit_id": "693d46826e32507376d44f99967df4710886c984", "repo": "accelerate", "path": "src/accelerate/accelerator.py", "file_name": "accelerator.py", "fun_name": "clip_grad_norm_", "commit_message": "Return unclipped gradient from grad_clip_norm_ (#756)", "code": "def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n \n if self.distributed_type == DistributedType.FSDP:\n self.unscale_gradients()\n parameters = [p for p in parameters]\n for model in self._models:\n if parameters == [p for p in model.parameters()]:\n return model.clip_grad_norm_(max_norm, norm_type)\n elif self.distributed_type == DistributedType.DEEPSPEED:\n # `accelerator.backward(loss)` is doing that automatically. Therefore, it's implementation is not needed\n # We cannot return the gradient norm because DeepSpeed does it.\n return None\n self.unscale_gradients()\n return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 201, "n_words": 66, "vocab_size": 48, "complexity": 7, "nloc": 11, "token_counts": 101, "n_ast_nodes": 156, "n_identifiers": 16, "random_cut": "def clip_grad_norm_(self, parameters, max_norm, norm_type=2):\n \n if self.distributed_type == DistributedType.FSDP:\n self.unscale_gradients()\n parameters = [p for p in parameters]\n for model in self._models:\n if parameters == [p for p in model.parameters()]:\n return mod", "d_id": 121193, "documentation": { "docstring": "\n Should be used in place of `torch.nn.utils.clip_grad_norm_`.\n\n Returns:\n `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector).\n\n Example:\n\n ```python\n >>> from accelerate import Accelerator\n\n >>> accelerator = Accelerator(gradient_accumulation_steps=2)\n >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)\n\n >>> for (input, target) in dataloader:\n ... optimizer.zero_grad()\n ... output = model(input)\n ... loss = loss_func(output, target)\n ... accelerator.backward(loss)\n ... if accelerator.sync_gradients:\n ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)\n ... 
optimizer.step()\n ```\n ", "n_words": 69, "vocab_size": 52, "n_whitespaces": 232, "language": "en" } }, { "id": 222663, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/build_clib.py", "file_name": "build_clib.py", "fun_name": "check_library_list", "commit_message": "add python 3.10.4 for windows", "code": "def check_library_list(self, libraries):\n \n if not isinstance(libraries, list):\n raise DistutilsSetupError(\n \"'libraries' option must be a list of tuples\")\n\n for lib in libraries:\n if not isinstance(lib, tuple) and len(lib) != 2:\n raise DistutilsSetupError(\n \"each element of 'libraries' must a 2-tuple\")\n\n name, build_info = lib\n\n if not isinstance(name, str):\n raise DistutilsSetupError(\n \"first element of each tuple in 'libraries' \"\n \"must be a string (the library name)\")\n\n if '/' in name or (os.sep != '/' and os.sep in name):\n raise DistutilsSetupError(\"bad library name '%s': \"\n \"may not contain directory separators\" % lib[0])\n\n if not isinstance(build_info, dict):\n raise DistutilsSetupError(\n \"second element of each tuple in 'libraries' \"\n \"must be a dictionary (build info)\")\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 399, "n_words": 108, "vocab_size": 65, "complexity": 10, "nloc": 20, "token_counts": 113, "n_ast_nodes": 199, "n_identifiers": 15, "random_cut": "def check_library_list(self, libraries):\n \n if not isinstance(libraries, list):\n raise DistutilsSetupError(\n \"'libraries' option must be a list of tuples\")\n\n for lib in libraries:\n if not isinstance(lib, tuple) and len(lib) != 2:\n raise DistutilsSetupError(\n \"each element of 'libraries' must a 2-tuple\")\n\n name, build_info = lib\n\n if not isinstance(name, str):\n raise DistutilsSetupError(\n \"first element of each tuple in 'libraries' \"\n \"must be a string (the library name)\")\n\n if '/' in name or (os.sep != '/' and os.sep in name):\n raise DistutilsSetupError(\"bad library name '%s': \"\n \"may not contain directory separators\" % lib[0])\n\n if not isinstance(build_info, dict):\n raise DistutilsSetupError(\n \"second element of each tuple in 'libraries' \"\n \"mus", "d_id": 56689, "documentation": { "docstring": "Ensure that the list of libraries is valid.\n\n `library` is presumably provided as a command option 'libraries'.\n This method checks that it is a list of 2-tuples, where the tuples\n are (library_name, build_info_dict).\n\n Raise DistutilsSetupError if the structure is invalid anywhere;\n just returns otherwise.\n ", "n_words": 44, "vocab_size": 35, "n_whitespaces": 86, "language": "en" } }, { "id": 20431, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/lexer.py", "file_name": "lexer.py", "fun_name": "_process_new_state", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now 
in pip 22.0.4", "code": "def _process_new_state(cls, new_state, unprocessed, processed):\n \n if isinstance(new_state, str):\n # an existing state\n if new_state == '#pop':\n return -1\n elif new_state in unprocessed:\n return (new_state,)\n elif new_state == '#push':\n return new_state\n elif new_state[:5] == '#pop:':\n return -int(new_state[5:])\n else:\n assert False, 'unknown new state %r' % new_state\n elif isinstance(new_state, combined):\n # combine a new state from existing ones\n tmp_state = '_tmp_%d' % cls._tmpname\n cls._tmpname += 1\n itokens = []\n for istate in new_state:\n assert istate != new_state, 'circular state ref %r' % istate\n itokens.extend(cls._process_state(unprocessed,\n processed, istate))\n processed[tmp_state] = itokens\n return (tmp_state,)\n elif isinstance(new_state, tuple):\n # push more than one state\n for istate in new_state:\n assert (istate in unprocessed or\n istate in ('#pop', '#push')), \\\n 'unknown new state ' + istate\n return new_state\n else:\n assert False, 'unknown new state def %r' % new_state\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 563, "n_words": 130, "vocab_size": 69, "complexity": 11, "nloc": 30, "token_counts": 177, "n_ast_nodes": 288, "n_identifiers": 16, "random_cut": "def _process_new_state(cls, new_state, unprocessed, processed):\n \n if isinstance(new_state, str):\n # an existing state\n if new_state == '#pop':\n return -1\n elif new_state in unprocessed:\n return (new_state,)\n elif new_state == '#push':\n return new_state\n elif new_state[:5] == '#pop:':\n return -int(new_state[5:])\n else:\n assert False, 'unknown new state %r' % new_state\n elif isinstance(new_state, combined):\n # combine a new state from existing ones\n tmp_state = '_tmp_%d' % cls._tmpname\n cls._tmpname += 1\n itokens = []\n for istate in new_state:\n assert istate != new_state, 'circular state ref %r' % istate\n itokens.extend(cls", "d_id": 3366, "documentation": { "docstring": "Preprocess the state transition action of a token definition.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 259226, "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", "repo": "scikit-learn", "path": "sklearn/preprocessing/tests/test_encoders.py", "file_name": "test_encoders.py", "fun_name": "test_ohe_infrequent_two_levels_user_cats", "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move 
docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_ohe_infrequent_two_levels_user_cats():\n \n X_train = np.array(\n [[\"a\"] * 5 + [\"b\"] * 20 + [\"c\"] * 10 + [\"d\"] * 3], dtype=object\n ).T\n ohe = OneHotEncoder(\n categories=[[\"c\", \"d\", \"a\", \"b\"]],\n sparse=False,\n handle_unknown=\"infrequent_if_exist\",\n max_categories=2,\n ).fit(X_train)\n\n assert_array_equal(ohe.infrequent_categories_, [[\"c\", \"d\", \"a\"]])\n\n X_test = [[\"b\"], [\"a\"], [\"c\"], [\"d\"], [\"e\"]]\n expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]])\n\n X_trans = ohe.transform(X_test)\n assert_allclose(expected, X_trans)\n\n # 'infrequent' is used to denote the infrequent categories for\n # `inverse_transform`\n expected_inv = [[col] for col in [\"b\"] + [\"infrequent_sklearn\"] * 4]\n X_inv = ohe.inverse_transform(X_trans)\n assert_array_equal(expected_inv, X_inv)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 169, "n_words": 89, "vocab_size": 67, "complexity": 2, "nloc": 18, "token_counts": 203, "n_ast_nodes": 332, "n_identifiers": 25, "random_cut": "def test_ohe_infrequent_two_levels_user_cats():\n \n X_train = np.array(\n [[\"a\"] 
* 5 + [\"b\"] * 20 + [\"c\"] * 10 + [\"d\"] * 3], dtyp", "d_id": 75661, "documentation": { "docstring": "Test that the order of the categories provided by a user is respected.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 260354, "commit_id": "db6123fe40400828918037f3fae949bfcc4d9d05", "repo": "scikit-learn", "path": "sklearn/decomposition/_sparse_pca.py", "file_name": "_sparse_pca.py", "fun_name": "fit", "commit_message": "MAINT Use _validate_params in SparsePCA and MiniBatchSparsePCA (#23710)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: jeremiedbb ", "code": "def fit(self, X, y=None):\n \n self._validate_params()\n random_state = check_random_state(self.random_state)\n X = self._validate_data(X)\n\n self.mean_ = X.mean(axis=0)\n X = X - self.mean_\n\n if self.n_components is None:\n n_components = X.shape[1]\n else:\n n_components = self.n_components\n\n return self._fit(X, n_components, random_state)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 119, "n_words": 34, "vocab_size": 24, "complexity": 2, "nloc": 11, "token_counts": 85, "n_ast_nodes": 135, "n_identifiers": 14, "random_cut": "def fit(self, X, y=None):\n \n self._v", "d_id": 76200, "documentation": { "docstring": "Fit the model from data in X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n ", "n_words": 53, "vocab_size": 43, "n_whitespaces": 153, "language": "en" } }, { "id": 55077, "commit_id": "808660dd04465fc796a34e835467e8ae1f2449b3", "repo": "prefect", "path": "tests/cli/test_profile.py", "file_name": "test_profile.py", "fun_name": "test_create_profile", "commit_message": "Add tests for profile CLI", "code": "def test_create_profile():\n invoke_and_assert(\n [\"profile\", \"create\", \"foo\"],\n expected_output=(\n f\n ),\n )\n\n profiles = load_profiles()\n assert profiles[\"foo\"] == Profile(\n name=\"foo\", settings={}, source=PREFECT_PROFILES_PATH.value()\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 21, "token_counts": 52, "n_ast_nodes": 89, "n_identifiers": 11, "random_cut": "def test_create_profile():\n invoke_and_assert(\n [\"profile\", \"create\", \"foo\"],\n expected_output=(\n f\n ),\n )\n\n profiles = load_profiles()\n assert profiles[\"foo\"] == Profile(\n name=\"foo\", settings={}, source=PREFECT_PROFI", "d_id": 11200, "documentation": { "docstring": "\n Created profile 'foo'.\n\n Switch to your new profile with:\n\n prefect profile use 'foo'\n\n Or, to use it for a single command, include the `-p` option:\n\n prefect -p 'foo' config view\n ", "n_words": 30, "vocab_size": 24, "n_whitespaces": 105, "language": "en" } }, { "id": 109648, "commit_id": "907f78dbf959c0609ab484c59e840eea3eafee31", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_axes.py", "file_name": "test_axes.py", "fun_name": "test_mixed_errorbar_polar_caps", "commit_message": "Curved polar errorbars\n\n - uses _interpolation_steps\n - prefers transform MarkerStyle in init over _transform property\n - adjusted what's new\n - 
added more tests for overlapping, asymmetric and long errorbars\n - combine all tests to a single figure\n - remove overlappnig since it does not work same on all platforms\n - rework test figure, add overlapping, might work by avoiding grid\n - update what's new with image and link to example", "code": "def test_mixed_errorbar_polar_caps():\n \n fig = plt.figure()\n ax = plt.subplot(111, projection='polar')\n\n # symmetric errorbars\n th_sym = [1, 2, 3]\n r_sym = [0.9]*3\n ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt=\"o\")\n\n # long errorbars\n th_long = [np.pi/2 + .1, np.pi + .1]\n r_long = [1.8, 2.2]\n ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt=\"o\")\n\n # asymmetric errorbars\n th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1]\n r_asym = [1.1]*3\n xerr = [[.3, .3, .2], [.2, .3, .3]]\n yerr = [[.35, .5, .5], [.5, .35, .5]]\n ax.errorbar(th_asym, r_asym, xerr=xerr, yerr=yerr, fmt=\"o\")\n\n # overlapping errorbar\n th_over = [2.1]\n r_over = [3.1]\n ax.errorbar(th_over, r_over, xerr=10, yerr=.2, fmt=\"o\")\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 160, "n_words": 97, "vocab_size": 72, "complexity": 1, "nloc": 17, "token_counts": 273, "n_ast_nodes": 348, "n_identifiers": 21, "random_cut": "def test_mixed_errorbar_polar_caps():\n \n fig = plt.figure()\n ax = plt.subplot(111, projection='polar')\n\n # symmetric errorbars\n th_sym = [1, 2, 3]\n r_sym = [0.9]*3\n ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt=\"o\")\n\n # long errorbars\n th_long = [np.pi/2 + .1, np.pi + .1]\n r_long = [1.8, 2.2]\n ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt=\"o\")\n\n # asymmetric errorbars\n th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1]\n r_asym = [1.1]*3\n xerr = [[.3, .3, .2], [.2, .3, .3]]\n ", "d_id": 23693, "documentation": { "docstring": "\n Mix several polar errorbar use cases in a single test figure.\n\n It is advisable to position individual points off the grid. 
If there are\n problems with reproducibility of this test, consider removing grid.\n ", "n_words": 33, "vocab_size": 32, "n_whitespaces": 46, "language": "en" } }, { "id": 176114, "commit_id": "20ca6e2fa7bab2adc8c37d8c42049076c692782e", "repo": "edgedb", "path": "tests/test_eval_model.py", "file_name": "test_eval_model.py", "fun_name": "test_edgeql_for_01", "commit_message": "Pull assert_data_shape out of testbase.server and use it for model tests (#3315)", "code": "def test_edgeql_for_01(self):\n self.assert_test_query(\n r,\n {(1, 1), (2, 2), (3, 3)},\n )\n", "url": "https://github.com/edgedb/edgedb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 33, "n_ast_nodes": 46, "n_identifiers": 3, "random_cut": "def test_edgeql_for_01(self):\n self.assert_test_query(\n r,\n {(1, 1), (2, 2), (3, 3)},\n )\n", "d_id": 41701, "documentation": { "docstring": "\n FOR X IN {1,2,3} UNION ((SELECT X), (SELECT X));\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 32, "language": "en" } }, { "id": 320737, "commit_id": "a20bb67a878b2e68abf8268c1b0a27f018d01352", "repo": "qutebrowser", "path": "qutebrowser/browser/downloadview.py", "file_name": "downloadview.py", "fun_name": "on_clicked", "commit_message": "mypy: Upgrade to PyQt5-stubs 5.15.6.0\n\nFor some unknown reason, those new stubs cause a *lot* of things now to be\nchecked by mypy which formerly probably got skipped due to Any being implied\nsomewhere.\n\nThe stubs themselves mainly improved, with a couple of regressions too.\n\nIn total, there were some 337 (!) new mypy errors. This commit fixes almost all\nof them, and the next commit improves a fix to get things down to 0 errors\nagain.\n\nOverview of the changes:\n\n==== qutebrowser/app.py\n\n- Drop type ignore due to improved stubs.\n\n==== qutebrowser/browser/browsertab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n This is debatable: I suppose the abstract stuff shouldn't need to know\n anything about the concrete backends at all. But it seems like we cut some\n corners when initially implementing things, and put some code in browsertab.py\n just because the APIs of both backends happened to be compatible. Perhaps\n something to reconsider once we drop QtWebKit and hopefully implement a dummy\n backend.\n\n- Add an additional assertion in AbstractAction.run_string. This is already\n covered by the isinstance(member, self.action_base) above it, but that's too\n dynamic for mypy to understand.\n\n- Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x\n and y components), not a single int.\n\n- Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x\n and y components), not a single int.\n\n- Fix the argument types of AbstractScroller.to_perc, as it's possible to pass\n fractional percentages too.\n\n- Specify the type for AbstractHistoryPrivate._history. 
See above (_widget) re\n this being debatable.\n\n- Fix the return type of AbstractTabPrivate.event_target(), which can be None\n (see #3888).\n\n- Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS\n return value), not None.\n\n- Fix the argument type for AbstractTabPrivate.toggle_inspector: position can\n be None to use the last used position.\n\n- Declare the type of sub-objects of AbstractTab.\n\n- Fix the return value of AbstractTab.icon(), which is the QIcon, not None.\n\n==== qutebrowser/browser/commands.py\n\n- Make sure the active window is a MainWindow (with a .win_id attribute).\n\n==== qutebrowser/browser/downloadview.py\n\n- Add _model() which makes sure that self.model() is a DownloadModel, not None\n or any other model. This is needed because other methods access a variety of\n custom attributes on it, e.g. last_index().\n\n==== qutebrowser/browser/greasemonkey.py\n\n- Add an ignore for AbstractDownload.requested_url which we patch onto the\n downloads. Probably would be nicer to add it as a proper attribute which always\n gets set by the DownloadManager.\n\n==== qutebrowser/browser/hints.py\n\n- Remove type ignores for QUrl.toString().\n- Add a new type ignore for combining different URL flags (which works, but is\n not exactly type safe... still probably a regression in the stubs).\n- Make sure the things we get back from self._get_keyparser are what we actually\n expect. Probably should introduce a TypedDict (and/or overloads for\n _get_keyparser with typing.Literal) to teach mypy about the exact return value.\n See #7098.\n This is needed because we access Hint/NormalKeyParser-specific attributes such\n as .set_inhibited_timout() or .update_bindings().\n\n==== qutebrowser/browser/inspector.py\n\n- Similar changes than in browsertab.py to make some types where we share API\n (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next\n commit.\n\n==== qutebrowser/browser/network/pac.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/qtnetworkdownloads.py\n\n- Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an\n AbstractDownload), so that we can call ._uses_nam() on it.\n\n==== qutebrowser/browser/qutescheme.py\n\n- Remove now unneeded type ignore for QUrl flags.\n\n==== qutebrowser/browser/urlmarks.py\n\n- Specify the type of UrlMarkManager._lineparser, as those only get initialized\n in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist.\n\n==== qutebrowser/browser/webelem.py\n\n- New casts to turn single KeyboardModifier (enum) entries into\n KeyboardModifiers (flags). 
Might not be needed anymore with Qt 6.\n- With that, casting the final value is now unneeded.\n\n==== qutebrowser/browser/webengine/notification.py\n\n- Remove now unneeded type ignore for signal.\n- Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished()\n is a QProcess, not just any QObject.\n\n==== qutebrowser/browser/webengine/webenginedownloads.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webengine/webengineelem.py\n\n- Specify the type of WebEngineElement._tab.\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webengineinspector.py\n\n- See changes to inspector.py and next commit.\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/webengine/webenginequtescheme.py\n\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webenginesettings.py\n\n- Ignore access of .setter attribute which we patch onto QWebEngineProfile.\n Would be nice to have a subclass or wrapper-class instead.\n\n==== qutebrowser/browser/webengine/webenginetab.py\n\n- Specified the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Remove some now-unneeded type ignores for creating FindFlags.\n- Specify more concrete types for WebEngineTab members where we actually need to\n access WebEngine-specific attributes.\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webengine/webview.py\n\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webkit/network/networkreply.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webkit/webkitinspector.py\n\n- See changes to inspector.py and next commit.\n\n==== qutebrowser/browser/webkit/webkittab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Add a type ignore for WebKitAction because our workaround needs to\n treat them as ints (which is allowed by PyQt, even if not type-safe).\n- Add new ignores for findText calls: The text is a QString and can be None; the\n flags are valid despite mypy thinking they aren't (stubs regression?).\n- Specify the type for WebKitHistoryPrivate._history, because we access\n WebKit-specific attributes. See above (_widget) re this being debatable.\n- Make mypy aware that .currentFrame() and .frameAt() can return None (stubs\n regression?).\n- Make sure the .page() and .page().networkAccessManager() are our subclasses\n rather than the more generic QtWebKit objects, as we use custom attributes.\n- Add new type ignores for signals (stubs regression!)\n\n==== qutebrowser/browser/webkit/webpage.py\n\n- Make sure the .networkAccessManager() is our subclass rather than the more\n generic QtWebKit object, as we use custom attributes.\n- Replace a cast by a type ignore. 
The cast didn't work anymore.\n\n==== qutebrowser/browser/webkit/webview.py\n\n- Make sure the .page() is our subclass rather than the more generic QtWebKit\n object, as we use custom attributes.\n\n==== qutebrowser/commands/userscripts.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/completion/completer.py\n\n- Add a new _completion() getter (which ensures it actually gets the completion\n view) rather than accessing the .parent() directly (which could be any QObject).\n\n==== qutebrowser/completion/completiondelegate.py\n\n- Make sure self.parent() is a CompletionView (no helper method as there is only\n one instance).\n- Remove a now-unneeded type ignore for adding QSizes.\n\n==== qutebrowser/completion/completionwidget.py\n\n- Add a ._model() getter which ensures that we get a CompletionModel (with\n custom attributes) rather than Qt's .model() which can be any QAbstractItemModel\n (or None).\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/completion/models/completionmodel.py\n\n- Remove now unneeded type ignores for signals.\n- Ignore a complaint about .set_pattern() not being defined. Completion\n categories don't share any common parent class, so it would be good to introduce\n a typing.Protocol for this. See #7098.\n\n==== qutebrowser/components/misccommands.py\n\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/components/readlinecommands.py\n\n- Make sure QApplication.instance() is a QApplication (and not just a\n QCoreApplication). This includes the former \"not None\" check.\n\n==== qutebrowser/components/scrollcommands.py\n\n- Add basic annotation for \"funcs\" dict. Could have a callable protocol to\n specify it needs a count kwarg, see #7098.\n\n==== qutebrowser/config/stylesheet.py\n\n- Correctly specify that stylesheet apply to QWidgets, not any QObject.\n- Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy\n about this with overloads and protocols (stylesheet for set_register being None\n => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not\n worth the troble. See #7098.\n\n==== qutebrowser/keyinput/keyutils.py\n\n- Remove some now-unneeded type ignores and add a cast for using a single enum\n value as flags. Might need to look at this again with Qt 6 support.\n\n==== qutebrowser/keyinput/modeman.py\n\n- Add a FIXME for using a TypedDict, see comments for hints.py above.\n\n==== qutebrowser/mainwindow/mainwindow.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n- Improve where we cast from WindowType to WindowFlags, no int needed\n- Use new .tab_bar() getter, see below.\n\n==== qutebrowser/mainwindow/prompt.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n\n==== qutebrowser/mainwindow/statusbar/bar.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/statusbar/command.py\n\n- Fix type for setText() override (from QLineEdit): text can be None\n (QString in C++).\n\n==== qutebrowser/mainwindow/statusbar/url.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/tabbedbrowser.py\n\n- Specify that TabDeque manages browser tabs, not any QWidgets. 
It accesses\n AbstractTab-specific attributes.\n- Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access\n .maybe_hide.\n- Fix the annotations for stored marks: Scroll positions are a QPoint, not int.\n- Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and\n .widget(), which ensures that the return values are valid AbstractTabs (or None\n for _tab_by_idx). This is needed because we access AbstractTab-specific\n attributes.\n- For some places, where the tab can be None, continue using .currentTab() but\n add asserts.\n- Remove some now-unneeded [unreachable] ignores, as mypy knows about the None\n possibility now.\n\n==== qutebrowser/mainwindow/tabwidget.py\n\n- Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and\n .widget() are of type TabBar and AbstractTab, respectively.\n- Add additional assertions where we expect ._tab_by_idx() to never be None.\n- Remove dead code in get_tab_fields for handling a None y scroll position. I\n was unable to find any place in the code where this could be set to None.\n- Remove some now-unneeded type ignores and casts, as mypy now knows that\n _type_by_idx() could be None.\n- Work around a strange instance where mypy complains about not being able to\n find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility,\n despite it clearly being shown as a bool *inside* that class without any\n annotation.\n- Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in\n fact a TabWidget.\n\n==== qutebrowser/misc/crashsignal.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/editor.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/ipc.py\n\n- Remove now unneeded type ignores for signals.\n- Add new type ignores for .error() which is both a signal and a getter\n (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was\n renamed to errorOccurred in 5.15.\n\n==== qutebrowser/misc/objects.py\n\n- Make sure mypy knows that objects.app is our custom Application (with custom\n attributes) rather than any QApplication.\n\n==== qutebrowser/utils/objreg.py\n\n- Ignore attr-defined for .win_id attributes. 
Maybe could add a typing.Protocol,\n but ideally, the whole objreg stuff should die one day anyways.\n\n==== tests/unit/completion/test_completer.py\n\n- Make CompletionWidgetStub inherit from CompletionView so that it passes the\n new isinstance() asserts in completer.py (see above).", "code": "def on_clicked(self, index):\n \n if not index.isValid():\n return\n item = self._model().data(index, downloads.ModelRole.item)\n if item.done and item.successful:\n item.open_file()\n item.remove()\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 78, "n_words": 17, "vocab_size": 16, "complexity": 4, "nloc": 7, "token_counts": 54, "n_ast_nodes": 91, "n_identifiers": 13, "random_cut": "def on_clicked(self, index):\n \n if not index.isValid():\n return\n item = self._model().data(index, downloads.ModelRole.item)\n if item.done and item.successful:\n item.open_file()\n item.remove()\n", "d_id": 117320, "documentation": { "docstring": "Handle clicking of an item.\n\n Args:\n index: The QModelIndex of the clicked item.\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 38, "language": "en" } }, { "id": 34690, "commit_id": "e09473a817c5e5871e11cc81004355ef30250502", "repo": "transformers", "path": "src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py", "file_name": "modeling_xlm_roberta_xl.py", "fun_name": "_tie_weights", "commit_message": "Add support for XLM-R XL and XXL models by modeling_xlm_roberta_xl.py (#13727)\n\n* add xlm roberta xl\r\n\r\n* add convert xlm xl fairseq checkpoint to pytorch\r\n\r\n* fix init and documents for xlm-roberta-xl\r\n\r\n* fix indention\r\n\r\n* add test for XLM-R xl,xxl\r\n\r\n* fix model hub name\r\n\r\n* fix some stuff\r\n\r\n* up\r\n\r\n* correct init\r\n\r\n* fix more\r\n\r\n* fix as suggestions\r\n\r\n* add torch_device\r\n\r\n* fix default values of doc strings\r\n\r\n* fix leftovers\r\n\r\n* merge to master\r\n\r\n* up\r\n\r\n* correct hub names\r\n\r\n* fix docs\r\n\r\n* fix model\r\n\r\n* up\r\n\r\n* finalize\r\n\r\n* last fix\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* add copied from\r\n\r\n* make style\r\n\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _tie_weights(self):\n # To tie those two weights if they get disconnected (on TPU or when the bias is resized)\n self.bias = self.decoder.bias\n\n\n@add_start_docstrings(\n ,\n XLM_ROBERTA_XL_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"\"\"\n XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top\n of the pooled output) e.g. for GLUE tasks.\n \"\"\",\n XLM_ROBERTA_XL_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 44, "n_words": 27, "vocab_size": 27, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 38, "n_identifiers": 6, "random_cut": "def _tie_weights(self):\n # To tie those two weights if they get disconnec", "d_id": 6311, "documentation": { "docstring": "\n XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top\n of the pooled output) e.g. 
for GLUE tasks.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 33, "language": "en" } }, { "id": 19706, "commit_id": "9a3b3ce70621af6f9adaa9eeac9cf83fa149319c", "repo": "pipenv", "path": "pipenv/installers.py", "file_name": "installers.py", "fun_name": "matches_minor", "commit_message": "Issue 4993 Add standard pre commit hooks and apply linting. (#4994)\n\n* Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.", "code": "def matches_minor(self, other):\n \n return (self.major, self.minor) == (other.major, other.minor)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 28, "n_ast_nodes": 43, "n_identifiers": 5, "random_cut": "def matches_minor(self, other):\n \n re", "d_id": 3071, "documentation": { "docstring": "Check whether this version matches the other in (major, minor).", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 116582, "commit_id": "b999051fd8153a1d3624471cac5483867116f985", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py", "file_name": "test_lightwood_handler.py", "fun_name": "test_04_query_predictor_single_where_condition", "commit_message": "test fix", "code": "def test_04_query_predictor_single_where_condition(self):\n time.sleep(120) # TODO \n query = f\n response = self.handler.native_query(query)\n self.assertTrue(response.type == RESPONSE_TYPE.TABLE)\n self.assertTrue(len(response.data_frame) == 1)\n self.assertTrue(response.data_frame['sqft'][0] == 100)\n self.assertTrue(response.data_frame['rental_price'][0] is not None)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 73, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 83, "n_ast_nodes": 143, "n_identifiers": 15, "random_cut": "def test_04_query_predictor_single_where_condition(self):\n time.sleep(120) # TODO \n query = f\n response = self.handler.native_query(query)\n self.assertTrue(response.type == RESPONSE_TYPE.TABLE)\n self.assertTrue(len(response.data_frame) == 1)\n self.assertTrue(response.data_frame['sqft'][0] == 100)\n self.assertTrue(response.data_frame['rental_price'][0] is not None)\n", "d_id": 25781, "documentation": { "docstring": "\n SELECT target\n from {self.test_model_1}\n WHERE sqft=100\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 47, "language": "en" } }, { "id": 249542, "commit_id": "ac1a31740b6d0dfda4d57a25762aaddfde981caf", "repo": "synapse", "path": "tests/storage/test_event_federation.py", "file_name": "test_event_federation.py", "fun_name": "_setup_room_for_insertion_backfill_tests", "commit_message": "Only try to backfill event if we haven't tried before recently (#13635)\n\nOnly try to backfill event if we haven't tried before recently (exponential backoff). 
No need to keep trying the same backfill point that fails over and over.\r\n\r\nFix https://github.com/matrix-org/synapse/issues/13622\r\nFix https://github.com/matrix-org/synapse/issues/8451\r\n\r\nFollow-up to https://github.com/matrix-org/synapse/pull/13589\r\n\r\nPart of https://github.com/matrix-org/synapse/issues/13356", "code": "def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:\n \n room_id = \"!backfill-room-test:some-host\"\n\n depth_map: Dict[str, int] = {\n \"1\": 1,\n \"2\": 2,\n \"insertion_eventA\": 3,\n \"3\": 4,\n \"insertion_eventB\": 5,\n \"4\": 6,\n \"5\": 7,\n }\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 132, "n_words": 27, "vocab_size": 26, "complexity": 1, "nloc": 27, "token_counts": 81, "n_ast_nodes": 88, "n_identifiers": 8, "random_cut": "def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:\n \n room_id = \"!backfill-room-test:some-host\"\n\n depth_map: Dict[str, int] = {", "d_id": 72982, "documentation": { "docstring": "\n Sets up a room with various insertion event backward extremities to test\n backfill functions against.\n\n Returns:\n _BackfillSetupInfo including the `room_id` to test against and\n `depth_map` of events in the room\n ", "n_words": 30, "vocab_size": 26, "n_whitespaces": 81, "language": "en" } }, { "id": 127405, "commit_id": "203253321d34543aa25483803ebc21e3903679b6", "repo": "ray", "path": "python/ray/serve/experimental/gradio_visualize_graph.py", "file_name": "gradio_visualize_graph.py", "fun_name": "postprocessing", "commit_message": "[serve] Add additional features to DAG visualization with Gradio (#28246)", "code": "def postprocessing(data):\n \n\n if type_to_string(type(data)) == \"torch.Tensor\":\n try:\n import torch\n from torchvision import transforms\n\n # By default Torch tensors are displayed as images. To display them as JSON,\n # the user can simply convert them to numpy arrays.\n transformer = transforms.ToPILImage()\n return transformer(torch.squeeze(data))\n except ModuleNotFoundError:\n logger.warning(\n \"Module `torchvision` isn't installed, unable to process torch tensor.\"\n )\n return data\n\n return data\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 196, "n_words": 59, "vocab_size": 50, "complexity": 3, "nloc": 13, "token_counts": 55, "n_ast_nodes": 101, "n_identifiers": 13, "random_cut": "def postprocessing(data):\n \n\n if type_to_string(type(data)) == \"torch.Tensor\":\n try:\n import torch\n from torchvision import transforms\n\n # By default Torch tensors are displayed as images. To display them as JSON,\n # the user can simply convert them to numpy arrays.\n transformer = transforms.ToPILImage()\n return transformer(torch.squeeze(data))\n except ModuleNotFoundError:\n logger.warning(\n \"Module `torchvision` isn't installed, ", "d_id": 28435, "documentation": { "docstring": "Add support for types that are not supported by Gradio.\n\n Some data types like PyTorch tensors, cannot be processed and displayed through\n Gradio. 
Thus we extend support to these data types by transforming them into a form\n that Gradio can process and display.\n ", "n_words": 43, "vocab_size": 35, "n_whitespaces": 55, "language": "en" } }, { "id": 66430, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/manufacturing/doctype/work_order/test_work_order.py", "file_name": "test_work_order.py", "fun_name": "get_scrap_item_details", "commit_message": "style: format code with black", "code": "def get_scrap_item_details(bom_no):\n\tscrap_items = {}\n\tfor item in frappe.db.sql(\n\t\t,\n\t\tbom_no,\n\t\tas_dict=1,\n\t):\n\t\tscrap_items[item.item_code] = item.stock_qty\n\n\treturn scrap_items\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 9, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 10, "token_counts": 40, "n_ast_nodes": 62, "n_identifiers": 10, "random_cut": "def get_scrap_item_details(bom_no):\n\tscrap_items = {}\n\tfor item in frappe.db.sql(\n\t\t,\n\t\tbom_no,\n\t\tas_dict=1,\n\t):\n\t\tscrap_items[item.item_code] = item.stock_qty\n\n\treturn scrap_items\n\n", "d_id": 14186, "documentation": { "docstring": "select item_code, stock_qty from `tabBOM Scrap Item`\n\t\twhere parent = %s", "n_words": 11, "vocab_size": 11, "n_whitespaces": 9, "language": "en" } }, { "id": 46854, "commit_id": "921ccedf7f90f15e8d18c27a77b29d232be3c8cb", "repo": "airflow", "path": "airflow/models/dag.py", "file_name": "dag.py", "fun_name": "get_is_active", "commit_message": "API: Fix deprecation warning due to using query.value (#22775)\n\nWhen using sqlalchemy 1.4, there's a deprecation warning at the task logging:\r\n\r\nSADeprecationWarning: Query.value() is deprecated and will be removed\r\nin a future release. 
Please use Query.with_entities() in combination\r\nwith Query.scalar() (deprecated since: 1.4)\r\n\r\nThis PR fixes it", "code": "def get_is_active(self, session=NEW_SESSION) -> Optional[None]:\n \n return session.query(DagModel.is_active).filter(DagModel.dag_id == self.dag_id).scalar()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 39, "n_ast_nodes": 63, "n_identifiers": 11, "random_cut": "def get_is_active(self, session=NEW_SESSION) -> Optional[None]:\n \n ", "d_id": 9020, "documentation": { "docstring": "Returns a boolean indicating whether this DAG is active", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 225565, "commit_id": "32359f3e93f5ca7778b9f7c3d6d92f49a629c84c", "repo": "mkdocs", "path": "mkdocs/structure/pages.py", "file_name": "pages.py", "fun_name": "is_homepage", "commit_message": "Relative links end with slash even for homepage links (#3022)\n\nFixes #3015", "code": "def is_homepage(self) -> bool:\n \n return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html')\n\n previous_page: Optional[Page]\n \n\n next_page: Optional[Page]\n \n\n parent: Optional[Section]\n \n\n children: None = None\n \n\n is_section: bool = False\n \n\n is_page: bool = True\n \n\n is_link: bool = False\n \n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 99, "n_words": 36, "vocab_size": 27, "complexity": 3, "nloc": 3, "token_counts": 30, "n_ast_nodes": 143, "n_identifiers": 17, "random_cut": "def is_homepage(self) -> bool:\n \n return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html')\n\n previous_page: Optional[Page]\n \n\n next_page: Optional[Page]\n \n\n parent: Optional[Section]\n \n\n children: None = None\n \n\n is_section: bool = False\n \n\n is_page: bool = True\n \n\n is_link: bool =", "d_id": 57470, "documentation": { "docstring": "Evaluates to `True` for the homepage of the site and `False` for all other pages.The [page][mkdocs.structure.pages.Page] object for the previous page or `None`.\n The value will be `None` if the current page is the first item in the site navigation\n or if the current page is not included in the navigation at all.The [page][mkdocs.structure.pages.Page] object for the next page or `None`.\n The value will be `None` if the current page is the last item in the site navigation\n or if the current page is not included in the navigation at all.The immediate parent of the page in the site navigation. `None` if the\n page is at the top level.Pages do not contain children and the attribute is always `None`.Indicates that the navigation object is a \"section\" object. Always `False` for page objects.Indicates that the navigation object is a \"page\" object. Always `True` for page objects.Indicates that the navigation object is a \"link\" object. 
Always `False` for page objects.", "n_words": 158, "vocab_size": 57, "n_whitespaces": 172, "language": "en" } }, { "id": 100340, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/gui/utils.py", "file_name": "utils.py", "fun_name": "_filetypes", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _filetypes(self):\n \n all_files = (\"All files\", \"*.*\")\n filetypes = dict(\n default=(all_files,),\n alignments=[(\"Faceswap Alignments\", \"*.fsa\"), all_files],\n config_project=[(\"Faceswap Project files\", \"*.fsw\"), all_files],\n config_task=[(\"Faceswap Task files\", \"*.fst\"), all_files],\n config_all=[(\"Faceswap Project and Task files\", \"*.fst *.fsw\"), all_files],\n csv=[(\"Comma separated values\", \"*.csv\"), all_files],\n image=[(\"Bitmap\", \"*.bmp\"),\n (\"JPG\", \"*.jpeg *.jpg\"),\n (\"PNG\", \"*.png\"),\n (\"TIFF\", \"*.tif *.tiff\"),\n all_files],\n ini=[(\"Faceswap config files\", \"*.ini\"), all_files],\n json=[(\"JSON file\", \"*.json\"), all_files],\n model=[(\"Keras model files\", \"*.h5\"), all_files],\n state=[(\"State files\", \"*.json\"), all_files],\n log=[(\"Log files\", \"*.log\"), all_files],\n video=[(\"Audio Video Interleave\", \"*.avi\"),\n (\"Flash Video\", \"*.flv\"),\n (\"Matroska\", \"*.mkv\"),\n (\"MOV\", \"*.mov\"),\n (\"MP4\", \"*.mp4\"),\n (\"MPEG\", \"*.mpeg *.mpg *.ts *.vob\"),\n (\"WebM\", \"*.webm\"),\n (\"Windows Media Video\", \"*.wmv\"),\n all_files])\n\n # Add in multi-select options and upper case extensions for Linux\n for key in filetypes:\n if platform.system() == \"Linux\":\n filetypes[key] = [item\n if item[0] == \"All files\"\n else (item[0], f\"{item[1]} {item[1].upper()}\")\n for item in filetypes[key]]\n if len(filetypes[key]) > 2:\n multi = [f\"{key.title()} Files\"]\n multi.append(\" \".join([ftype[1]\n for ftype in filetypes[key] if ftype[0] != \"All files\"]))\n filetypes[key].insert(0, tuple(multi))\n return filetypes\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 774, "n_words": 154, "vocab_size": 116, "complexity": 8, "nloc": 40, "token_counts": 337, "n_ast_nodes": 586, "n_identifiers": 31, "random_cut": "def _filetypes(self):\n \n all_files = (\"All files\", \"*.*\")\n filetypes = dict(\n default=(all_files,),\n alignments=[(\"Faceswap Alignments\", \"*.fsa\"), all_files],\n config_project=[(\"Faceswap Project files\", \"*.fsw\"), all_files],\n config_task=[(\"Faceswap Task files\", \"*.fst\"), all_files],\n config_all=[(\"Faceswap Project and Task files\", \"*.fst *.fsw\"), all_files],\n csv=[(\"Comma separated 
values\", \"*.csv\"), all_files],\n image=[(\"Bitmap\", \"*.bmp\"),\n (\"JPG\", \"*.jpeg *.jpg\"),\n (\"PNG\", \"*.png\"),\n (\"TIFF\", \"*.tif *.tiff\"),\n all_files],\n ini=[(\"Faceswap config files\", \"*.ini\"), all_files],\n ", "d_id": 19835, "documentation": { "docstring": " dict: The accepted extensions for each file type for opening/saving ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 11, "language": "en" } }, { "id": 220528, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/futures.py", "file_name": "futures.py", "fun_name": "result", "commit_message": "add python 3.10.4 for windows", "code": "def result(self):\n \n if self._state == _CANCELLED:\n exc = self._make_cancelled_error()\n raise exc\n if self._state != _FINISHED:\n raise exceptions.InvalidStateError('Result is not ready.')\n self.__log_traceback = False\n if self._exception is not None:\n raise self._exception\n return self._result\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 32, "vocab_size": 22, "complexity": 4, "nloc": 10, "token_counts": 57, "n_ast_nodes": 95, "n_identifiers": 12, "random_cut": "def result(self):\n \n if self._state == _CANCELLED:\n exc = self._make_cancelled_error()\n raise exc\n if self._state != _FINISHED:\n raise exceptions.InvalidStateError('Result is not ready.')\n ", "d_id": 56035, "documentation": { "docstring": "Return the result this future represents.\n\n If the future has been cancelled, raises CancelledError. If the\n future's result isn't yet available, raises InvalidStateError. If\n the future is done and has an exception set, this exception is raised.\n ", "n_words": 37, "vocab_size": 24, "n_whitespaces": 67, "language": "en" } }, { "id": 105108, "commit_id": "ab7d3045ac9154e9c1c2602d0869130defdc6dc7", "repo": "datasets", "path": "src/datasets/formatting/dataset_wrappers/torch_iterable_dataset.py", "file_name": "torch_iterable_dataset.py", "fun_name": "_set_fsspec_for_multiprocess", "commit_message": "Support DataLoader with num_workers > 0 in streaming mode (#4375)\n\n* make TorchIterableDataset work in parallel\r\n- make it picklable\r\n- paralellize over the shards when num_workers is passed\r\n\r\n* start writing some tests\r\n\r\n* fix streaming extension and fsspec issues in subprocesses\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix import\r\n\r\n* fix and add tests\r\n\r\n* fix patch (handle successive patches and builtins)\r\n\r\n* revert unnecessary change to enriched_web_blg\r\n\r\n* style\r\n\r\n* use open locally to fix win permission errors\r\n\r\n* keep file opened in read_csv\r\n\r\n* fix compression for read_csv\r\n\r\n* consistency of read_csv: don't infer compression for file-like objects\r\n\r\n* stringify Path objects\r\n\r\n* comments + raise error if sharding is ambiguous\r\n\r\n* minor\r\n\r\n* Update src/datasets/iterable_dataset.py\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\nCo-authored-by: Mario Šaško ", "code": "def _set_fsspec_for_multiprocess() -> None:\n \n fsspec.asyn.iothread[0] = None\n fsspec.asyn.loop[0] = None\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 19, "n_words": 10, "vocab_size": 8, "complexity": 1, "nloc": 9, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 5, "random_cut": "def _set_fsspec_for_multiprocess() -> None:\n \n 
fsspec.asyn.iothread[0] = None\n fsspec.asyn.", "d_id": 22075, "documentation": { "docstring": "\n Clear reference to the loop and thread.\n This is necessary otherwise HTTPFileSystem hangs in the ML training loop.\n Only required for fsspec >= 0.9.0\n See https://github.com/fsspec/gcsfs/issues/379\n ", "n_words": 26, "vocab_size": 25, "n_whitespaces": 42, "language": "en" } }, { "id": 260798, "commit_id": "45756377c748d84aa52f66950b8d9eeefc31456c", "repo": "scikit-learn", "path": "sklearn/utils/extmath.py", "file_name": "extmath.py", "fun_name": "stable_cumsum", "commit_message": "DOC ensure sklearn/utils/extmath/stable_cumsum passes numpydoc (#24348)", "code": "def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):\n \n out = np.cumsum(arr, axis=axis, dtype=np.float64)\n expected = np.sum(arr, axis=axis, dtype=np.float64)\n if not np.all(\n np.isclose(\n out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True\n )\n ):\n warnings.warn(\n \"cumsum was found to be unstable: \"\n \"its last element does not correspond to sum\",\n RuntimeWarning,\n )\n return out\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 137, "n_words": 47, "vocab_size": 40, "complexity": 2, "nloc": 14, "token_counts": 108, "n_ast_nodes": 157, "n_identifiers": 19, "random_cut": "def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):\n \n out = np.cumsum(arr, axis=axis, dtype=np.float64)\n expected = np.sum(arr, axis=axis, dtype=np.float64)\n if not np.all(\n np.isclose(\n ", "d_id": 76501, "documentation": { "docstring": "Use high precision for cumsum and check that final value matches sum.\n\n Warns if the final cumulative sum does not match the sum (up to the chosen\n tolerance).\n\n Parameters\n ----------\n arr : array-like\n To be cumulatively summed as flat.\n axis : int, default=None\n Axis along which the cumulative sum is computed.\n The default (None) is to compute the cumsum over the flattened array.\n rtol : float, default=1e-05\n Relative tolerance, see ``np.allclose``.\n atol : float, default=1e-08\n Absolute tolerance, see ``np.allclose``.\n\n Returns\n -------\n out : ndarray\n Array with the cumulative sums along the chosen axis.\n ", "n_words": 93, "vocab_size": 68, "n_whitespaces": 171, "language": "en" } }, { "id": 43407, "commit_id": "e2f19505bf3622935480e80bee55bf5b6d80097b", "repo": "airflow", "path": "airflow/www/views.py", "file_name": "views.py", "fun_name": "confirm", "commit_message": "Upgrade FAB to 4.1.1 (#24399)\n\n* Upgrade FAB to 4.1.1\r\n\r\nThe Flask Application Builder have been updated recently to\r\nsupport a number of newer dependencies. 
This PR is the\r\nattempt to migrate FAB to newer version.\r\n\r\nThis includes:\r\n\r\n* update setup.py and setup.cfg upper and lower bounds to\r\n account for proper version of dependencies that\r\n FAB < 4.0.0 was blocking from upgrade\r\n* added typed Flask application retrieval with a custom\r\n application fields available for MyPy typing checks.\r\n* fix typing to account for typing hints added in multiple\r\n upgraded libraries optional values and content of request\r\n returned as Mapping\r\n* switch to PyJWT 2.* by using non-deprecated \"required\" claim as\r\n list rather than separate fields\r\n* add possibiliyt to install providers without constraints\r\n so that we could avoid errors on conflicting constraints when\r\n upgrade-to-newer-dependencies is used\r\n* add pre-commit to check that 2.4+ only get_airflow_app is not\r\n used in providers\r\n* avoid Bad Request in case the request sent to Flask 2.0 is not\r\n JSon content type\r\n* switch imports of internal classes to direct packages\r\n where classes are available rather than from \"airflow.models\" to\r\n satisfy MyPY\r\n* synchronize changes of FAB Security Manager 4.1.1 with our copy\r\n of the Security Manager.\r\n* add error handling for a few \"None\" cases detected by MyPY\r\n* corrected test cases that were broken by immutability of\r\n Flask 2 objects and better escaping done by Flask 2\r\n* updated test cases to account for redirection to \"path\" rather\r\n than full URL by Flask2\r\n\r\nFixes: #22397\r\n\r\n* fixup! Upgrade FAB to 4.1.1", "code": "def confirm(self):\n \n args = request.args\n dag_id = args.get('dag_id')\n task_id = args.get('task_id')\n dag_run_id = args.get('dag_run_id')\n state = args.get('state')\n origin = args.get('origin')\n\n if 'map_index' not in args:\n map_indexes: Optional[List[int]] = None\n else:\n map_indexes = args.getlist('map_index', type=int)\n\n upstream = to_boolean(args.get('upstream'))\n downstream = to_boolean(args.get('downstream'))\n future = to_boolean(args.get('future'))\n past = to_boolean(args.get('past'))\n origin = origin or url_for('Airflow.index')\n\n dag = get_airflow_app().dag_bag.get_dag(dag_id)\n if not dag:\n msg = f'DAG {dag_id} not found'\n return redirect_or_json(origin, msg, status='error', status_code=404)\n\n try:\n task = dag.get_task(task_id)\n except airflow.exceptions.TaskNotFound:\n msg = f\"Task {task_id} not found\"\n return redirect_or_json(origin, msg, status='error', status_code=404)\n\n task.dag = dag\n\n if state not in (\n 'success',\n 'failed',\n ):\n msg = f\"Invalid state {state}, must be either 'success' or 'failed'\"\n return redirect_or_json(origin, msg, status='error', status_code=400)\n\n latest_execution_date = dag.get_latest_execution_date()\n if not latest_execution_date:\n msg = f\"Cannot mark tasks as {state}, seem that dag {dag_id} has never run\"\n return redirect_or_json(origin, msg, status='error', status_code=400)\n\n if map_indexes is None:\n tasks: Union[List[Operator], List[Tuple[Operator, int]]] = [task]\n else:\n tasks = [(task, map_index) for map_index in map_indexes]\n\n to_be_altered = set_state(\n tasks=tasks,\n run_id=dag_run_id,\n upstream=upstream,\n downstream=downstream,\n future=future,\n past=past,\n state=state,\n commit=False,\n )\n\n if request.headers.get('Accept') == 'application/json':\n details = [str(t) for t in to_be_altered]\n return htmlsafe_json_dumps(details, separators=(',', ':'))\n\n details = \"\\n\".join(str(t) for t in to_be_altered)\n\n response = self.render_template(\n 
\"airflow/confirm.html\",\n endpoint=url_for(f'Airflow.{state}'),\n message=f\"Task instances you are about to mark as {state}:\",\n details=details,\n )\n\n return response\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 751, "n_words": 208, "vocab_size": 129, "complexity": 12, "nloc": 61, "token_counts": 430, "n_ast_nodes": 729, "n_identifiers": 57, "random_cut": "def confirm(self):\n \n args = request.args\n dag_id = args.get('dag_id')\n task_id = args.get('task_id')\n dag_run_id = args.get('dag_run_id')\n state = args.get('state')\n origin = args.get('origin')\n\n if 'map_index' not in args:\n map_indexes: Optional[List[int]] = None\n else:\n map_indexes = args.getlist('map_index', type=int)\n\n upstream = to_boolean(args.get('upstream'))\n downstream = to_boolean(args.get('downstream'))\n future = to_boolean(args.get('future'))\n past = to_boolean(args.get('past'))\n origin = origin or url_for('Airflow.index')\n\n dag = get_airflow_app().dag_bag.get_dag(dag_id)\n if not dag:\n msg = f'DAG {dag_id} not found'\n return redirect_or_json(origin, msg, status='error', status_code=404)\n\n try:\n task = dag.get_task(task_id)\n except airflow.exceptions.TaskNotFound:\n msg = f\"Task {task_id} not found\"\n return redirect_or_json(origin, msg, status='error', status_code=404)\n\n task.dag = dag\n\n if state not in (\n 'success',\n 'failed',\n ):\n msg = f\"Invalid state {state}, must be either 'success' or 'failed'\"\n return redirect_or_json(origin, msg", "d_id": 7960, "documentation": { "docstring": "Show confirmation page for marking tasks as success or failed.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 112125, "commit_id": "14d2966b9e91ae16dcc39de8f41017a75cec8ff9", "repo": "nni", "path": "nni/retiarii/oneshot/pytorch/base_lightning.py", "file_name": "base_lightning.py", "fun_name": "resample", "commit_message": "Valuechoice oneshot lightning (#4602)", "code": "def resample(self) -> Dict[str, Any]:\n \n result = {}\n for module in self.nas_modules:\n result.update(module.resample(memo=result))\n return result\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 13, "token_counts": 39, "n_ast_nodes": 63, "n_identifiers": 10, "random_cut": "def resample(self) -> Dict[str, Any]:\n ", "d_id": 24583, "documentation": { "docstring": "Trigger the resample for each ``nas_module``.\n Sometimes (e.g., in differentiable cases), it does nothing.\n\n Returns\n -------\n dict\n Sampled architecture.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 65, "language": "en" } }, { "id": 111419, "commit_id": "8387ce4c01db48d92ac5638e18316c0f1fc8861e", "repo": "spaCy", "path": "spacy/tests/doc/test_json_doc_conversion.py", "file_name": "test_json_doc_conversion.py", "fun_name": "test_json_to_doc_attribute_consistency", "commit_message": "Add Doc.from_json() (#10688)\n\n* Implement Doc.from_json: rough draft.\r\n\r\n* Implement Doc.from_json: first draft with tests.\r\n\r\n* Implement Doc.from_json: added documentation on website for Doc.to_json(), Doc.from_json().\r\n\r\n* Implement Doc.from_json: formatting changes.\r\n\r\n* Implement Doc.to_json(): reverting unrelated formatting changes.\r\n\r\n* Implement Doc.to_json(): fixing entity and span conversion. 
Moving fixture and doc <-> json conversion tests into single file.\r\n\r\n* Implement Doc.from_json(): replaced entity/span converters with doc.char_span() calls.\r\n\r\n* Implement Doc.from_json(): handling sentence boundaries in spans.\r\n\r\n* Implementing Doc.from_json(): added parser-free sentence boundaries transfer.\r\n\r\n* Implementing Doc.from_json(): added parser-free sentence boundaries transfer.\r\n\r\n* Implementing Doc.from_json(): incorporated various PR feedback.\r\n\r\n* Renaming fixture for document without dependencies.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implementing Doc.from_json(): using two sent_starts instead of one.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implementing Doc.from_json(): doc_without_dependency_parser() -> doc_without_deps.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implementing Doc.from_json(): incorporating various PR feedback. Rebased on latest master.\r\n\r\n* Implementing Doc.from_json(): refactored Doc.from_json() to work with annotation IDs instead of their string representations.\r\n\r\n* Implement Doc.from_json(): reverting unwanted formatting/rebasing changes.\r\n\r\n* Implement Doc.from_json(): added check for char_span() calculation for entities.\r\n\r\n* Update spacy/tokens/doc.pyx\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): minor refactoring, additional check for token attribute consistency with corresponding test.\r\n\r\n* Implement Doc.from_json(): removed redundancy in annotation type key naming.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): Simplifying setting annotation values.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement doc.from_json(): renaming annot_types to token_attrs.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): adjustments for renaming of annot_types to token_attrs.\r\n\r\n* Implement Doc.from_json(): removing default categories.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): simplifying lexeme initialization.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): simplifying lexeme initialization.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): refactoring to only have keys for present annotations.\r\n\r\n* Implement Doc.from_json(): fix check for tokens' HEAD attributes.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): refactoring Doc.from_json().\r\n\r\n* Implement Doc.from_json(): fixing span_group retrieval.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): fixing span retrieval.\r\n\r\n* Implement Doc.from_json(): added schema for Doc JSON format. 
Minor refactoring in Doc.from_json().\r\n\r\n* Implement Doc.from_json(): added comment regarding Token and Span extension support.\r\n\r\n* Implement Doc.from_json(): renaming inconsistent_props to partial_attrs..\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): adjusting error message.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): extending E1038 message.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): added params to E1038 raises.\r\n\r\n* Implement Doc.from_json(): combined attribute collection with partial attributes check.\r\n\r\n* Implement Doc.from_json(): added optional schema validation.\r\n\r\n* Implement Doc.from_json(): fixed optional fields in schema, tests.\r\n\r\n* Implement Doc.from_json(): removed redundant None check for DEP.\r\n\r\n* Implement Doc.from_json(): added passing of schema validatoin message to E1037..\r\n\r\n* Implement Doc.from_json(): removing redundant error E1040.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): changing message for E1037.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): adjusted website docs and docstring of Doc.from_json().\r\n\r\n* Update spacy/tests/doc/test_json_doc_conversion.py\r\n\r\n* Implement Doc.from_json(): docstring update.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): docstring update.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): website docs update.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): docstring formatting.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): docstring formatting.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): fixing Doc reference in website docs.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): reformatted website/docs/api/doc.md.\r\n\r\n* Implement Doc.from_json(): bumped IDs of new errors to avoid merge conflicts.\r\n\r\n* Implement Doc.from_json(): fixing bug in tests.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): fix setting of sentence starts for docs without DEP.\r\n\r\n* Implement Doc.from_json(): add check for valid char spans when manually setting sentence boundaries. Refactor sentence boundary setting slightly. 
Move error message for lack of support for partial token annotations to errors.py.\r\n\r\n* Implement Doc.from_json(): simplify token sentence start manipulation.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Combine related error messages\r\n\r\n* Update spacy/tests/doc/test_json_doc_conversion.py\r\n\r\nCo-authored-by: Adriane Boyd ", "code": "def test_json_to_doc_attribute_consistency(doc):\n \n doc_json = doc.to_json()\n doc_json[\"tokens\"][1].pop(\"morph\")\n with pytest.raises(ValueError):\n Doc(doc.vocab).from_json(doc_json)\n\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 28, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 5, "token_counts": 44, "n_ast_nodes": 80, "n_identifiers": 11, "random_cut": "def test_json_to_doc_attribute_consistency(doc):\n \n doc_json = doc.to_json()\n doc_json[\"tokens\"][1].pop(\"morph\")\n with pytest.raises(ValueError):\n Doc(doc.vocab).from_js", "d_id": 24402, "documentation": { "docstring": "Test that Doc.from_json() raises an exception if tokens don't all have the same set of properties.", "n_words": 16, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 207767, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_index_css_classes", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_index_css_classes(self):\n \n # General index page\n response = self.client.get(reverse(\"admin:index\"))\n self.assertContains(response, '
    ')\n self.assertContains(response, '
    `` tag\n for the DataFrame HTML repr.\n\\\n: boolean\n When True, Jupyter notebook will process table contents using MathJax,\n rendering mathematical expressions enclosed by the dollar symbol.\n (default: True)\n\\\n: int\n The number of items that will be added to `dir(...)`. 'None' value means\n unlimited. Because dir is cached, changing this option will not immediately\n affect already existing dataframes until a column is deleted or added.\n\n This is for instance used to suggest columns from a dataframe to tab\n completion.\n\n: int\n Width of the display in characters. In case python/IPython is running in\n a terminal this can be set to None and pandas will correctly auto-detect\n the width.\n Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a\n terminal and hence it is not possible to correctly detect the width.\n\n: float or None\n if set to a float value, all float values smaller then the given threshold\n will be displayed as exactly 0 by repr and friends.\n\n: int or None\n When pretty-printing a long sequence, no more then `max_seq_items`\n will be printed. If items are omitted, they will be denoted by the\n addition of \"...\" to the resulting string.\n\n If set to None, the number of items to be printed is unlimited.\n\n: int or None\n df.info() will usually show null-counts for each column.\n For large frames this can be quite slow. max_info_rows and max_info_cols\n limit this null check only to frames with smaller dimensions than\n specified.\n\n: 'truncate'/'info'\n For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can\n show a truncated table (the default from 0.13), or switch to the view from\n df.info() (the behaviour in earlier versions of pandas).\n\n: bool, string or None\n This specifies if the memory usage of a DataFrame should be displayed when\n df.info() is called. Valid values True,False,'deep'\n\n: bool\n This specifies if the to_latex method of a Dataframe uses escapes special\n characters.\n Valid values: False,True\n\n:bool\n This specifies if the to_latex method of a Dataframe uses the longtable\n format.\n Valid values: False,True\n\n: bool\n This specifies if the to_latex method of a Dataframe uses multicolumns\n to pretty-print MultiIndex columns.\n Valid values: False,True\n\n: string\n This specifies the format for multicolumn headers.\n Can be surrounded with '|'.\n Valid values: 'l', 'c', 'r', 'p{}'\n\n: bool\n This specifies if the to_latex method of a Dataframe uses multirows\n to pretty-print MultiIndex rows.\n Valid values: False,True\n", "n_words": 960, "vocab_size": 361, "n_whitespaces": 1237, "language": "en" } }, { "id": 248638, "commit_id": "0fcc0ae37c959116c910f349a8025bd6921fdfc8", "repo": "synapse", "path": "tests/rest/media/v1/test_html_preview.py", "file_name": "test_html_preview.py", "fun_name": "test_twitter_tag", "commit_message": "Improve URL previews for sites with only Twitter card information. 
(#13056)\n\nPull out `twitter:` meta tags when generating a preview and\r\nuse it to augment any `og:` meta tags.\r\n\r\nPrefers Open Graph information over Twitter card information.", "code": "def test_twitter_tag(self) -> None:\n \n html = b\n tree = decode_body(html, \"http://example.com/test.html\")\n og = parse_html_to_open_graph(tree)\n self.assertEqual(\n og,\n {\n \"og:title\": None,\n \"og:description\": \"Description\",\n \"og:site_name\": \"@matrixdotorg\",\n },\n )\n\n # But they shouldn't override Open Graph values.\n html = b\n tree = decode_body(html, \"http://example.com/test.html\")\n og = parse_html_to_open_graph(tree)\n self.assertEqual(\n og,\n {\n \"og:title\": None,\n \"og:description\": \"Real Description\",\n \"og:site_name\": \"matrix.org\",\n },\n )\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 295, "n_words": 55, "vocab_size": 34, "complexity": 1, "nloc": 38, "token_counts": 88, "n_ast_nodes": 159, "n_identifiers": 8, "random_cut": "def test_twitter_tag(self) -> None:\n \n html = b\n tree = decode_body(html, \"http://example.com/test.html\")\n og = parse_html_to_open_graph(tree)\n self.assertEqual(\n og,\n {\n \"og:title\": None,\n \"og:description\": \"Description\",\n \"og:site_name\": \"@matrixdotorg\",\n },\n )\n\n # But they shouldn't override Ope", "d_id": 72393, "documentation": { "docstring": "Twitter card tags should be used if nothing else is available.\n \n \n \n \n \n \n \n \n \n \n \n \n \n ", "n_words": 40, "vocab_size": 25, "n_whitespaces": 139, "language": "en" } }, { "id": 68528, "commit_id": "05dd1d6d15c6c8c66165e9f267078c3cf9aec10e", "repo": "erpnext", "path": "erpnext/accounts/doctype/tax_rule/tax_rule.py", "file_name": "tax_rule.py", "fun_name": "get_tax_template", "commit_message": "refactor: tax rule validity query (#30934)", "code": "def get_tax_template(posting_date, args):\n\t\n\targs = frappe._dict(args)\n\tconditions = []\n\n\tif posting_date:\n\t\tconditions.append(\n\t\t\tf\n\t\t)\n\telse:\n\t\tconditions.append(\"(from_date is null) and (to_date is null)\")\n\n\tconditions.append(\n\t\t\"ifnull(tax_category, '') = {0}\".format(frappe.db.escape(cstr(args.get(\"tax_category\"))))\n\t)\n\tif \"tax_category\" in args.keys():\n\t\tdel args[\"tax_category\"]\n\n\tfor key, value in args.items():\n\t\tif key == \"use_for_shopping_cart\":\n\t\t\tconditions.append(\"use_for_shopping_cart = {0}\".format(1 if value else 0))\n\t\telif key == \"customer_group\":\n\t\t\tif not value:\n\t\t\t\tvalue = get_root_of(\"Customer Group\")\n\t\t\tcustomer_group_condition = get_customer_group_condition(value)\n\t\t\tconditions.append(\"ifnull({0}, '') in ('', {1})\".format(key, customer_group_condition))\n\t\telse:\n\t\t\tconditions.append(\"ifnull({0}, '') in ('', {1})\".format(key, frappe.db.escape(cstr(value))))\n\n\ttax_rule = frappe.db.sql(\n\t\t.format(\n\t\t\t\" and \".join(conditions)\n\t\t),\n\t\tas_dict=True,\n\t)\n\n\tif not tax_rule:\n\t\treturn None\n\n\tfor rule in tax_rule:\n\t\trule.no_of_keys_matched = 0\n\t\tfor key in args:\n\t\t\tif rule.get(key):\n\t\t\t\trule.no_of_keys_matched += 1\n\n\tdef cmp(a, b):\n\t\t# refernce: https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons\n\t\treturn int(a > b) - int(a < b)\n\n\trule = sorted(\n\t\ttax_rule,\n\t\tkey=functools.cmp_to_key(\n\t\t\tlambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched) or cmp(a.priority, b.priority)\n\t\t),\n\t)[0]\n\n\ttax_template = 
rule.sales_tax_template or rule.purchase_tax_template\n\tdoctype = \"{0} Taxes and Charges Template\".format(rule.tax_type)\n\n\tif frappe.db.get_value(doctype, tax_template, \"disabled\") == 1:\n\t\treturn None\n\n\treturn tax_template\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 108, "n_words": 159, "vocab_size": 103, "complexity": 15, "nloc": 51, "token_counts": 312, "n_ast_nodes": 559, "n_identifiers": 39, "random_cut": "def get_tax_template(posting_date, args):\n\t\n\targs = frappe._dict(args)\n\tconditions = []\n\n\tif posting_date:\n\t\tconditions.append(\n\t\t\tf\n\t\t)\n\telse:\n\t\tconditions.appen", "d_id": 14812, "documentation": { "docstring": "Get matching tax rule(from_date is null or from_date <= '{posting_date}')\n\t\t\tand (to_date is null or to_date >= '{posting_date}')select * from `tabTax Rule`\n\t\twhere {0}", "n_words": 24, "vocab_size": 21, "n_whitespaces": 21, "language": "en" } }, { "id": 176628, "commit_id": "de1d00f20e0bc14f1cc911b3486e50225a8fa168", "repo": "networkx", "path": "networkx/generators/classic.py", "file_name": "classic.py", "fun_name": "wheel_graph", "commit_message": "Adjust the usage of nodes_or_number decorator (#5599)\n\n* recorrect typo in decorators.py\r\n\r\n* Update tests to show troubles in current code\r\n\r\n* fix troubles with usage of nodes_or_number\r\n\r\n* fix typo\r\n\r\n* remove nodes_or_number where that makes sense\r\n\r\n* Reinclude nodes_or_numbers and add some tests for nonstandard usage\r\n\r\n* fix typowq\r\n\r\n* hopefully final tweaks (no behavior changes\r\n\r\n* Update test_classic.py\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def wheel_graph(n, create_using=None):\n \n _, nodes = n\n G = empty_graph(nodes, create_using)\n if G.is_directed():\n raise NetworkXError(\"Directed Graph not supported\")\n\n if len(nodes) > 1:\n hub, *rim = nodes\n G.add_edges_from((hub, node) for node in rim)\n if len(rim) > 1:\n G.add_edges_from(pairwise(rim, cyclic=True))\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 97, "n_words": 40, "vocab_size": 32, "complexity": 5, "nloc": 11, "token_counts": 86, "n_ast_nodes": 139, "n_identifiers": 16, "random_cut": "def wheel_graph(n, create_using=None):\n \n _, nodes = n\n G = empty_graph(nodes, create_using)\n if G.is_directed():\n raise ", "d_id": 42010, "documentation": { "docstring": "Return the wheel graph\n\n The wheel graph consists of a hub node connected to a cycle of (n-1) nodes.\n\n Parameters\n ----------\n n : int or iterable\n If an integer, node labels are 0 to n with center 0.\n If an iterable of nodes, the center is the first.\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. 
If graph instance, then cleared before populated.\n\n Node labels are the integers 0 to n - 1.\n ", "n_words": 76, "vocab_size": 51, "n_whitespaces": 117, "language": "en" } }, { "id": 189036, "commit_id": "c14744db097b1955f2b668dc753b2d2439db0bdf", "repo": "psutil", "path": "scripts/internal/print_announce.py", "file_name": "print_announce.py", "fun_name": "get_changes", "commit_message": "fix print_announce.py", "code": "def get_changes():\n \n with open(HISTORY) as f:\n lines = f.readlines()\n\n block = []\n\n # eliminate the part preceding the first block\n for i, line in enumerate(lines):\n line = lines.pop(0)\n if line.startswith('===='):\n break\n lines.pop(0)\n\n for i, line in enumerate(lines):\n line = lines.pop(0)\n line = line.rstrip()\n if re.match(r\"^- \\d+_\", line):\n line = re.sub(r\"^- (\\d+)_\", r\"- #\\1\", line)\n\n if line.startswith('===='):\n break\n block.append(line)\n\n # eliminate bottom empty lines\n block.pop(-1)\n while not block[-1]:\n block.pop(-1)\n\n return \"\\n\".join(block)\n\n", "url": "https://github.com/giampaolo/psutil.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 199, "n_words": 70, "vocab_size": 44, "complexity": 7, "nloc": 21, "token_counts": 151, "n_ast_nodes": 260, "n_identifiers": 18, "random_cut": "def get_changes():\n \n with open(HISTORY) as f:\n lines = f.readlines()\n\n block = []\n\n # eliminate the part preceding the first block\n for i, line in enumerate(lines):\n line = lines.pop(0)\n if line.startswith('===='):\n break\n lines.pop(0)\n\n for i, line in enumerate(lines):\n line = lines.pop(0)\n line = line.rstrip()\n if re.match(r\"^- \\d+_\", line):\n line = re.sub(r\"^- (\\d+)_\", r\"- #\\1\", line)\n\n if line.startswith('===='):\n break\n block.append(line)\n\n ", "d_id": 45974, "documentation": { "docstring": "Get the most recent changes for this release by parsing\n HISTORY.rst file.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 18, "language": "en" } }, { "id": 100727, "commit_id": "afec52309326304f4323029039e49bfcf928ef43", "repo": "faceswap", "path": "lib/gui/popup_session.py", "file_name": "popup_session.py", "fun_name": "_check_valid_data", "commit_message": "Bugfixes:\n - Stats graph - Handle NaNs in data\n - logger - de-elevate matplotlib font messages", "code": "def _check_valid_data(self) -> bool:\n \n logger.debug(\"Validating data. %s\",\n {key: len(val) for key, val in self._display_data.stats.items()})\n if any(len(val) == 0 # pylint:disable=len-as-condition\n for val in self._display_data.stats.values()):\n return False\n return True\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 102, "n_words": 28, "vocab_size": 24, "complexity": 4, "nloc": 15, "token_counts": 64, "n_ast_nodes": 105, "n_identifiers": 13, "random_cut": "def _check_valid_data(self) -> bool:\n \n logger.debug(\"Validating data. 
%s\",\n {key: len(val) for key, val in self._display_data.stats.items()})\n if any(len(val) == 0 # pylint:disable=len-as-condition\n for val in self._display_data.stats.values()):\n", "d_id": 20182, "documentation": { "docstring": " Check that the selections holds valid data to display\n NB: len-as-condition is used as data could be a list or a numpy array\n\n Returns\n -------\n bool\n ``True` if there is data to be displayed, otherwise ``False``\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 87, "language": "en" } }, { "id": 291332, "commit_id": "09c3df7eb258295211a8216c2039843b09aa244b", "repo": "core", "path": "homeassistant/components/ibeacon/coordinator.py", "file_name": "coordinator.py", "fun_name": "_async_check_unavailable_groups_with_random_macs", "commit_message": "Fix iBeacons with infrequent random mac address changes unexpectedly going unavailable (#82668)\n\nfixes https://github.com/home-assistant/core/issues/79781", "code": "def _async_check_unavailable_groups_with_random_macs(self) -> None:\n \n now = MONOTONIC_TIME()\n gone_unavailable = [\n group_id\n for group_id in self._group_ids_random_macs\n if group_id not in self._unavailable_group_ids\n and (service_info := self._last_seen_by_group_id.get(group_id))\n and (\n # We will not be callbacks for iBeacons with random macs\n # that rotate infrequently since their advertisement data is\n # does not change as the bluetooth.async_register_callback API\n # suppresses callbacks for duplicate advertisements to avoid\n # exposing integrations to the firehose of bluetooth advertisements.\n #\n # To solve this we need to ask for the latest service info for\n # the address we last saw to get the latest timestamp.\n #\n # If there is no last service info for the address we know that\n # the device is no longer advertising.\n not (\n latest_service_info := bluetooth.async_last_service_info(\n self.hass, service_info.address, connectable=False\n )\n )\n or now - latest_service_info.time > UNAVAILABLE_TIMEOUT\n )\n ]\n for group_id in gone_unavailable:\n self._unavailable_group_ids.add(group_id)\n async_dispatcher_send(self.hass, signal_unavailable(group_id))\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 538, "n_words": 144, "vocab_size": 92, "complexity": 7, "nloc": 20, "token_counts": 100, "n_ast_nodes": 166, "n_identifiers": 22, "random_cut": "def _async_check_unavailable_groups_with_random_macs(self) -> None:\n \n now = MONOTONIC_TIME()\n gone_unavailable = [\n group_id\n for group_id in self._group_ids_random_macs\n if group_id not in self._unavailable_group_ids\n and (service_info := self._last_seen_by_group_id.get(group_id))\n and (\n # We will not be callbacks for iBeacons with random macs\n # that rotate infrequently since their advertisement data is\n # does not change as the bluetooth.async_register_callback API\n # suppresses callbacks for duplicate advertisements to avoid\n # exposing integrations to the firehose of bluetooth advertisements.\n #\n # To solve this we need to ask for the latest service info for\n # the address we last saw to get the latest timestamp.\n #\n ", "d_id": 90441, "documentation": { "docstring": "Check for random mac groups that have not been seen in a while and mark them as unavailable.", "n_words": 18, "vocab_size": 18, "n_whitespaces": 17, "language": "en" } }, { "id": 241673, "commit_id": "9c8f52ccd1a1859502f705e0567f2d83d57ff93a", "repo": "lightning", "path": 
"pytorch_lightning/trainer/connectors/checkpoint_connector.py", "file_name": "checkpoint_connector.py", "fun_name": "restore_optimizers_and_schedulers", "commit_message": "Fix restoring lr scheduler states with deepspeed strategy (#11322)\n\n\r\nCo-authored-by: Carlos Mocholí \r\nCo-authored-by: thomas chaton ", "code": "def restore_optimizers_and_schedulers(self) -> None:\n \n if not self._loaded_checkpoint:\n return\n\n if self.trainer.strategy.lightning_restore_optimizer:\n # validation\n if \"optimizer_states\" not in self._loaded_checkpoint:\n raise KeyError(\n \"Trying to restore optimizer state but checkpoint contains only the model.\"\n \" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`.\"\n )\n self.restore_optimizers()\n\n if \"lr_schedulers\" not in self._loaded_checkpoint:\n raise KeyError(\n \"Trying to restore learning rate scheduler state but checkpoint contains only the model.\"\n \" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`.\"\n )\n self.restore_lr_schedulers()\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 275, "n_words": 76, "vocab_size": 42, "complexity": 5, "nloc": 17, "token_counts": 62, "n_ast_nodes": 117, "n_identifiers": 9, "random_cut": "def restore_optimizers_and_schedulers(self) -> None:\n \n if not self._loaded_checkpoint:\n return\n\n if self.trainer.strategy.lightning_restore_optimizer:\n # validation\n if \"optimizer_states\" not in self._loaded_checkpoint:\n ", "d_id": 69645, "documentation": { "docstring": "Restores the optimizers and learning rate scheduler states from the pre-loaded checkpoint.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 22138, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "urldefragauth", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def urldefragauth(url):\n \n scheme, netloc, path, params, query, fragment = urlparse(url)\n\n # see func:`prepend_scheme_if_needed`\n if not netloc:\n netloc, path = path, netloc\n\n netloc = netloc.rsplit(\"@\", 1)[-1]\n\n return urlunparse((scheme, netloc, path, params, query, \"\"))\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 57, "n_words": 32, "vocab_size": 23, "complexity": 2, "nloc": 6, "token_counts": 64, "n_ast_nodes": 99, "n_identifiers": 11, "random_cut": "def urldefragauth(url):\n \n scheme, netloc, path, params, query, fragment = urlparse(url)\n\n # see func:`prepend_scheme_if_needed`\n if not netloc:\n netloc, path = pat", "d_id": 4210, "documentation": { "docstring": "\n Given a url remove the fragment and the authentication part.\n\n :rtype: str\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 22, "language": "en" } }, { "id": 249582, "commit_id": "be76cd8200b18f3c68b895f85ac7ef5b0ddc2466", "repo": "synapse", "path": "tests/storage/test_registration.py", "file_name": "test_registration.py", "fun_name": "test_approval_not_required", "commit_message": "Allow admins to require a manual approval process before new accounts can be used (using MSC3866) (#13556)", "code": "def test_approval_not_required(self) -> None:\n \n self.get_success(self.store.register_user(self.user_id, self.pwhash))\n\n user = self.get_success(self.store.get_user_by_id(self.user_id))\n assert user is not None\n self.assertTrue(user[\"approved\"])\n\n approved = self.get_success(self.store.is_user_approved(self.user_id))\n self.assertTrue(approved)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 10, "token_counts": 81, "n_ast_nodes": 132, "n_identifiers": 12, "random_cut": "def test_approval_not_required(self) -> None:\n \n self.get_success(self.store.register_user(self.user_id, self.pwhash))\n\n user = self.get_success(self.store.get_user_by_id(self.user_id))\n assert user is not None\n self.assertTrue(user[\"approved\"])\n\n approved = self.get_success(self.store.is_user_a", "d_id": 73004, "documentation": { "docstring": "Tests that if we don't require approval for new accounts, newly created\n accounts are automatically marked as approved.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 32, "language": "en" } }, { "id": 223791, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/message.py", "file_name": "message.py", "fun_name": "get_all", "commit_message": "add python 3.10.4 for windows", "code": "def get_all(self, name, failobj=None):\n \n values = []\n name = name.lower()\n for k, v in self._headers:\n if k.lower() == name:\n values.append(self.policy.header_fetch_parse(k, v))\n if not values:\n return failobj\n return values\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 107, "n_words": 28, "vocab_size": 24, "complexity": 4, "nloc": 9, "token_counts": 64, "n_ast_nodes": 103, "n_identifiers": 12, "random_cut": "def get_all(self, name, failobj=None):\n \n valu", "d_id": 57068, "documentation": { "docstring": "Return a list of all the values for the named field.\n\n These will be sorted in the order they 
appeared in the original\n message, and may contain duplicates. Any fields deleted and\n re-inserted are always appended to the header list.\n\n If no such fields exist, failobj is returned (defaults to None).\n ", "n_words": 51, "vocab_size": 43, "n_whitespaces": 87, "language": "en" } }, { "id": 72102, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_privacy.py", "file_name": "test_privacy.py", "fun_name": "test_explorer_private_child", "commit_message": "Reformat with black", "code": "def test_explorer_private_child(self):\n \n response = self.client.get(\n reverse(\"wagtailadmin_explore\", args=(self.private_child_page.id,))\n )\n\n # Check the response\n self.assertEqual(response.status_code, 200)\n\n # Check the privacy indicator is public\n self.assertTemplateUsed(response, \"wagtailadmin/pages/_privacy_switch.html\")\n self.assertContains(response, '
    ')\n self.assertNotContains(response, '
    ')\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 105, "n_words": 31, "vocab_size": 25, "complexity": 1, "nloc": 8, "token_counts": 64, "n_ast_nodes": 110, "n_identifiers": 14, "random_cut": "def test_explorer_private_child(self):\n \n response = self.client.get(\n reverse(\"wagtailadmin_explore\", args=(self.private_child_page.id,))\n )\n\n # Check the response\n self.assertEqual(response.status_code, 200)\n\n # Check the privacy indicator is public\n self.a", "d_id": 15828, "documentation": { "docstring": "\n This tests that the privacy indicator on the private child pages explore view is set to \"PRIVATE\"\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 32, "language": "en" } }, { "id": 334167, "commit_id": "95f4256fc905b6e29e5ea0f245dcf88f72a9ddd1", "repo": "diffusers", "path": "utils/check_dummies.py", "file_name": "check_dummies.py", "fun_name": "find_backend", "commit_message": "upload some cleaning tools", "code": "def find_backend(line):\n \n if _re_test_backend.search(line) is None:\n return None\n backends = [b[0] for b in _re_backend.findall(line)]\n backends.sort()\n return \"_and_\".join(backends)\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 40, "n_words": 18, "vocab_size": 17, "complexity": 3, "nloc": 6, "token_counts": 47, "n_ast_nodes": 79, "n_identifiers": 10, "random_cut": "def find_backend(line):\n \n if _re_test_backend.search(line) is None:\n return No", "d_id": 120535, "documentation": { "docstring": "Find one (or multiple) backend in a code line of the init.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 177039, "commit_id": "2fb00bb8b9ed1e2917e5bc1aac04c558bd23c6d8", "repo": "networkx", "path": "networkx/classes/graphviews.py", "file_name": "graphviews.py", "fun_name": "subgraph_view", "commit_message": "Attempt to reverse slowdown from hasattr needed for cached_property (#5836)\n\n* Automate reset of cache for _adj,_pred,_succ\r\n\r\n* Make G._adj a data descriptor that resets G.adj when needed.\r\n\r\n* update places in the code where both G._succ and G._adj are changed\r\n\r\nThis is no longer needed since G._succ and G._adj are synced during __set__\r\n\r\n* testing hasattr(G, `_adj`) no longer ensures an instance.\r\n\r\n* Make mypy happy\r\n\r\n* Switch to hardcode attribute names in the data descriptors\r\n\r\n* Improve doc_strings for the data descriptors", "code": "def subgraph_view(G, filter_node=no_filter, filter_edge=no_filter):\n \n newG = nx.freeze(G.__class__())\n newG._NODE_OK = filter_node\n newG._EDGE_OK = filter_edge\n\n # create view by assigning attributes from G\n newG._graph = G\n newG.graph = G.graph\n\n newG._node = FilterAtlas(G._node, filter_node)\n if G.is_multigraph():\n Adj = FilterMultiAdjacency\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 70, "n_words": 36, "vocab_size": 29, "complexity": 3, "nloc": 19, "token_counts": 132, "n_ast_nodes": 114, "n_identifiers": 18, "random_cut": "def subgraph_view(G, filter_node=no_filter, filter_edge=no_filter):\n \n newG = nx.freeze(G.__class__())\n newG._NODE_OK ", "d_id": 42246, "documentation": { "docstring": "View of `G` applying a filter on nodes and edges.\n\n `subgraph_view` provides a read-only view of the input graph that excludes\n 
nodes and edges based on the outcome of two filter functions `filter_node`\n and `filter_edge`.\n\n The `filter_node` function takes one argument --- the node --- and returns\n `True` if the node should be included in the subgraph, and `False` if it\n should not be included.\n\n The `filter_edge` function takes two (or three arguments if `G` is a\n multi-graph) --- the nodes describing an edge, plus the edge-key if\n parallel edges are possible --- and returns `True` if the edge should be\n included in the subgraph, and `False` if it should not be included.\n\n Both node and edge filter functions are called on graph elements as they\n are queried, meaning there is no up-front cost to creating the view.\n\n Parameters\n ----------\n G : networkx.Graph\n A directed/undirected graph/multigraph\n\n filter_node : callable, optional\n A function taking a node as input, which returns `True` if the node\n should appear in the view.\n\n filter_edge : callable, optional\n A function taking as input the two nodes describing an edge (plus the\n edge-key if `G` is a multi-graph), which returns `True` if the edge\n should appear in the view.\n\n Returns\n -------\n graph : networkx.Graph\n A read-only graph view of the input graph.\n\n Examples\n --------\n >>> G = nx.path_graph(6)\n\n Filter functions operate on the node, and return `True` if the node should\n appear in the view:\n\n >>> def filter_node(n1):\n ... return n1 != 5\n ...\n >>> view = nx.subgraph_view(G, filter_node=filter_node)\n >>> view.nodes()\n NodeView((0, 1, 2, 3, 4))\n\n We can use a closure pattern to filter graph elements based on additional\n data --- for example, filtering on edge data attached to the graph:\n\n >>> G[3][4][\"cross_me\"] = False\n >>> def filter_edge(n1, n2):\n ... return G[n1][n2].get(\"cross_me\", True)\n ...\n >>> view = nx.subgraph_view(G, filter_edge=filter_edge)\n >>> view.edges()\n EdgeView([(0, 1), (1, 2), (2, 3), (4, 5)])\n\n >>> view = nx.subgraph_view(G, filter_node=filter_node, filter_edge=filter_edge,)\n >>> view.nodes()\n NodeView((0, 1, 2, 3, 4))\n >>> view.edges()\n EdgeView([(0, 1), (1, 2), (2, 3)])\n ", "n_words": 333, "vocab_size": 150, "n_whitespaces": 528, "language": "en" } }, { "id": 41875, "commit_id": "26bf4b3b645edc405ca52b533b8d68273aeba7d1", "repo": "seaborn", "path": "seaborn/utils.py", "file_name": "utils.py", "fun_name": "_deprecate_ci", "commit_message": "Housekeeping on relational plot parameters (#2855)\n\n* Do some housekeeping on lineplot ci deprecation\r\n\r\n* Remove some unused parameters from scatterplot\r\n\r\n* Remove incorrect statement from relplot docstring\r\n\r\n* Update lineplot ci= deprecation test", "code": "def _deprecate_ci(errorbar, ci):\n \n if ci != \"deprecated\":\n if ci is None:\n errorbar = None\n elif ci == \"sd\":\n errorbar = \"sd\"\n else:\n errorbar = (\"ci\", ci)\n msg = (\n \"\\n\\nThe `ci` parameter is deprecated. 
\"\n f\"Use `errorbar={repr(errorbar)}` for the same effect.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return errorbar\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 153, "n_words": 47, "vocab_size": 37, "complexity": 4, "nloc": 14, "token_counts": 59, "n_ast_nodes": 117, "n_identifiers": 9, "random_cut": "def _deprecate_ci(errorbar, ci):\n \n if ci != \"deprecated\":\n if ci is None:\n errorbar = None\n elif ci == \"sd\":\n errorbar = \"sd\"\n else:\n errorbar = (\"ci\", ci)\n ", "d_id": 7451, "documentation": { "docstring": "\n Warn on usage of ci= and convert to appropriate errorbar= arg.\n\n ci was deprecated when errorbar was added in 0.12. It should not be removed\n completely for some time, but it can be moved out of function definitions\n (and extracted from kwargs) after one cycle.\n\n ", "n_words": 45, "vocab_size": 42, "n_whitespaces": 61, "language": "en" } }, { "id": 160870, "commit_id": "6d77c591c59b5678f14ae5af2127eebb7d2415bc", "repo": "numpy", "path": "numpy/ma/core.py", "file_name": "core.py", "fun_name": "__sub__", "commit_message": "ENH: Adding __array_ufunc__ capability to MaskedArrays.\n\nThis enables any ufunc numpy operations that are called on a\nMaskedArray to use the masked version of that function automatically\nwithout needing to resort to np.ma.func() calls.", "code": "def __sub__(self, other):\n \n if self._delegate_binop(other):\n return NotImplemented\n return np.subtract(self, other)\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 42, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 7, "random_cut": "def __sub__(self, other):\n \n if self._delegate_binop(other):\n ", "d_id": 38770, "documentation": { "docstring": "\n Subtract other from self, and return a new masked array.\n\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 287952, "commit_id": "2667f0b792b1f936aeb5958cc40d5dee26350bf6", "repo": "core", "path": "tests/components/plugwise/conftest.py", "file_name": "conftest.py", "fun_name": "mock_smile_adam_2", "commit_message": "Bump plugwise to v0.21.3, add related new features (#76610)\n\nCo-authored-by: Franck Nijhof ", "code": "def mock_smile_adam_2() -> Generator[None, MagicMock, None]:\n \n chosen_env = \"m_adam_heating\"\n\n with patch(\n \"homeassistant.components.plugwise.gateway.Smile\", autospec=True\n ) as smile_mock:\n smile = smile_mock.return_value\n\n smile.gateway_id = \"da224107914542988a88561b4452b0f6\"\n smile.heater_id = \"056ee145a816487eaa69243c3280f8bf\"\n smile.smile_version = \"3.6.4\"\n smile.smile_type = \"thermostat\"\n smile.smile_hostname = \"smile98765\"\n smile.smile_name = \"Adam\"\n\n smile.connect.return_value = True\n\n smile.notifications = _read_json(chosen_env, \"notifications\")\n smile.async_update.return_value = _read_json(chosen_env, \"all_data\")\n\n yield smile\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 146, "n_words": 51, "vocab_size": 39, "complexity": 1, "nloc": 17, "token_counts": 95, "n_ast_nodes": 180, "n_identifiers": 21, "random_cut": "def mock_smile_adam_2() -> Generator[None, MagicMock, None]:\n \n chosen_env = \"m_adam_heating\"\n\n with 
patch(\n \"homeassistant.components.plugwise.gateway.Smile\", autospec=True\n ) as smile_mock:\n smile = smile_mock.return_value\n\n smile.gateway_id = \"da224107914542988a88561b4", "d_id": 87135, "documentation": { "docstring": "Create a 2nd Mock Adam environment for testing exceptions.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 106941, "commit_id": "6ef6b37fc2113c041f7d2643d70b553ec335d597", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/axes3d.py", "file_name": "axes3d.py", "fun_name": "plot_wireframe", "commit_message": "Remove *args deprecations", "code": "def plot_wireframe(self, X, Y, Z, **kwargs):\n \n\n had_data = self.has_data()\n if Z.ndim != 2:\n raise ValueError(\"Argument Z must be 2-dimensional.\")\n # FIXME: Support masked arrays\n X, Y, Z = np.broadcast_arrays(X, Y, Z)\n rows, cols = Z.shape\n\n has_stride = 'rstride' in kwargs or 'cstride' in kwargs\n has_count = 'rcount' in kwargs or 'ccount' in kwargs\n\n if has_stride and has_count:\n raise ValueError(\"Cannot specify both stride and count arguments\")\n\n rstride = kwargs.pop('rstride', 1)\n cstride = kwargs.pop('cstride', 1)\n rcount = kwargs.pop('rcount', 50)\n ccount = kwargs.pop('ccount', 50)\n\n if rcParams['_internal.classic_mode']:\n # Strides have priority over counts in classic mode.\n # So, only compute strides from counts\n # if counts were explicitly given\n if has_count:\n rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0\n cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0\n else:\n # If the strides are provided then it has priority.\n # Otherwise, compute the strides from the counts.\n if not has_stride:\n rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0\n cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0\n\n # We want two sets of lines, one running along the \"rows\" of\n # Z and another set of lines running along the \"columns\" of Z.\n # This transpose will make it easy to obtain the columns.\n tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)\n\n if rstride:\n rii = list(range(0, rows, rstride))\n # Add the last index only if needed\n if rows > 0 and rii[-1] != (rows - 1):\n rii += [rows-1]\n else:\n rii = []\n if cstride:\n cii = list(range(0, cols, cstride))\n # Add the last index only if needed\n if cols > 0 and cii[-1] != (cols - 1):\n cii += [cols-1]\n else:\n cii = []\n\n if rstride == 0 and cstride == 0:\n raise ValueError(\"Either rstride or cstride must be non zero\")\n\n # If the inputs were empty, then just\n # reset everything.\n if Z.size == 0:\n rii = []\n cii = []\n\n xlines = [X[i] for i in rii]\n ylines = [Y[i] for i in rii]\n zlines = [Z[i] for i in rii]\n\n txlines = [tX[i] for i in cii]\n tylines = [tY[i] for i in cii]\n tzlines = [tZ[i] for i in cii]\n\n lines = ([list(zip(xl, yl, zl))\n for xl, yl, zl in zip(xlines, ylines, zlines)]\n + [list(zip(xl, yl, zl))\n for xl, yl, zl in zip(txlines, tylines, tzlines)])\n\n linec = art3d.Line3DCollection(lines, **kwargs)\n self.add_collection(linec)\n self.auto_scale_xyz(X, Y, Z, had_data)\n\n return linec\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1017, "n_words": 393, "vocab_size": 193, "complexity": 30, "nloc": 54, "token_counts": 539, "n_ast_nodes": 846, "n_identifiers": 52, "random_cut": "def plot_wireframe(self, X, Y, Z, **kwargs):\n \n\n had_data = self.has_data()\n if Z.ndim != 2:\n raise 
ValueError(\"Argument Z must be 2-dimensional.\")\n # FIXME: Support masked arrays\n X, Y, Z = np.broadcast_arrays(X, Y, Z)\n rows, cols = Z.shape\n\n has_stride = 'rstride' in kwargs or 'cstride' in kwargs\n has_count = 'rcount' in kwargs or 'ccount' in kwargs\n\n if has_stride and has_count:\n raise ValueError(\"Cannot specify both stride and count arguments\")\n\n rstride = kwargs.pop('rstride', 1)\n cstride = kwargs.pop('cstride', 1)\n rcount = kwargs.pop('rcount', 50)\n ccount = kwargs.pop('ccount', 50)\n\n if rcParams['_internal.classic_mode']:\n # Strides have priority over counts in classic mode.\n # So, only compute strides from counts\n # if counts were explicitly given\n if has_count:\n rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0\n cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0\n else:\n # If the strides are provided then it has priority.\n # Otherwise, compute the strides from the counts.\n if not has_stride:\n ", "d_id": 22518, "documentation": { "docstring": "\n Plot a 3D wireframe.\n\n .. note::\n\n The *rcount* and *ccount* kwargs, which both default to 50,\n determine the maximum number of samples used in each direction. If\n the input data is larger, it will be downsampled (by slicing) to\n these numbers of points.\n\n Parameters\n ----------\n X, Y, Z : 2D arrays\n Data values.\n\n rcount, ccount : int\n Maximum number of samples used in each direction. If the input\n data is larger, it will be downsampled (by slicing) to these\n numbers of points. Setting a count to zero causes the data to be\n not sampled in the corresponding direction, producing a 3D line\n plot rather than a wireframe plot. Defaults to 50.\n\n rstride, cstride : int\n Downsampling stride in each direction. These arguments are\n mutually exclusive with *rcount* and *ccount*. If only one of\n *rstride* or *cstride* is set, the other defaults to 1. Setting a\n stride to zero causes the data to be not sampled in the\n corresponding direction, producing a 3D line plot rather than a\n wireframe plot.\n\n 'classic' mode uses a default of ``rstride = cstride = 1`` instead\n of the new default of ``rcount = ccount = 50``.\n\n **kwargs\n Other arguments are forwarded to `.Line3DCollection`.\n ", "n_words": 198, "vocab_size": 105, "n_whitespaces": 474, "language": "en" } }, { "id": 204534, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/handlers/base.py", "file_name": "base.py", "fun_name": "check_response", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def check_response(self, response, callback, name=None):\n \n if not (response is None or asyncio.iscoroutine(response)):\n return\n if not name:\n if isinstance(callback, types.FunctionType): # FBV\n name = \"The view %s.%s\" % (callback.__module__, callback.__name__)\n else: # CBV\n name = \"The view %s.%s.__call__\" % (\n callback.__module__,\n callback.__class__.__name__,\n )\n if response is None:\n raise ValueError(\n \"%s didn't return an HttpResponse object. It returned None \"\n \"instead.\" % name\n )\n elif asyncio.iscoroutine(response):\n raise ValueError(\n \"%s didn't return an HttpResponse object. It returned an \"\n \"unawaited coroutine instead. 
You may need to add an 'await' \"\n \"into your view.\" % name\n )\n\n # Other utility methods.\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 372, "n_words": 97, "vocab_size": 63, "complexity": 7, "nloc": 22, "token_counts": 105, "n_ast_nodes": 181, "n_identifiers": 14, "random_cut": "def check_response(self, response, callback, name=None):\n \n if not (response is None or asyncio.iscoroutine(response)):\n return\n if not name:\n if isinstance(callback, types.FunctionType): # FBV\n ", "d_id": 50774, "documentation": { "docstring": "\n Raise an error if the view returned None or an uncalled coroutine.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 157247, "commit_id": "0d8e12be4c2261b3457978c16aba7e893b1cf4a1", "repo": "dask", "path": "dask/dataframe/io/io.py", "file_name": "io.py", "fun_name": "_meta_from_array", "commit_message": "Support `cupy.ndarray` to `cudf.DataFrame` dispatching in `dask.dataframe` (#9579)", "code": "def _meta_from_array(x, columns=None, index=None, meta=None):\n \n\n if x.ndim > 2:\n raise ValueError(\n \"from_array does not input more than 2D array, got\"\n \" array with shape %r\" % (x.shape,)\n )\n\n if index is not None:\n if not isinstance(index, Index):\n raise ValueError(\"'index' must be an instance of dask.dataframe.Index\")\n index = index._meta\n\n if meta is None:\n meta = meta_lib_from_array(x).DataFrame()\n\n if getattr(x.dtype, \"names\", None) is not None:\n # record array has named columns\n if columns is None:\n columns = list(x.dtype.names)\n elif np.isscalar(columns):\n raise ValueError(\"For a struct dtype, columns must be a list.\")\n elif not all(i in x.dtype.names for i in columns):\n extra = sorted(set(columns).difference(x.dtype.names))\n raise ValueError(f\"dtype {x.dtype} doesn't have fields {extra}\")\n fields = x.dtype.fields\n dtypes = [fields[n][0] if n in fields else \"f8\" for n in columns]\n elif x.ndim == 1:\n if np.isscalar(columns) or columns is None:\n return meta._constructor_sliced(\n [], name=columns, dtype=x.dtype, index=index\n )\n elif len(columns) == 1:\n return meta._constructor(\n np.array([], dtype=x.dtype), columns=columns, index=index\n )\n raise ValueError(\n \"For a 1d array, columns must be a scalar or single element list\"\n )\n else:\n if np.isnan(x.shape[1]):\n raise ValueError(\"Shape along axis 1 must be known\")\n if columns is None:\n columns = list(range(x.shape[1])) if x.ndim == 2 else [0]\n elif len(columns) != x.shape[1]:\n raise ValueError(\n \"Number of column names must match width of the array. 
\"\n f\"Got {len(columns)} names for {x.shape[1]} columns\"\n )\n dtypes = [x.dtype] * len(columns)\n\n data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}\n return meta._constructor(data, columns=columns, index=index)\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 630, "n_words": 234, "vocab_size": 136, "complexity": 21, "nloc": 47, "token_counts": 397, "n_ast_nodes": 656, "n_identifiers": 39, "random_cut": "def _meta_from_array(x, columns=None, index=None, meta=None):\n \n\n if x.ndim > 2:\n raise ValueError(\n \"from_array does not input more than 2D array, got\"\n \" array with shape %r\" % (x.shape,)\n )\n\n if index is not None:\n if not isinstance(index, Index):\n raise ValueError(\"'index' must be an instance of dask.dataframe.Index\")\n index = index._meta\n\n if meta is None:\n meta = meta_lib_from_array(x).DataFrame()\n\n if getattr(x.dtype, \"names\", None) is not None:\n # record array has named columns\n if columns is None:\n columns = list(x.dtype.names)\n elif np.isscalar(columns):\n raise ValueError(\"For a struct dtype, columns must be a list.\")\n elif not all(i in x.dtype.names for i in columns):\n extra = sorted(set(columns).difference(x.dtype.names))\n raise ValueError(f\"dtype {x.dtype} doesn't have fields {extra}\")\n fields = x.dtype.fields\n dtypes = [fields[n][0] if n in fields else \"f8\" for n in columns]\n elif x.ndim == 1:\n if np.isscalar(columns) or columns is None:\n return meta._constructor_sliced(\n [], name=columns, dtype=x.dtype, index=index\n )\n elif len(columns) == 1:\n return meta._constructor(\n np.array([], dtype=x.dtype), columns=columns, index=index\n", "d_id": 36894, "documentation": { "docstring": "Create empty DataFrame or Series which has correct dtype", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 73615, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/typed_table_block/blocks.py", "file_name": "blocks.py", "fun_name": "rows", "commit_message": "Reformat with black", "code": "def rows(self):\n \n for row in self.row_data:\n yield [\n column[\"block\"].bind(value)\n for column, value in zip(self.columns, row[\"values\"])\n ]\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 82, "n_words": 16, "vocab_size": 14, "complexity": 3, "nloc": 6, "token_counts": 41, "n_ast_nodes": 68, "n_identifiers": 9, "random_cut": "def rows(self):\n \n for row in self.row_data:\n yield [\n column[\"block\"].bind(value)\n for column, ", "d_id": 16070, "documentation": { "docstring": "\n Iterate over the rows of the table, with each row returned as a list of BoundBlocks\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 31, "language": "en" } }, { "id": 196686, "commit_id": "9ad8ab9fe58051cf11626ba6654852fcfec60147", "repo": "sympy", "path": "sympy/stats/crv_types.py", "file_name": "crv_types.py", "fun_name": "Uniform", "commit_message": "Documentation cleanup 5", "code": "def Uniform(name, left, right):\n r\n\n return rv(name, UniformDistribution, (left, right))\n\n#-------------------------------------------------------------------------------\n# UniformSum distribution ------------------------------------------------------\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, 
"n_whitespaces": 18, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 60, "token_counts": 24, "n_ast_nodes": 36, "n_identifiers": 6, "random_cut": "def Uniform(name, left, right):\n r\n\n return rv(name, UniformDistribution, (left, right))\n\n#--------------------------------------------------------", "d_id": 48104, "documentation": { "docstring": "\n Create a continuous random variable with a uniform distribution.\n\n Explanation\n ===========\n\n The density of the uniform distribution is given by\n\n .. math::\n f(x) := \\begin{cases}\n \\frac{1}{b - a} & \\text{for } x \\in [a,b] \\\\\n 0 & \\text{otherwise}\n \\end{cases}\n\n with :math:`x \\in [a,b]`.\n\n Parameters\n ==========\n\n a : Real number, :math:`-\\infty < a`, the left boundary\n b : Real number, :math:`a < b < \\infty`, the right boundary\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import Uniform, density, cdf, E, variance\n >>> from sympy import Symbol, simplify\n\n >>> a = Symbol(\"a\", negative=True)\n >>> b = Symbol(\"b\", positive=True)\n >>> z = Symbol(\"z\")\n\n >>> X = Uniform(\"x\", a, b)\n\n >>> density(X)(z)\n Piecewise((1/(-a + b), (b >= z) & (a <= z)), (0, True))\n\n >>> cdf(X)(z)\n Piecewise((0, a > z), ((-a + z)/(-a + b), b >= z), (1, True))\n\n >>> E(X)\n a/2 + b/2\n\n >>> simplify(variance(X))\n a**2/12 - a*b/6 + b**2/12\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29\n .. [2] http://mathworld.wolfram.com/UniformDistribution.html\n\n ", "n_words": 157, "vocab_size": 111, "n_whitespaces": 331, "language": "en" } }, { "id": 223527, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "get_ttext", "commit_message": "add python 3.10.4 for windows", "code": "def get_ttext(value):\n \n m = _non_token_end_matcher(value)\n if not m:\n raise errors.HeaderParseError(\n \"expected ttext but found '{}'\".format(value))\n ttext = m.group()\n value = value[len(ttext):]\n ttext = ValueTerminal(ttext, 'ttext')\n _validate_xtext(ttext)\n return ttext, value\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 71, "n_words": 29, "vocab_size": 23, "complexity": 2, "nloc": 10, "token_counts": 61, "n_ast_nodes": 106, "n_identifiers": 12, "random_cut": "def get_ttext(value):\n \n m = _non_token_end_matcher(value)\n if not m:\n raise errors.HeaderParseError(\n \"expected ttext but found '{}'\".format(value))\n ttext ", "d_id": 56953, "documentation": { "docstring": "ttext = \n\n We allow any non-TOKEN_ENDS in ttext, but add defects to the token's\n defects list if we find non-ttext characters. 
We also register defects for\n *any* non-printables even though the RFC doesn't exclude all of them,\n because we follow the spirit of RFC 5322.\n\n ", "n_words": 47, "vocab_size": 39, "n_whitespaces": 63, "language": "en" } }, { "id": 196497, "commit_id": "338775324184a00c6bf50b8339ebd805c2bf4879", "repo": "sympy", "path": "sympy/codegen/ast.py", "file_name": "ast.py", "fun_name": "kwargs", "commit_message": "Fixed issues with __slots__ (overlaps and omission in base classes)\n\nAcross several modules, two types of slot problems were detected.\n\n1) Overlaps\n A class redefines slots already present in a superclass.\n This reduces the memory savings from slots, as well as\n potentially introduces unpredictable behavior.\n\n2) Omission in base classes\n A class defines slots, but one of its superclasses does not.\n This reduces the memory savings from slots, as well as allows\n `__dict__` to be created and non-slot attributes to be set.\n\nMost of these issues were straightforward to fix, except in the `codegen`\nmodule, which makes use of slots to generate constructors. Here a change\nto the constructor logic was needed in order to solve the slots issues.", "code": "def kwargs(self, exclude=(), apply=None):\n \n kwargs = {k: getattr(self, k) for k in self._fields if k not in exclude}\n if apply is not None:\n return {k: apply(v) for k, v in kwargs.items()}\n else:\n return kwargs\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 84, "n_words": 34, "vocab_size": 25, "complexity": 5, "nloc": 17, "token_counts": 67, "n_ast_nodes": 103, "n_identifiers": 9, "random_cut": "def kwargs(self, exclude=(), apply=None):\n \n kwargs = {k: getattr(self, k) for k in self._fields if k not in exclude}\n ", "d_id": 47945, "documentation": { "docstring": " Get instance's attributes as dict of keyword arguments.\n\n Parameters\n ==========\n\n exclude : collection of str\n Collection of keywords to exclude.\n\n apply : callable, optional\n Function to apply to all values.\n ", "n_words": 30, "vocab_size": 24, "n_whitespaces": 88, "language": "en" } }, { "id": 140165, "commit_id": "f27e85cd7df5ca2873ef6231200a1530e16ac35d", "repo": "ray", "path": "python/ray/serve/deployment_executor_node.py", "file_name": "deployment_executor_node.py", "fun_name": "_execute_impl", "commit_message": "[Serve][Deployment Graph][Perf] Add minimal executor DAGNode (#24754)\n\ncloses #24475\r\n\r\nCurrent deployment graph has big perf issues compare with using plain deployment handle, mostly because overhead of DAGNode traversal mechanism. We need this mechanism to empower DAG API, specially deeply nested objects in args where we rely on pickling; But meanwhile the nature of each execution becomes re-creating and replacing every `DAGNode` instances involved upon each execution, that incurs overhead.\r\n\r\nSome overhead is inevitable due to pickling and executing DAGNode python code, but they could be quite minimal. As I profiled earlier, pickling itself is quite fast for our benchmarks at magnitude of microseconds.\r\n\r\nMeanwhile the elephant in the room is DeploymentNode and its relatives are doing too much work in constructor that's beyond necessary, thus slowing everything down. 
So the fix is as simple as \r\n\r\n1) Introduce a new set of executor dag node types that contains absolute minimal information that only preserves the DAG structure with traversal mechanism, and ability to call relevant deployment handles.\r\n2) Add a simple new pass in our build() that generates and replaces nodes with executor dag to produce a final executor dag to run the graph.\r\n\r\nCurrent ray dag -> serve dag mixed a lot of stuff related to deployment generation and init args, in longer term we should remove them but our correctness depends on it so i rather leave it as separate PR.\r\n\r\n### Current 10 node chain with deployment graph `.bind()`\r\n```\r\nchain_length: 10, num_clients: 1\r\nlatency_mean_ms: 41.05, latency_std_ms: 15.18\r\nthroughput_mean_tps: 27.5, throughput_std_tps: 3.2\r\n```\r\n\r\n### Using raw deployment handle without dag overhead\r\n```\r\nchain_length: 10, num_clients: 1\r\nlatency_mean_ms: 20.39, latency_std_ms: 4.57\r\nthroughput_mean_tps: 51.9, throughput_std_tps: 1.04\r\n```\r\n\r\n### After this PR:\r\n```\r\nchain_length: 10, num_clients: 1\r\nlatency_mean_ms: 20.35, latency_std_ms: 0.87\r\nthroughput_mean_tps: 48.4, throughput_std_tps: 1.43\r\n```", "code": "def _execute_impl(self, *args, **kwargs) -> RayServeHandle:\n \n return self._deployment_handle\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 6, "token_counts": 18, "n_ast_nodes": 30, "n_identifiers": 6, "random_cut": "def _execute_impl(self, *args, **kwargs) -> RayServeHandle:\n \n return self._deployment_handle\n", "d_id": 31883, "documentation": { "docstring": "Does not call into anything or produce a new value, as the time\n this function gets called, all child nodes are already resolved to\n ObjectRefs.\n ", "n_words": 25, "vocab_size": 25, "n_whitespaces": 46, "language": "en" } }, { "id": 73523, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/tests/test_templates.py", "file_name": "test_templates.py", "fun_name": "test_settings_use_default_site", "commit_message": "Reformat with black", "code": "def test_settings_use_default_site(self):\n \n context = {}\n\n # This should use the default site\n template = '{{ settings(\"tests.testsetting\", use_default_site=True).title}}'\n\n self.assertEqual(\n self.render(template, context, request_context=False),\n self.default_site_settings.title,\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 7, "token_counts": 37, "n_ast_nodes": 62, "n_identifiers": 9, "random_cut": "def test_settings_use_default_site(self):\n \n context = {}\n\n # This should use the default site\n template = '{{ settings(\"tests.testsetting\", use_default_site=True).title}}'\n\n self.assertEqual(\n self.render(template, context, request_co", "d_id": 16040, "documentation": { "docstring": "\n Check that the {{ settings(use_default_site=True) }} option works with\n no site in the context\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 181571, "commit_id": "1393889d5bc29c8b7c4ed45bca4736d6dfdfad8d", "repo": "moviepy", "path": "tests/test_ffmpeg_reader.py", "file_name": "test_ffmpeg_reader.py", "fun_name": "test_stream_square_brackets_and_language", 
"commit_message": "Handle brackets and language in FFMPEG output (#1837)\n\n* Improve regex to handle brackets and language\r\n\r\n* Update CHANGELOG.md\r\n\r\n* Simplify `if`", "code": "def test_stream_square_brackets_and_language():\n infos = \n\n d = FFmpegInfosParser(infos, \"clip.mp4\").parse()\n\n assert d\n assert len(d[\"inputs\"][0][\"streams\"]) == 2\n assert d[\"inputs\"][0][\"streams\"][0][\"language\"] == \"eng\"\n assert d[\"inputs\"][0][\"streams\"][1][\"language\"] is None\n\n", "url": "https://github.com/Zulko/moviepy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 40, "n_words": 22, "vocab_size": 16, "complexity": 1, "nloc": 12, "token_counts": 75, "n_ast_nodes": 132, "n_identifiers": 6, "random_cut": "def test_stream_square_brackets_and_language():\n infos = \n\n d = FFmpegInfosParser(infos, \"clip.mp4\").parse()\n\n assert d\n assert len(d[\"inputs\"][0][\"streams\"]) == 2\n assert d[\"inputs\"][0][\"streams\"][0][\"language\"] == \"eng\"\n assert d[\"inputs\"][0][\"streams\"][1][\"language\"] is None\n\n", "d_id": 43365, "documentation": { "docstring": "\nInput #0, mpeg, from 'clip.mp4':\n Duration: 00:02:15.00, start: 52874.498178, bitrate: 266 kb/s\n Stream #0:0[0x1e0](eng): Video: ..., 25 tbr, 90k tbn, 50 tbc\n Stream #0:1[0x1c0](und): Audio: mp2, 0 channels, s16p\nAt least one output file must be specified", "n_words": 37, "vocab_size": 36, "n_whitespaces": 42, "language": "en" } }, { "id": 60126, "commit_id": "a368874d1b145c1ec5201e5efd3c26ce7c1e8611", "repo": "prefect", "path": "src/prefect/_internal/concurrency/primitives.py", "file_name": "primitives.py", "fun_name": "wait", "commit_message": "Add thread-safe async primitives `Event` and `Future` (#7865)\n\nCo-authored-by: Serina Grill <42048900+serinamarie@users.noreply.github.com>", "code": "async def wait(self) -> None:\n \n if self._is_set:\n return\n\n if not self._loop:\n self._loop = get_running_loop()\n self._event = asyncio.Event()\n\n await self._event.wait()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 80, "n_words": 19, "vocab_size": 17, "complexity": 3, "nloc": 12, "token_counts": 44, "n_ast_nodes": 78, "n_identifiers": 8, "random_cut": "async def wait(self) -> None:\n ", "d_id": 11991, "documentation": { "docstring": "\n Wait until the flag has been set.\n\n If the flag has already been set when this method is called, it returns immediately.\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 44, "language": "en" } }, { "id": 56385, "commit_id": "78825acff7ee179ddb1e98da6efa6d39e4e3d1bf", "repo": "prefect", "path": "src/prefect/agent.py", "file_name": "agent.py", "fun_name": "get_and_submit_flow_runs", "commit_message": "Add message to indicate a work queue is paused\n\nThe agent now checks if the work queue is paused when it does not find any submittable runs. We may want to reduce the frequency of this API call in the future, but it seems reasonable as a starting point.", "code": "async def get_and_submit_flow_runs(self) -> List[FlowRun]:\n \n if not self.started:\n raise RuntimeError(\"Agent is not started. 
Use `async with OrionAgent()...`\")\n\n self.logger.debug(\"Checking for flow runs...\")\n\n before = pendulum.now(\"utc\").add(\n seconds=self.prefetch_seconds or PREFECT_AGENT_PREFETCH_SECONDS.value()\n )\n\n # Use the work queue id or load one from the name\n work_queue_id = self.work_queue_id or await self.work_queue_id_from_name()\n if not work_queue_id:\n return []\n\n try:\n submittable_runs = await self.client.get_runs_in_work_queue(\n id=work_queue_id, limit=10, scheduled_before=before\n )\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code == status.HTTP_404_NOT_FOUND:\n raise ValueError(\n f\"No work queue found with id '{work_queue_id}'\"\n ) from None\n else:\n raise\n\n # Check for a paused work queue for display purposes\n if not submittable_runs:\n work_queue = await self.client.read_work_queue(work_queue_id)\n if work_queue.is_paused:\n self.logger.info(\n f\"Work queue {work_queue.name!r} ({work_queue.id}) is paused.\"\n )\n\n for flow_run in submittable_runs:\n self.logger.info(f\"Submitting flow run '{flow_run.id}'\")\n\n # don't resubmit a run\n if flow_run.id in self.submitting_flow_run_ids:\n continue\n\n self.submitting_flow_run_ids.add(flow_run.id)\n self.task_group.start_soon(\n self.submit_run,\n flow_run,\n )\n return submittable_runs\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 570, "n_words": 134, "vocab_size": 90, "complexity": 11, "nloc": 41, "token_counts": 202, "n_ast_nodes": 362, "n_identifiers": 42, "random_cut": "async def get_and_submit_flow_runs(self) -> List[FlowRun]:\n \n if not self.started:\n raise RuntimeError(\"Agent is not started. Use `async with OrionAgent()...`\")\n\n self.logger.debug(\"Checking for flow runs...\")\n\n before = pendulum.now(\"utc\").add(\n seconds=self.prefetch_seconds or PREFECT_AGENT_PREFETCH_SECONDS.value()\n )\n\n # Use the work queue id or load one from the name\n work_queue_id = self.work_queue_id or await self.work_queue_id_from_name()\n if not work_queue_id:\n return []\n\n try:\n submittable_runs = await self.client.get_runs_in_work_queue(\n id=work_queue_id, limit=10, scheduled_before=before\n )\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code == status.HTTP_404_NOT_FOUND:\n raise ValueError(\n f\"No work queue found with id '{work_queue_id}'\"\n ) from None\n else:\n raise\n\n # Check for a paused work queue for display purposes\n if not submittable_runs:\n work_queue = await sel", "d_id": 11516, "documentation": { "docstring": "\n The principle method on agents. 
Queries for scheduled flow runs and submits\n them for execution in parallel.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 39, "language": "en" } }, { "id": 276718, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/conv_utils.py", "file_name": "conv_utils.py", "fun_name": "conv_output_length", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def conv_output_length(input_length, filter_size, padding, stride, dilation=1):\n \n if input_length is None:\n return None\n assert padding in {\"same\", \"valid\", \"full\", \"causal\"}\n dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n if padding in [\"same\", \"causal\"]:\n output_length = input_length\n elif padding == \"valid\":\n output_length = input_length - dilated_filter_size + 1\n elif padding == \"full\":\n output_length = input_length + dilated_filter_size - 1\n return (output_length + stride - 1) // stride\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 120, "n_words": 68, "vocab_size": 39, "complexity": 5, "nloc": 12, "token_counts": 95, "n_ast_nodes": 160, "n_identifiers": 8, "random_cut": "def conv_output_length(input_length, filter_size, padding, stride, dilation=1):\n \n if input_lengt", "d_id": 81709, "documentation": { "docstring": "Determines output length of a convolution given input length.\n\n Args:\n input_length: integer.\n filter_size: integer.\n padding: one of \"same\", \"valid\", \"full\", \"causal\"\n stride: integer.\n dilation: dilation rate, integer.\n\n Returns:\n The output length (integer).\n ", "n_words": 32, "vocab_size": 26, "n_whitespaces": 83, "language": "en" } }, { "id": 264784, "commit_id": "82706eb3a68e963d7ac089478788b87892d4ee79", "repo": "netbox", "path": "netbox/dcim/models/cables.py", "file_name": "cables.py", "fun_name": "get_split_nodes", "commit_message": "Migrate CablePath to use two-dimensional array", "code": "def get_split_nodes(self):\n \n rearport = path_node_to_object(self._nodes[-1])\n\n return FrontPort.objects.filter(rear_port=rearport)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 9, "random_cut": "def get_split_nodes(self):\n \n rearport = path_node_to_object(self._nodes[-1])\n\n return FrontPort.objects.filter(rear_port=rearp", "d_id": 77807, "documentation": { "docstring": "\n Return all available next segments in a split cable path.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 206882, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/views/generic/list.py", "file_name": "list.py", "fun_name": "get_template_names", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_template_names(self):\n \n try:\n names = super().get_template_names()\n except ImproperlyConfigured:\n # If template_name isn't specified, it's not a problem --\n # we just start with an empty list.\n names = []\n\n # If the list is a queryset, we'll invent a template name based on the\n # app and model name. 
This name gets put at the end of the template\n # name list so that user-supplied names override the automatically-\n # generated ones.\n if hasattr(self.object_list, \"model\"):\n opts = self.object_list.model._meta\n names.append(\n \"%s/%s%s.html\"\n % (opts.app_label, opts.model_name, self.template_name_suffix)\n )\n elif not names:\n raise ImproperlyConfigured(\n \"%(cls)s requires either a 'template_name' attribute \"\n \"or a get_queryset() method that returns a QuerySet.\"\n % {\n \"cls\": self.__class__.__name__,\n }\n )\n return names\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 391, "n_words": 113, "vocab_size": 85, "complexity": 4, "nloc": 20, "token_counts": 86, "n_ast_nodes": 155, "n_identifiers": 16, "random_cut": "def get_template_names(self):\n \n try:\n names = super().get_template_names()\n except ImproperlyConfigured:\n # If template_name isn't specified, it's not a problem --\n # we just start with an empty list.\n names = []\n\n # If the list is a queryset, we'll invent a template name based on the\n # app and model name. This name gets put at the end of the template\n # name list so that user-supplied names override the automatically-\n # generated ones.\n if hasattr(self.object_list, \"model\"):\n opts = self.object_list.model._meta\n names.append(\n \"%s/%s%s.html\"\n % (opts.app_label, opts.model_name, self.template_name_suffix)\n )\n elif not names:\n raise ImproperlyConfigured(\n \"%(cls)s requires either a 'template_name' attribute \"\n \"or a get_queryset() method that returns a QuerySet.\"", "d_id": 51781, "documentation": { "docstring": "\n Return a list of template names to be used for the request. Must return\n a list. 
May not be called if render_to_response is overridden.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 46, "language": "en" } }, { "id": 189007, "commit_id": "471b19d2aa799cd73bded23379e864dd35bec2b6", "repo": "psutil", "path": "scripts/internal/fix_flake8.py", "file_name": "fix_flake8.py", "fun_name": "remove_lines", "commit_message": "Fix typos", "code": "def remove_lines(fname, entries):\n \n to_remove = []\n for entry in entries:\n msg, issue, lineno, pos, descr = entry\n # 'module imported but not used'\n if issue == 'F401' and handle_f401(fname, lineno):\n to_remove.append(lineno)\n # 'blank line(s) at end of file'\n elif issue == 'W391':\n lines = read_lines(fname)\n i = len(lines) - 1\n while lines[i] == '\\n':\n to_remove.append(i + 1)\n i -= 1\n # 'too many blank lines'\n elif issue == 'E303':\n howmany = descr.replace('(', '').replace(')', '')\n howmany = int(howmany[-1])\n for x in range(lineno - howmany, lineno):\n to_remove.append(x)\n\n if to_remove:\n newlines = []\n for i, line in enumerate(read_lines(fname), 1):\n if i not in to_remove:\n newlines.append(line)\n print(\"removing line(s) from %s\" % fname)\n write_file(fname, newlines)\n\n return len(to_remove)\n\n", "url": "https://github.com/giampaolo/psutil.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 352, "n_words": 112, "vocab_size": 80, "complexity": 11, "nloc": 25, "token_counts": 185, "n_ast_nodes": 310, "n_identifiers": 26, "random_cut": "def remove_lines(fname, entries):\n \n to_remove = []\n for entry in entries:\n msg, issue, lineno, pos, descr = entry\n # 'module imported but not used'\n if issue == 'F401' and handle_f401(fname, lineno):\n to_remove.append(lineno)\n # 'blank line(s) at end of file'\n elif issue == 'W391':\n lines = read_lines(fname)\n i = len(lines) - 1\n while lines[i] == '\\n':\n ", "d_id": 45965, "documentation": { "docstring": "Check if we should remove lines, then do it.\n Return the number of lines removed.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 21, "language": "en" } }, { "id": 112855, "commit_id": "98c1a77f61900d486f46d284c49fb65675dbee6a", "repo": "nni", "path": "nni/algorithms/hpo/bohb_advisor/bohb_advisor.py", "file_name": "bohb_advisor.py", "fun_name": "_get_one_trial_job", "commit_message": "Support multiple HPO experiments in one process (#4855)", "code": "def _get_one_trial_job(self):\n \n if not self.generated_hyper_configs:\n ret = {\n 'parameter_id': '-1_0_0',\n 'parameter_source': 'algorithm',\n 'parameters': ''\n }\n self.send(CommandType.NoMoreTrialJobs, nni.dump(ret))\n return None\n assert self.generated_hyper_configs\n params = self.generated_hyper_configs.pop(0)\n ret = {\n 'parameter_id': params[0],\n 'parameter_source': 'algorithm',\n 'parameters': params[1]\n }\n self.parameters[params[0]] = params[1]\n return ret\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 217, "n_words": 39, "vocab_size": 26, "complexity": 2, "nloc": 18, "token_counts": 95, "n_ast_nodes": 164, "n_identifiers": 12, "random_cut": "def _get_one_trial_job(self):\n \n if not self.generated_hyper_configs:\n ret = {\n 'parameter_id': '-1_0_0',\n 'parameter_source': 'algorithm',\n 'parameters': ''\n }\n self.send(CommandType.NoMoreTrialJobs, nni.dump(ret))\n ", "d_id": 24770, "documentation": { "docstring": "get one trial job, i.e., one hyperparameter configuration.\n\n If this function is called, Command will be sent by BOHB:\n 
a. If there is a parameter need to run, will return \"NewTrialJob\" with a dict:\n {\n 'parameter_id': id of new hyperparameter\n 'parameter_source': 'algorithm'\n 'parameters': value of new hyperparameter\n }\n b. If BOHB don't have parameter waiting, will return \"NoMoreTrialJobs\" with\n {\n 'parameter_id': '-1_0_0',\n 'parameter_source': 'algorithm',\n 'parameters': ''\n }\n ", "n_words": 67, "vocab_size": 48, "n_whitespaces": 189, "language": "en" } }, { "id": 122221, "commit_id": "4da72cf3988b4918f65b1401e46c40b7c4504963", "repo": "jax", "path": "jax/experimental/pjit.py", "file_name": "pjit.py", "fun_name": "global_array_to_host_local_array", "commit_message": "Add `host_local_array_to_global_array` and `global_array_to_host_local_array` for enabling transition to jax.Array.\n\nAlso support `FROM_GDA` for `jax.Array` as a backwards compatible change so that users can continue to use that until they transition to jax.Array. Its currently required because of usage like `in_axis_resources = (FROM_GDA, FROM_GDA, P('data'), None)` and changing this on users side will require input from users so we as JAX can just support it as a temporary thing since GDA and Array act similarly in pjit.\n\nPiperOrigin-RevId: 479035326", "code": "def global_array_to_host_local_array(global_inputs, global_mesh, pspecs):\n \n def _convert(arr, pspec):\n local_aval = global_mesh._global_to_local(\n pxla._get_array_mapping(pspec), arr.aval)\n return array.ArrayImpl(\n local_aval, MeshPspecSharding(global_mesh.local_mesh, pspec),\n arr._arrays, committed=True)\n\n flattened_inps, out_tree = tree_flatten(global_inputs)\n out_pspecs = flatten_axis_resources(\n 'output pspecs', out_tree, pspecs, tupled_args=True)\n out = tree_map(_convert, tuple(flattened_inps), out_pspecs)\n return tree_unflatten(out_tree, out)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 77, "n_words": 39, "vocab_size": 34, "complexity": 1, "nloc": 7, "token_counts": 54, "n_ast_nodes": 150, "n_identifiers": 28, "random_cut": "def global_array_to_host_local_array(global_inputs, global_mesh, pspecs):\n \n def _convert(arr, pspec):", "d_id": 27125, "documentation": { "docstring": "Converts a global `jax.Array` to a host local `jax.Array`.\n\n You can use this function to transition to `jax.Array`. Using `jax.Array` with\n `pjit` has the same semantics of using GDA with pjit i.e. 
all `jax.Array`\n inputs to pjit should be globally shaped and the output from `pjit` will also\n be globally shaped `jax.Array`s\n\n You can use this function to convert the globally shaped `jax.Array` output\n from pjit to host local values again so that the transition to jax.Array can\n be a mechanical change.\n\n Example usage:\n\n ```\n global_inputs = jax.experimental.pjit.host_local_array_to_global_array(\n host_local_inputs, global_mesh, in_pspecs)\n\n with mesh:\n global_out = pjitted_fun(global_inputs)\n\n host_local_output = jax.experimental.pjit.global_array_to_host_local_array(\n global_out, mesh, out_pspecs)\n ```\n\n Args:\n global_inputs: A Pytree of global `jax.Array`s.\n global_mesh: The global mesh.\n pspecs: A Pytree of PartitionSpecs.\n ", "n_words": 119, "vocab_size": 73, "n_whitespaces": 152, "language": "en" } }, { "id": 213099, "commit_id": "a5db070f446b7cfebdaa6ad2e3dcf78f6105a272", "repo": "serverless-application-model", "path": "samtranslator/utils/py27hash_fix.py", "file_name": "py27hash_fix.py", "fun_name": "__setitem__", "commit_message": "fix: Py27hash fix (#2182)\n\n* Add third party py27hash code\r\n\r\n* Add Py27UniStr and unit tests\r\n\r\n* Add py27hash_fix utils and tests\r\n\r\n* Add to_py27_compatible_template and tests\r\n\r\n* Apply py27hash fix to wherever it is needed\r\n\r\n* Apply py27hash fix, all tests pass except api_with_any_method_in_swagger\r\n\r\n* apply py27hash fix in openapi + run black\r\n\r\n* remove py27 testing\r\n\r\n* remove other py27 references\r\n\r\n* black fixes\r\n\r\n* fixes/typos\r\n\r\n* remove py27 from tox.ini\r\n\r\n* refactoring\r\n\r\n* third party notice\r\n\r\n* black\r\n\r\n* Fix py27hash fix to deal with null events\r\n\r\n* Fix Py27UniStr repr for unicode literals\r\n\r\n* black reformat\r\n\r\n* Update _template_has_api_resource to check data type more defensively\r\n\r\n* Apply py27Dict in _get_authorizers\r\n\r\n* Apply Py27Dict to authorizers and gateway responses which will go into swagger\r\n\r\n* Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class\r\n\r\n* Rename _convert_to_py27_dict to _convert_to_py27_type\r\n\r\n* Apply Py27UniStr to path param name\r\n\r\n* Handle HttpApi resource under to_py27_compatible_template\r\n\r\n* Fix InvalidDocumentException to not sort different exceptions\r\n\r\n* black reformat\r\n\r\n* Remove unnecessary test files\r\n\r\nCo-authored-by: Wing Fung Lau <4760060+hawflau@users.noreply.github.com>", "code": "def __setitem__(self, key, value):\n \n super(Py27Dict, self).__setitem__(key, value)\n self.keylist.add(key)\n", "url": "https://github.com/aws/serverless-application-model.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "def __setitem__(self, key, value):\n ", "d_id": 53642, "documentation": { "docstring": "\n Override of __setitem__ to track keys and simulate Python2.7 dict\n\n Parameters\n ----------\n key: hashable\n value: Any\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 59, "language": "en" } }, { "id": 20004, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/utils/virtualenv.py", "file_name": "virtualenv.py", "fun_name": "virtualenv_no_global", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating 
vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def virtualenv_no_global() -> bool:\n \n # PEP 405 compliance needs to be checked first since virtualenv >=20 would\n # return True for both checks, but is only able to use the PEP 405 config.\n if _running_under_venv():\n return _no_global_under_venv()\n\n if _running_under_regular_virtualenv():\n return _no_global_under_regular_virtualenv()\n\n return False\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 75, "n_words": 43, "vocab_size": 35, "complexity": 3, "nloc": 7, "token_counts": 27, "n_ast_nodes": 52, "n_identifiers": 6, "random_cut": "def virtualenv_no_global() -> bool:\n \n # PEP 405 compliance needs to be checked firs", "d_id": 3172, "documentation": { "docstring": "Returns a boolean, whether running in venv with no system site-packages.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 75478, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/backends/database/mysql/mysql.py", "file_name": "mysql.py", "fun_name": "autocomplete", "commit_message": "Reformat with black", "code": "def autocomplete(self):\n \n texts = []\n for field in self.search_fields:\n for current_field, value in self.prepare_field(self.obj, field):\n if isinstance(current_field, AutocompleteField):\n texts.append((value))\n\n return \" \".join(texts)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 95, "n_words": 22, "vocab_size": 20, "complexity": 4, "nloc": 7, "token_counts": 56, "n_ast_nodes": 91, "n_identifiers": 13, "random_cut": "def autocomplete(self):\n \n texts = []\n for field in self.search_fields:\n for current_field, value in self.prepare_field(self.obj, field):\n if isinstance(current_field,", "d_id": 16409, "documentation": { "docstring": "\n Returns all values to index as \"autocomplete\". 
This is the value of all AutocompleteFields\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 271833, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_utils.py", "file_name": "training_utils.py", "fun_name": "list_to_tuple", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def list_to_tuple(maybe_list):\n \n if isinstance(maybe_list, list):\n return tuple(maybe_list)\n return maybe_list\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 4, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 5, "random_cut": "def list_to_tuple(maybe_list):\n \n if isinstance(maybe_list, list):\n return tuple", "d_id": 80856, "documentation": { "docstring": "Datasets will stack the list of tensor, so switch them to tuples.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 43926, "commit_id": "d48a3a357fd89ec805d086d5b6c1f1d4daf77b9a", "repo": "airflow", "path": "tests/models/test_taskinstance.py", "file_name": "test_taskinstance.py", "fun_name": "test_not_recorded_for_unused", "commit_message": "Add TaskMap and TaskInstance.map_id (#20286)\n\nCo-authored-by: Ash Berlin-Taylor ", "code": "def test_not_recorded_for_unused(self, dag_maker, xcom_value):\n \n with dag_maker(dag_id=\"test_not_recorded_for_unused\") as dag:\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 8, "token_counts": 63, "n_ast_nodes": 38, "n_identifiers": 6, "random_cut": "def test_not_recorded_for_unused(self, dag_maker, xcom_value):\n \n ", "d_id": 8100, "documentation": { "docstring": "A value not used for task-mapping should not be recorded.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 274555, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/losses.py", "file_name": "losses.py", "fun_name": "_ragged_tensor_mse", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _ragged_tensor_mse(y_true, y_pred):\n \n return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)\n\n\n@keras_export(\n \"keras.metrics.mean_absolute_error\",\n \"keras.metrics.mae\",\n \"keras.metrics.MAE\",\n \"keras.losses.mean_absolute_error\",\n \"keras.losses.mae\",\n \"keras.losses.MAE\",\n)\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\n \"keras.metrics.mean_absolute_error\",\n \"keras.metrics.mae\",\n \"keras.metrics.MAE\",\n \"keras.losses.mean_absolute_error\",\n \"keras.losses.mae\",\n \"keras.losses.MAE\",\n)\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 37, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 71, "n_identifiers": 10, "random_cut": "def _ragged_tensor_mse(y_true, y_pred):\n \n return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)\n\n\n@keras_export(\n \"keras.metrics.mean_abso", "d_id": 81233, "documentation": { "docstring": "Implements support for handling RaggedTensors.\n\n Args:\n y_true: 
RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.\n When the number of dimensions of the batch feature vector [d0, .. dN] is\n greater than one the return value is a RaggedTensor. Otherwise a Dense\n tensor with dimensions [batch_size] is returned.\n ", "n_words": 69, "vocab_size": 47, "n_whitespaces": 108, "language": "en" } }, { "id": 191358, "commit_id": "18aeb720126a68201c7e3b5a617139c27c779496", "repo": "langchain", "path": "tests/unit_tests/test_formatting.py", "file_name": "test_formatting.py", "fun_name": "test_does_not_allow_extra_kwargs", "commit_message": "initial commit", "code": "def test_does_not_allow_extra_kwargs() -> None:\n \n template = \"This is a {foo} test.\"\n with pytest.raises(KeyError):\n formatter.format(template, foo=\"good\", bar=\"oops\")\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 32, "n_ast_nodes": 61, "n_identifiers": 9, "random_cut": "def test_does_not_allow_extra_kwargs() -> None:\n \n template = \"This is a {foo} test.\"\n with pytest.raises(KeyError):\n formatter.for", "d_id": 46496, "documentation": { "docstring": "Test formatting does not allow extra key word arguments.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 258632, "commit_id": "0dfaaadfe2d0e0b4fd9d2ba22a75b7b1b1903049", "repo": "scikit-learn", "path": "sklearn/neighbors/_lof.py", "file_name": "_lof.py", "fun_name": "score_samples", "commit_message": "DOC improve LOF documentation wrt difference of predict and fit_predict (#21878)\n\n* improve LOF documentation\r\n\r\n* Update sklearn/neighbors/_lof.py\r\n\r\nCo-authored-by: Alexandre Gramfort \r\n\r\nCo-authored-by: Alexandre Gramfort ", "code": "def score_samples(self, X):\n \n check_is_fitted(self)\n X = check_array(X, accept_sparse=\"csr\")\n\n distances_X, neighbors_indices_X = self.kneighbors(\n X, n_neighbors=self.n_neighbors_\n )\n X_lrd = self._local_reachability_density(distances_X, neighbors_indices_X)\n\n lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]\n\n # as bigger is better:\n return -np.mean(lrd_ratios_array, axis=1)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 107, "n_words": 33, "vocab_size": 30, "complexity": 1, "nloc": 9, "token_counts": 77, "n_ast_nodes": 122, "n_identifiers": 19, "random_cut": "def score_samples(self, X):\n \n check_is_fitted(self)\n X = check_array(X, accept_sparse=\"csr\")\n\n distances_X, neighbors_indices_X = self.kneighbors(\n X, n_neighbors=self.n_neighbors_\n )\n X_lrd = self._local_reachability_density(distances_X, neighbors_indices_X)\n\n lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]\n\n # as bigger is better:\n return -np.mean(lrd_r", "d_id": 75342, "documentation": { "docstring": "Opposite of the Local Outlier Factor of X.\n\n It is the opposite as bigger is better, i.e. 
large values correspond\n to inliers.\n\n **Only available for novelty detection (when novelty is set to True).**\n The argument X is supposed to contain *new data*: if X contains a\n point from training, it considers the later in its own neighborhood.\n Also, the samples in X are not considered in the neighborhood of any\n point. Because of this, the scores obtained via ``score_samples`` may\n differ from the standard LOF scores.\n The standard LOF scores for the training data is available via the\n ``negative_outlier_factor_`` attribute.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. the training samples.\n\n Returns\n -------\n opposite_lof_scores : ndarray of shape (n_samples,)\n The opposite of the Local Outlier Factor of each input samples.\n The lower, the more abnormal.\n ", "n_words": 148, "vocab_size": 93, "n_whitespaces": 311, "language": "en" } }, { "id": 66920, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/payroll/doctype/payroll_period/payroll_period.py", "file_name": "payroll_period.py", "fun_name": "get_payroll_period_days", "commit_message": "style: format code with black", "code": "def get_payroll_period_days(start_date, end_date, employee, company=None):\n\tif not company:\n\t\tcompany = frappe.db.get_value(\"Employee\", employee, \"company\")\n\tpayroll_period = frappe.db.sql(\n\t\t,\n\t\t{\"company\": company, \"start_date\": start_date, \"end_date\": end_date},\n\t)\n\n\tif len(payroll_period) > 0:\n\t\tactual_no_of_days = date_diff(getdate(payroll_period[0][2]), getdate(payroll_period[0][1])) + 1\n\t\tworking_days = actual_no_of_days\n\t\tif not cint(\n\t\t\tfrappe.db.get_value(\"Payroll Settings\", None, \"include_holidays_in_total_working_days\")\n\t\t):\n\t\t\tholidays = get_holiday_dates_for_employee(\n\t\t\t\temployee, getdate(payroll_period[0][1]), getdate(payroll_period[0][2])\n\t\t\t)\n\t\t\tworking_days -= len(holidays)\n\t\treturn payroll_period[0][0], working_days, actual_no_of_days\n\treturn False, False, False\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 44, "n_words": 63, "vocab_size": 48, "complexity": 4, "nloc": 26, "token_counts": 165, "n_ast_nodes": 256, "n_identifiers": 18, "random_cut": "def get_payroll_period_days(start_date, end_date, employee, company=None):\n\tif not company:\n\t\tcompany = frappe.db.get_value(\"Employee\", employee, \"company\")\n\tpayroll_period = frappe.db.sql(\n\t\t,\n\t\t{\"company\": company, \"st", "d_id": 14380, "documentation": { "docstring": "\n\t\tselect name, start_date, end_date\n\t\tfrom `tabPayroll Period`\n\t\twhere\n\t\t\tcompany=%(company)s\n\t\t\tand %(start_date)s between start_date and end_date\n\t\t\tand %(end_date)s between start_date and end_date\n\t", "n_words": 21, "vocab_size": 14, "n_whitespaces": 15, "language": "en" } }, { "id": 176699, "commit_id": "2a05ccdb07cff88e56661dee8a9271859354027f", "repo": "networkx", "path": "networkx/algorithms/bipartite/basic.py", "file_name": "basic.py", "fun_name": "density", "commit_message": "Remove redundant py2 numeric conversions (#5661)\n\n* Remove redundant float conversion\r\n\r\n* Remove redundant int conversion\r\n\r\n* Use integer division\r\n\r\nCo-authored-by: Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>", "code": "def density(B, nodes):\n \n n = len(B)\n m = nx.number_of_edges(B)\n nb = 
len(nodes)\n nt = n - nb\n if m == 0: # includes cases n==0 and n==1\n d = 0.0\n else:\n if B.is_directed():\n d = m / (2 * nb * nt)\n else:\n d = m / (nb * nt)\n return d\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 120, "n_words": 52, "vocab_size": 31, "complexity": 3, "nloc": 13, "token_counts": 76, "n_ast_nodes": 124, "n_identifiers": 12, "random_cut": "def density(B, nodes):\n \n n = len(B)\n m = nx.number_of_edges(B)\n nb = len(nodes)\n nt = n - nb\n if m == 0: # includes cases n==0 and n==1\n d = 0.0\n else:\n if B.is_directed():\n ", "d_id": 42041, "documentation": { "docstring": "Returns density of bipartite graph B.\n\n Parameters\n ----------\n B : NetworkX graph\n\n nodes: list or container\n Nodes in one node set of the bipartite graph.\n\n Returns\n -------\n d : float\n The bipartite density\n\n Examples\n --------\n >>> from networkx.algorithms import bipartite\n >>> G = nx.complete_bipartite_graph(3, 2)\n >>> X = set([0, 1, 2])\n >>> bipartite.density(G, X)\n 1.0\n >>> Y = set([3, 4])\n >>> bipartite.density(G, Y)\n 1.0\n\n Notes\n -----\n The container of nodes passed as argument must contain all nodes\n in one of the two bipartite node sets to avoid ambiguity in the\n case of disconnected graphs.\n See :mod:`bipartite documentation `\n for further details on how bipartite graphs are handled in NetworkX.\n\n See Also\n --------\n color\n ", "n_words": 113, "vocab_size": 79, "n_whitespaces": 208, "language": "en" } }, { "id": 46701, "commit_id": "2bb26a38070a4b949bfb210ef1d5644e016e373a", "repo": "airflow", "path": "airflow/www/views.py", "file_name": "views.py", "fun_name": "redirect_or_json", "commit_message": "Add details drawer to Grid View (#22123)\n\n* make UI and tree work with mapped tasks\r\n\r\nbasic slide drawer\r\n\r\nreformat grid background colors\r\n\r\nimprove rendering and add selected dag run\r\n\r\nfix hover and extra prop\r\n\r\nswitch from drawer to details section\r\n\r\nadd tooltip info to details\r\n\r\nuse API\r\n\r\nmake side panel collapsible, useTasks,\r\n\r\ndag run actions\r\n\r\ndag run actions w/ react-query\r\n\r\ntask instance links\r\n\r\ntask actions\r\n\r\nremove modals\r\n\r\nadjust panel width and use status color\r\n\r\nminor details styling\r\n\r\nadd duration to tooltips\r\n\r\nadd last scheduling decision and fix tests\r\n\r\n* move ref and selection to providers\r\n\r\n* fix test with mock providers\r\n\r\n* update TI and DR buttons\r\n\r\n* download logs and external logs\r\n\r\n* add extra links to TI details\r\n\r\n* download log bug fixes\r\n\r\n* fix extra links, hide local TZ if UTC,\r\n\r\n* confirm mark task failed/success\r\n\r\n* Update confirm modals for runs and tasks\r\n\r\n- async/await on mutations instead of useeffect\r\n- add confirmation for run actions\r\n\r\n* Fix dialog scrolling\r\n\r\n* Code cleanup and fix task clear\r\n\r\n* Fix task/run label, dialog focus, dag details overflow, panel open/close\r\n\r\n* Add timezone provider\r\n\r\n* Fix TimezoneEvent import\r\n\r\n* Improve button UX\r\n\r\n- Remove details panel title\r\n- Add button to reset root\r\n- Make \"More Details\" buttons more specific\r\n- Specify timezone as DAG timezone\r\n\r\n* autorefresh dag run details\r\n\r\n* auto-refresh task instance details\r\n\r\n* revert useTreeData changes\r\n\r\nNone of these changes were relevant to this PR. 
Better to be done separately.\r\n\r\n* Address PR feedback\r\n\r\n- useState vs useDisclosure\r\n- Remove extraneous elements\r\n- Copy changes\r\n- Wire up params for runTask\r\n- Breadcrumb padding\r\n\r\n* Handle task/run action sideeffects by separating autorefresh and treeData hooks\r\n\r\n* Clean up views.py endpoints\r\n\r\n- Pass 'Accept' headers for json returns\r\n- Consolidate more endpoints to return json or redirect\r\n\r\n* pass request as arg\r\n\r\n* remove request as arg\r\n\r\n* Anticipate when the 'Accept' header is not present\r\n\r\n* Fix argument count errors\r\n\r\n* Replace hard coded urls\r\n\r\n* Replace hard coded urls in react components\r\n\r\n* Update filter upstream link\r\n\r\n* Split TaskInstance details component\r\n\r\n* Fix undefined variables in tests\r\n\r\n* init_api_connexion in tests\r\n\r\n- add readme\r\n- rename context providers to avoid confusion with Airflow Providers\r\n\r\n* Fix url params, hide last item breadcrumb links\r\n\r\n* Update task run failed copy\r\n\r\n* Fix taskinstance/list buttons\r\n\r\nCo-authored-by: Tzu-ping Chung ", "code": "def redirect_or_json(origin, msg, status=\"\"):\n \n if request.headers.get('Accept') == 'application/json':\n return {'status': status, 'message': msg}\n else:\n if status:\n flash(msg, status)\n else:\n flash(msg)\n return redirect(origin)\n\n\n######################################################################################\n# Error handlers\n######################################################################################\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 118, "n_words": 27, "vocab_size": 23, "complexity": 3, "nloc": 9, "token_counts": 56, "n_ast_nodes": 103, "n_identifiers": 9, "random_cut": "def redirect_or_json(origin, msg, status=\"\"):\n \n if request.headers.get('Accept') == 'application/json':\n return {'status': status, 'message': msg}\n else:\n if status:\n flash(msg, status)\n else:\n flash(msg)\n return redirect(origin)\n\n\n######################################################################################\n# Error handlers\n###################################################################################", "d_id": 8956, "documentation": { "docstring": "\n Some endpoints are called by javascript,\n returning json will allow us to more elegantly handle side-effects in-page\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 27, "language": "en" } }, { "id": 259212, "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", "repo": "scikit-learn", "path": "sklearn/preprocessing/_encoders.py", "file_name": "_encoders.py", "fun_name": "_map_drop_idx_to_infrequent", "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename 
variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx):\n \n if not self._infrequent_enabled:\n return drop_idx\n\n default_to_infrequent = self._default_to_infrequent_mappings[feature_idx]\n if default_to_infrequent is None:\n return drop_idx\n\n # Raise error when explicitly dropping a category that is infrequent\n infrequent_indices = self._infrequent_indices[feature_idx]\n if infrequent_indices is not None and drop_idx in infrequent_indices:\n categories = self.categories_[feature_idx]\n raise ValueError(\n f\"Unable to drop category {categories[drop_idx]!r} from feature\"\n f\" {feature_idx} because it is infrequent\"\n )\n return default_to_infrequent[drop_idx]\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 203, "n_words": 62, "vocab_size": 47, "complexity": 5, "nloc": 14, 
"token_counts": 72, "n_ast_nodes": 127, "n_identifiers": 12, "random_cut": "def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx):\n \n if not self._infrequent_enabled:\n return drop_idx\n\n default_to_infrequent = self._default_to_infrequent_mappings[feature_idx]\n if default_to_infrequent is None:\n return drop_idx\n\n # Raise error when explicitly dropping a category that is infrequent\n infrequent_indices = self._infrequent_indices[feature_idx]\n if infrequent_indices is not None and drop_idx in infrequent_indices:\n categories = self.categories_[feature_idx]\n raise ValueError(\n f\"Unable to drop category {categories[drop_idx]!r} from feature\"\n f\" {feature_idx} because it is infrequent\"\n )\n return default_to_infreq", "d_id": 75649, "documentation": { "docstring": "Convert `drop_idx` into the index for infrequent categories.\n\n If there are no infrequent categories, then `drop_idx` is\n returned. This method is called in `_compute_drop_idx` when the `drop`\n parameter is an array-like.\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 59, "language": "en" } }, { "id": 130020, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "dashboard/tests/test_dashboard.py", "file_name": "test_dashboard.py", "fun_name": "test_dashboard_module_decorator", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_dashboard_module_decorator(enable_test_module):\n head_cls_list = dashboard_utils.get_all_modules(dashboard_utils.DashboardHeadModule)\n agent_cls_list = dashboard_utils.get_all_modules(\n dashboard_utils.DashboardAgentModule\n )\n\n assert any(cls.__name__ == \"TestHead\" for cls in head_cls_list)\n assert any(cls.__name__ == \"TestAgent\" for cls in agent_cls_list)\n\n test_code = \n run_string_as_driver(test_code)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 57, "n_words": 29, "vocab_size": 21, "complexity": 3, "nloc": 23, "token_counts": 58, "n_ast_nodes": 97, "n_identifiers": 13, "random_cut": "def test_dashboard_module_decorator(enable_test_module):\n head_cls_list = dashboard_utils.get_all_modules(dashboard_utils.DashboardHeadModule)\n agent_cls_list = dashboard_utils.get_all_modules(\n dashboard_utils.DashboardAgentModule\n )\n\n assert any(cls.__name__ == \"TestHead\" for cls in head_cls_list)\n assert any(cls.__name__", "d_id": 29076, "documentation": { "docstring": "\nimport os\nimport ray.dashboard.utils as dashboard_utils\n\nos.environ.pop(\"RAY_DASHBOARD_MODULE_TEST\")\nhead_cls_list = dashboard_utils.get_all_modules(\n dashboard_utils.DashboardHeadModule)\nagent_cls_list = dashboard_utils.get_all_modules(\n dashboard_utils.DashboardAgentModule)\nprint(head_cls_list)\nprint(agent_cls_list)\nassert all(cls.__name__ != \"TestHead\" for cls in head_cls_list)\nassert all(cls.__name__ != \"TestAgent\" for cls in agent_cls_list)\nprint(\"success\")\n", "n_words": 34, "vocab_size": 25, "n_whitespaces": 38, "language": "en" } }, { "id": 223844, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/parser.py", "file_name": "parser.py", "fun_name": "parsestr", "commit_message": "add python 3.10.4 for windows", "code": "def parsestr(self, text, headersonly=False):\n \n return self.parse(StringIO(text), headersonly=headersonly)", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 26, "n_ast_nodes": 41, "n_identifiers": 6, "random_cut": "def parsestr(self, text, headersonly=False):\n \n return self.parse(StringIO(text), headersonly=headers", "d_id": 57102, "documentation": { "docstring": "Create a message structure from a string.\n\n Returns the root of the message structure. Optional headersonly is a\n flag specifying whether to stop parsing after reading the headers or\n not. The default is False, meaning it parses the entire contents of\n the file.\n ", "n_words": 43, "vocab_size": 34, "n_whitespaces": 80, "language": "en" } }, { "id": 266162, "commit_id": "ebf555e1fb1267348ca620c15ce456767d91042a", "repo": "netbox", "path": "netbox/netbox/views/generic/utils.py", "file_name": "utils.py", "fun_name": "get_prerequisite_model", "commit_message": "Use strings to specify prerequisite models", "code": "def get_prerequisite_model(queryset):\n \n if not queryset.exists():\n for prereq in getattr(queryset.model, 'prerequisite_models', []):\n model = apps.get_model(prereq)\n if not model.objects.exists():\n return model\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 69, "n_words": 19, "vocab_size": 16, "complexity": 4, "nloc": 6, "token_counts": 49, "n_ast_nodes": 83, "n_identifiers": 9, "random_cut": "def get_prerequisite_model(queryset):\n \n if not queryset.exists():\n for prereq in getattr(queryset.model, 'prerequisite_models', []):\n model = apps.get_model(prereq)\n if not model.objects.exists():\n return model\n", "d_id": 78323, "documentation": { "docstring": "\n Return any prerequisite model that must be created prior to creating\n an instance of the current model.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 27, "language": "en" } }, { "id": 275623, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/utils.py", "file_name": "utils.py", "fun_name": "filter_empty_gradients", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def filter_empty_gradients(grads_and_vars):\n \n grads_and_vars = tuple(grads_and_vars)\n if not grads_and_vars:\n return grads_and_vars\n\n filtered = []\n vars_with_empty_grads = []\n for grad, var in grads_and_vars:\n if grad is None:\n vars_with_empty_grads.append(var)\n else:\n filtered.append((grad, var))\n filtered = tuple(filtered)\n\n if not filtered:\n variable = ([v.name for _, v in grads_and_vars],)\n raise ValueError(\n f\"No gradients provided for any variable: {variable}. \"\n f\"Provided `grads_and_vars` is {grads_and_vars}.\"\n )\n if vars_with_empty_grads:\n logging.warning(\n (\n \"Gradients do not exist for variables %s when minimizing the loss. 
\"\n \"If you're using `model.compile()`, did you forget to provide a `loss`\"\n \"argument?\"\n ),\n ([v.name for v in vars_with_empty_grads]),\n )\n return filtered\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 303, "n_words": 95, "vocab_size": 69, "complexity": 8, "nloc": 28, "token_counts": 118, "n_ast_nodes": 203, "n_identifiers": 15, "random_cut": "def filter_empty_gradients(grads_and_vars):\n \n grads_and_vars = tuple(grads_and_vars)\n if not grads_and_vars:\n return grads_and_vars\n\n filtered = []\n vars_with_empty_grads = []\n for grad, var in grads_and_vars:\n if grad is None:\n vars_with_empty_grads.append(var)\n else:\n filtered.append((grad, var))\n filtered = tuple(filtered)\n\n if not filtered:\n variable = ([v.name for _, v in grads_and_vars],)\n raise ValueError(\n f\"No gradients provided for any variable: {variable}. \"\n f\"Provided `grads_a", "d_id": 81435, "documentation": { "docstring": "Filter out `(grad, var)` pairs that have a gradient equal to `None`.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 200000, "commit_id": "f8aedc2fa7434091fc83ff241298534f79047c60", "repo": "sympy", "path": "sympy/physics/wigner.py", "file_name": "wigner.py", "fun_name": "real_gaunt", "commit_message": "Update wigner.py", "code": "def real_gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None):\n r\n l_1, l_2, l_3, m_1, m_2, m_3 = [\n as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)]\n\n # check for quick exits\n if sum(1 for i in (m_1, m_2, m_3) if i < 0) % 2:\n return S.Zero # odd number of negative m\n if (l_1 + l_2 + l_3) % 2:\n return S.Zero # sum of l is odd\n lmax = l_2 + l_3\n lmin = max(abs(l_2 - l_3), min(abs(m_2 + m_3), abs(m_2 - m_3)))\n if (lmin + lmax) % 2:\n lmin += 1\n if lmin not in range(lmax, lmin - 2, -2):\n return S.Zero\n\n kron_del = lambda i, j: 1 if i == j else 0\n s = lambda e: -1 if e % 2 else 1 # (-1)**e to give +/-1, avoiding float when e<0\n A = lambda a, b: (-kron_del(a, b)*s(a-b) + kron_del(a, -b)*\n s(b)) if b < 0 else 0\n B = lambda a, b: (kron_del(a, b) + kron_del(a, -b)*s(a)) if b > 0 else 0\n C = lambda a, b: kron_del(abs(a), abs(b))*(kron_del(a, 0)*kron_del(b, 0) +\n (B(a, b) + I*A(a, b))/sqrt(2))\n ugnt = 0\n for i in range(-l_1, l_1+1):\n U1 = C(i, m_1)\n for j in range(-l_2, l_2+1):\n U2 = C(j, m_2)\n U3 = C(-i-j, m_3)\n ugnt = ugnt + re(U1*U2*U3)*gaunt(l_1, l_2, l_3, i, j, -i-j)\n\n if prec is not None:\n ugnt = ugnt.n(prec)\n return ugnt\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 439, "n_words": 231, "vocab_size": 124, "complexity": 15, "nloc": 142, "token_counts": 424, "n_ast_nodes": 623, "n_identifiers": 37, "random_cut": "def real_gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None):\n r\n l_1, l_2, l_3, m_1, m_2, m_3 = [\n as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)]\n\n # check for quick exits\n if sum(1 for i in (m_1, m_2, m_3) if i < 0) % 2:\n return S.Zero # odd number of negative m\n if (l_1 + l_2 + l_3) % 2:\n return S.Zero # sum of l is odd\n lmax = l_2 + l_3\n lmin = max(abs(l_2 - l_3), min(abs(m_2 + m_3), abs(m_2 - m_3)))\n if (lmin + lmax) % 2:\n lmin += 1\n if lmin not in range(lmax, lmin - 2, -2):\n return S.Zero\n\n kron_del = lambda i, j: 1 if i == j else 0\n s = lambda e: -1 if e % 2 else 1 # (-1)**e to give +/-1, avoiding float when e<0\n A = lambda a, b: (-kron_del(a, b)*s(a-b) + 
kron_del(a, -b)*\n s(b)) if b < 0 else 0\n B = lambda a, b: (kron_del(a, b) + kron_del(a, -b)*s(a)) if b > 0 else 0\n C = lambda a, b: kron", "d_id": 49486, "documentation": { "docstring": "\n Calculate the real Gaunt coefficient.\n\n Explanation\n ===========\n The real Gaunt coefficient is defined as the integral over three\n real spherical harmonics:\n \n .. math::\n \\begin{aligned}\n \\operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3)\n &=\\int Z^{m_1}_{l_1}(\\Omega)\n Z^{m_2}_{l_2}(\\Omega) Z^{m_3}_{l_3}(\\Omega) \\,d\\Omega \\\\\n \\end{aligned}\n\n Alternatively, it can be defined in terms of the standard Gaunt\n coefficient by relating the real spherical harmonics to the standard\n spherical harmonics via a unitary transformation `U`, i.e.\n `Z^{m}_{l}(\\Omega)=\\sum_{m'}U^{m}_{m'}Y^{m'}_{l}(\\Omega)` [Homeier96]_.\n The real Gaunt coefficient is then defined as\n\n .. math::\n \\begin{aligned}\n \\operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3)\n &=\\int Z^{m_1}_{l_1}(\\Omega)\n Z^{m_2}_{l_2}(\\Omega) Z^{m_3}_{l_3}(\\Omega) \\,d\\Omega \\\\\n &=\\sum_{m'_1 m'_2 m'_3} U^{m_1}_{m'_1}U^{m_2}_{m'_2}U^{m_3}_{m'_3}\n \\operatorname{Gaunt}(l_1,l_2,l_3,m'_1,m'_2,m'_3)\n \\end{aligned}\n\n The unitary matrix `U` has components\n\n .. math::\n \\begin{aligned}\n U^m_{m'} = \\delta_{|m||m'|}*(\\delta_{m'0}\\delta_{m0} + \\frac{1}{\\sqrt{2}}\\big[\\Theta(m)\n \\big(\\delta_{m'm}+(-1)^{m'}\\delta_{m'-m}\\big)+i\\Theta(-m)\\big((-1)^{-m}\n \\delta_{m'-m}-\\delta_{m'm}*(-1)^{m'-m}\\big)\\big])\n \\end{aligned}\n\n where `\\delta_{ij}` is the Kronecker delta symbol and `\\Theta` is a step\n function defined as\n\n .. math::\n \\begin{aligned}\n \\Theta(x) = \\begin{cases} 1 \\,\\text{for}\\, x > 0 \\\\ 0 \\,\\text{for}\\, x \\leq 0 \\end{cases}\n \\end{aligned}\n\n Parameters\n ==========\n l_1, l_2, l_3, m_1, m_2, m_3 :\n Integer.\n prec - precision, default: ``None``.\n Providing a precision can\n drastically speed up the calculation.\n\n Returns\n =======\n Rational number times the square root of a rational number.\n\n Examples\n ========\n >>> from sympy.physics.wigner import real_gaunt\n >>> real_gaunt(2,2,4,-1,-1,0)\n -2/(7*sqrt(pi))\n >>> real_gaunt(10,10,20,-9,-9,0).n(64)\n -0.00002480019791932209313156167...\n \n It is an error to use non-integer values for `l` and `m`::\n real_gaunt(2.8,0.5,1.3,0,0,0)\n Traceback (most recent call last):\n ...\n ValueError: l values must be integer\n real_gaunt(2,2,4,0.7,1,-3.4)\n Traceback (most recent call last):\n ...\n ValueError: m values must be integer\n\n Notes\n =====\n The real Gaunt coefficient inherits from the standard Gaunt coefficient,\n the invariance under any permutation of the pairs `(l_i, m_i)` and the\n requirement that the sum of the `l_i` be even to yield a non-zero value.\n It also obeys the following symmetry rules:\n\n - zero for `l_1`, `l_2`, `l_3` not fulfiling the condition\n `l_1 \\in \\{l_{\\text{max}}, l_{\\text{max}}-2, \\ldots, l_{\\text{min}}\\}`,\n where `l_{\\text{max}} = l_2+l_3`,\n \n .. 
math::\n \\begin{aligned}\n l_{\\text{min}} = \\begin{cases} \\kappa(l_2, l_3, m_2, m_3) & \\text{if}\\,\n \\kappa(l_2, l_3, m_2, m_3) + l_{\\text{max}}\\, \\text{is even} \\\\\n \\kappa(l_2, l_3, m_2, m_3)+1 & \\text{if}\\, \\kappa(l_2, l_3, m_2, m_3) +\n l_{\\text{max}}\\, \\text{is odd}\\end{cases}\n \\end{aligned}\n\n and `\\kappa(l_2, l_3, m_2, m_3) = \\max{\\big(|l_2-l_3|, \\min{\\big(|m_2+m_3|,\n |m_2-m_3|\\big)}\\big)}`\n \n - zero for an odd number of negative `m_i`\n \n Algorithms\n ==========\n This function uses the algorithms of [Homeier96]_ and [Rasch03]_ to\n calculate the value of the real Gaunt coefficient exactly. Note that\n the formula used in [Rasch03]_ contains alternating sums over large\n factorials and is therefore unsuitable for finite precision arithmetic\n and only useful for a computer algebra system [Rasch03]_. However, this\n function can in principle use any algorithm that computes the Gaunt\n coefficient, so it is suitable for finite precision arithmetic in so far\n as the algorithm which computes the Gaunt coefficient is.\n ", "n_words": 429, "vocab_size": 239, "n_whitespaces": 906, "language": "en" } }, { "id": 295910, "commit_id": "a61ac3ddc6d65522dfa1eb599adf73420a9267dc", "repo": "core", "path": "tests/components/siren/test_init.py", "file_name": "test_init.py", "fun_name": "test_missing_tones_list", "commit_message": "Add EntityFeature enum to Siren (#69585)\n\nCo-authored-by: Franck Nijhof ", "code": "async def test_missing_tones_list(hass):\n \n siren = MockSirenEntity(SirenEntityFeature.TONES, [\"a\", \"b\"])\n siren.hass = hass\n with pytest.raises(ValueError):\n process_turn_on_params(siren, {\"tone\": \"test\"})\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 35, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 43, "n_ast_nodes": 80, "n_identifiers": 10, "random_cut": "async def test_missing_tones_list(hass):\n \n siren = MockSirenEntity(SirenEntityFeature.TONES, [\"a\", \"b\"])\n siren.hass = hass\n with pytest.raises(ValueError):\n process_turn_on_params(siren, {\"tone\": \"test\"})\n\n", "d_id": 94918, "documentation": { "docstring": "Test ValueError when setting a tone that is missing from available_tones list.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 44090, "commit_id": "8dabce8887f02216c1037be35e80c214edcbadfe", "repo": "airflow", "path": "airflow/cli/commands/task_command.py", "file_name": "task_command.py", "fun_name": "task_failed_deps", "commit_message": "Add `--map-index` parameter to task CLI commands (#20980)", "code": "def task_failed_deps(args):\n \n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = _get_ti(task, args.execution_date_or_run_id, args.map_index)\n\n dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)\n failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))\n # TODO, Do we want to print or log this\n if failed_deps:\n print(\"Task instance dependencies not met:\")\n for dep in failed_deps:\n print(f\"{dep.dep_name}: {dep.reason}\")\n else:\n print(\"Task instance dependencies are all met.\")\n\n\n@cli_utils.action_cli(check_db=False)\n@suppress_logs_and_warning", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@cli_utils.action_cli(check_db=False)\n@suppress_logs_and_warning", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 109, "n_words": 52, 
"vocab_size": 44, "complexity": 3, "nloc": 12, "token_counts": 88, "n_ast_nodes": 180, "n_identifiers": 28, "random_cut": "def task_failed_deps(args):\n \n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = _get_ti(task, args.execution_date_or_run_id, args.map_index)\n\n dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)\n failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))\n # TODO, Do we want to print or log this\n if failed_deps:\n print(\"Task instance dependencies not met:\")\n for dep in failed_deps:\n print(f\"{dep.dep_name}: {dep.reason}\")\n else:\n ", "d_id": 8146, "documentation": { "docstring": "\n Returns the unmet dependencies for a task instance from the perspective of the\n scheduler (i.e. why a task instance doesn't get scheduled and then queued by the\n scheduler, and then run by an executor).\n >>> airflow tasks failed-deps tutorial sleep 2015-01-01\n Task instance dependencies not met:\n Dagrun Running: Task instance's dagrun did not exist: Unknown reason\n Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks\n to have succeeded, but found 1 non-success(es).\n ", "n_words": 73, "vocab_size": 59, "n_whitespaces": 101, "language": "en" } }, { "id": 217834, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/cookies.py", "file_name": "cookies.py", "fun_name": "load", "commit_message": "add python 3.10.4 for windows", "code": "def load(self, rawdata):\n \n if isinstance(rawdata, str):\n self.__parse_string(rawdata)\n else:\n # self.update() wouldn't call our custom __setitem__\n for key, value in rawdata.items():\n self[key] = value\n return\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 100, "n_words": 24, "vocab_size": 23, "complexity": 3, "nloc": 7, "token_counts": 42, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "def load(self, rawdata):\n \n if isinstance(rawdata, str):\n ", "d_id": 54959, "documentation": { "docstring": "Load cookies from a string (presumably HTTP_COOKIE) or\n from a dictionary. 
Loading cookies from a dictionary 'd'\n is equivalent to calling:\n map(Cookie.__setitem__, d.keys(), d.values())\n ", "n_words": 24, "vocab_size": 19, "n_whitespaces": 57, "language": "en" } }, { "id": 107773, "commit_id": "2357c92d87d96d519c8470776e76180e71663d0b", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "_reset_major_tick_kw", "commit_message": "Refactor handling of tick and ticklabel visiblity in Axis.clear()\n\nThis is a follow-up to #20826, which makes the exceptions from clearing\nmore explicit.", "code": "def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False):\n \n backup = {name: value for name, value in self._major_tick_kw.items()\n if name in ['tick1On', 'tick2On', 'label1On', 'label2On']}\n self._major_tick_kw.clear()\n if keep_tick_and_label_visibility:\n self._major_tick_kw.update(backup)\n self._major_tick_kw['gridOn'] = (\n mpl.rcParams['axes.grid'] and\n mpl.rcParams['axes.grid.which'] in ('both', 'major'))\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 125, "n_words": 32, "vocab_size": 27, "complexity": 5, "nloc": 9, "token_counts": 87, "n_ast_nodes": 150, "n_identifiers": 12, "random_cut": "def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False):\n \n backup = {name: value for name, value in self._major_tick_kw.items()\n if name in ['tick1On', 'tick2On', 'label1On', 'label2On']}\n self.", "d_id": 22909, "documentation": { "docstring": "\n Reset major tick params to defaults.\n\n Shared subplots pre-configure tick and label visibility. To keep this\n beyond an Axis.clear() operation, we may\n *keep_tick_and_label_visibility*.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 59, "language": "en" } }, { "id": 129623, "commit_id": "3d79815cd08c1be8e56c245e662f34366523847e", "repo": "ray", "path": "python/ray/tune/tests/test_integration_comet.py", "file_name": "test_integration_comet.py", "fun_name": "test_class_variable_to_instance", "commit_message": "Comet Integration (#20766)\n\nThis PR adds a `CometLoggerCallback` to the Tune Integrations, allowing users to log runs from Ray to [Comet](https://www.comet.ml/site/).\r\n\r\nCo-authored-by: Michael Cullan \r\nCo-authored-by: Antoni Baum ", "code": "def test_class_variable_to_instance(self):\n \n logger = self.logger\n self.assertEqual(logger._to_exclude, logger._exclude_results)\n self.assertEqual(logger._to_system, logger._system_results)\n self.assertEqual(logger._to_other, logger._other_results)\n self.assertEqual(logger._to_episodes, logger._episode_results)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 55, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 59, "n_ast_nodes": 93, "n_identifiers": 12, "random_cut": "def test_class_variable_to_instance(self):\n \n logger = self.logger\n self.assertEqual(logger._to_exclude, logger._exclude_results)\n self.assertEqual(logger._to_system, lo", "d_id": 28990, "documentation": { "docstring": "Test that class variables get properly assigned to instance\n variables.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 24, "language": "en" } }, { "id": 21881, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/chardet/__init__.py", "file_name": "__init__.py", "fun_name": "detect_all", "commit_message": "Rename 
notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def detect_all(byte_str, ignore_threshold=False):\n \n if not isinstance(byte_str, bytearray):\n if not isinstance(byte_str, bytes):\n raise TypeError(\n f\"Expected object of type bytes or bytearray, got: {type(byte_str)}\"\n )\n byte_str = bytearray(byte_str)\n\n detector = UniversalDetector()\n detector.feed(byte_str)\n detector.close()\n\n if detector.input_state == InputState.HIGH_BYTE:\n results = []\n probers = []\n for prober in detector.charset_probers:\n if hasattr(prober, \"probers\"):\n probers.extend(p for p in prober.probers)\n else:\n probers.append(prober)\n for prober in probers:\n if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:\n charset_name = prober.charset_name or \"\"\n lower_charset_name = charset_name.lower()\n # Use Windows encoding name instead of ISO-8859 if we saw any\n # extra Windows-specific bytes\n if lower_charset_name.startswith(\"iso-8859\") and detector.has_win_bytes:\n charset_name = detector.ISO_WIN_MAP.get(\n lower_charset_name, charset_name\n )\n results.append(\n {\n \"encoding\": charset_name,\n \"confidence\": prober.get_confidence(),\n \"language\": prober.language,\n }\n )\n if len(results) > 0:\n return sorted(results, key=lambda result: -result[\"confidence\"])\n\n return [detector.result]\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 574, "n_words": 120, "vocab_size": 88, "complexity": 14, "nloc": 36, "token_counts": 219, "n_ast_nodes": 372, "n_identifiers": 37, "random_cut": "def detect_all(byte_str, ignore_threshold=False):\n \n if not isinstance(byte_str, bytearray):\n if not isinstance(byte_str, bytes):\n raise TypeError(\n f\"Expected object of type bytes or bytearray, got: {type(byte_str)}\"\n )\n b", "d_id": 4085, "documentation": { "docstring": "\n Detect all the possible encodings of the given byte string.\n\n :param byte_str: The byte sequence to examine.\n :type byte_str: ``bytes`` or ``bytearray``\n :param ignore_threshold: Include encodings that are below\n ``UniversalDetector.MINIMUM_THRESHOLD``\n in results.\n :type ignore_threshold: ``bool``\n ", "n_words": 35, "vocab_size": 28, "n_whitespaces": 134, "language": "en" } }, { "id": 217317, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/enum.py", "file_name": "enum.py", "fun_name": "__call__", "commit_message": "add python 3.10.4 for windows", "code": "def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):\n \n if names is None: # simple value lookup\n return cls.__new__(cls, value)\n # otherwise, functional API: we're creating a new Enum type\n return cls._create_(\n value,\n names,\n module=module,\n qualname=qualname,\n type=type,\n start=start,\n )\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 184, "n_words": 39, "vocab_size": 36, "complexity": 2, "nloc": 11, "token_counts": 70, "n_ast_nodes": 100, "n_identifiers": 10, "random_cut": "def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):\n \n if names is None: # simple value lookup\n return cls.__new__(cls, value)\n # otherwise, functional API: we're creating a new Enum type\n return cls._create_(\n value,\n names,\n module=module,\n qualname=qualname,\n type=type,\n start=start,\n ", "d_id": 54715, 
"documentation": { "docstring": "\n Either returns an existing member, or creates a new enum class.\n\n This method is used both when an enum class is given a value to match\n to an enumeration member (i.e. Color(3)) and for the functional API\n (i.e. Color = Enum('Color', names='RED GREEN BLUE')).\n\n When used for the functional API:\n\n `value` will be the name of the new class.\n\n `names` should be either a string of white-space/comma delimited names\n (values will start at `start`), or an iterator/mapping of name, value pairs.\n\n `module` should be set to the module this class is being created in;\n if it is not set, an attempt to find that module will be made, but if\n it fails the class will not be picklable.\n\n `qualname` should be set to the actual location this class can be found\n at in its module; by default it is set to the global scope. If this is\n not correct, unpickling will fail in some circumstances.\n\n `type`, if set, will be mixed in as the first base class.\n ", "n_words": 167, "vocab_size": 99, "n_whitespaces": 281, "language": "en" } }, { "id": 42482, "commit_id": "692adaff901dd9daf29400fdf3385130aefbfb2a", "repo": "nltk", "path": "nltk/util.py", "file_name": "util.py", "fun_name": "edges2dot", "commit_message": "Fix some tests in Wordnet-related DocStrings", "code": "def edges2dot(edges, shapes=None, attr=None):\n \n if not shapes:\n shapes = dict()\n if not attr:\n attr = dict()\n\n dot_string = \"digraph G {\\n\"\n\n for pair in attr.items():\n dot_string += f\"{pair[0]} = {pair[1]};\\n\"\n\n for edge in edges:\n for shape in shapes.items():\n for node in range(2):\n if shape[0] in repr(edge[node]):\n dot_string += f'\"{edge[node]}\" [shape = {shape[1]}];\\n'\n dot_string += f'\"{edge[0]}\" -> \"{edge[1]}\";\\n'\n\n dot_string += \"}\\n\"\n return dot_string\n\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 166, "n_words": 62, "vocab_size": 39, "complexity": 8, "nloc": 16, "token_counts": 97, "n_ast_nodes": 214, "n_identifiers": 13, "random_cut": "def edges2dot(edges, shapes=None, attr=None):\n \n if not shapes:\n ", "d_id": 7567, "documentation": { "docstring": "\n :param edges: the set (or list) of edges of a directed graph.\n\n :return dot_string: a representation of 'edges' as a string in the DOT\n graph language, which can be converted to an image by the 'dot' program\n from the Graphviz package, or nltk.parse.dependencygraph.dot2img(dot_string).\n\n :param shapes: dictionary of strings that trigger a specified shape.\n :param attr: dictionary with global graph attributes\n\n >>> import nltk\n >>> from nltk.util import edges2dot\n >>> print(edges2dot([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')]))\n digraph G {\n \"A\" -> \"B\";\n \"A\" -> \"C\";\n \"B\" -> \"C\";\n \"C\" -> \"B\";\n }\n \n ", "n_words": 94, "vocab_size": 70, "n_whitespaces": 154, "language": "en" } }, { "id": 20356, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/formatters/img.py", "file_name": "img.py", "fun_name": "_draw_line_numbers", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update 
patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _draw_line_numbers(self):\n \n if not self.line_numbers:\n return\n for p in range(self.maxlineno):\n n = p + self.line_number_start\n if (n % self.line_number_step) == 0:\n self._draw_linenumber(p, n)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 23, "vocab_size": 21, "complexity": 4, "nloc": 7, "token_counts": 49, "n_ast_nodes": 80, "n_identifiers": 10, "random_cut": "def _draw_line_numbers(self):\n \n if not self.line_numbers:\n return\n for p in range(self.maxlineno):\n n = p + self.line_number_start\n if (n % self.line_number_step) == 0:\n", "d_id": 3341, "documentation": { "docstring": "\n Create drawables for the line numbers.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 189904, "commit_id": "a20f8aeb6ccd30d6b9d5c34285c3a718b0f5a59b", "repo": "manim", "path": "manim/cli/cfg/group.py", "file_name": "group.py", "fun_name": "export", "commit_message": "Migrate from os.path to pathlib in SVGMobject and other locations (#2687)\n\n* fixed style\r\n\r\n* fixed changes\r\n\r\n* Update group.py\r\n\r\n* Remove extra `Path` call\r\n\r\nCo-authored-by: ad_chaos <90276965+Kiran-Raj-Dev@users.noreply.github.com>\r\n\r\n* Remove unused context manager\r\n\r\nSorry, just committing here myself so that the PR can be reviewed and merged. This is the only thing left to alter so thought I might as well do it myself.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Use `with_suffix`\r\n\r\n* Remove extra `Path` calls\r\n\r\nCo-authored-by: ad_chaos <90276965+Kiran-Raj-Dev@users.noreply.github.com>\r\n\r\nCo-authored-by: Darylgolden \r\nCo-authored-by: Raghav Goel \r\nCo-authored-by: Raghav Goel \r\nCo-authored-by: ad_chaos <90276965+Kiran-Raj-Dev@users.noreply.github.com>\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def export(ctx, directory):\n directory_path = Path(directory)\n if directory_path.absolute == Path.cwd().absolute:\n console.print(\n ,\n style=\"red bold\",\n end=\"\",\n )\n proceed = input().lower() == \"y\"\n else:\n proceed = True\n if proceed:\n if not directory_path.is_dir():\n console.print(f\"Creating folder: {directory}.\", style=\"red bold\")\n directory_path.mkdir(parents=True)\n\n ctx.invoke(write)\n from_path = Path.cwd() / \"manim.cfg\"\n to_path = directory_path / \"manim.cfg\"\n\n console.print(f\"Exported final Config at {from_path} to {to_path}.\")\n else:\n console.print(\"Aborted...\", style=\"red bold\")\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 197, "n_words": 58, "vocab_size": 43, "complexity": 4, "nloc": 23, "token_counts": 126, "n_ast_nodes": 234, "n_identifiers": 21, "random_cut": "def export(ctx, directory):\n directory_path = Path(directory)\n if directory_path.absolute == Path.cwd().absolute:\n console.print(\n ,\n style=\"red bold\",\n end=\"\",\n )\n proc", "d_id": 46249, "documentation": { "docstring": "You are reading the config from the same directory you are exporting to.\nThis means that the exported config will overwrite 
the config for this directory.\nAre you sure you want to continue? (y/n)", "n_words": 34, "vocab_size": 26, "n_whitespaces": 31, "language": "en" } }, { "id": 109834, "commit_id": "0d6ee255831adae452af355c025497c0f07aa296", "repo": "matplotlib", "path": "lib/matplotlib/backend_managers.py", "file_name": "backend_managers.py", "fun_name": "update_keymap", "commit_message": "Add tests for ToolManager", "code": "def update_keymap(self, name, key):\n \n if name not in self._tools:\n raise KeyError(f'{name!r} not in Tools')\n self._remove_keys(name)\n if isinstance(key, str):\n key = [key]\n for k in key:\n if k in self._keys:\n _api.warn_external(\n f'Key {k} changed from {self._keys[k]} to {name}')\n self._keys[k] = name\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 153, "n_words": 40, "vocab_size": 31, "complexity": 5, "nloc": 11, "token_counts": 70, "n_ast_nodes": 135, "n_identifiers": 13, "random_cut": "def update_keymap(self, name, key):\n \n if name not in self._tools:\n raise KeyError(f'{name!r} not in Tools')\n ", "d_id": 23770, "documentation": { "docstring": "\n Set the keymap to associate with the specified tool.\n\n Parameters\n ----------\n name : str\n Name of the Tool.\n key : str or list of str\n Keys to associate with the tool.\n ", "n_words": 31, "vocab_size": 20, "n_whitespaces": 96, "language": "en" } }, { "id": 112161, "commit_id": "14d2966b9e91ae16dcc39de8f41017a75cec8ff9", "repo": "nni", "path": "nni/retiarii/oneshot/pytorch/supermodule/differentiable.py", "file_name": "differentiable.py", "fun_name": "named_parameters", "commit_message": "Valuechoice oneshot lightning (#4602)", "code": "def named_parameters(self, *args, **kwargs):\n \n arch = kwargs.pop('arch', False)\n for name, p in super().named_parameters(*args, **kwargs):\n if any(name == par_name for par_name in self._arch_parameter_names):\n if arch:\n yield name, p\n else:\n if not arch:\n yield name, p\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 145, "n_words": 34, "vocab_size": 22, "complexity": 6, "nloc": 9, "token_counts": 71, "n_ast_nodes": 117, "n_identifiers": 12, "random_cut": "def named_parameters(self, *args, **kwargs):\n \n arch = kwargs.pop('arch', False)\n for name, p in super().named_parameters(*args, **kwargs):\n if any(name == par_name for par_name in self._arch_parameter_names):\n if arch:\n ", "d_id": 24602, "documentation": { "docstring": "Named parameters excluding architecture parameters.", "n_words": 5, "vocab_size": 5, "n_whitespaces": 4, "language": "en" } }, { "id": 120182, "commit_id": "72470dee3a5181c8bfe0f0a4725564efbef80f92", "repo": "jax", "path": "jax/_src/util.py", "file_name": "util.py", "fun_name": "unzip3", "commit_message": "Comment on implementation of unzip2 & unzip3", "code": "def unzip3(xyzs):\n \n # Note: we deliberately don't use zip(*xyzs) because it is lazily evaluated,\n # is too permissive about inputs, and does not guarantee a length-3 output.\n xs = []\n ys = []\n zs = []\n for x, y, z in xyzs:\n xs.append(x)\n ys.append(y)\n zs.append(z)\n return tuple(xs), tuple(ys), tuple(zs)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 66, "n_words": 49, "vocab_size": 43, "complexity": 2, "nloc": 9, "token_counts": 60, "n_ast_nodes": 101, "n_identifiers": 10, "random_cut": "def 
unzip3(xyzs):\n \n # Note: we deliberately don't use zip(*xyzs) because it is lazily evaluated,\n # is too permissive about inputs, and does not guarantee a length-3 output.\n xs = []\n ys = []\n zs = []\n for x, y, z in xyzs:\n xs.append(x)\n ys.append", "d_id": 26788, "documentation": { "docstring": "Unzip sequence of length-3 tuples into three tuples.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 83182, "commit_id": "90e202cd38d00945c81da4730d39e3f5c5b1e8b1", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_json_get_subscribers_for_guest_user", "commit_message": "docs: Consistently hyphenate “web-public”.\n\nIn English, compound adjectives should essentially always be\nhyphenated. This makes them easier to parse, especially for users who\nmight not recognize that the words “web public” go together as a\nphrase.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_json_get_subscribers_for_guest_user(self) -> None:\n \n guest_user = self.example_user(\"polonius\")\n never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed\n\n # A guest user can only see never subscribed streams that are web-public.\n # For Polonius, the only web-public stream that he is not subscribed at\n # this point is Rome.\n self.assert_length(never_subscribed, 1)\n\n web_public_stream_id = never_subscribed[0][\"stream_id\"]\n result = self.client_get(f\"/json/streams/{web_public_stream_id}/members\")\n self.assert_json_success(result)\n result_dict = result.json()\n self.assertIn(\"subscribers\", result_dict)\n self.assertIsInstance(result_dict[\"subscribers\"], list)\n self.assertGreater(len(result_dict[\"subscribers\"]), 0)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 158, "n_words": 60, "vocab_size": 50, "complexity": 1, "nloc": 15, "token_counts": 98, "n_ast_nodes": 172, "n_identifiers": 18, "random_cut": "def test_json_get_subscribers_for_guest_user(self) -> None:\n \n guest_user = self.example_user(\"polonius\")\n never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed\n\n # A guest user can only see never subscribed streams that are web-public.\n # For Polonius, the only web-public stream that he is not subscribed at\n # this point is Rome.\n self.assert_length(never_subscribed, 1)\n\n web_public_stream_id = never_subscribed[0][\"stream_id\"]\n result = self.client_get(f\"/json/streams/{web_public_stream_id}/members\")\n self.assert_json_success(result)\n result_dict = result.json()\n self.assertIn(\"subscribers\", result_dict)\n self.assertIsInstance(result_dict[\"subscribers\"], list)\n self.assertG", "d_id": 17609, "documentation": { "docstring": "\n Guest users should have access to subscribers of web-public streams, even\n if they aren't subscribed or have never subscribed to that stream.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 44, "language": "en" } }, { "id": 226118, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/chart-studio/chart_studio/plotly/chunked_requests/chunked_request.py", "file_name": "chunked_request.py", "fun_name": "_reconnect", "commit_message": "switch to black .22", "code": "def _reconnect(self):\n \n if not self._isconnected():\n try:\n self._connect()\n except http_client.socket.error as e:\n # Attempt to reconnect if the connection was refused\n if e.errno == 61 or e.errno == 
10061:\n # errno 61 is the \"Connection Refused\" error\n time.sleep(self._delay)\n self._delay += self._delay # fibonacii delays\n self._tries += 1\n if self._tries < self.maxtries:\n self._reconnect()\n else:\n self._reset_retries()\n raise e\n else:\n # Unknown scenario\n raise e\n\n # Reconnect worked - reset _closed\n self._closed = False\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 402, "n_words": 70, "vocab_size": 53, "complexity": 6, "nloc": 17, "token_counts": 95, "n_ast_nodes": 166, "n_identifiers": 16, "random_cut": "def _reconnect(self):\n \n if not self._isconnected():\n try:\n self._connec", "d_id": 57798, "documentation": { "docstring": "Connect if disconnected.\n Retry self.maxtries times with delays\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 22, "language": "en" } }, { "id": 216173, "commit_id": "2bd6323ef5f87d871891a59917ee96f44ef55e75", "repo": "salt", "path": "salt/modules/cp.py", "file_name": "cp.py", "fun_name": "list_master", "commit_message": "fixes saltstack/salt#61562 cp functions derive saltenv from config", "code": "def list_master(saltenv=None, prefix=\"\"):\n \n if not saltenv:\n saltenv = __opts__[\"saltenv\"] or \"base\"\n return _client().file_list(saltenv, prefix)\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 30, "n_words": 14, "vocab_size": 14, "complexity": 3, "nloc": 4, "token_counts": 35, "n_ast_nodes": 63, "n_identifiers": 6, "random_cut": "def list_master(saltenv=None, prefix=\"\"):\n \n if not saltenv:\n ", "d_id": 54450, "documentation": { "docstring": "\n .. versionchanged:: 3005\n ``saltenv`` will use value from config if not explicitly set\n\n List all of the files stored on the master\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' cp.list_master\n ", "n_words": 30, "vocab_size": 28, "n_whitespaces": 60, "language": "en" } }, { "id": 8732, "commit_id": "f3fbfbbe7e4c772d60dbc4374811d3a959699f2b", "repo": "ludwig", "path": "tests/ludwig/utils/test_tokenizers.py", "file_name": "test_tokenizers.py", "fun_name": "test_bert_hf_tokenizer_parity", "commit_message": "[TorchScript] Add user-defined HF Bert tokenizers (#2733)\n\n* first working set\r\n\r\n* wip todo: add never_split kwarg\r\n\r\n* adds new never_split kwarg\r\n\r\n* clean up\r\n\r\n* get tests passing\r\n\r\n* updated py38 tests\r\n\r\n* pr revisions\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* logging > logger\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def test_bert_hf_tokenizer_parity(tmpdir, pretrained_model_name_or_path):\n \n from ludwig.utils.tokenizers import get_hf_tokenizer, HFTokenizer\n\n inputs = \"Hello, ``I'm'' ónë of 1,205,000 sentences!\"\n hf_tokenizer = HFTokenizer(pretrained_model_name_or_path)\n torchtext_tokenizer = get_hf_tokenizer(pretrained_model_name_or_path)\n\n # Ensure that the tokenizer is scriptable\n tokenizer_path = os.path.join(tmpdir, \"tokenizer.pt\")\n torch.jit.script(torchtext_tokenizer).save(tokenizer_path)\n torchtext_tokenizer = torch.jit.load(tokenizer_path)\n\n token_ids_expected = hf_tokenizer(inputs)\n token_ids = torchtext_tokenizer(inputs)\n\n assert token_ids_expected == token_ids\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 83, "n_words": 47, "vocab_size": 38, "complexity": 1, "nloc": 11, "token_counts": 84, "n_ast_nodes": 140, "n_identifiers": 22, "random_cut": "def test_bert_hf_tokenizer_parity(tmpdir, pretrained_model_name_or_path):\n \n from ludwig.utils.tokenizers import get_hf_tokenizer, HFTokenizer\n\n inputs = \"Hello, ``I'm'' ónë of 1,205,000 sentences!\"\n hf_tokenizer = HFTokenizer(pretrained_model_name_or_path)\n torchtext_tokenizer = get_hf_tokenizer(pretrained_model_name_or_path)\n\n # Ensure that the tokenizer is scriptable\n tokenizer_path = os.path.join(tmpdir, \"tokenizer.pt\")\n torch.jit.script(torchtext_tokenizer).save(tokenizer_path)\n torchtext_tokenizer = tor", "d_id": 1492, "documentation": { "docstring": "Tests the BERTTokenizer implementation.\n\n Asserts both tokens and token IDs are the same by initializing the BERTTokenizer as a standalone tokenizer and as a\n HF tokenizer.\n ", "n_words": 26, "vocab_size": 20, "n_whitespaces": 35, "language": "en" } }, { "id": 64790, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/bank_reconciliation_tool/bank_reconciliation_tool.py", "file_name": "bank_reconciliation_tool.py", "fun_name": "get_pe_matching_query", "commit_message": "style: format code with black", "code": "def get_pe_matching_query(amount_condition, account_from_to, transaction):\n\t# get matching payment entries query\n\tif transaction.deposit > 0:\n\t\tcurrency_field = \"paid_to_account_currency as currency\"\n\telse:\n\t\tcurrency_field = \"paid_from_account_currency as currency\"\n\treturn f\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 20, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 28, "token_counts": 27, "n_ast_nodes": 60, 
"n_identifiers": 6, "random_cut": "def get_pe_matching_query(amount_condition, account_from_to, transaction):\n\t# get matching payment entries query\n\tif transaction.deposit > 0:\n\t\tcurrency_field = \"paid_to_account_currency", "d_id": 13723, "documentation": { "docstring": "\n\tSELECT\n\t\t(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END\n\t\t+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END\n\t\t+ 1 ) AS rank,\n\t\t'Payment Entry' as doctype,\n\t\tname,\n\t\tpaid_amount,\n\t\treference_no,\n\t\treference_date,\n\t\tparty,\n\t\tparty_type,\n\t\tposting_date,\n\t\t{currency_field}\n\tFROM\n\t\t`tabPayment Entry`\n\tWHERE\n\t\tpaid_amount {amount_condition} %(amount)s\n\t\tAND docstatus = 1\n\t\tAND payment_type IN (%(payment_type)s, 'Internal Transfer')\n\t\tAND ifnull(clearance_date, '') = \"\"\n\t\tAND {account_from_to} = %(bank_account)s\n\t", "n_words": 68, "vocab_size": 50, "n_whitespaces": 48, "language": "en" } }, { "id": 203351, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/checks.py", "file_name": "checks.py", "fun_name": "_check_list_display_links", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _check_list_display_links(self, obj):\n \n from django.contrib.admin.options import ModelAdmin\n\n if obj.list_display_links is None:\n return []\n elif not isinstance(obj.list_display_links, (list, tuple)):\n return must_be(\n \"a list, a tuple, or None\",\n option=\"list_display_links\",\n obj=obj,\n id=\"admin.E110\",\n )\n # Check only if ModelAdmin.get_list_display() isn't overridden.\n elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:\n return list(\n chain.from_iterable(\n self._check_list_display_links_item(\n obj, field_name, \"list_display_links[%d]\" % index\n )\n for index, field_name in enumerate(obj.list_display_links)\n )\n )\n return []\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 334, "n_words": 60, "vocab_size": 50, "complexity": 5, "nloc": 21, "token_counts": 107, "n_ast_nodes": 168, "n_identifiers": 23, "random_cut": "def _check_list_display_links(self, obj):\n \n from django.contrib.admin.options import ModelAdmin\n\n if obj.list_display_links is None:\n return []\n elif not isinstance(obj.list_display_links, (list, tuple)):\n return must_be(\n \"a list, a tuple, or None\",\n option=\"list_display_links\",\n obj=obj,\n id=\"admin.E110\",\n )\n # Check only if ModelAdmin.get_list_display() isn't overridden.\n elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:\n return list(\n chain.from_iterable(\n self._check_list_display_links_item(\n", "d_id": 50325, "documentation": { "docstring": "Check that list_display_links is a unique subset of list_display.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 290233, "commit_id": "83c6a7e18b1b0e4d5a302e304f117dee11d3aa51", "repo": "core", "path": "homeassistant/components/zha/core/channels/lighting.py", "file_name": "lighting.py", "fun_name": "min_mireds", "commit_message": "Fix invalid min and max color temp in bad ZHA light devices (#81604)\n\n* Fix ZHA default color temps\n\n* update test", "code": "def min_mireds(self) -> int:\n \n min_mireds = self.cluster.get(\"color_temp_physical_min\", self.MIN_MIREDS)\n if min_mireds == 0:\n self.warning(\n \"[Min mireds is 0, setting to %s] Please open an issue on the quirks repo to have 
this device corrected\",\n self.MIN_MIREDS,\n )\n min_mireds = self.MIN_MIREDS\n return min_mireds\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 131, "n_words": 40, "vocab_size": 35, "complexity": 2, "nloc": 10, "token_counts": 45, "n_ast_nodes": 76, "n_identifiers": 7, "random_cut": "def min_mireds(self) -> int:\n \n min_mireds = self.cluster.get(\"color_temp_physical_min\", self.MIN_MIREDS)\n if min_mireds == 0:\n self.warning(\n \"", "d_id": 89351, "documentation": { "docstring": "Return the coldest color_temp that this channel supports.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 184318, "commit_id": "ff55dafb8638f6674f3662aa526a5fc35a007b24", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "pop_screen", "commit_message": "prototype screens api", "code": "def pop_screen(self) -> Screen:\n \n screen_stack = self._screen_stack\n if len(screen_stack) <= 1:\n raise ScreenStackError(\n \"Can't pop screen; there must be at least one screen on the stack\"\n )\n screen = screen_stack.pop()\n screen.post_message_no_wait(events.ScreenSuspend(self))\n self.screen._screen_resized(self.size)\n self.screen.post_message_no_wait(events.ScreenResume(self))\n return screen\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 128, "n_words": 35, "vocab_size": 32, "complexity": 2, "nloc": 16, "token_counts": 69, "n_ast_nodes": 117, "n_identifiers": 15, "random_cut": "def pop_screen(self) -> Screen:\n \n screen_stack = self._screen_stack\n if len(screen_stack) <= 1:\n raise ScreenStackError(\n \"Can't pop screen; there must be at least one screen on the stack\"\n )\n screen = screen_stack.pop()\n screen.post_me", "d_id": 44547, "documentation": { "docstring": "Pop the current screen from the stack, and switch to the previous screen.\n\n Returns:\n Screen: The screen that was replaced.\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 45, "language": "en" } }, { "id": 246234, "commit_id": "64ec45fc1b0856dc7daacca7d3ab75d50bd89f84", "repo": "synapse", "path": "tests/handlers/test_appservice.py", "file_name": "test_appservice.py", "fun_name": "test_notify_interested_services_ephemeral", "commit_message": "Send to-device messages to application services (#11215)\n\nCo-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>", "code": "def test_notify_interested_services_ephemeral(self):\n \n interested_service = self._mkservice(is_interested=True)\n services = [interested_service]\n self.mock_store.get_app_services.return_value = services\n self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable(\n 579\n )\n\n event = Mock(event_id=\"event_1\")\n self.event_source.sources.receipt.get_new_events_as.return_value = (\n make_awaitable(([event], None))\n )\n\n self.handler.notify_interested_services_ephemeral(\n \"receipt_key\", 580, [\"@fakerecipient:example.com\"]\n )\n self.mock_scheduler.enqueue_for_appservice.assert_called_once_with(\n interested_service, ephemeral=[event]\n )\n self.mock_store.set_appservice_stream_type_pos.assert_called_once_with(\n interested_service,\n \"read_receipt\",\n 580,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 221, "n_words": 39, "vocab_size": 27, 
"complexity": 1, "nloc": 22, "token_counts": 119, "n_ast_nodes": 192, "n_identifiers": 25, "random_cut": "def test_notify_interested_services_ephemeral(self):\n \n interested_service = self._mkservice(is_interested=True)\n services = [interested_service]\n self.mock_store.get_app_services.return_value = services\n self.mock_store.get_type_stream_id_for_appservice.return_v", "d_id": 71115, "documentation": { "docstring": "\n Test sending ephemeral events to the appservice handler are scheduled\n to be pushed out to interested appservices, and that the stream ID is\n updated accordingly.\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 54, "language": "en" } }, { "id": 61868, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/compat.py", "file_name": "compat.py", "fun_name": "convert", "commit_message": "upd; format", "code": "def convert(self, value):\n \n if not isinstance(value, ConvertingDict) and isinstance(value, dict):\n value = ConvertingDict(value)\n value.configurator = self\n elif not isinstance(value, ConvertingList) and isinstance(value, list):\n value = ConvertingList(value)\n value.configurator = self\n elif not isinstance(value, ConvertingTuple) and\\\n isinstance(value, tuple):\n value = ConvertingTuple(value)\n value.configurator = self\n elif isinstance(value, string_types):\n m = self.CONVERT_PATTERN.match(value)\n if m:\n d = m.groupdict()\n prefix = d['prefix']\n converter = self.value_converters.get(prefix, None)\n if converter:\n suffix = d['suffix']\n converter = getattr(self, converter)\n value = converter(suffix)\n return value\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 425, "n_words": 74, "vocab_size": 40, "complexity": 10, "nloc": 22, "token_counts": 161, "n_ast_nodes": 256, "n_identifiers": 23, "random_cut": "def convert(self, value):\n \n if not isinstance(value, ConvertingDict) and isinstance(value, dict):\n value = ConvertingDict(value)\n value.con", "d_id": 12732, "documentation": { "docstring": "\n Convert values to an appropriate type. dicts, lists and tuples are\n replaced by their converting alternatives. 
Strings are checked to\n see if they have a conversion format and are converted if they do.\n ", "n_words": 33, "vocab_size": 27, "n_whitespaces": 78, "language": "en" } }, { "id": 181983, "commit_id": "644fdc7ed181a22773556236e83fb5343efe8fd5", "repo": "textual", "path": "tests/test_css_parse.py", "file_name": "test_css_parse.py", "fun_name": "test_parse_transition", "commit_message": "Stop parsing time as scalar", "code": "def test_parse_transition(duration, parsed_duration):\n css = f\n stylesheet = Stylesheet()\n stylesheet.parse(css)\n\n rule = stylesheet.rules[0].styles\n\n assert len(stylesheet.rules) == 1\n assert len(stylesheet.rules[0].errors) == 0\n assert rule.transitions == {\n \"offset\": Transition(duration=parsed_duration, easing=\"in_out_cubic\", delay=0.0)\n }\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 30, "vocab_size": 24, "complexity": 1, "nloc": 13, "token_counts": 80, "n_ast_nodes": 130, "n_identifiers": 16, "random_cut": "def test_parse_transition(duration, parsed_duration):\n css = f\n stylesheet = Stylesheet()\n stylesheet.parse(css)\n\n rule = stylesheet.rules[0].styles\n\n assert len(stylesheet.rules) == 1\n assert len(stylesheet.rule", "d_id": 43706, "documentation": { "docstring": "#some-widget {{\n transition: offset {duration} in_out_cubic;\n }}\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 20, "language": "en" } }, { "id": 247396, "commit_id": "7e91107be1a4287873266e588a3c5b415279f4c8", "repo": "synapse", "path": "tests/rest/media/v1/test_html_preview.py", "file_name": "test_html_preview.py", "fun_name": "test_meta_charset", "commit_message": "Add type hints to `tests/rest` (#12146)\n\n* Add type hints to `tests/rest`\r\n\r\n* newsfile\r\n\r\n* change import from `SigningKey`", "code": "def test_meta_charset(self) -> None:\n \n encodings = _get_html_media_encodings(\n b,\n \"text/html\",\n )\n self.assertEqual(list(encodings), [\"ascii\", \"utf-8\", \"cp1252\"])\n\n # A less well-formed version.\n encodings = _get_html_media_encodings(\n b,\n \"text/html\",\n )\n self.assertEqual(list(encodings), [\"ascii\", \"utf-8\", \"cp1252\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 129, "n_words": 29, "vocab_size": 19, "complexity": 1, "nloc": 22, "token_counts": 62, "n_ast_nodes": 111, "n_identifiers": 6, "random_cut": "def test_meta_charset(self) -> None:\n \n encodings = _get_html_media_encodings(\n b,\n \"text/html\",\n )\n self.assertEqual(list(encodings), [\"ascii\", \"utf-8\", \"cp1252\"])\n\n # A less well-form", "d_id": 71652, "documentation": { "docstring": "A character encoding is found via the meta tag.\n \n \n \n \n \n \n < meta charset = ascii>\n \n \n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 93, "language": "en" } }, { "id": 70182, "commit_id": "1aa5596cc25fbd74cac65c5e4d6b16bd90091138", "repo": "glances", "path": "glances/amps_list.py", "file_name": "amps_list.py", "fun_name": "_build_amps_list", "commit_message": "AMP: regex with special chars #2152", "code": "def _build_amps_list(self, amp_value, processlist):\n \n ret = []\n try:\n # Search in both cmdline and name (for kernel thread, see #1261)\n for p in processlist:\n if (re.search(amp_value.regex(), p['name']) is not None) or (\n p['cmdline'] is not None\n and p['cmdline'] != []\n and re.search(amp_value.regex(), ' '.join(p['cmdline'])) is not None\n 
):\n ret.append(\n {'pid': p['pid'], 'cpu_percent': p['cpu_percent'], 'memory_percent': p['memory_percent']}\n )\n\n except (TypeError, KeyError) as e:\n logger.debug(\"Can not build AMPS list ({})\".format(e))\n\n return ret\n", "url": "https://github.com/nicolargo/glances.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 285, "n_words": 69, "vocab_size": 57, "complexity": 7, "nloc": 15, "token_counts": 134, "n_ast_nodes": 228, "n_identifiers": 17, "random_cut": "def _build_amps_list(self, amp_value, processlist):\n \n ret = []\n try:\n # Search in both cmdline and name (for kernel thread, see #1261)\n for p in processlist:\n if (re.search(amp_value.regex(), p['name']) is not None) or (\n p['cmdline'] is not None\n and p['cmdline'] != []\n and re.search(amp_value.regex(), ' '.join(p['cmdline'])) is not None\n ):\n ", "d_id": 15407, "documentation": { "docstring": "Return the AMPS process list according to the amp_value\n\n Search application monitored processes by a regular expression\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 31, "language": "en" } }, { "id": 270689, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "call", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument\n \n return inputs\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 25, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 27, "n_identifiers": 5, "random_cut": "def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument\n \n return inputs\n", "d_id": 80526, "documentation": { "docstring": "This is where the layer's logic lives.\n\n The `call()` method may not create state (except in its first invocation,\n wrapping the creation of variables or other resources in `tf.init_scope()`).\n It is recommended to create state in `__init__()`, or the `build()` method\n that is called automatically before `call()` executes the first time.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n The first positional `inputs` argument is subject to special rules:\n - `inputs` must be explicitly passed. A layer cannot have zero\n arguments, and `inputs` cannot be provided via the default value\n of a keyword argument.\n - NumPy array or Python scalar values in `inputs` get cast as tensors.\n - Keras mask metadata is only collected from `inputs`.\n - Layers are built (`build(input_shape)` method)\n using shape info from `inputs` only.\n - `input_spec` compatibility is only checked against `inputs`.\n - Mixed precision input casting is only applied to `inputs`.\n If a layer has tensor arguments in `*args` or `**kwargs`, their\n casting behavior in mixed precision should be handled manually.\n - The SavedModel input specification is generated using `inputs` only.\n - Integration with various ecosystem packages like TFMOT, TFLite,\n TF.js, etc is only supported for `inputs` and not for tensors in\n positional and keyword arguments.\n *args: Additional positional arguments. May contain tensors, although\n this is not recommended, for the reasons above.\n **kwargs: Additional keyword arguments. 
May contain tensors, although\n this is not recommended, for the reasons above.\n The following optional keyword arguments are reserved:\n - `training`: Boolean scalar tensor of Python boolean indicating\n whether the `call` is meant for training or inference.\n - `mask`: Boolean input mask. If the layer's `call()` method takes a\n `mask` argument, its default value will be set to the mask generated\n for `inputs` by the previous layer (if `input` did come from a layer\n that generated a corresponding mask, i.e. if it came from a Keras\n layer with masking support).\n\n Returns:\n A tensor or list/tuple of tensors.\n ", "n_words": 319, "vocab_size": 177, "n_whitespaces": 714, "language": "en" } }, { "id": 116155, "commit_id": "02a831997cdffafca7cb160eb1938e72020ee049", "repo": "mindsdb", "path": "tests/unit/test_executor.py", "file_name": "test_executor.py", "fun_name": "test_use_predictor_with_view", "commit_message": "executor tests", "code": "def test_use_predictor_with_view(self, mock_handler):\n # set integration data\n\n df = pd.DataFrame([\n {'a': 1, 'b': 'one'},\n {'a': 2, 'b': 'two'},\n {'a': 1, 'b': 'three'},\n ])\n self.set_handler(mock_handler, name='pg', tables={'tasks': df})\n\n view_name = 'vtasks'\n # --- create view ---\n ret = self.command_executor.execute_command(parse_sql(\n f'create view {view_name} (select * from pg (select * from tasks))',\n dialect='mindsdb')\n )\n assert ret.error_code is None\n\n # --- use predictor ---\n predicted_value = 3.14\n predictor = {\n 'name': 'task_model',\n 'predict': 'p',\n 'dtypes': {\n 'p': dtype.float,\n 'a': dtype.integer,\n 'b': dtype.categorical\n },\n 'predicted_value': predicted_value\n }\n self.set_predictor(predictor)\n ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))\n assert ret.error_code is None\n\n # native query was called\n assert mock_handler().native_query.mock_calls[0].args[0] == 'select * from tasks'\n\n # check predictor call\n\n # model was called\n assert self.mock_model_interface.predict.mock_calls[0].args[0] == 'task_model'\n\n # input = one row whit a==2\n when_data = self.mock_model_interface.predict.mock_calls[0].args[1]\n assert len(when_data) == 1\n assert when_data[0]['a'] == 2\n\n # check prediction\n assert ret.data[0][0] == predicted_value\n assert len(ret.data) == 1\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 494, "n_words": 144, "vocab_size": 90, "complexity": 1, "nloc": 39, "token_counts": 254, "n_ast_nodes": 442, "n_identifiers": 31, "random_cut": "def test_use_predictor_with_view(self, mock_handler):\n # set integration data\n\n df = pd.DataFrame([\n {'a': 1, 'b': 'one'},\n {'a': 2, 'b': 'two'},\n {'a': 1, 'b': 'three'},\n ])\n self.set_handler(mock_handler, name='pg', tables={'tasks': df})\n\n view_name = 'vtasks'\n # --- create view ---\n ret = self.command_executor.execute_command(parse_sql(\n f'create view {view_name} (select * from pg (select * from tasks))',\n dialect='mindsdb')\n )\n assert ret.error_code is None\n\n # --- use predictor ---\n predicted_value = 3.14\n predictor = {\n 'name': 'task_model',\n 'predict': 'p',\n 'dtypes': {\n 'p': dtype.float,\n 'a': dtype.integer,\n 'b': dtype.categorical\n },\n 'predicted_value': predicted_value\n }\n self.set_predictor(predictor)\n ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))\n assert ret.error_code is None\n\n # native query was called\n assert 
mock_handler().native_query.mock_calls[0].args[0] == 'select * from tasks'\n\n # check predictor call\n\n # model was called\n assert self.mock_model_interface.predict.mock_calls[0].args[0] == 'task_model'\n\n # input = one row whit a==2\n when_data = self.mock_model_interface.predict.mock_calls[0].args[1]\n assert len(when_data) == 1\n assert when_data[0]['a'] == 2\n\n # check prediction\n assert ret.data[0][0] =", "d_id": 25676, "documentation": { "docstring": "\n select task_model.p \n from views.{view_name}\n join mindsdb.task_model\n where {view_name}.a = 2\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 59, "language": "en" } }, { "id": 125895, "commit_id": "8ddcf89096e5631c6b6e0d04dc094b458a15c9f9", "repo": "ray", "path": "rllib/connectors/tests/test_agent.py", "file_name": "test_agent.py", "fun_name": "test_vr_connector_shift_by_one", "commit_message": "[RLlib] Implemented ViewRequirementConnector (#26998)", "code": "def test_vr_connector_shift_by_one(self):\n \n view_rq_dict = {\n \"state\": ViewRequirement(\"obs\"),\n \"next_state\": ViewRequirement(\n \"obs\", shift=1, used_for_compute_actions=False\n ),\n \"prev_state\": ViewRequirement(\"obs\", shift=-1),\n }\n\n obs_arrs = np.arange(10)[:, None] + 1\n config = PPOConfig().to_dict()\n ctx = ConnectorContext(\n view_requirements=view_rq_dict, config=config, is_policy_recurrent=True\n )\n c = ViewRequirementAgentConnector(ctx)\n\n # keep a running list of observations\n obs_list = []\n for t, obs in enumerate(obs_arrs):\n # t=0 is the next state of t=-1\n data = AgentConnectorDataType(\n 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1}\n )\n processed = c([data]) # env.reset() for t == -1 else env.step()\n for_action = processed[0].data.for_action\n # add cur obs to the list\n obs_list.append(obs)\n\n if t == 0:\n check(for_action[\"prev_state\"], for_action[\"state\"])\n else:\n # prev state should be equal to the prev time step obs\n check(for_action[\"prev_state\"], obs_list[-2][None])\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 421, "n_words": 114, "vocab_size": 87, "complexity": 3, "nloc": 26, "token_counts": 187, "n_ast_nodes": 308, "n_identifiers": 31, "random_cut": "def test_vr_connector_shift_by_one(self):\n \n view_rq_dict = {\n \"state\": ViewRequirement(\"obs\"),\n \"next_state\": ViewRequirement(\n \"obs\", shift=1, used_for_compute_actions=False\n ),\n \"prev_state\": ViewRequirement(\"obs\", shift=-1),\n }\n\n obs_arrs = np.arange(10)[:, None] + 1\n config = PPOConfig().to_dict()\n ctx = ConnectorContext(\n view_requirements=view_rq_dict, config=config, is_policy_recurrent=True\n )\n c = ViewRequirementAgentConnector(ctx)\n\n # keep a running list of observations\n obs_list = []\n for t, obs in enumerate(obs_arrs):\n # t=0 is the next state of t=-1\n data = AgentConnectorDataType(\n 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1}\n )\n process", "d_id": 28017, "documentation": { "docstring": "Test that the ViewRequirementConnector can handle shift by one correctly and\n can ignore future referencing view_requirements to respect causality", "n_words": 19, "vocab_size": 18, "n_whitespaces": 25, "language": "en" } }, { "id": 91232, "commit_id": "8b9bcdc92d8ff23ec9f44d90d14348d9464d476b", "repo": "sentry", "path": "tools/flake8_plugin.py", "file_name": "flake8_plugin.py", "fun_name": "adapt_error", "commit_message": "Revert \"ref: simplify and type flake8 plugin (#35645)\" (#35651)", "code": 
"def adapt_error(cls, e):\n \n return e._replace(message=e.message.format(*e.vars))[:4]\n\n\nerror = namedtuple(\"error\", \"lineno col message type vars\")\nError = partial(partial, error, message=\"\", type=SentryCheck, vars=())\n\nS001 = Error(\n message=\"S001: Avoid using the {} mock call as it is \"\n \"confusing and prone to causing invalid test \"\n \"behavior.\"\n)\nS001.methods = {\n \"not_called\",\n \"called_once\",\n \"called_once_with\",\n}\n\nS002 = Error(message=\"S002: print functions or statements are not allowed.\")\n\nS003 = Error(message=\"S003: Use ``from sentry.utils import json`` instead.\")\nS003.modules = {\"json\", \"simplejson\"}\nS003.names = {\n \"load\",\n \"loads\",\n \"dump\",\n \"dumps\",\n \"JSONEncoder\",\n \"JSONDecodeError\",\n \"_default_encoder\",\n}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 127, "n_words": 85, "vocab_size": 75, "complexity": 1, "nloc": 2, "token_counts": 31, "n_ast_nodes": 227, "n_identifiers": 19, "random_cut": "def adapt_error(cls, e):\n \n return e._replace(message=e.message.format(*e.vars))[:4]\n\n\nerror = namedtuple(\"error\", \"lineno col message type vars\")\nError = partial(partial, error, message=\"\", type=SentryCheck, vars=())\n\nS001 = Error(\n message=\"S001: Avoid us", "d_id": 18743, "documentation": { "docstring": "Adapts the extended error namedtuple to be compatible with Flake8.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 269925, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks.py", "file_name": "callbacks.py", "fun_name": "on_train_begin", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def on_train_begin(self, logs=None):\n \n logs = self._process_logs(logs)\n for callback in self.callbacks:\n callback.on_train_begin(logs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 43, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 31, "n_ast_nodes": 51, "n_identifiers": 6, "random_cut": "def on_train_begin(self, logs=None):\n \n logs = self._process_logs(logs)\n for callback in self.callbacks:\n callback.on_train_begin(logs)\n", "d_id": 80335, "documentation": { "docstring": "Calls the `on_train_begin` methods of its callbacks.\n\n Args:\n logs: Dict. 
Currently, no data is passed via this argument\n for this method, but that may change in the future.\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 66, "language": "en" } }, { "id": 260093, "commit_id": "122876e9ab1ab494b4bf0ca3360d5a1527caf2e7", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_param_validation.py", "file_name": "test_param_validation.py", "fun_name": "test_decorate_validated_function", "commit_message": "MNT Param validation: do not expose internal values in error msg (#23459)\n\n* allow to not expose internal valid params in error msg\n\n* ensure deprecated and internal do not overlap\n\n* deprecated and internal must be subsets of options\n\n* black", "code": "def test_decorate_validated_function():\n \n decorated_function = deprecated()(_func)\n\n with pytest.warns(FutureWarning, match=\"Function _func is deprecated\"):\n decorated_function(1, 2, c=3)\n\n # outer decorator does not interfer with validation\n with pytest.warns(FutureWarning, match=\"Function _func is deprecated\"):\n with pytest.raises(ValueError, match=r\"The 'c' parameter of _func must be\"):\n decorated_function(1, 2, c=\"wrong\")\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 80, "n_words": 40, "vocab_size": 29, "complexity": 1, "nloc": 7, "token_counts": 70, "n_ast_nodes": 123, "n_identifiers": 11, "random_cut": "def test_decorate_validated_function():\n \n decorated_function = deprecated()(_func)\n\n with pytest.warns(FutureWarning, match=\"Function _func is deprecated\"):\n decorated_function(1, 2, c=3)\n\n # outer decorator does not inte", "d_id": 76068, "documentation": { "docstring": "Check that validate_params functions can be decorated", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 253816, "commit_id": "7487da3edb1a68af60104e0290216f0849a8765c", "repo": "d2l-en", "path": "d2l/jax.py", "file_name": "jax.py", "fun_name": "set_axes", "commit_message": "[Jax] Add calculus", "code": "def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):\n \n axes.set_xlabel(xlabel), axes.set_ylabel(ylabel)\n axes.set_xscale(xscale), axes.set_yscale(yscale)\n axes.set_xlim(xlim), axes.set_ylim(ylim)\n if legend:\n axes.legend(legend)\n axes.grid()\n", "url": "https://github.com/d2l-ai/d2l-en.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 48, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 73, "n_ast_nodes": 111, "n_identifiers": 16, "random_cut": "def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):\n \n axes.set_xlabel(xlabel), axes.set_ylabel(ylabel)\n axes.set_xscale(xscale), axes.set_yscale(yscale)\n axes.set_xlim(xlim), axes.set_ylim(ylim)\n if legend:\n axes.legend(legend)\n axes.grid()\n", "d_id": 74253, "documentation": { "docstring": "Set the axes for matplotlib.\n\n Defined in :numref:`sec_calculus`", "n_words": 8, "vocab_size": 8, "n_whitespaces": 10, "language": "en" } }, { "id": 250551, "commit_id": "ee4999e8e4380f7b67faef92f04c361deffba412", "repo": "mitmproxy", "path": "mitmproxy/addonmanager.py", "file_name": "addonmanager.py", "fun_name": "register", "commit_message": "Rename new async helper functions.\n\nasync_trigger -> trigger_event\ninvoke_addon -> invoke_addon_sync (API breakage)\nasync_invoke_addon -> invoke_addon", "code": "def register(self, addon):\n \n api_changes = {\n # mitmproxy 6 -> mitmproxy 7\n 
\"clientconnect\": \"client_connected\",\n \"clientdisconnect\": \"client_disconnected\",\n \"serverconnect\": \"server_connect and server_connected\",\n \"serverdisconnect\": \"server_disconnected\",\n }\n for a in traverse([addon]):\n for old, new in api_changes.items():\n if hasattr(a, old):\n ctx.log.warn(f\"The {old} event has been removed, use {new} instead. \"\n f\"For more details, see https://docs.mitmproxy.org/stable/addons-events/.\")\n name = _get_name(a)\n if name in self.lookup:\n raise exceptions.AddonManagerError(\n \"An addon called '%s' already exists.\" % name\n )\n l = Loader(self.master)\n self.invoke_addon_sync(addon, LoadHook(l))\n for a in traverse([addon]):\n name = _get_name(a)\n self.lookup[name] = a\n for a in traverse([addon]):\n self.master.commands.collect_commands(a)\n self.master.options.process_deferred()\n return addon\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 397, "n_words": 91, "vocab_size": 68, "complexity": 7, "nloc": 26, "token_counts": 164, "n_ast_nodes": 283, "n_identifiers": 27, "random_cut": "def register(self, addon):\n \n api_changes = {\n # mitmproxy 6 -> mitmproxy 7\n \"clientconnect\": \"client_connected\",\n \"clientdisconnect\": \"client_disconnected\",\n \"serverconnect\": \"server_connect and server_connected\",\n \"serverdisconnect\": \"server_disconnected\",\n }\n for a in traverse([addon]):\n for old, new in api_changes.items():\n if hasattr(a, old):\n ctx.log.warn(f\"The {old} event has been removed, use {new} instead. \"\n f\"For more details, see https://docs.mitmproxy.or", "d_id": 73504, "documentation": { "docstring": "\n Register an addon, call its load event, and then register all its\n sub-addons. This should be used by addons that dynamically manage\n addons.\n\n If the calling addon is already running, it should follow with\n running and configure events. 
Must be called within a current\n context.\n ", "n_words": 45, "vocab_size": 41, "n_whitespaces": 119, "language": "en" } }, { "id": 188689, "commit_id": "08ff8fa285575b8ca5ee187d297d807bf197a161", "repo": "jumpserver", "path": "apps/authentication/views/login.py", "file_name": "login.py", "fun_name": "get_context_data", "commit_message": "fix: login confirm bug (#7914)\n\nCo-authored-by: feng626 <1304903146@qq.com>", "code": "def get_context_data(self, **kwargs):\n from tickets.models import Ticket\n from tickets.const import TICKET_DETAIL_URL\n ticket_id = self.request.session.get(\"auth_ticket_id\")\n if not ticket_id:\n ticket = None\n else:\n ticket = Ticket.all().filter(pk=ticket_id).first()\n context = super().get_context_data(**kwargs)\n if ticket:\n timestamp_created = datetime.datetime.timestamp(ticket.date_created)\n ticket_detail_url = TICKET_DETAIL_URL.format(id=ticket_id, type=ticket.type)\n assignees = ticket.current_node.first().ticket_assignees.all()\n assignees_display = ', '.join([str(i.assignee) for i in assignees])\n msg = _().format(assignees_display)\n else:\n timestamp_created = 0\n ticket_detail_url = ''\n msg = _(\"No ticket found\")\n context.update({\n \"msg\": msg,\n \"timestamp\": timestamp_created,\n \"ticket_detail_url\": ticket_detail_url\n })\n return context\n\n\n@method_decorator(never_cache, name='dispatch')", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "@method_decorator(never_cache, name='dispatch')", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 292, "n_words": 74, "vocab_size": 52, "complexity": 4, "nloc": 26, "token_counts": 180, "n_ast_nodes": 320, "n_identifiers": 41, "random_cut": "def get_context_data(self, **kwargs):\n from tickets.models import Ticket\n from tickets.const import TICKET_DETAIL_URL\n ticket_id = self.request.session.get(\"auth_ticket_id\")\n if not ticket_id:\n ", "d_id": 45927, "documentation": { "docstring": "Wait for {} confirm, You also can copy link to her/him
    \n Don't close this page", "n_words": 16, "vocab_size": 16, "n_whitespaces": 32, "language": "en" } }, { "id": 31457, "commit_id": "7cced021fa8ddc59f0f77384300760d34545394e", "repo": "transformers", "path": "src/transformers/modeling_tf_utils.py", "file_name": "modeling_tf_utils.py", "fun_name": "tf_shard_checkpoint", "commit_message": "TF Sharded (#17713)\n\n* initial commit\r\n\r\n* update modeeling tf utils\r\n\r\n* quality\r\n\r\n* clean and update args\r\n\r\n* update\r\n\r\n* remove potential bug\r\n\r\n* code quality\r\n\r\n* update\r\n\r\n* update max shard\r\n\r\n* update tests for sharding from pretrained\r\n\r\n* fix remaining test\r\n\r\n* make style\r\n\r\n* h5py if tf available\r\n\r\n* update and fix test\r\n\r\n* fix test\r\n\r\n* style\r\n\r\n* modified push to hub to support shard for TF\r\n\r\n* quick fix\r\n\r\n* update code\r\n\r\n* merge branch main and style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Patrick von Platen \r\n\r\n* update based on reviews\r\n\r\n* update doc\r\n\r\n* update and style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update based on reviews\r\n\r\n* fix typo\r\n\r\n* style\r\n\r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def tf_shard_checkpoint(weights, max_shard_size=\"10GB\"):\n \n max_shard_size = convert_file_size_to_int(max_shard_size)\n\n sharded_state_dicts = []\n current_block = []\n current_block_size = 0\n total_size = 0\n\n for item in weights:\n weight_size = item.numpy().size * dtype_byte_size(item.dtype)\n\n # If this weight is going to tip up over the maximal size, we split.\n if current_block_size + weight_size > max_shard_size:\n sharded_state_dicts.append(current_block)\n current_block = []\n current_block_size = 0\n\n current_block.append(item)\n current_block_size += weight_size\n total_size += weight_size\n\n # Add the last block\n sharded_state_dicts.append(current_block)\n\n # If we only have one shard, we return it\n if len(sharded_state_dicts) == 1:\n return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None\n\n # Otherwise, let's build the index\n weight_map = {}\n shards = {}\n for idx, shard in enumerate(sharded_state_dicts):\n shard_file = TF2_WEIGHTS_NAME.replace(\".h5\", f\"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5\")\n shards[shard_file] = shard\n for weight in shard:\n weight_name = weight.name\n weight_map[weight_name] = shard_file\n\n # Add the metadata\n metadata = {\"total_size\": total_size}\n index = {\"metadata\": metadata, \"weight_map\": weight_map}\n return shards, index\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 319, "n_words": 137, "vocab_size": 83, "complexity": 6, "nloc": 29, "token_counts": 181, "n_ast_nodes": 324, "n_identifiers": 29, "random_cut": "def tf_shard_checkpoint(weights, max_shard_size=\"10GB\"):\n \n max_shard_size = convert_file_size_to_int(max_shard_size)\n\n sharded_state_dicts = []\n current_block = []\n current_block_size = 0\n total_size = 0\n\n for item in weights:\n weight_size = item.numpy().size * dtype_byte_size(item.dtype)\n\n # If this weight is going to tip up over the maximal size, we split.\n if current_block_size + weight_size > max_shard_size:\n sharded_state_dicts.append(current_block)\n 
current_block = []\n current_block_size = 0\n\n current_block.append(item)\n current_block_size += weight_size\n total_size += weight_size\n\n # Add the last block\n sharded_state_dicts.append(current_block)\n\n # If we only have one shard, we return it\n if len(sharded_state_dicts) == 1:\n return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None\n\n # Otherwise, let's build the index\n weight_map = {}\n shards = {}\n for idx, shard in enumerate(sharded_state_dicts):\n shard_file = TF2_WEIGHTS_NAME.", "d_id": 5748, "documentation": { "docstring": "\n Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a\n given size.\n\n The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no\n optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the\n limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],\n [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].\n\n \n\n If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will\n have a size greater than `max_shard_size`.\n\n \n\n Args:\n weights (`Dict[str, tf.RessourceVariable]`): The list of tf.RessourceVariable of a model to save.\n max_shard_size (`int` or `str`, *optional*, defaults to `\"10GB\"`):\n The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit\n (like `\"5MB\"`).\n ", "n_words": 158, "vocab_size": 105, "n_whitespaces": 231, "language": "en" } }, { "id": 196261, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/curve.py", "file_name": "curve.py", "fun_name": "scale", "commit_message": "Updated import locations", "code": "def scale(self, x=1, y=1, pt=None):\n \n if pt:\n pt = Point(pt, dim=2)\n return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)\n fx, fy = self.functions\n return self.func((fx*x, fy*y), self.limits)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 72, "n_words": 22, "vocab_size": 20, "complexity": 2, "nloc": 6, "token_counts": 85, "n_ast_nodes": 130, "n_identifiers": 14, "random_cut": "def scale(self, x=1, y=1, pt=None):\n \n if pt:\n pt = Point(pt, dim=2)\n return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)\n fx, ", "d_id": 47761, "documentation": { "docstring": "Override GeometryEntity.scale since Curve is not made up of Points.\n\n Returns\n =======\n\n Curve :\n returns scaled curve.\n\n Examples\n ========\n\n >>> from sympy import Curve\n >>> from sympy.abc import x\n >>> Curve((x, x), (x, 0, 1)).scale(2)\n Curve((2*x, x), (x, 0, 1))\n\n ", "n_words": 40, "vocab_size": 31, "n_whitespaces": 121, "language": "en" } }, { "id": 197428, "commit_id": "5afe37b3dee65554d7e4592c20f922cb551fde31", "repo": "sympy", "path": "sympy/physics/vector/frame.py", "file_name": "frame.py", "fun_name": "orient_body_fixed", "commit_message": "Restores pre-1.7 simplify behavior for orient_body_fixed()", "code": "def orient_body_fixed(self, parent, angles, rotation_order):\n \n\n _check_frame(parent)\n\n amounts = list(angles)\n for i, v in enumerate(amounts):\n if not isinstance(v, Vector):\n amounts[i] = sympify(v)\n\n approved_orders = ('123', '231', '312', '132', '213', '321', '121',\n '131', '212', '232', '313', '323', '')\n # make sure XYZ => 
123\n rot_order = translate(str(rotation_order), 'XYZxyz', '123123')\n if rot_order not in approved_orders:\n raise TypeError('The rotation order is not a valid order.')\n\n parent_orient_body = []\n if not (len(amounts) == 3 & len(rot_order) == 3):\n raise TypeError('Body orientation takes 3 values & 3 orders')\n a1 = int(rot_order[0])\n a2 = int(rot_order[1])\n a3 = int(rot_order[2])\n parent_orient_body = (self._rot(a1, amounts[0]) *\n self._rot(a2, amounts[1]) *\n self._rot(a3, amounts[2]))\n\n self._dcm(parent, parent_orient_body)\n\n try:\n from sympy.polys.polyerrors import CoercionFailed\n from sympy.physics.vector.functions import kinematic_equations\n q1, q2, q3 = amounts\n u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy)\n templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],\n 'body', rot_order)\n templist = [expand(i) for i in templist]\n td = solve(templist, [u1, u2, u3])\n u1 = expand(td[u1])\n u2 = expand(td[u2])\n u3 = expand(td[u3])\n wvec = u1 * self.x + u2 * self.y + u3 * self.z\n # NOTE : SymPy 1.7 removed the call to simplify() that occured\n # inside the solve() function, so this restores the pre-1.7\n # behavior. See:\n # https://github.com/sympy/sympy/issues/23140\n # and\n # https://github.com/sympy/sympy/issues/23130\n wvec = wvec.simplify()\n except (CoercionFailed, AssertionError):\n wvec = self._w_diff_dcm(parent)\n self._ang_vel_dict.update({parent: wvec})\n parent._ang_vel_dict.update({self: -wvec})\n self._var_dict = {}\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 740, "n_words": 217, "vocab_size": 155, "complexity": 7, "nloc": 40, "token_counts": 394, "n_ast_nodes": 626, "n_identifiers": 58, "random_cut": "def orient_body_fixed(self, parent, angles, rotation_order):\n \n\n _check_frame(parent)\n\n amounts = list(angles)\n for i, v in enumerate(amounts):\n if not isinstance(v, Vector):\n amounts[i] = sympify(v)\n\n approved_orders = ('123', '231', '312', '132', '213', '321', '121',\n '131', '212', '232', '313', '323', '')\n # make sure XYZ => 123\n rot_order = translate(str(rotation_order), 'XYZxyz', '123123')\n if rot_order not in approved_orders:\n raise TypeError('The rotation order is not a valid order.')\n\n parent_orient_body = []\n if not (len(amounts) == 3 & len(rot_order) == 3):\n raise TypeError('Body orientation takes 3 values & 3 orders')\n a1 = int(rot_order[0])\n a2 = int(rot_order[1])\n a3 = int(rot_order[2])\n parent_orient_body = (self._rot(a1, amounts[0]) *\n self._rot(a2, amounts[1]) *\n self._rot(a3, amounts[2]))\n\n self._dcm(parent, parent_orient_body)\n\n try:\n from sympy.polys.polyerrors import CoercionFailed\n from sympy.physics.vector.functions import kinematic_equations\n q1, q2, q3 = amounts\n u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy)\n templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],\n 'body', rot_order)\n templist = [expand(i) for i in templist]\n td = solve(templist, [u1, u2, u3])\n u1 = expand(td[u1])\n u2 = expand(td[u2])\n u3 = expand(td[u3])\n wvec = u1 * self.x + u2 * self.y + u3 * self.z\n # NOTE : SymPy 1.7 removed the call to simplify() that occured\n # inside the solve() function, so this restores the pre-1.7\n # behavior. 
See:\n # https://github.com/sympy/sympy/issues/23140\n # and\n # https://github.com/sympy/sympy/issues/23130\n wvec = wvec.simplify()\n except (CoercionFailed, AssertionError):\n wvec = self._w_diff_dcm(parent)\n self._ang_vel_dict.update({parent: wvec})\n parent._ang_vel_dict.update({self: -wvec})\n self._var_dict = {}\n", "d_id": 48536, "documentation": { "docstring": "Rotates this reference frame relative to the parent reference frame\n by right hand rotating through three successive body fixed simple axis\n rotations. Each subsequent axis of rotation is about the \"body fixed\"\n unit vectors of a new intermediate reference frame. This type of\n rotation is also referred to rotating through the `Euler and Tait-Bryan\n Angles`_.\n\n .. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles\n\n Parameters\n ==========\n\n parent : ReferenceFrame\n Reference frame that this reference frame will be rotated relative\n to.\n angles : 3-tuple of sympifiable\n Three angles in radians used for the successive rotations.\n rotation_order : 3 character string or 3 digit integer\n Order of the rotations about each intermediate reference frames'\n unit vectors. The Euler rotation about the X, Z', X'' axes can be\n specified by the strings ``'XZX'``, ``'131'``, or the integer\n ``131``. There are 12 unique valid rotation orders (6 Euler and 6\n Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx,\n and yxz.\n\n Warns\n ======\n\n UserWarning\n If the orientation creates a kinematic loop.\n\n Examples\n ========\n\n Setup variables for the examples:\n\n >>> from sympy import symbols\n >>> from sympy.physics.vector import ReferenceFrame\n >>> q1, q2, q3 = symbols('q1, q2, q3')\n >>> N = ReferenceFrame('N')\n >>> B = ReferenceFrame('B')\n >>> B1 = ReferenceFrame('B1')\n >>> B2 = ReferenceFrame('B2')\n >>> B3 = ReferenceFrame('B3')\n\n For example, a classic Euler Angle rotation can be done by:\n\n >>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX')\n >>> B.dcm(N)\n Matrix([\n [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],\n [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],\n [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])\n\n This rotates reference frame B relative to reference frame N through\n ``q1`` about ``N.x``, then rotates B again through ``q2`` about\n ``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to\n three successive ``orient_axis()`` calls:\n\n >>> B1.orient_axis(N, N.x, q1)\n >>> B2.orient_axis(B1, B1.y, q2)\n >>> B3.orient_axis(B2, B2.x, q3)\n >>> B3.dcm(N)\n Matrix([\n [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],\n [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],\n [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])\n\n Acceptable rotation orders are of length 3, expressed in as a string\n ``'XYZ'`` or ``'123'`` or integer ``123``. 
Rotations about an axis\n twice in a row are prohibited.\n\n >>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ')\n >>> B.orient_body_fixed(N, (q1, q2, 0), '121')\n >>> B.orient_body_fixed(N, (q1, q2, q3), 123)\n\n ", "n_words": 365, "vocab_size": 213, "n_whitespaces": 954, "language": "en" } }, { "id": 66175, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/leave_block_list/leave_block_list.py", "file_name": "leave_block_list.py", "fun_name": "get_applicable_block_lists", "commit_message": "style: format code with black", "code": "def get_applicable_block_lists(employee=None, company=None, all_lists=False):\n\tblock_lists = []\n\n\tif not employee:\n\t\temployee = frappe.db.get_value(\"Employee\", {\"user_id\": frappe.session.user})\n\t\tif not employee:\n\t\t\treturn []\n\n\tif not company:\n\t\tcompany = frappe.db.get_value(\"Employee\", employee, \"company\")\n\n\tdef add_block_list(block_list):\n\t\tif block_list:\n\t\t\tif all_lists or not is_user_in_allow_list(block_list):\n\t\t\t\tblock_lists.append(block_list)\n\n\t# per department\n\tdepartment = frappe.db.get_value(\"Employee\", employee, \"department\")\n\tif department:\n\t\tblock_list = frappe.db.get_value(\"Department\", department, \"leave_block_list\")\n\t\tadd_block_list(block_list)\n\n\t# global\n\tfor block_list in frappe.db.sql_list(\n\t\t,\n\t\tcompany,\n\t):\n\t\tadd_block_list(block_list)\n\n\treturn list(set(block_lists))\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 42, "n_words": 66, "vocab_size": 43, "complexity": 6, "nloc": 20, "token_counts": 132, "n_ast_nodes": 257, "n_identifiers": 18, "random_cut": "def get_applicable_block_lists(employee=None, company=None, all_lists=False):\n\tblock_lists = []\n\n\tif not employee:\n\t\temployee = frappe.db.get_value(\"Employee\", {\"user_id\": frappe.session.user})\n\t\tif not employee:\n\t\t\treturn []\n\n\tif not company:\n\t\tcompany = frappe.db.get_value(\"Employee\", employee, \"company\")\n\n\tdef add_block_list(block_list):\n\t\tif block_list:\n\t\t\tif all_lists or not is_user_in_allow_list(block_list):\n\t\t\t\tblock_lists.append(block_list)\n\n\t# per department\n\tdepartment = frappe.db.get_value(\"Employee\", employee, \"department\")\n\tif department:\n\t\tblock_list = frappe.db.get_value(\"Department\", department, \"leave_block_list\")\n\t\tadd_block_list(block_list)\n\n\t# global\n\tfor block_list in frappe.db.sql_list(", "d_id": 14124, "documentation": { "docstring": "select name from `tabLeave Block List`\n\t\twhere applies_to_all_departments=1 and company=%s", "n_words": 10, "vocab_size": 10, "n_whitespaces": 8, "language": "en" } }, { "id": 114363, "commit_id": "27a34a6a706a06e1241671d29c8cab93d77a19c1", "repo": "mindsdb", "path": "mindsdb/integrations/libs/storage_handler.py", "file_name": "storage_handler.py", "fun_name": "_setup_connection", "commit_message": "feat: add docs, improve base class signatures", "code": "def _setup_connection(self):\n # noqa\n cur = self.connection.cursor()\n if ('store',) not in list(cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")):\n cur.execute(\n )\n self.internal_registry.commit()\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 79, "n_words": 20, "vocab_size": 20, "complexity": 2, "nloc": 6, "token_counts": 45, "n_ast_nodes": 83, "n_identifiers": 9, 
"random_cut": "def _setup_connection(self):\n # noqa\n cur = self.connection.cursor()\n if ('store',) not in lis", "d_id": 25164, "documentation": { "docstring": " Checks that a key-value table exists, otherwise creates it. create table store (key text, value text)", "n_words": 16, "vocab_size": 15, "n_whitespaces": 16, "language": "en" } }, { "id": 156129, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/optimization.py", "file_name": "optimization.py", "fun_name": "cull", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def cull(dsk, keys):\n \n if not isinstance(keys, (list, set)):\n keys = [keys]\n\n seen = set()\n dependencies = dict()\n out = {}\n work = list(set(flatten(keys)))\n\n while work:\n new_work = []\n for k in work:\n dependencies_k = get_dependencies(dsk, k, as_list=True) # fuse needs lists\n out[k] = dsk[k]\n dependencies[k] = dependencies_k\n for d in dependencies_k:\n if d not in seen:\n seen.add(d)\n new_work.append(d)\n\n work = new_work\n\n return out, dependencies\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 214, "n_words": 64, "vocab_size": 44, "complexity": 6, "nloc": 19, "token_counts": 121, "n_ast_nodes": 193, "n_identifiers": 20, "random_cut": "def cull(dsk, keys):\n \n if not isinstance(keys, (lis", "d_id": 36574, "documentation": { "docstring": "Return new dask with only the tasks required to calculate keys.\n\n In other words, remove unnecessary tasks from dask.\n ``keys`` may be a single key or list of keys.\n\n Examples\n --------\n >>> def inc(x):\n ... return x + 1\n\n >>> def add(x, y):\n ... return x + y\n\n >>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}\n >>> dsk, dependencies = cull(d, 'out')\n >>> dsk # doctest: +ELLIPSIS\n {'out': (, 'x', 10), 'x': 1}\n >>> dependencies # doctest: +ELLIPSIS\n {'out': ['x'], 'x': []}\n\n Returns\n -------\n dsk: culled dask graph\n dependencies: Dict mapping {key: [deps]}. 
Useful side effect to accelerate\n other optimizations, notably fuse.\n ", "n_words": 109, "vocab_size": 86, "n_whitespaces": 277, "language": "en" } }, { "id": 148815, "commit_id": "41d8330fbc95224020a046bd46eea6252374ee15", "repo": "freqtrade", "path": "freqtrade/exchange/exchange.py", "file_name": "exchange.py", "fun_name": "fill_leverage_tiers", "commit_message": "freqtrade.exchange edited load_leverage_tiers", "code": "def fill_leverage_tiers(self) -> None:\n \n leverage_tiers = self.load_leverage_tiers()\n for pair, tiers in leverage_tiers.items():\n tiers = []\n for tier in tiers:\n tiers.append(self.parse_leverage_tier(tier))\n self._leverage_tiers[pair] = tiers\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 92, "n_words": 23, "vocab_size": 17, "complexity": 3, "nloc": 11, "token_counts": 54, "n_ast_nodes": 89, "n_identifiers": 11, "random_cut": "def fill_leverage_tiers(self) -> None:\n \n leverage_tiers = self.load_leverage_tiers()\n for pair, tiers in leverage_tiers.items():\n tiers = []\n ", "d_id": 34339, "documentation": { "docstring": "\n Assigns property _leverage_tiers to a dictionary of information about the leverage\n allowed on each pair\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 196389, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/expressions/kronecker.py", "file_name": "kronecker.py", "fun_name": "kronecker_product", "commit_message": "Moved imports to higher level", "code": "def kronecker_product(*matrices):\n \n if not matrices:\n raise TypeError(\"Empty Kronecker product is undefined\")\n validate(*matrices)\n if len(matrices) == 1:\n return matrices[0]\n else:\n return KroneckerProduct(*matrices).doit()\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 57, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 8, "token_counts": 46, "n_ast_nodes": 82, "n_identifiers": 7, "random_cut": "def kronecker_product(*matrices):\n \n if not matrices:", "d_id": 47889, "documentation": { "docstring": "\n The Kronecker product of two or more arguments.\n\n This computes the explicit Kronecker product for subclasses of\n ``MatrixBase`` i.e. explicit matrices. Otherwise, a symbolic\n ``KroneckerProduct`` object is returned.\n\n\n Examples\n ========\n\n For ``MatrixSymbol`` arguments a ``KroneckerProduct`` object is returned.\n Elements of this matrix can be obtained by indexing, or for MatrixSymbols\n with known dimension the explicit matrix can be obtained with\n ``.as_explicit()``\n\n >>> from sympy import kronecker_product, MatrixSymbol\n >>> A = MatrixSymbol('A', 2, 2)\n >>> B = MatrixSymbol('B', 2, 2)\n >>> kronecker_product(A)\n A\n >>> kronecker_product(A, B)\n KroneckerProduct(A, B)\n >>> kronecker_product(A, B)[0, 1]\n A[0, 0]*B[0, 1]\n >>> kronecker_product(A, B).as_explicit()\n Matrix([\n [A[0, 0]*B[0, 0], A[0, 0]*B[0, 1], A[0, 1]*B[0, 0], A[0, 1]*B[0, 1]],\n [A[0, 0]*B[1, 0], A[0, 0]*B[1, 1], A[0, 1]*B[1, 0], A[0, 1]*B[1, 1]],\n [A[1, 0]*B[0, 0], A[1, 0]*B[0, 1], A[1, 1]*B[0, 0], A[1, 1]*B[0, 1]],\n [A[1, 0]*B[1, 0], A[1, 0]*B[1, 1], A[1, 1]*B[1, 0], A[1, 1]*B[1, 1]]])\n\n For explicit matrices the Kronecker product is returned as a Matrix\n\n >>> from sympy import Matrix, kronecker_product\n >>> sigma_x = Matrix([\n ... [0, 1],\n ... 
[1, 0]])\n ...\n >>> Isigma_y = Matrix([\n ... [0, 1],\n ... [-1, 0]])\n ...\n >>> kronecker_product(sigma_x, Isigma_y)\n Matrix([\n [ 0, 0, 0, 1],\n [ 0, 0, -1, 0],\n [ 0, 1, 0, 0],\n [-1, 0, 0, 0]])\n\n See Also\n ========\n KroneckerProduct\n\n ", "n_words": 212, "vocab_size": 97, "n_whitespaces": 371, "language": "en" } }, { "id": 203419, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/options.py", "file_name": "options.py", "fun_name": "_get_obj_does_not_exist_redirect", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _get_obj_does_not_exist_redirect(self, request, opts, object_id):\n \n msg = _(\"%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?\") % {\n \"name\": opts.verbose_name,\n \"key\": unquote(object_id),\n }\n self.message_user(request, msg, messages.WARNING)\n url = reverse(\"admin:index\", current_app=self.admin_site.name)\n return HttpResponseRedirect(url)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 97, "n_words": 33, "vocab_size": 32, "complexity": 1, "nloc": 8, "token_counts": 65, "n_ast_nodes": 106, "n_identifiers": 18, "random_cut": "def _get_obj_does_not_exist_redirect(self, request, opts, object_id):\n \n msg = _(\"%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?\") % {\n \"name\": opts.verbose_name,\n \"key\": unquote(object_id),\n }\n self.message_user(request,", "d_id": 50364, "documentation": { "docstring": "\n Create a message informing the user that the object doesn't exist\n and return a redirect to the admin index page.\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 219816, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "compare_total", "commit_message": "add python 3.10.4 for windows", "code": "def compare_total(self, other, context=None):\n \n other = _convert_other(other, raiseit=True)\n\n # if one is negative and the other is positive, it's easy\n if self._sign and not other._sign:\n return _NegativeOne\n if not self._sign and other._sign:\n return _One\n sign = self._sign\n\n # let's handle both NaN types\n self_nan = self._isnan()\n other_nan = other._isnan()\n if self_nan or other_nan:\n if self_nan == other_nan:\n # compare payloads as though they're integers\n self_key = len(self._int), self._int\n other_key = len(other._int), other._int\n if self_key < other_key:\n if sign:\n return _One\n else:\n return _NegativeOne\n if self_key > other_key:\n if sign:\n return _NegativeOne\n else:\n return _One\n return _Zero\n\n if sign:\n if self_nan == 1:\n return _NegativeOne\n if other_nan == 1:\n return _One\n if self_nan == 2:\n return _NegativeOne\n if other_nan == 2:\n return _One\n else:\n if self_nan == 1:\n return _One\n if other_nan == 1:\n return _NegativeOne\n if self_nan == 2:\n return _One\n if other_nan == 2:\n return _NegativeOne\n\n if self < other:\n return _NegativeOne\n if self > other:\n return _One\n\n if self._exp < other._exp:\n if sign:\n return _One\n else:\n return _NegativeOne\n if self._exp > other._exp:\n if sign:\n return _NegativeOne\n else:\n return _One\n return _Zero\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 999, "n_words": 183, "vocab_size": 61, 
"complexity": 27, "nloc": 57, "token_counts": 242, "n_ast_nodes": 391, "n_identifiers": 19, "random_cut": "def compare_total(self, other, context=None):\n \n other = _convert_other(other, raiseit=True)\n\n # if one is negative and the other is positive, it's easy\n if self._sign and not other._sign:\n return _NegativeOne\n if not self._sign and other._sign:\n return _One\n sign = self._sign\n\n # let's handle both NaN types\n self_nan = self._isnan()\n other_nan = other._isnan()\n if self_nan or other_nan:\n if self_nan == other_nan:\n # compare payloads as though they're integers\n self_key = len(self._int), self._int\n other_key = len(other._int), other._int\n if self_key < other_key:\n if sign:\n return _One\n else:\n return _NegativeOne\n if self_key > other_key:\n if sign:\n return _NegativeOne\n else:\n return _One\n return _Zero\n\n if sign:\n if self_nan == 1:\n return _NegativeOne\n if other_nan == 1:\n return _One\n if self_nan == 2:\n return _NegativeOne\n if other_nan == 2:\n return _One\n else:\n if self_nan == 1:\n return _One\n if other_nan == 1:\n return _NegativeOne\n if self_nan == 2:\n return _One\n if other_nan == 2:\n return _NegativeOne\n\n if self < other:\n retu", "d_id": 55829, "documentation": { "docstring": "Compares self to other using the abstract representations.\n\n This is not like the standard compare, which use their numerical\n value. Note that a total ordering is defined for all possible abstract\n representations.\n ", "n_words": 32, "vocab_size": 28, "n_whitespaces": 60, "language": "en" } }, { "id": 241553, "commit_id": "a610e043d797ca0bae1ce186829fece79077407a", "repo": "lightning", "path": "pytorch_lightning/utilities/enums.py", "file_name": "enums.py", "fun_name": "detect_current_mode", "commit_message": "Add typing for utilities/enums.py (#11298)", "code": "def detect_current_mode(cls) -> _FaultTolerantMode:\n \n env_value = os.getenv(\"PL_FAULT_TOLERANT_TRAINING\", \"0\").lower()\n # the int values are kept for backwards compatibility, but long-term we want to keep only the strings\n if env_value in (\"0\", \"disabled\"):\n return _FaultTolerantMode.DISABLED\n elif env_value in (\"1\", \"automatic\"):\n return _FaultTolerantMode.AUTOMATIC\n elif env_value in (\"2\", \"manual\"):\n return _FaultTolerantMode.MANUAL\n raise MisconfigurationException(\n \"The environment flag `PL_FAULT_TOLERANT_TRAINING` should be either 'disabled', 'automatic', or 'manual'.\"\n )\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 161, "n_words": 61, "vocab_size": 52, "complexity": 4, "nloc": 12, "token_counts": 66, "n_ast_nodes": 122, "n_identifiers": 11, "random_cut": "def detect_current_mode(cls) -> _FaultTolerantMode:\n \n env_value = os.getenv(\"PL_FAULT_TOLERANT_TRAINING\", \"0\").lower()\n # the int values are kept for backwards compatibility, but long-term we want to keep only the strings\n if env_value in (\"0\", \"disabled\"):\n return _FaultT", "d_id": 69581, "documentation": { "docstring": "This classmethod detects if `Fault Tolerant` is activated and maps its value to `_FaultTolerantMode`.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 275711, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/preprocessing/image.py", "file_name": "image.py", "fun_name": "apply_channel_shift", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": 
"def apply_channel_shift(x, intensity, channel_axis=0):\n \n x = np.rollaxis(x, channel_axis, 0)\n min_x, max_x = np.min(x), np.max(x)\n channel_images = [\n np.clip(x_channel + intensity, min_x, max_x) for x_channel in x\n ]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x\n\n\n@keras_export(\"keras.preprocessing.image.random_channel_shift\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.preprocessing.image.random_channel_shift\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 71, "n_words": 41, "vocab_size": 29, "complexity": 2, "nloc": 9, "token_counts": 89, "n_ast_nodes": 144, "n_identifiers": 16, "random_cut": "def apply_channel_shift(x, intensity, channel_axis=0):\n \n x = np.rollaxis(x, channel_axis, 0)\n min_x, max_x = np.min(x), np.max(x)\n channel_images = [\n np.clip(x_channel + intensity, min_x, max_x) for x_channel in x\n ]\n x = np.stack(channel_imag", "d_id": 81449, "documentation": { "docstring": "Performs a channel shift.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n Returns:\n Numpy image tensor.\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 65, "language": "en" } }, { "id": 323132, "commit_id": "44a290e94d1becd1f09fddc3d873f9e19c9d6919", "repo": "PaddleNLP", "path": "paddlenlp/trainer/trainer_base.py", "file_name": "trainer_base.py", "fun_name": "_nested_gather", "commit_message": "[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)\n\n* add some datasets for finetune.\r\n\r\n* support fine tune for all tastks.\r\n\r\n* add trainer prototype.\r\n\r\n* init verison for paddlenlp trainer.\r\n\r\n* refine trainer.\r\n\r\n* update for some details.\r\n\r\n* support multi-cards training evaluation.\r\n\r\n* support load from ckpt.\r\n\r\n* support for export inference model.\r\n\r\n* first version of trainer.\r\n\r\n* seq cls support clue.\r\n\r\n* trainer support for token classification and question answersing tasks.\r\n\r\n* fix as reviews.\r\n\r\nCo-authored-by: Zeyu Chen ", "code": "def _nested_gather(self, tensors, name=None):\n \n if tensors is None:\n return\n if self.args.local_rank != -1:\n tensors = distributed_concat(tensors)\n return tensors\n\n # Copied from Accelerate.", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 79, "n_words": 22, "vocab_size": 18, "complexity": 3, "nloc": 6, "token_counts": 36, "n_ast_nodes": 60, "n_identifiers": 7, "random_cut": "def _nested_gather(self, tensors, name=None):\n \n if tensors is None:\n return\n if self.ar", "d_id": 118377, "documentation": { "docstring": "\n Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before\n concatenating them to `gathered`\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 260411, "commit_id": "9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/tests/test_glm.py", "file_name": "test_glm.py", "fun_name": "test_glm_regression_unpenalized", "commit_message": "TST tight tests for GLMs (#23619)\n\nCo-authored-by: Olivier Grisel ", "code": "def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):\n \n model, X, y, coef, _, _, _ = glm_dataset\n n_samples, n_features = X.shape\n alpha 
= 0 # unpenalized\n params = dict(\n alpha=alpha,\n fit_intercept=fit_intercept,\n # solver=solver, # only lbfgs available\n tol=1e-12,\n max_iter=1000,\n )\n\n model = clone(model).set_params(**params)\n if fit_intercept:\n X = X[:, :-1] # remove intercept\n intercept = coef[-1]\n coef = coef[:-1]\n else:\n intercept = 0\n\n model.fit(X, y)\n\n # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails\n # for the wide/fat case with n_features > n_samples. Most current GLM solvers do\n # NOT return the minimum norm solution with fit_intercept=True.\n rtol = 5e-5\n if n_samples > n_features:\n assert model.intercept_ == pytest.approx(intercept)\n assert_allclose(model.coef_, coef, rtol=rtol)\n else:\n # As it is an underdetermined problem, prediction = y. The following shows that\n # we get a solution, i.e. a (non-unique) minimum of the objective function ...\n assert_allclose(model.predict(X), y, rtol=1e-6)\n if fit_intercept:\n # But it is not the minimum norm solution. Otherwise the norms would be\n # equal.\n norm_solution = np.linalg.norm(np.r_[intercept, coef])\n norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])\n assert norm_model > (1 + 1e-12) * norm_solution\n\n # See https://github.com/scikit-learn/scikit-learn/issues/23670.\n # Note: Even adding a tiny penalty does not give the minimal norm solution.\n # XXX: We could have naively expected LBFGS to find the minimal norm\n # solution by adding a very small penalty. Even that fails for a reason we\n # do not properly understand at this point.\n else:\n # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm\n # solution on this problem.\n # XXX: Do we have any theoretical guarantees why this should be the case?\n assert model.intercept_ == pytest.approx(intercept)\n assert_allclose(model.coef_, coef, rtol=rtol)\n\n\n@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [True, False])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [True, False])", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 595, "n_words": 269, "vocab_size": 170, "complexity": 4, "nloc": 31, "token_counts": 241, "n_ast_nodes": 413, "n_identifiers": 37, "random_cut": "def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):\n \n model, X, y, coef, _, _, _ = glm_dataset\n n_samples, n_features = X.shape\n alpha = 0 # unpenalized\n params = dict(\n alpha=alpha,\n fit_intercept=fit_intercept,\n # solver=solver, # only lbfgs available\n tol=1e-12,\n max_iter=1000,\n )\n\n model = clone(model).set_params(**params)\n if fit_intercept:\n X = X[:, :-1] # remove intercept\n intercept = coef[-1]\n coef = coef[:-1]\n else:\n intercept = 0\n\n model.fit(X, y)\n\n # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails\n # for the wide/fat case with n_features > n_samples. Most current GLM solvers do\n # NOT return the minimum norm solution with fit_intercept=True.\n rtol = 5e-5\n if n_samples > n_features:\n assert model.intercept_ == pytest.approx(intercept)\n assert_allclose(model.coef_, coef, rtol=rtol)\n else:\n # As it is an underdetermined problem, prediction = y. The following shows that\n # we get a solution, i.e. 
a (non-unique) minimum of the objective function ...\n assert_allclose(model.predict(X), y, rtol=1e-6)\n if fit_intercept:\n # But it is not the mi", "d_id": 76234, "documentation": { "docstring": "Test that unpenalized GLM converges for all solvers to correct solution.\n\n We work with a simple constructed data set with known solution.\n Note: This checks the minimum norm solution for wide X, i.e.\n n_samples < n_features:\n min ||w||_2 subject to w = argmin deviance(X, y, w)\n ", "n_words": 46, "vocab_size": 42, "n_whitespaces": 65, "language": "en" } }, { "id": 177486, "commit_id": "1f033118f2e0cca12c6e2375708dc92197b62da6", "repo": "networkx", "path": "networkx/algorithms/bipartite/redundancy.py", "file_name": "redundancy.py", "fun_name": "_node_redundancy", "commit_message": "Minor Python 2 cleanup (#6219)\n\nPython3 cleanup\r\n\r\nUse dict.keys() for set operations rather than explicitly\r\ncreating sets.", "code": "def _node_redundancy(G, v):\n \n n = len(G[v])\n overlap = sum(\n 1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v}\n )\n return (2 * overlap) / (n * (n - 1))\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 55, "n_words": 33, "vocab_size": 29, "complexity": 3, "nloc": 6, "token_counts": 79, "n_ast_nodes": 121, "n_identifiers": 11, "random_cut": "def _node_redundancy(G, v):\n \n n = len(G[v])\n overlap = sum(\n 1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v}", "d_id": 42392, "documentation": { "docstring": "Returns the redundancy of the node `v` in the bipartite graph `G`.\n\n If `G` is a graph with `n` nodes, the redundancy of a node is the ratio\n of the \"overlap\" of `v` to the maximum possible overlap of `v`\n according to its degree. 
The overlap of `v` is the number of pairs of\n neighbors that have mutual neighbors themselves, other than `v`.\n\n `v` must have at least two neighbors in `G`.\n\n ", "n_words": 72, "vocab_size": 41, "n_whitespaces": 90, "language": "en" } }, { "id": 198893, "commit_id": "af847114b9138a321933574fc3c3ec73af8b3459", "repo": "sympy", "path": "sympy/physics/continuum_mechanics/truss.py", "file_name": "truss.py", "fun_name": "solve", "commit_message": "solve method added for the truss class", "code": "def solve(self):\n \n count_reaction_loads = 0\n for node in self._nodes:\n if node in list(self._supports):\n if self._supports[node[0]]=='pinned':\n count_reaction_loads += 2\n elif self._supports[node[0]]=='roller':\n count_reaction_loads += 1\n coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))]\n load_matrix = zeros(2*len(self.nodes), 1)\n load_matrix_row = 0\n for node in self._nodes:\n if node[0] in list(self._loads):\n for load in self._loads[node[0]]:\n if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'):\n load_matrix[load_matrix_row] -= load[0]*math.cos(pi*load[1]/180)\n load_matrix[load_matrix_row + 1] -= load[0]*math.sin(pi*load[1]/180)\n load_matrix_row += 2\n cols = 0\n row = 0\n for node in self._nodes:\n if node[0] in list(self._supports):\n if self._supports[node[0]]=='pinned':\n coefficients_matrix[row][cols] += 1\n coefficients_matrix[row+1][cols+1] += 1\n cols += 2\n elif self._supports[node[0]]=='roller':\n coefficients_matrix[row+1][cols] += 1\n cols += 1\n row += 2\n for member in list(self._members):\n start = self._members[member][0]\n end = self._members[member][1]\n length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2)\n start_index = self._node_labels.index(start)\n end_index = self._node_labels.index(end)\n horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length\n vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length\n horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length\n vertical_component_end = (self._node_coordinates[start][1]-self._node_coordinates[end][1])/length\n coefficients_matrix[start_index*2][cols] += horizontal_component_start\n coefficients_matrix[start_index*2+1][cols] += vertical_component_start\n coefficients_matrix[end_index*2][cols] += horizontal_component_end\n coefficients_matrix[end_index*2+1][cols] += vertical_component_end\n cols += 1\n forces_matrix = (Matrix(coefficients_matrix)**-1)*load_matrix\n self._reaction_loads = {}\n i = 0\n for node in self._nodes:\n if node[0] in list(self._supports):\n if self._supports[node[0]]=='pinned':\n self._reaction_loads['R_'+str(node[0])+'_x'] = forces_matrix[i]\n self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i+1]\n i += 2\n elif self._supports[node[0]]=='roller':\n self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i]\n i += 1\n for member in list(self._members):\n self._internal_forces[member] = forces_matrix[i]\n i += 1\n return\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 958, "n_words": 199, "vocab_size": 80, "complexity": 22, "nloc": 61, "token_counts": 749, "n_ast_nodes": 1153, "n_identifiers": 45, "random_cut": "def solve(self):\n \n count_reaction_loads = 0\n 
for node in self._nodes:\n if node in list(self._supports):\n if self._supports[node[0]]=='pinned':\n count_reaction_loads += 2\n elif self._supports[node[0]]=='roller':\n count_reaction_loads += 1\n coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))]\n load_matrix = zeros(2*len(self.nodes), 1)\n load_matrix_row = 0\n for node in self._nodes:\n if node[0] in list(self._loads):\n for load in self._loads[node[0]]:\n if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'):\n load_matrix[load_matrix_row] -= load[0]*math.cos(pi*load[1]/180)\n load_matrix[load_matrix_row + 1] -= load[0]*math.sin(pi*load[1]/180)\n load_matrix_row += 2\n cols = 0\n row = 0\n for node in self._nodes:\n if node[0] in list(self._supports):\n if self._supports[node[0]]=='pinned':\n coefficients_matrix[row][cols] += 1\n coefficients_matrix[row+1][cols+1] += 1\n cols += 2\n elif self._supports[node[0]]=='roller':\n coefficients_matrix[row+1][cols] += 1\n cols += 1\n row += 2\n for member in list(self._members):\n start = self._members[member][0]\n end = self._members[member][1]\n length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2)\n start_index = self._node_labels.index(start)\n end_index = self._node_labels.index(end)\n horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length\n vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length\n horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length\n ", "d_id": 49063, "documentation": { "docstring": "\n This method solves for all reaction forces of all supports and all internal forces\n of all the members in the truss, provided the Truss is solvable.\n\n A Truss is solvable if the following condition is met,\n\n 2n >= r + m\n\n Where n is the number of nodes, r is the number of reaction forces, where each pinned\n support has 2 reaction forces and each roller has 1, and m is the number of members.\n\n The given condition is derived from the fact that a system of equations is solvable\n only when the number of variables is lesser than or equal to the number of equations.\n Equilibrium Equations in x and y directions give two equations per node giving 2n number\n equations. 
The number of variables is simply the sum of the number of reaction forces and\n member forces.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> t = Truss()\n >>> t.add_node(\"node_1\", 0, 0)\n >>> t.add_node(\"node_2\", 6, 0)\n >>> t.add_node(\"node_3\", 2, 2)\n >>> t.add_node(\"node_4\", 2, 0)\n >>> t.add_member(\"member_1\", \"node_1\", \"node_4\")\n >>> t.add_member(\"member_2\", \"node_2\", \"node_4\")\n >>> t.add_member(\"member_3\", \"node_1\", \"node_3\")\n >>> t.add_member(\"member_4\", \"node_2\", \"node_3\")\n >>> t.add_member(\"member_5\", \"node_3\", \"node_4\")\n >>> t.apply_load(\"node_4\", magnitude=10, direction=270)\n >>> t.apply_support(\"node_1\", type=\"pinned\")\n >>> t.apply_support(\"node_2\", type=\"roller\")\n >>> t.solve()\n >>> t.reaction_loads\n {'R_node_1_x': 1.83697019872103e-15, 'R_node_1_y': 6.66666666666667, 'R_node_2_y': 3.33333333333333}\n >>> t.internal_forces\n {'member_1': 6.66666666666666, 'member_2': 6.66666666666667, 'member_3': -6.66666666666667*sqrt(2), 'member_4': -3.33333333333333*sqrt(5), 'member_5': 10.0}\n ", "n_words": 218, "vocab_size": 128, "n_whitespaces": 450, "language": "en" } }, { "id": 197463, "commit_id": "d8bc197a19c0f4ea76c088da6f1655f1586cd700", "repo": "sympy", "path": "sympy/polys/galoistools.py", "file_name": "galoistools.py", "fun_name": "gf_edf_zassenhaus", "commit_message": "Improve `gf_edf_zassenhaus()`.\n\nFor the case p == 2, we use Algorithm 3.4.8 of [Cohen93], instead of the current procedure.\n\nThe current algorithm was failing to terminate on at least one known case (factoring cyclotomic_poly(17) mod 2).\n\nA simple bugfix would have been to change the iteration to `for i in range(n - 1):`\nwhen computing the polynomial `h` (`Tr` in Geddes), but Alg 3.4.8 is thought to\nbe better in practice.", "code": "def gf_edf_zassenhaus(f, n, p, K):\n \n factors = [f]\n\n if gf_degree(f) <= n:\n return factors\n\n N = gf_degree(f) // n\n if p != 2:\n b = gf_frobenius_monomial_base(f, p, K)\n\n t = [K.one, K.zero]\n while len(factors) < N:\n if p == 2:\n h = r = t\n\n for i in range(n - 1):\n r = gf_pow_mod(r, 2, f, p, K)\n h = gf_add(h, r, p, K)\n\n g = gf_gcd(f, h, p, K)\n t += [K.zero, K.zero]\n else:\n r = gf_random(2 * n - 1, p, K)\n h = _gf_pow_pnm1d2(r, n, f, b, p, K)\n g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)\n\n if g != [K.one] and g != f:\n factors = gf_edf_zassenhaus(g, n, p, K) \\\n + gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K)\n\n return _sort_factors(factors, multiple=False)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 319, "n_words": 127, "vocab_size": 67, "complexity": 8, "nloc": 24, "token_counts": 247, "n_ast_nodes": 352, "n_identifiers": 28, "random_cut": "def gf_edf_zassenhaus(f, n, p, K):\n \n factors = [f]\n\n if gf_degree(f) <= n:\n return factors\n\n N = gf_degree(f) // n\n if p != 2:\n b = gf_frobenius_monomial_base(f, p, K)\n\n t = [K.one, K.zero]\n while len(factors) < N:\n if p == 2:\n h = r = t\n\n for i in range(n - 1):\n r = gf_pow_mod(r, 2, f, p, K)\n h = gf_add(h, r, p, K)\n\n g = gf_gcd(f, h, p, K)\n t += [K.zero, K.zero]\n else:\n r = gf_random(2 * n - 1, p, K)\n h = _gf_pow_pnm1d2(r, n, f, b, p, K)\n g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)\n\n if g != [K.one] and g != f:\n factors = gf_edf_zassenhaus(g, n, p, K) \\\n + gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K)\n\n return _sort_factors(factors, multiple=False)\n\n", 
"d_id": 48565, "documentation": { "docstring": "\n Cantor-Zassenhaus: Probabilistic Equal Degree Factorization\n\n Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and\n an integer ``n``, such that ``n`` divides ``deg(f)``, returns all\n irreducible factors ``f_1,...,f_d`` of ``f``, each of degree ``n``.\n EDF procedure gives complete factorization over Galois fields.\n\n Consider the square-free polynomial ``f = x**3 + x**2 + x + 1`` in\n ``GF(5)[x]``. Let's compute its irreducible factors of degree one::\n\n >>> from sympy.polys.domains import ZZ\n >>> from sympy.polys.galoistools import gf_edf_zassenhaus\n\n >>> gf_edf_zassenhaus([1,1,1,1], 1, 5, ZZ)\n [[1, 1], [1, 2], [1, 3]]\n\n References\n ==========\n\n .. [1] [Gathen99]_\n .. [2] [Geddes92]_\n .. [3] [Cohen93]_\n\n ", "n_words": 96, "vocab_size": 79, "n_whitespaces": 160, "language": "en" } }, { "id": 176187, "commit_id": "5dfd57af2a141a013ae3753e160180b82bec9469", "repo": "networkx", "path": "networkx/linalg/algebraicconnectivity.py", "file_name": "algebraicconnectivity.py", "fun_name": "_tracemin_fiedler", "commit_message": "Use scipy.sparse array datastructure (#5139)\n\n* Step 1: use sparse arrays in nx.to_scipy_sparse_matrix.\r\n\r\nSeems like a reasonable place to start.\r\nnx.to_scipy_sparse_matrix is one of the primary interfaces to\r\nscipy.sparse from within NetworkX.\r\n\r\n* 1: Use np.outer instead of mult col/row vectors\r\n\r\nFix two instances in modularitymatrix where a new 2D array was being\r\ncreated via an outer product of two \\\"vectors\\\".\r\n\r\nIn the matrix case, this was a row vector \\* a column vector. In the\r\narray case this can be disambiguated by being explicit with np.outer.\r\n\r\n* Update _transition_matrix in laplacianmatrix module\r\n\r\n - A few instances of matrix multiplication operator\r\n - Add np.newaxis + transpose to get shape right for broadcasting\r\n - Explicitly convert e.g. 
sp.sparse.spdiags to a csr_array.\r\n\r\n* Update directed_combinitorial_laplacian w/ sparse array.\r\n\r\n - Wrap spdiags in csr_array and update matmul operators.\r\n\r\n* Rm matrix-specific code from lgc and hmn modules\r\n\r\n - Replace .A call with appropriate array semantics\r\n - wrap sparse.diags in csr_array.\r\n\r\n* Change hits to use sparse array semantics.\r\n\r\n - Replace * with @\r\n - Remove superfluous calls to flatten.\r\n\r\n* Update sparse matrix usage in layout module.\r\n - Simplify lil.getrowview call\r\n - Wrap spdiags in csr_array.\r\n\r\n* lil_matrix -> lil_array in graphmatrix.py.\r\n\r\n* WIP: Start working on algebraic connectivity module.\r\n\r\n* Incorporate auth mat varname feedback.\r\n\r\n* Revert 1D slice and comment for 1D sparse future.\r\n\r\n* Add TODOs: rm csr_array wrapper around spdiags etc.\r\n\r\n* WIP: cleanup algebraicconn: tracemin_fiedler.\r\n\r\n* Typo.\r\n\r\n* Finish reviewing algebraicconnectivity.\r\n\r\n* Convert bethe_hessian matrix to use sparse arrays.\r\n\r\n* WIP: update laplacian.\r\n\r\nUpdate undirected laplacian functions.\r\n\r\n* WIP: laplacian - add comment about _transition_matrix return types.\r\n\r\n* Finish laplacianmatrix review.\r\n\r\n* Update attrmatrix.\r\n\r\n* Switch to official laplacian function.\r\n\r\n* Update pagerank to use sparse array.\r\n\r\n* Switch bipartite matrix to sparse arrays.\r\n\r\n* Check from_scipy_sparse_matrix works with arrays.\r\n\r\nModifies test suite.\r\n\r\n* Apply changes from review.\r\n\r\n* Fix failing docstring tests.\r\n\r\n* Fix missing axis for in-place multiplication.\r\n\r\n* Use scipy==1.8rc2\r\n\r\n* Use matrix multiplication\r\n\r\n* Fix PyPy CI\r\n\r\n* [MRG] Create plot_subgraphs.py example (#5165)\r\n\r\n* Create plot_subgraphs.py\r\n\r\nhttps://github.com/networkx/networkx/issues/4220\r\n\r\n* Update plot_subgraphs.py\r\n\r\nblack\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint plus font_size\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded more plots\r\n\r\n* Update plot_subgraphs.py\r\n\r\nremoved plots from the unit test and added comments\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint\r\n\r\n* Update plot_subgraphs.py\r\n\r\ntypos fixed\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded nodes to the plot of the edges removed that was commented out for whatever reason\r\n\r\n* Update plot_subgraphs.py\r\n\r\nrevert the latest commit - the line was commented out for a reason - it's broken\r\n\r\n* Update plot_subgraphs.py\r\n\r\nfixed node color issue\r\n\r\n* Update plot_subgraphs.py\r\n\r\nformat fix\r\n\r\n* Update plot_subgraphs.py\r\n\r\nforgot to draw the nodes... 
now fixed\r\n\r\n* Fix sphinx warnings about heading length.\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult \r\n\r\n* Add traveling salesman problem to example gallery (#4874)\r\n\r\nAdds an example of the using Christofides to solve the TSP problem to the example galery.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037)\r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges()\r\n\r\n* Resolved Requested Changes\r\n\r\n* Revert changes to degree docstrings.\r\n\r\n* Update comments in example.\r\n\r\n* Apply wording to edges method in all graph classes.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Compatibility updates from testing with numpy/scipy/pytest rc's (#5226)\r\n\r\n* Rm deprecated scipy subpkg access.\r\n\r\n* Use recwarn fixture in place of deprecated pytest pattern.\r\n\r\n* Rm unnecessary try/except from tests.\r\n\r\n* Replace internal `close` fn with `math.isclose`. (#5224)\r\n\r\n* Replace internal close fn with math.isclose.\r\n\r\n* Fix lines in docstring examples.\r\n\r\n* Fix Python 3.10 deprecation warning w/ int div. (#5231)\r\n\r\n* Touchups and suggestions for subgraph gallery example (#5225)\r\n\r\n* Simplify construction of G with edges rm'd\r\n\r\n* Rm unused graph attribute.\r\n\r\n* Shorten categorization by node type.\r\n\r\n* Simplify node coloring.\r\n\r\n* Simplify isomorphism check.\r\n\r\n* Rm unit test.\r\n\r\n* Rm redundant plotting of each subgraph.\r\n\r\n* Use new package name (#5234)\r\n\r\n* Allowing None edges in weight function of bidirectional Dijkstra (#5232)\r\n\r\n* added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None.\r\n\r\n* changed syntax for better readability and code duplicate avoidance\r\n\r\nCo-authored-by: Hohmann, Nikolas \r\n\r\n* Add an FAQ about assigning issues. (#5182)\r\n\r\n* Add FAQ about assigning issues.\r\n\r\n* Add note about linking issues from new PRs.\r\n\r\n* Update dev deps (#5243)\r\n\r\n* Update minor doc issues with tex notation (#5244)\r\n\r\n* Add FutureWarnings to fns that return sparse matrices\r\n\r\n - biadjacency_matrix.\r\n - bethe_hessian_matrix.\r\n - incidence_matrix.\r\n - laplacian functions.\r\n - modularity_matrix functions.\r\n - adjacency_matrix.\r\n\r\n* Add to_scipy_sparse_array and use it everywhere.\r\n\r\nAdd a new conversion function to preserve array semantics internally\r\nwhile not altering behavior for users.\r\n\r\nAlso adds FutureWarning to to_scipy_sparse_matrix.\r\n\r\n* Add from_scipy_sparse_array. 
Supercedes from_scipy_sparse_matrix.\r\n\r\n* Handle deprecations in separate PR.\r\n\r\n* Fix docstring examples.\r\n\r\nCo-authored-by: Mridul Seth \r\n\r\nCo-authored-by: Jarrod Millman \r\nCo-authored-by: Andrew Knyazev \r\nCo-authored-by: Dan Schult \r\nCo-authored-by: eskountis <56514439+eskountis@users.noreply.github.com>\r\nCo-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com>\r\nCo-authored-by: NikHoh \r\nCo-authored-by: Hohmann, Nikolas \r\nCo-authored-by: Sultan Orazbayev \r\nCo-authored-by: Mridul Seth ", "code": "def _tracemin_fiedler(L, X, normalized, tol, method):\n \n import numpy as np\n import scipy as sp\n import scipy.linalg # call as sp.linalg\n import scipy.linalg.blas # call as sp.linalg.blas\n import scipy.sparse # call as sp.sparse\n\n n = X.shape[0]\n\n if normalized:\n # Form the normalized Laplacian matrix and determine the eigenvector of\n # its nullspace.\n e = np.sqrt(L.diagonal())\n # TODO: rm csr_array wrapper when spdiags array creation becomes available\n D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format=\"csr\"))\n L = D @ L @ D\n e *= 1.0 / np.linalg.norm(e, 2)\n\n if normalized:\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 168, "n_words": 89, "vocab_size": 61, "complexity": 7, "nloc": 42, "token_counts": 412, "n_ast_nodes": 178, "n_identifiers": 23, "random_cut": "def _tracemin_fiedler(L, X, normalized, tol, method):\n \n import n", "d_id": 41753, "documentation": { "docstring": "Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.\n\n The Fiedler vector of a connected undirected graph is the eigenvector\n corresponding to the second smallest eigenvalue of the Laplacian matrix\n of the graph. This function starts with the Laplacian L, not the Graph.\n\n Parameters\n ----------\n L : Laplacian of a possibly weighted or normalized, but undirected graph\n\n X : Initial guess for a solution. 
Usually a matrix of random numbers.\n This function allows more than one column in X to identify more than\n one eigenvector if desired.\n\n normalized : bool\n Whether the normalized Laplacian matrix is used.\n\n tol : float\n Tolerance of relative residual in eigenvalue computation.\n Warning: There is no limit on number of iterations.\n\n method : string\n Should be 'tracemin_pcg' or 'tracemin_lu'.\n Otherwise exception is raised.\n\n Returns\n -------\n sigma, X : Two NumPy arrays of floats.\n The lowest eigenvalues and corresponding eigenvectors of L.\n The size of input X determines the size of these outputs.\n As this is for Fiedler vectors, the zero eigenvalue (and\n constant eigenvector) are avoided.\n ", "n_words": 172, "vocab_size": 108, "n_whitespaces": 291, "language": "en" } }, { "id": 258374, "commit_id": "9ebf164cfdfb320503b7161493420c1b0ec577a3", "repo": "haystack", "path": "test/nodes/test_prompt_node.py", "file_name": "test_prompt_node.py", "fun_name": "test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml", "commit_message": "feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667)\n\nCo-authored-by: ZanSara ", "code": "def test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml(tmp_path):\n with open(tmp_path / \"tmp_config_with_prompt_template.yml\", \"w\") as tmp_file:\n tmp_file.write(\n f\n )\n pipeline = Pipeline.load_from_yaml(path=tmp_path / \"tmp_config_with_prompt_template.yml\")\n result = pipeline.run(query=\"not relevant\", documents=[Document(\"Berlin is an amazing city.\")])\n assert \"Berlin\" in result[\"results\"][0]\n assert len(result[\"meta\"][\"invocation_context\"]) > 0\n\n\n@pytest.mark.skipif(\n not os.environ.get(\"OPENAI_API_KEY\", None),\n reason=\"Please export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.\",\n)", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(\n not os.environ.get(\"OPENAI_API_KEY\", None),\n reason=\"Please export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.\",\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 98, "n_words": 55, "vocab_size": 50, "complexity": 1, "nloc": 43, "token_counts": 78, "n_ast_nodes": 181, "n_identifiers": 22, "random_cut": "def test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml(tmp_path):\n with open(tmp_path / \"tmp_config_with_prompt_template.yml\", \"w\") as tmp_file:\n tmp_file.write(\n f\n )\n pipeline = Pipeline.load_from_yam", "d_id": 75229, "documentation": { "docstring": "\n version: ignore\n components:\n - name: pmodel\n type: PromptModel\n params:\n model_name_or_path: google/flan-t5-small\n model_kwargs:\n torch_dtype: torch.bfloat16\n - name: question_generation_template\n type: PromptTemplate\n params:\n name: question-generation-new\n prompt_text: \"Given the context please generate a question. 
Context: $documents; Question:\"\n - name: p1\n params:\n model_name_or_path: pmodel\n default_prompt_template: question_generation_template\n output_variable: questions\n type: PromptNode\n - name: p2\n params:\n model_name_or_path: pmodel\n default_prompt_template: question-answering\n type: PromptNode\n pipelines:\n - name: query\n nodes:\n - name: p1\n inputs:\n - Query\n - name: p2\n inputs:\n - p1\n ", "n_words": 72, "vocab_size": 40, "n_whitespaces": 523, "language": "en" } }, { "id": 100362, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/utils.py", "file_name": "utils.py", "fun_name": "_download_model", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _download_model(self):\n \n self.logger.info(\"Downloading model: '%s' from: %s\", self._model_name, self._url_download)\n for attempt in range(self._retries):\n try:\n downloaded_size = self._url_partial_size\n req = urllib.request.Request(self._url_download)\n if downloaded_size != 0:\n req.add_header(\"Range\", f\"bytes={downloaded_size}-\")\n with urllib.request.urlopen(req, timeout=10) as response:\n self.logger.debug(\"header info: {%s}\", response.info())\n self.logger.debug(\"Return Code: %s\", response.getcode())\n self._write_zipfile(response, downloaded_size)\n break\n except (socket_error, socket_timeout,\n urllib.error.HTTPError, urllib.error.URLError) as err:\n if attempt + 1 < self._retries:\n self.logger.warning(\"Error downloading model (%s). Retrying %s of %s...\",\n str(err), attempt + 2, self._retries)\n else:\n self.logger.error(\"Failed to download model. Exiting. (Error: '%s', URL: \"\n \"'%s')\", str(err), self._url_download)\n self.logger.info(\"You can try running again to resume the download.\")\n self.logger.info(\"Alternatively, you can manually download the model from: %s \"\n \"and unzip the contents to: %s\",\n self._url_download, self._cache_dir)\n sys.exit(1)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 595, "n_words": 109, "vocab_size": 89, "complexity": 5, "nloc": 26, "token_counts": 220, "n_ast_nodes": 366, "n_identifiers": 33, "random_cut": "def _download_model(self):\n \n self.logger.info(\"Downloading model: '%s' from: %s\", self._model_name, self._url_download)\n for attempt in range(self._retries):\n try:\n downloaded_size = ", "d_id": 19851, "documentation": { "docstring": " Download the model zip from github to the cache folder. 
", "n_words": 10, "vocab_size": 9, "n_whitespaces": 11, "language": "en" } }, { "id": 260546, "commit_id": "ceeda362402bfc978bcc93d02481fe28e21a07ad", "repo": "scikit-learn", "path": "sklearn/manifold/_locally_linear.py", "file_name": "_locally_linear.py", "fun_name": "fit_transform", "commit_message": "MAINT Use _validate_params in LocallyLinearEmbedding (#23938)\n\nCo-authored-by: jeremiedbb ", "code": "def fit_transform(self, X, y=None):\n \n self._validate_params()\n self._fit_transform(X)\n return self.embedding_\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 36, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def fit_transform(self, X, y=None):\n \n self._validate_params()\n self._fit_transform(X)\n ", "d_id": 76333, "documentation": { "docstring": "Compute the embedding vectors for data X and transform X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training set.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n Returns the instance itself.\n ", "n_words": 45, "vocab_size": 37, "n_whitespaces": 134, "language": "en" } }, { "id": 30144, "commit_id": "fa2ad657482aca9dc628e6d7062b8badf2706bb6", "repo": "spotify-downloader", "path": "tests/types/test_song.py", "file_name": "test_song.py", "fun_name": "test_song_from_data_dump", "commit_message": "v4 init", "code": "def test_song_from_data_dump():\n \n\n # Loads from str\n song = Song.from_data_dump(\n \n )\n\n assert song.name == \"Ropes\"\n assert song.artists == [\"Dirty Palm\", \"Chandler Jewels\"]\n assert song.album_name == \"Ropes\"\n assert song.album_artist == \"Dirty Palm\"\n assert song.genres == [\"gaming edm\", \"melbourne bounce international\"]\n assert song.disc_number == 1\n assert song.duration == 188\n assert song.year == 2021\n assert song.date == \"2021-10-28\"\n assert song.track_number == 1\n assert song.tracks_count == 1\n assert song.isrc == \"GB2LD2110301\"\n assert song.song_id == \"1t2qKa8K72IBC8yQlhD9bU\"\n assert (\n song.cover_url\n == \"https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332\"\n )\n assert song.explicit == False\n assert song.download_url == None\n\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 169, "n_words": 84, "vocab_size": 50, "complexity": 1, "nloc": 47, "token_counts": 119, "n_ast_nodes": 207, "n_identifiers": 20, "random_cut": "def test_song_from_data_dump():\n \n\n # Loads from str\n song = Song.from_data_dump(\n \n )\n\n assert song.name == \"Ropes\"\n assert song.artists == [\"Dirty Palm\", \"Chandler Jewels\"]\n assert song.album_name == \"Ropes\"\n assert song.album_artist == \"Dirty Palm\"\n assert song.genres == [\"gaming edm\", \"melbourne bounce international\"]\n assert song.disc_number == 1\n assert song.duration == 188\n assert song.year == 2021\n assert song.date == \"2021-10-28\"\n assert song.track_n", "d_id": 5345, "documentation": { "docstring": "\n Tests if Song.from_data_dump() works correctly.\n \n {\n \"name\": \"Ropes\",\n \"artists\": [\"Dirty Palm\", \"Chandler Jewels\"],\n \"album_name\": \"Ropes\",\n \"album_artist\": \"Dirty Palm\",\n \"genres\": [\"gaming edm\", \"melbourne bounce international\"],\n 
\"disc_number\": 1,\n \"duration\": 188,\n \"year\": 2021,\n \"date\": \"2021-10-28\",\n \"track_number\": 1,\n \"tracks_count\": 1,\n \"isrc\": \"GB2LD2110301\",\n \"song_id\": \"1t2qKa8K72IBC8yQlhD9bU\",\n \"cover_url\": \"https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332\",\n \"explicit\": false,\n \"download_url\": null,\n \"artist\" : \"Dirty Palm\",\n \"disc_count\": 1,\n \"copyright\": \"\",\n \"publisher\": \"\",\n \"url\": \"https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU\"\n }\n ", "n_words": 59, "vocab_size": 51, "n_whitespaces": 319, "language": "en" } }, { "id": 262100, "commit_id": "ea965a5683c56a39570b4cc91e86cd2bb9799308", "repo": "TTS", "path": "TTS/tts/models/vits.py", "file_name": "vits.py", "fun_name": "test_run", "commit_message": "Update VITS for the new API", "code": "def test_run(self) -> Tuple[Dict, Dict]:\n \n print(\" | > Synthesizing test sentences.\")\n test_audios = {}\n test_figures = {}\n test_sentences = self.config.test_sentences\n for idx, s_info in enumerate(test_sentences):\n try:\n aux_inputs = self.get_aux_input_from_test_sentences(s_info)\n wav, alignment, _, _ = synthesis(\n self,\n aux_inputs[\"text\"],\n self.config,\n \"cuda\" in str(next(self.parameters()).device),\n ap,\n speaker_id=aux_inputs[\"speaker_id\"],\n d_vector=aux_inputs[\"d_vector\"],\n style_wav=aux_inputs[\"style_wav\"],\n language_id=aux_inputs[\"language_id\"],\n language_name=aux_inputs[\"language_name\"],\n enable_eos_bos_chars=self.config.enable_eos_bos_chars,\n use_griffin_lim=True,\n do_trim_silence=False,\n ).values()\n test_audios[\"{}-audio\".format(idx)] = wav\n test_figures[\"{}-alignment\".format(idx)] = plot_alignment(alignment.T, output_fig=False)\n except: # pylint: disable=bare-except\n print(\" !! Error creating Test Sentence -\", idx)\n return test_figures, test_audios\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 482, "n_words": 73, "vocab_size": 63, "complexity": 3, "nloc": 35, "token_counts": 190, "n_ast_nodes": 304, "n_identifiers": 36, "random_cut": "def test_run(self) -> Tuple[Dict, Dict]:\n \n print(\" | > Synthesizing test sentences.\")\n test_audios = {}\n test_figures = {}\n test_sentences = self.config.test_sentences\n for idx, s_info in enumerate(test_sentences):\n try:\n aux_inputs = self.get_aux_input_from_test_sentences(s_info)\n wav, alignment, _, _ = synthesis(\n self,\n aux_inputs[\"text\"],\n self.config,\n \"cuda\" in str(next(self.parameters()).device),\n ap,\n speaker_id=aux_inputs[\"speaker_id\"],\n d_vector=aux_inputs[\"d_vector\"],\n style_wav=aux_inputs[\"style_wav\"],\n language_id=aux_inputs[\"language_id\"],\n language_name=aux_inputs[\"language_name\"],\n enable_eos_bos_chars=self.config.enable_eos_bos_chars,\n use_griffin_lim=True,\n do_trim_silence=False,\n ).values()\n test_audios[\"{}-audio\".format(idx)] = wav\n test_figures[\"{}-alignment\".format(idx)] = plot_alignment(alignment.T, output_fig=False)\n except: # pylint: disable=bare-except\n print(\" !! 
Error creating Test Sentence -\", idx)\n return test_figures, test_audios\n", "d_id": 77125, "documentation": { "docstring": "Generic test run for `tts` models used by `Trainer`.\n\n You can override this for a different behaviour.\n\n Returns:\n Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.\n ", "n_words": 29, "vocab_size": 27, "n_whitespaces": 61, "language": "en" } }, { "id": 221674, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/configparser.py", "file_name": "configparser.py", "fun_name": "read_dict", "commit_message": "add python 3.10.4 for windows", "code": "def read_dict(self, dictionary, source=''):\n \n elements_added = set()\n for section, keys in dictionary.items():\n section = str(section)\n try:\n self.add_section(section)\n except (DuplicateSectionError, ValueError):\n if self._strict and section in elements_added:\n raise\n elements_added.add(section)\n for key, value in keys.items():\n key = self.optionxform(str(key))\n if value is not None:\n value = str(value)\n if self._strict and (section, key) in elements_added:\n raise DuplicateOptionError(section, key, source)\n elements_added.add((section, key))\n self.set(section, key, value)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 298, "n_words": 60, "vocab_size": 42, "complexity": 9, "nloc": 18, "token_counts": 141, "n_ast_nodes": 222, "n_identifiers": 19, "random_cut": "def read_dict(self, dictionary, source=''):\n \n elements_added = set()\n for section, keys in dictionary.items():\n section = str(section)\n try:\n self.add_section(section)\n except (DuplicateSectionError, ValueError):\n ", "d_id": 56469, "documentation": { "docstring": "Read configuration from a dictionary.\n\n Keys are section names, values are dictionaries with keys and values\n that should be present in the section. 
If the used dictionary type\n preserves order, sections and their keys will be added in order.\n\n All types held in the dictionary are converted to strings during\n reading, including section names, option names and keys.\n\n Optional second argument is the `source' specifying the name of the\n dictionary being read.\n ", "n_words": 72, "vocab_size": 54, "n_whitespaces": 128, "language": "en" } }, { "id": 216287, "commit_id": "25e7a51c729cca539778c53f0858d6070e7d76cb", "repo": "salt", "path": "salt/channel/client.py", "file_name": "client.py", "fun_name": "send", "commit_message": "Move retries to channel", "code": "def send(self, load, tries=3, timeout=60, raw=False):\n \n _try = 1\n while True:\n try:\n if self.crypt == \"clear\":\n log.trace(\"ReqChannel send clear load=%r\", load)\n ret = yield self._uncrypted_transfer(load, timeout=timeout)\n else:\n log.trace(\"ReqChannel send crypt load=%r\", load)\n ret = yield self._crypted_transfer(\n load, timeout=timeout, raw=raw\n )\n break\n except Exception as exc:\n log.error(\"Failed to send msg %r\", dir(exc))\n if _try == tries:\n raise #salt.exceptions.SaltClientError(\"Connection to master lost\")\n else:\n _try += 1\n continue\n raise salt.ext.tornado.gen.Return(ret)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 382, "n_words": 67, "vocab_size": 49, "complexity": 5, "nloc": 21, "token_counts": 125, "n_ast_nodes": 206, "n_identifiers": 22, "random_cut": "def send(self, load, tries=3, timeout=60, raw=False):\n \n _try = 1\n while True:\n try:\n if self.crypt == \"clear\":\n log.trace(\"ReqChannel send clear load=%r\", load)\n ret = yield self._uncrypted_transfer(load, timeout=timeout)\n else:\n log.trace(\"ReqChannel send crypt load=%r\", load)\n ret = yield self._crypted_transfer(\n load, timeout=timeout, raw=raw\n )\n break\n except Exception as exc:\n log.error(\"Failed to send msg %r\", dir(exc))\n if _try == tries:\n raise #salt.exceptions.SaltClientError(\"Connection to master lost\")\n else:\n _try += 1\n continue\n raise salt.ext.tornado.gen.Return(ret)\n", "d_id": 54501, "documentation": { "docstring": "\n Send a request, return a future which will complete when we send the message\n\n :param dict load: A load to send across the wire\n :param int tries: The number of times to make before failure\n :param int timeout: The number of seconds on a response before failing\n ", "n_words": 47, "vocab_size": 35, "n_whitespaces": 83, "language": "en" } }, { "id": 203542, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/utils.py", "file_name": "utils.py", "fun_name": "get_fields_from_path", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_fields_from_path(model, path):\n \n pieces = path.split(LOOKUP_SEP)\n fields = []\n for piece in pieces:\n if fields:\n parent = get_model_from_relation(fields[-1])\n else:\n parent = model\n fields.append(parent._meta.get_field(piece))\n return fields\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 83, "n_words": 25, "vocab_size": 20, "complexity": 3, "nloc": 10, "token_counts": 58, "n_ast_nodes": 96, "n_identifiers": 13, "random_cut": "def get_fields_from_path(model, path):\n \n pieces = path.split(LOOKUP_SEP)\n fields = []\n for piece in pieces:\n if fields:\n parent = get_model_from_relation(fields[-1])\n else:\n 
parent = model\n fields.app", "d_id": 50438, "documentation": { "docstring": "Return list of Fields given path relative to model.\n\n e.g. (ModelX, \"user__groups__name\") -> [\n ,\n ,\n ,\n ]\n ", "n_words": 27, "vocab_size": 21, "n_whitespaces": 57, "language": "en" } }, { "id": 222870, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/dist.py", "file_name": "dist.py", "fun_name": "find_config_files", "commit_message": "add python 3.10.4 for windows", "code": "def find_config_files(self):\n \n files = []\n check_environ()\n\n # Where to look for the system-wide Distutils config file\n sys_dir = os.path.dirname(sys.modules['distutils'].__file__)\n\n # Look for the system config file\n sys_file = os.path.join(sys_dir, \"distutils.cfg\")\n if os.path.isfile(sys_file):\n files.append(sys_file)\n\n # What to call the per-user config file\n if os.name == 'posix':\n user_filename = \".pydistutils.cfg\"\n else:\n user_filename = \"pydistutils.cfg\"\n\n # And look for the user config file\n if self.want_user_cfg:\n user_file = os.path.join(os.path.expanduser('~'), user_filename)\n if os.path.isfile(user_file):\n files.append(user_file)\n\n # All platforms support local setup.cfg\n local_file = \"setup.cfg\"\n if os.path.isfile(local_file):\n files.append(local_file)\n\n if DEBUG:\n self.announce(\"using config files: %s\" % ', '.join(files))\n\n return files\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 310, "n_words": 92, "vocab_size": 61, "complexity": 7, "nloc": 21, "token_counts": 150, "n_ast_nodes": 267, "n_identifiers": 23, "random_cut": "def find_config_files(self):\n \n files = []\n check_environ()\n\n # Where to look for the system-wide Distutils config file\n sys_dir = os.path.dirname(sys.modules['distutils'].__file__)\n\n # Look for the system config file\n sys_file = os.path.join(sys_dir, \"distutils.cfg\")\n if os.path.isfile(sys_file):\n files.append(sys_file)\n\n # What to call the per-user config file\n if os.name == 'posix':\n user_filename = \".pydistutils.cfg\"\n else:\n user_filename = \"pydistutils.cfg\"\n\n # And look for the user config file\n if self.want_user_cfg:\n user_file = os.path.join(os.path.expanduser('~'), user_filename)\n if os.path.isfile(user_file):\n files.append(user_file)\n\n # All platforms support local setup.cfg\n local_file = \"setup.cfg\"\n if os.path.isfile(local_file):\n ", "d_id": 56788, "documentation": { "docstring": "Find as many configuration files as should be processed for this\n platform, and return a list of filenames in the order in which they\n should be parsed. The filenames returned are guaranteed to exist\n (modulo nasty race conditions).\n\n There are three possible config files: distutils.cfg in the\n Distutils installation directory (ie. 
where the top-level\n Distutils __inst__.py file lives), a file in the user's home\n directory named .pydistutils.cfg on Unix and pydistutils.cfg\n on Windows/Mac; and setup.cfg in the current directory.\n\n The file in the user's home directory can be disabled with the\n --no-user-cfg option.\n ", "n_words": 93, "vocab_size": 64, "n_whitespaces": 171, "language": "en" } }, { "id": 51516, "commit_id": "02d7e5514b0da9a7ebabb004533b274056c954e2", "repo": "PaddleHub", "path": "modules/image/Image_gan/gan/stgan_bald/processor.py", "file_name": "processor.py", "fun_name": "get_save_image_name", "commit_message": "update stgan_bald (#2022)", "code": "def get_save_image_name(org_im_path, output_dir, num):\n \n # name prefix of orginal image\n org_im_name = os.path.split(org_im_path)[-1]\n im_prefix = os.path.splitext(org_im_name)[0]\n ext = '.png'\n # save image path\n save_im_path = os.path.join(output_dir, im_prefix + ext)\n if os.path.exists(save_im_path):\n save_im_path = os.path.join(\n output_dir, im_prefix + str(num) + ext)\n\n return save_im_path\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 87, "n_words": 42, "vocab_size": 28, "complexity": 2, "nloc": 9, "token_counts": 85, "n_ast_nodes": 137, "n_identifiers": 15, "random_cut": "def get_save_image_name(org_im_path, output_dir, num):\n \n # name prefix of orginal image\n org_im_name = os.path.split(org_im_path)[-1]\n im_prefix = os.path.splitext(org_im_name)[0]\n ext = '.png'\n # save image path\n save_im_path = os.path.join(output_dir, im_prefix + ext)\n if os.path.exists(sav", "d_id": 10339, "documentation": { "docstring": "\n Get save image name from source image path.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 15, "language": "en" } }, { "id": 110265, "commit_id": "9b6abd0b4933811e0a45c2535ab8fd107db65dd9", "repo": "matplotlib", "path": "lib/matplotlib/colors.py", "file_name": "colors.py", "fun_name": "rgb_to_hsv", "commit_message": "DOC: improve grammar and consistency", "code": "def rgb_to_hsv(arr):\n \n arr = np.asarray(arr)\n\n # check length of the last dimension, should be _some_ sort of rgb\n if arr.shape[-1] != 3:\n raise ValueError(\"Last dimension of input array must be 3; \"\n \"shape {} was found.\".format(arr.shape))\n\n in_shape = arr.shape\n arr = np.array(\n arr, copy=False,\n dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints.\n ndmin=2, # In case input was 1D.\n )\n out = np.zeros_like(arr)\n arr_max = arr.max(-1)\n ipos = arr_max > 0\n delta = arr.ptp(-1)\n s = np.zeros_like(delta)\n s[ipos] = delta[ipos] / arr_max[ipos]\n ipos = delta > 0\n # red is max\n idx = (arr[..., 0] == arr_max) & ipos\n out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\n # green is max\n idx = (arr[..., 1] == arr_max) & ipos\n out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]\n # blue is max\n idx = (arr[..., 2] == arr_max) & ipos\n out[idx, 0] = 4. 
+ (arr[idx, 0] - arr[idx, 1]) / delta[idx]\n\n out[..., 0] = (out[..., 0] / 6.0) % 1.0\n out[..., 1] = s\n out[..., 2] = arr_max\n\n return out.reshape(in_shape)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 310, "n_words": 175, "vocab_size": 95, "complexity": 2, "nloc": 28, "token_counts": 308, "n_ast_nodes": 452, "n_identifiers": 24, "random_cut": "def rgb_to_hsv(arr):\n \n arr = np.asarray(arr)\n\n # check length of the last dimension, should be _some_ sort of rgb\n if arr.shape[-1] != 3:\n raise ValueError(\"Last dimension of input array must be 3; \"\n \"shape {} was found.\".format(arr.shape))\n\n in_shape = arr.shape\n arr = np.array(\n arr, copy=False,\n dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints.\n ndmin=2, # In case input was 1D.\n )\n out = np.zeros_like(arr)\n arr_max = arr.max(-1)\n ipos = arr_max > 0\n delta = arr.ptp(-1)\n s = np.zeros_like(delta)\n s[ipos] = delta[ipos] / arr_max[ipos]\n ipos = delta > 0\n # red is max\n idx = (arr[..., 0] == arr_max) & ipos\n out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\n # green is max\n idx = (arr[..., 1] == arr_max) & ipos\n out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]\n # blue is max\n idx = (arr[..., 2] == arr_max) & ipos\n out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]\n\n out[..., 0] = (out[..., 0] / 6.0) % 1.0\n out[..., 1] = s\n out[..., 2] = ar", "d_id": 24007, "documentation": { "docstring": "\n Convert float RGB values (in the range [0, 1]), in a numpy array to HSV\n values.\n\n Parameters\n ----------\n arr : (..., 3) array-like\n All values must be in the range [0, 1]\n\n Returns\n -------\n (..., 3) ndarray\n Colors converted to HSV values in range [0, 1]\n ", "n_words": 46, "vocab_size": 32, "n_whitespaces": 86, "language": "en" } }, { "id": 46726, "commit_id": "f5f11aefea775448105098b59c4657fa1809fb94", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_scheduler_verify_pool_full", "commit_message": "Add dag-processor cli command (#22305)", "code": "def test_scheduler_verify_pool_full(self, dag_maker, configs):\n \n with conf_vars(configs):\n with dag_maker(dag_id='test_scheduler_verify_pool_full'):\n BashOperator(\n task_id='dummy',\n pool='test_scheduler_verify_pool_full',\n bash_command='echo hi',\n )\n\n session = settings.Session()\n pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)\n session.add(pool)\n session.flush()\n\n self.scheduler_job = SchedulerJob(executor=self.null_exec)\n self.scheduler_job.processor_agent = mock.MagicMock()\n\n # Create 2 dagruns, which will create 2 task instances.\n dr = dag_maker.create_dagrun(\n run_type=DagRunType.SCHEDULED,\n )\n self.scheduler_job._schedule_dag_run(dr, session)\n dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.RUNNING)\n self.scheduler_job._schedule_dag_run(dr, session)\n session.flush()\n task_instances_list = self.scheduler_job._executable_task_instances_to_queued(\n max_tis=32, session=session\n )\n\n assert len(task_instances_list) == 1\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 382, "n_words": 64, "vocab_size": 49, "complexity": 1, "nloc": 25, "token_counts": 173, "n_ast_nodes": 285, "n_identifiers": 38, "random_cut": "def test_scheduler_verify_pool_full(self, dag_maker, 
configs):\n \n with conf_vars(configs):\n with dag_maker(dag_id='test_scheduler_verify_pool_full'):\n BashOperator(\n task_id='dummy',\n pool='test_scheduler_verify_pool_full',\n bash_command='echo hi',\n )\n\n session = settings.Session()\n pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)\n session.add(pool)\n session.flush()\n\n self.scheduler_job = SchedulerJob(executor=self.null_exec)\n self.scheduler_job.processor_agent = mock.MagicMock()\n\n # Create 2 dagruns, which will create 2 task instances.\n dr = dag_maker.create_dagrun(\n run_type=DagRunType.SCHEDULED,\n )\n self.scheduler_job._schedule_dag_run(dr, session)\n dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.RUNNING)\n self.scheduler_job._schedule_dag_run(dr, session)\n session.flush()\n task_instances_list = self.scheduler_job._executable_task_instances_to_queued(\n max_tis=32, session=session\n )\n\n assert len(task_instances_list) == 1\n", "d_id": 8969, "documentation": { "docstring": "\n Test task instances not queued when pool is full\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 22868, "commit_id": "39c49e07066b2a53e176d555af6a7bf8aabb8a9c", "repo": "Python", "path": "VoiceAssistant/Project_Basic_struct/textRead.py", "file_name": "textRead.py", "fun_name": "ms_word", "commit_message": "VoiceAssistant\n\nThis is Voice Assistant coded using Python which can do the following: -\r\n 1. Speak Text entered by User.\r\n 2. Search anything on Google.\r\n 3. Search anything on Wikipedia.\r\n 4. Read an MS Word(docx) document.\r\n 5. Read a book(PDF).\r\n 6. Can be used as a Dictator.", "code": "def ms_word():\r\n \r\n # TODO : Take location input from the user\r\n try:\r\n speak(\"Enter the document's location - \")\r\n location = input(\"Enter the document's location - \")\r\n \r\n file_loc = doubleslash(location) \r\n \r\n doc = docx.Document(file_loc)\r\n fullText = []\r\n for para in doc.paragraphs:\r\n fullText.append(para.text)\r\n #print(fullText)\r\n doc_file = '\\n'.join(fullText)\r\n print(doc_file)\r\n speak(doc_file)\r\n except Exception as exp:\r\n #print(exp)\r\n print(f\"ERROR - {exp}\")\r\n print(Fore.YELLOW + \"I could'nt locate the file!\\nIf you didn't specify the extension of the file, please specify it.\")\r\n return \"None\"\r\n\r", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 215, "n_words": 74, "vocab_size": 57, "complexity": 3, "nloc": 16, "token_counts": 86, "n_ast_nodes": 166, "n_identifiers": 21, "random_cut": "def ms_word():\r\n \r\n # TODO : Take location input from the user\r\n try:\r\n speak(\"Enter the document's location - \")\r\n ", "d_id": 4481, "documentation": { "docstring": "[Print and speak out a ms_word docx file as specified in the path]\r\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 16, "language": "en" } }, { "id": 176252, "commit_id": "cceb43d15e1d01476c8c15ff273399dee0e3b1aa", "repo": "networkx", "path": "networkx/readwrite/json_graph/tree.py", "file_name": "tree.py", "fun_name": "tree_data", "commit_message": "Add exception for unconnected graph (#5287)", "code": "def tree_data(G, root, attrs=None, ident=\"id\", children=\"children\"):\n \n if G.number_of_nodes() != G.number_of_edges() + 1:\n raise TypeError(\"G is not a tree.\")\n if not G.is_directed():\n raise TypeError(\"G is not directed.\")\n if not nx.is_weakly_connected(G):\n raise TypeError(\"G is not weakly connected.\")\n\n # 
NOTE: to be removed in 3.0\n if attrs is not None:\n import warnings\n\n msg = (\n \"\\nThe `attrs` keyword argument of tree_data is deprecated\\n\"\n \"and will be removed in networkx 3.0.\\n\"\n \"It is replaced with explicit `ident` and `children` \"\n \"keyword arguments.\\n\"\n \"To make this warning go away and ensure usage is forward\\n\"\n \"compatible, replace `attrs` with `ident` and `children,\\n\"\n \"for example:\\n\\n\"\n \" >>> tree_data(G, root, attrs={'id': 'foo', 'children': 'bar'})\\n\\n\"\n \"should instead be written as\\n\\n\"\n \" >>> tree_data(G, root, ident='foo', children='bar')\\n\\n\"\n \"The default values of 'id' and 'children' will not change.\"\n )\n warnings.warn(msg, DeprecationWarning, stacklevel=2)\n\n ident = attrs[\"id\"]\n children = attrs[\"children\"]\n\n if ident == children:\n raise nx.NetworkXError(\"The values for `id` and `children` must be different.\")\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 369, "n_words": 151, "vocab_size": 104, "complexity": 6, "nloc": 31, "token_counts": 167, "n_ast_nodes": 247, "n_identifiers": 18, "random_cut": "def tree_data(G, root, attrs=None, ident=\"id\", children=\"children\"):\n \n if G.number_of_nodes() != G.number_of_edges() + 1:\n raise TypeError(\"G is not a tree.\")\n if not G.is_directed():\n raise TypeError(\"G is not directed.\")\n", "d_id": 41792, "documentation": { "docstring": "Returns data in tree format that is suitable for JSON serialization\n and use in Javascript documents.\n\n Parameters\n ----------\n G : NetworkX graph\n G must be an oriented tree\n\n root : node\n The root of the tree\n\n attrs : dict\n A dictionary that contains two keys 'id' and 'children'. The\n corresponding values provide the attribute names for storing\n NetworkX-internal graph data. The values should be unique. Default\n value: :samp:`dict(id='id', children='children')`.\n\n If some user-defined graph data use these attribute names as data keys,\n they may be silently dropped.\n\n .. deprecated:: 2.6\n\n The `attrs` keyword argument is replaced by `ident` and `children`\n and will be removed in networkx 3.0\n\n ident : string\n Attribute name for storing NetworkX-internal graph data. `ident` must\n have a different value than `children`. The default is 'id'.\n\n children : string\n Attribute name for storing NetworkX-internal graph data. `children`\n must have a different value than `ident`. 
The default is 'children'.\n\n Returns\n -------\n data : dict\n A dictionary with node-link formatted data.\n\n Raises\n ------\n NetworkXError\n If `children` and `ident` attributes are identical.\n\n Examples\n --------\n >>> from networkx.readwrite import json_graph\n >>> G = nx.DiGraph([(1, 2)])\n >>> data = json_graph.tree_data(G, root=1)\n\n To serialize with json\n\n >>> import json\n >>> s = json.dumps(data)\n\n Notes\n -----\n Node attributes are stored in this format but keys\n for attributes must be strings if you want to serialize with JSON.\n\n Graph and edge attributes are not stored.\n\n See Also\n --------\n tree_graph, node_link_data, adjacency_data\n ", "n_words": 235, "vocab_size": 139, "n_whitespaces": 450, "language": "en" } }, { "id": 110450, "commit_id": "7a1df7830f7685a99291d90c5e79bfc5e7876f31", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/tests/test_axes3d.py", "file_name": "test_axes3d.py", "fun_name": "test_mutating_input_arrays_y_and_z", "commit_message": "Test that plot results aren't affected by mutating input arrays", "code": "def test_mutating_input_arrays_y_and_z(fig_test, fig_ref):\n \n ax1 = fig_test.add_subplot(111, projection='3d')\n x = [1, 2, 3]\n y = [0.0, 0.0, 0.0]\n z = [0.0, 0.0, 0.0]\n ax1.plot(x, y, z, 'o-')\n\n ax1.set_ylim([0, 4])\n ax1.set_zlim([0, 4])\n fig_test.draw_without_rendering()\n\n # mutate y,z to get a nontrivial line\n y[:] = [1, 2, 3]\n z[:] = [1, 2, 3]\n\n # draw the same plot without mutating x and y\n ax2 = fig_ref.add_subplot(111, projection='3d')\n x = [1, 2, 3]\n y = [0.0, 0.0, 0.0]\n z = [0.0, 0.0, 0.0]\n ax2.plot(x, y, z, 'o-')\n\n ax2.set_ylim([0, 4])\n ax2.set_zlim([0, 4])\n fig_test.draw_without_rendering()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 150, "n_words": 87, "vocab_size": 46, "complexity": 1, "nloc": 19, "token_counts": 208, "n_ast_nodes": 277, "n_identifiers": 14, "random_cut": "def test_mutating_input_arrays_y_and_z(fig_test, fig_ref):\n \n ax1 = fig_test.add_subplot(111, projection='3d')\n x = [1, 2, 3]\n y = [0.0, 0.0, 0.0]\n z = [0.0, 0.0, 0.0]\n ax1.plot(x, y, z, 'o-')\n\n ax1.set_ylim([0, 4])\n ax1.set_zlim([0, 4])\n fig_test.draw_without_rendering()\n\n # mutate y,z to get a nontrivial line\n y[:] = [1, 2, 3]\n z[:] = [1, 2, 3]\n\n # draw the same plot without mutating x and y\n ax2 = fig_ref.add_subplot(111, projection='3d')\n x = [1, 2, 3]\n y = [0.0, 0.0, 0.0]\n z = [0.0, 0.0, 0.0]\n ax2.plot(x, y, z, 'o-')\n\n ax2.set_ylim([0, 4])\n ax2.set_zlim([0, 4])\n fig_test.draw_without_rendering()\n", "d_id": 24166, "documentation": { "docstring": "\n Test to see if the `z` axis does not get mutated\n after a call to `Axes3D.plot`\n\n test cases came from GH#8990\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 34, "language": "en" } }, { "id": 75623, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/tests/elasticsearch_common_tests.py", "file_name": "elasticsearch_common_tests.py", "fun_name": "test_search_with_hyphen", "commit_message": "Reformat with black", "code": "def test_search_with_hyphen(self):\n \n book = models.Book.objects.create(\n title=\"Harry Potter and the Half-Blood Prince\",\n publication_date=date(2009, 7, 15),\n number_of_pages=607,\n )\n\n index = self.backend.get_index_for_model(models.Book)\n index.add_item(book)\n index.refresh()\n\n results = self.backend.search(\"Half-Blood\", models.Book)\n 
self.assertUnsortedListEqual(\n [r.title for r in results],\n [\n \"Harry Potter and the Half-Blood Prince\",\n ],\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 184, "n_words": 40, "vocab_size": 32, "complexity": 2, "nloc": 16, "token_counts": 93, "n_ast_nodes": 148, "n_identifiers": 20, "random_cut": "def test_search_with_hyphen(self):\n \n book = models.Book.objects.create(\n title=\"Harry Potter and the Half-Blood Prince\",\n publication_date=date(2009, 7, 15),\n number_of_pages=607,\n )\n\n index = self.backend.get_index_for_model(models.Book)\n index.add_item(book)\n index.refresh()\n\n results = self.backend.search(\"Half-Blood\", models.Book)\n self.assertUnsortedListEqual(\n [r.title for r in results],\n [\n \"Harry Potter and the Half-Blood Prince\",\n ],\n )\n", "d_id": 16431, "documentation": { "docstring": "\n This tests that punctuation characters are treated the same\n way in both indexing and querying.\n\n See: https://github.com/wagtail/wagtail/issues/937\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 46, "language": "en" } }, { "id": 61879, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/compat.py", "file_name": "compat.py", "fun_name": "resolve", "commit_message": "upd; format", "code": "def resolve(self, s):\n \n name = s.split('.')\n used = name.pop(0)\n try:\n found = self.importer(used)\n for frag in name:\n used += '.' + frag\n try:\n found = getattr(found, frag)\n except AttributeError:\n self.importer(used)\n found = getattr(found, frag)\n return found\n except ImportError:\n e, tb = sys.exc_info()[1:]\n v = ValueError('Cannot resolve %r: %s' % (s, e))\n v.__cause__, v.__traceback__ = e, tb\n raise v\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 344, "n_words": 58, "vocab_size": 38, "complexity": 4, "nloc": 18, "token_counts": 114, "n_ast_nodes": 189, "n_identifiers": 21, "random_cut": "def resolve(self, s):\n \n name = s.split('.')\n used = name.pop(0)\n try:\n found = self.importer(used)\n for frag in name:\n used += '.' 
+ frag\n try:\n found = getattr(found, frag)\n except AttributeError:\n self.importer(used)\n ", "d_id": 12738, "documentation": { "docstring": "\n Resolve strings to objects using standard import and attribute\n syntax.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 44, "language": "en" } }, { "id": 261569, "commit_id": "2c1581c32e641e535305647eb57a1787bcf803f0", "repo": "scikit-learn", "path": "examples/ensemble/plot_gradient_boosting_oob.py", "file_name": "plot_gradient_boosting_oob.py", "fun_name": "heldout_score", "commit_message": "DOC Fix FutureWarning in ensemble/plot_gradient_boosting_oob.py (#24948)", "code": "def heldout_score(clf, X_test, y_test):\n \n score = np.zeros((n_estimators,), dtype=np.float64)\n for i, y_pred in enumerate(clf.staged_decision_function(X_test)):\n score[i] = binomial_deviance(y_test, y_pred.ravel())\n return score\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 38, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 59, "n_ast_nodes": 91, "n_identifiers": 16, "random_cut": "def heldout_score(clf, X_test, y_test):\n \n score = np.zeros((n_estimators,), dtype=np.float64)\n for i, y_pred in enumerate(clf.staged_decision_function(X_test)):\n ", "d_id": 76867, "documentation": { "docstring": "compute deviance scores on ``X_test`` and ``y_test``.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 29299, "commit_id": "d90be220d6b687d08153934a51354011a3cb5ca1", "repo": "saleor", "path": "saleor/graphql/product/tests/queries/test_product_types_query.py", "file_name": "test_product_types_query.py", "fun_name": "test_product_types_query_ids_not_exists", "commit_message": "Split test_product.py and test_variant.py into multiple files (#11173)\n\n* Split test_product.py into multiple files\r\n\r\n* Split test_variant.py into multiple files", "code": "def test_product_types_query_ids_not_exists(user_api_client, category):\n query = NOT_EXISTS_IDS_COLLECTIONS_QUERY\n variables = {\"filter\": {\"ids\": [\"fTEJRuFHU6fd2RU=\", \"2XwnQNNhwCdEjhP=\"]}}\n response = user_api_client.post_graphql(query, variables)\n content = get_graphql_content(response, ignore_errors=True)\n message_error = '{\"ids\": [{\"message\": \"Invalid ID specified.\", \"code\": \"\"}]}'\n\n assert len(content[\"errors\"]) == 1\n assert content[\"errors\"][0][\"message\"] == message_error\n assert content[\"data\"][\"productTypes\"] is None\n\n\nQUERY_FILTER_PRODUCT_TYPES = \n\n\n@pytest.mark.parametrize(\n \"search, expected_names\",\n (\n (\"\", [\"The best juices\", \"The best beers\", \"The worst beers\"]),\n (\"best\", [\"The best juices\", \"The best beers\"]),\n (\"worst\", [\"The worst beers\"]),\n (\"average\", []),\n ),\n)", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"search, expected_names\",\n (\n (\"\", [\"The best juices\", \"The best beers\", \"The worst beers\"]),\n (\"best\", [\"The best juices\", \"The best beers\"]),\n (\"worst\", [\"The worst beers\"]),\n (\"average\", []),\n ),\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 130, "n_words": 72, "vocab_size": 52, "complexity": 1, "nloc": 9, "token_counts": 81, "n_ast_nodes": 234, "n_identifiers": 17, "random_cut": "def test_product_types_query_ids_not_exists(user_api_client, category):\n query = NOT_EXISTS_IDS_COLLECTIONS_QUERY\n variables = {\"filter\": {\"ids\": 
[\"fTEJRuFHU6fd2RU=\", \"2XwnQNNhwCdEjhP=\"]}}\n response = user_api_client.post_graphql(query, variables)\n content = get_graphql_content(response, ignore_errors=True)\n message_error = '{\"ids\": [{\"message\": \"Invalid ID specified.\", \"code\": \"\"}]}'\n\n assert len(content[\"errors\"]) == 1\n assert content[\"errors\"][0][\"message\"] == message_error\n assert content[\"data\"][\"productTypes\"] is None\n\n\nQUERY_FILTER_PRODUCT_TYPES = \n\n\n@pytest.mark.parametrize(\n \"search, expected_names\",\n (\n (\"\", [\"The best juices\", \"The best beers\", \"The worst beers\"]),\n (\"best\", [\"The best juices\", \"The best beers\"]),\n (\"worst\", [\"The wor", "d_id": 5214, "documentation": { "docstring": "\n query($filters: ProductTypeFilterInput) {\n productTypes(first: 10, filter: $filters) {\n edges {\n node {\n name\n }\n }\n }\n }\n", "n_words": 17, "vocab_size": 11, "n_whitespaces": 76, "language": "en" } }, { "id": 104721, "commit_id": "0060f4c7d3f8e4fb7a3694a925ca3b7f44e1f2ea", "repo": "datasets", "path": "datasets/hans/hans.py", "file_name": "hans.py", "fun_name": "_generate_examples", "commit_message": "Make HANS dataset streamable (#4155)\n\n* Make HANS dataset streamable\r\n\r\n* Fix tags", "code": "def _generate_examples(self, filepath):\n \n for idx, line in enumerate(open(filepath, \"r\", encoding=\"utf-8\")):\n if idx == 0:\n continue # skip header\n line = line.strip()\n split_line = line.split(\"\\t\")\n # Examples not marked with a three out of five consensus are marked with\n # \"-\" and should not be used in standard evaluations.\n if split_line[0] == \"-\":\n continue\n # Works for both splits even though dev has some extra human labels.\n yield idx, {\n \"premise\": split_line[5],\n \"hypothesis\": split_line[6],\n \"label\": split_line[0],\n \"binary_parse_premise\": split_line[1],\n \"binary_parse_hypothesis\": split_line[2],\n \"parse_premise\": split_line[3],\n \"parse_hypothesis\": split_line[4],\n \"heuristic\": split_line[8],\n \"subcase\": split_line[9],\n \"template\": split_line[10],\n }\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 384, "n_words": 90, "vocab_size": 76, "complexity": 4, "nloc": 20, "token_counts": 132, "n_ast_nodes": 223, "n_identifiers": 11, "random_cut": "def _generate_examples(self, filepath):\n \n for idx, line in enumerate(open(filepath, \"r\", encoding=\"utf-8\")):\n if idx == 0:\n continue # skip header\n line = line.strip()\n split_line = line.split(\"\\t\")\n # Examples not marked with a three out of five consensus are marked with\n # \"-\" and should not be used in standard evaluations.\n if split_line[0] == \"-\":\n continue\n # Works for both splits even though dev has some extra human labels.\n yield idx, {\n \"premise\": split_line[5],\n \"hypothesis\": split_line[6],\n \"label\": split_line[0],\n \"binary_parse_premise\": split_line[1],\n \"binary_parse_hypothesis\": split_line[2],\n ", "d_id": 21945, "documentation": { "docstring": "Generate hans examples.\n\n Args:\n filepath: a string\n\n Yields:\n dictionaries containing \"premise\", \"hypothesis\" and \"label\" strings\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 54, "language": "en" } }, { "id": 288598, "commit_id": "47d0598e75487f63901931875f69f802a477df13", "repo": "core", "path": "homeassistant/components/light/__init__.py", "file_name": "__init__.py", "fun_name": "_light_internal_color_mode", "commit_message": "Use Kelvin as the preferred color 
temperature unit (#79591)\n\n* Use Kelvin as the preferred white temperature unit\r\n\r\n* Update homekit\r\n\r\n* Adjust tests", "code": "def _light_internal_color_mode(self) -> str:\n \n if (color_mode := self.color_mode) is None:\n # Backwards compatibility for color_mode added in 2021.4\n # Add warning in 2021.6, remove in 2021.10\n supported = self._light_internal_supported_color_modes\n\n if ColorMode.HS in supported and self.hs_color is not None:\n return ColorMode.HS\n if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None:\n return ColorMode.COLOR_TEMP\n if ColorMode.BRIGHTNESS in supported and self.brightness is not None:\n return ColorMode.BRIGHTNESS\n if ColorMode.ONOFF in supported:\n return ColorMode.ONOFF\n return ColorMode.UNKNOWN\n\n return color_mode\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 241, "n_words": 72, "vocab_size": 38, "complexity": 9, "nloc": 14, "token_counts": 95, "n_ast_nodes": 150, "n_identifiers": 15, "random_cut": "def _light_internal_color_mode(self) -> str:\n \n if (color_mode := self.color_mode) is None:\n # Backwards compatibility for color_mode added in 2021.4\n # Add warning in 2021.6, remove in 2021.10\n supported = self._light_internal_supported_color_modes\n\n if ColorMode.HS in supported and self.hs_color is not None:\n return ColorMode.HS\n if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None:\n return ColorMode.COLOR_TEMP", "d_id": 87754, "documentation": { "docstring": "Return the color mode of the light with backwards compatibility.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 164815, "commit_id": "1b5338e95917a8b94a9f7b2e1881442dd663c02d", "repo": "pandas", "path": "pandas/plotting/_core.py", "file_name": "_core.py", "fun_name": "kde", "commit_message": "DOC: fix URLs, formatting and typos (#45920)", "code": "def kde(self, bw_method=None, ind=None, **kwargs):\n \n return self(kind=\"kde\", bw_method=bw_method, ind=ind, **kwargs)\n\n density = kde\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 2, "token_counts": 35, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "def kde(self, bw_method=None, ind=None, **kwargs):\n \n ", "d_id": 39609, "documentation": { "docstring": "\n Generate Kernel Density Estimate plot using Gaussian kernels.\n\n In statistics, `kernel density estimation`_ (KDE) is a non-parametric\n way to estimate the probability density function (PDF) of a random\n variable. This function uses Gaussian kernels and includes automatic\n bandwidth determination.\n\n .. _kernel density estimation:\n https://en.wikipedia.org/wiki/Kernel_density_estimation\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable.\n If None (default), 'scott' is used.\n See :class:`scipy.stats.gaussian_kde` for more information.\n ind : NumPy array or int, optional\n Evaluation points for the estimated PDF. If None (default),\n 1000 equally spaced points are used. If `ind` is a NumPy array, the\n KDE is evaluated at the points passed. 
If `ind` is an integer,\n `ind` number of equally spaced points are used.\n **kwargs\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or numpy.ndarray of them\n\n See Also\n --------\n scipy.stats.gaussian_kde : Representation of a kernel-density\n estimate using Gaussian kernels. This is the function used\n internally to estimate the PDF.\n\n Examples\n --------\n Given a Series of points randomly sampled from an unknown\n distribution, estimate its PDF using KDE with automatic\n bandwidth determination and plot the results, evaluating them at\n 1000 equally spaced points (default):\n\n .. plot::\n :context: close-figs\n\n >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])\n >>> ax = s.plot.kde()\n\n A scalar bandwidth can be specified. Using a small bandwidth value can\n lead to over-fitting, while using a large bandwidth value may result\n in under-fitting:\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(bw_method=0.3)\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(bw_method=3)\n\n Finally, the `ind` parameter determines the evaluation points for the\n plot of the estimated PDF:\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])\n\n For DataFrame, it works in the same way:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],\n ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],\n ... })\n >>> ax = df.plot.kde()\n\n A scalar bandwidth can be specified. Using a small bandwidth value can\n lead to over-fitting, while using a large bandwidth value may result\n in under-fitting:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(bw_method=0.3)\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(bw_method=3)\n\n Finally, the `ind` parameter determines the evaluation points for the\n plot of the estimated PDF:\n\n .. 
plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])\n ", "n_words": 399, "vocab_size": 184, "n_whitespaces": 1083, "language": "en" } }, { "id": 26447, "commit_id": "aca6418d6c36956bc1ab530e6ef7e146ec9df90c", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py", "file_name": "fixtures.py", "fun_name": "subscription_invoice_requested_webhook", "commit_message": "Add Webhook payload via graphql subscriptions (#9394)\n\n* Add PoC of webhook subscriptions\r\n\r\n* add async webhooks subscription payloads feature\r\n\r\n* remove unneeded file\r\n\r\n* add translations subscription handling, fixes after review\r\n\r\n* remove todo\r\n\r\n* add descriptions\r\n\r\n* add descriptions, move subsrciption_payloads.py\r\n\r\n* refactor\r\n\r\n* fix imports, add changelog\r\n\r\n* check_document_is_single_subscription refactor\r\n\r\nCo-authored-by: Maciej Korycinski \r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", "code": "def subscription_invoice_requested_webhook(subscription_webhook):\n return subscription_webhook(\n INVOICE_REQUESTED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.INVOICE_REQUESTED\n )\n\n\nINVOICE_DELETED_SUBSCRIPTION_QUERY = \n\n\n@pytest.fixture", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 21, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 14, "n_ast_nodes": 36, "n_identifiers": 8, "random_cut": "def subscription_invoice_requested_webhook(subscription_webhook):\n return subscription_webhook(\n INVOICE_REQUESTED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.INVOICE_REQUESTED\n )\n\n\nINVOICE_DELETED_SUBSCRIPTION_QUERY = \n\n\n@pytest", "d_id": 5005, "documentation": { "docstring": "\n subscription{\n event{\n ...on InvoiceDeleted{\n invoice{\n id\n }\n }\n }\n }\n", "n_words": 10, "vocab_size": 7, "n_whitespaces": 69, "language": "en" } }, { "id": 191522, "commit_id": "c02eb199b6587aeeb50fbb083693572bd2f030cc", "repo": "langchain", "path": "tests/unit_tests/prompts/test_prompt.py", "file_name": "test_prompt.py", "fun_name": "test_prompt_from_examples_valid", "commit_message": "add few shot example (#148)", "code": "def test_prompt_from_examples_valid() -> None:\n \n template = \n input_variables = [\"question\"]\n example_separator = \"\\n\\n\"\n prefix = \n suffix = \n examples = [\n ,\n ,\n ]\n prompt_from_examples = PromptTemplate.from_examples(\n examples,\n suffix,\n input_variables,\n example_separator=example_separator,\n prefix=prefix,\n )\n prompt_from_template = PromptTemplate(\n input_variables=input_variables, template=template\n )\n assert prompt_from_examples.template == prompt_from_template.template\n assert prompt_from_examples.input_variables == prompt_from_template.input_variables\n\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 146, "n_words": 45, "vocab_size": 34, "complexity": 1, "nloc": 32, "token_counts": 81, "n_ast_nodes": 143, "n_identifiers": 11, "random_cut": "def test_prompt_from_examples_valid() -> None:\n \n template = \n input_variables = [\"question\"]\n example_separator = \"\\n\\n\"\n prefix = \n suffix = \n examples = [\n ,\n ,\n ]\n prompt_from_examples = PromptTemplate.from_examples(\n examples,\n suffix,\n input_variables,\n example_separator=example_separator,\n prefix=prefix,\n )\n prompt_from_template = 
PromptTemplate(\n input_variables=input_variables, template=template\n )\n assert prompt_from_examples.template == prompt_from_template.template\n assert prompt_from_examples.input_variables == prompt_from_template.input_variables\n\n", "d_id": 46647, "documentation": { "docstring": "Test prompt can be successfully constructed from examples.Test Prompt:\n\nQuestion: who are you?\nAnswer: foo\n\nQuestion: what are you?\nAnswer: bar\n\nQuestion: {question}\nAnswer:Test Prompt:Question: {question}\\nAnswer:Question: who are you?\\nAnswer: fooQuestion: what are you?\\nAnswer: bar", "n_words": 34, "vocab_size": 23, "n_whitespaces": 27, "language": "en" } }, { "id": 63105, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "compatible_platforms", "commit_message": "upd; format", "code": "def compatible_platforms(provided, required):\n \n if provided is None or required is None or provided == required:\n # easy case\n return True\n\n # Mac OS X special cases\n reqMac = macosVersionString.match(required)\n if reqMac:\n provMac = macosVersionString.match(provided)\n\n # is this a Mac package?\n if not provMac:\n # this is backwards compatibility for packages built before\n # setuptools 0.6. All packages built after this point will\n # use the new macosx designation.\n provDarwin = darwinVersionString.match(provided)\n if provDarwin:\n dversion = int(provDarwin.group(1))\n macosversion = \"%s.%s\" % (reqMac.group(1), reqMac.group(2))\n if dversion == 7 and macosversion >= \"10.3\" or \\\n dversion == 8 and macosversion >= \"10.4\":\n return True\n # egg isn't macosx or legacy darwin\n return False\n\n # are they the same major version and machine type?\n if provMac.group(1) != reqMac.group(1) or \\\n provMac.group(3) != reqMac.group(3):\n return False\n\n # is the required OS major update >= the provided one?\n if int(provMac.group(2)) > int(reqMac.group(2)):\n return False\n\n return True\n\n # XXX Linux and other platforms' special cases should go here\n return False\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 455, "n_words": 163, "vocab_size": 95, "complexity": 14, "nloc": 22, "token_counts": 168, "n_ast_nodes": 281, "n_identifiers": 13, "random_cut": "def compatible_platforms(provided, required):\n \n if provided is None or required is None or provided == required:\n # easy case\n return True\n\n # Mac OS X special cases\n reqMac = macosVersionString.match(required)\n if reqMac:\n provMac = macosVersionString.match(provided)\n\n # is this a Mac package?\n if not", "d_id": 13149, "documentation": { "docstring": "Can code for the `provided` platform run on the `required` platform?\n\n Returns true if either platform is ``None``, or the platforms are equal.\n\n XXX Needs compatibility checks for Linux and other unixy OSes.\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 42, "language": "en" } }, { "id": 176590, "commit_id": "ec2e239764c92adf3b1abcf12817198a878d8772", "repo": "networkx", "path": "networkx/algorithms/shortest_paths/weighted.py", "file_name": "weighted.py", "fun_name": "find_negative_cycle", "commit_message": "Corrected the documentation of find_negative_cycle() solving issue #5610 (#5613)\n\n* issue\r\n\r\n* Update branchings.py\r\n\r\n* Update weakly_connected.py", "code": "def find_negative_cycle(G, 
source, weight=\"weight\"):\n \n weight = _weight_function(G, weight)\n pred = {source: []}\n\n v = _inner_bellman_ford(G, [source], weight, pred=pred)\n if v is None:\n raise nx.NetworkXError(\"No negative cycles detected.\")\n\n # negative cycle detected... find it\n neg_cycle = []\n stack = [(v, list(pred[v]))]\n seen = {v}\n while stack:\n node, preds = stack[-1]\n if v in preds:\n # found the cycle\n neg_cycle.extend([node, v])\n neg_cycle = list(reversed(neg_cycle))\n return neg_cycle\n\n if preds:\n nbr = preds.pop()\n if nbr not in seen:\n stack.append((nbr, list(pred[nbr])))\n neg_cycle.append(node)\n seen.add(nbr)\n else:\n stack.pop()\n if neg_cycle:\n neg_cycle.pop()\n else:\n if v in G[v] and weight(G, v, v) < 0:\n return [v, v]\n # should not reach here\n raise nx.NetworkXError(\"Negative cycle is detected but not found\")\n # should not get here...\n msg = \"negative cycle detected but not identified\"\n raise nx.NetworkXUnbounded(msg)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 422, "n_words": 125, "vocab_size": 83, "complexity": 9, "nloc": 31, "token_counts": 221, "n_ast_nodes": 358, "n_identifiers": 24, "random_cut": "def find_negative_cycle(G, source, weight=\"weight\"):\n \n weight = _weight_function(G, weight)\n pred = {source: []}\n\n v = _inner_bellman_ford(G, [source], weight, pred=pred)\n if v is None:\n raise nx.NetworkXError(\"No negative cycles detected.\")\n\n # negative cycle detected... find it\n neg_cycle = []\n stack = [(v, list(pred[v]))]\n seen = {v}\n while stack:\n node, preds = stack[-1]\n if v in preds:\n # found the cycle\n neg_cycle.extend([node, v])\n neg_cycle = list(reversed(neg_cycle))\n return neg_cycle\n\n if preds:\n nbr = preds.pop()\n if nbr not in seen:\n stack.append((nbr, list(pred[nbr])))\n neg_cycle.append(node)\n seen.add(nbr)\n else:\n stack.pop()\n if neg_cycle:\n neg_c", "d_id": 41989, "documentation": { "docstring": "Returns a cycle with negative total weight if it exists.\n\n Bellman-Ford is used to find shortest_paths. That algorithm\n stops if there exists a negative cycle. This algorithm\n picks up from there and returns the found negative cycle.\n\n The cycle consists of a list of nodes in the cycle order. The last\n node equals the first to make it a cycle.\n You can look up the edge weights in the original graph. In the case\n of multigraphs the relevant edge is the minimal weight edge between\n the nodes in the 2-tuple.\n\n If the graph has no negative cycle, a NetworkXError is raised.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source: node label\n The search for the negative cycle will start from this node.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. 
The function must\n return a number.\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)])\n >>> nx.find_negative_cycle(G, 0)\n [4, 0, 1, 4]\n\n Returns\n -------\n cycle : list\n A list of nodes in the order of the cycle found. The last node\n equals the first to indicate a cycle.\n\n Raises\n ------\n NetworkXError\n If no negative cycle is found.\n ", "n_words": 285, "vocab_size": 144, "n_whitespaces": 464, "language": "en" } }, { "id": 243427, "commit_id": "279ddf4ce6c76498ac29df2552a3023b9aaa76c1", "repo": "Pillow", "path": "src/PIL/ImageOps.py", "file_name": "ImageOps.py", "fun_name": "expand", "commit_message": "Use getpalette() in ImageOps", "code": "def expand(image, border=0, fill=0):\n \n left, top, right, bottom = _border(border)\n width = left + image.size[0] + right\n height = top + image.size[1] + bottom\n color = _color(fill, image.mode)\n if image.mode == \"P\" and image.palette:\n palette = ImagePalette.ImagePalette(palette=image.getpalette())\n if isinstance(color, tuple):\n color = palette.getcolor(color)\n else:\n palette = None\n out = Image.new(image.mode, (width, height), color)\n if palette:\n out.putpalette(palette.palette)\n out.paste(image, (left, top))\n return out\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 133, "n_words": 61, "vocab_size": 45, "complexity": 5, "nloc": 16, "token_counts": 149, "n_ast_nodes": 230, "n_identifiers": 26, "random_cut": "def expand(image, border=0, fill=0):\n \n left, top, right, bottom = _border(border)\n width = left + image.size[0] + right\n height = top + image.size[1] + bottom\n color = _color(fill, ", "d_id": 70030, "documentation": { "docstring": "\n Add border to the image\n\n :param image: The image to expand.\n :param border: Border width, in pixels.\n :param fill: Pixel fill value (a color value). 
Default is 0 (black).\n :return: An image.\n ", "n_words": 32, "vocab_size": 28, "n_whitespaces": 52, "language": "en" } }, { "id": 86210, "commit_id": "210295c5ed1d7286ae808b15d14f6e83356af16e", "repo": "sentry", "path": "tests/sentry/integrations/slack/notifications/test_issue_alert.py", "file_name": "test_issue_alert.py", "fun_name": "test_digest_enabled", "commit_message": "feat(workflow): Set project ownership fallthrough default false (#39305)", "code": "def test_digest_enabled(self, digests, mock_func):\n \n backend = RedisBackend()\n digests.digest = backend.digest\n digests.enabled.return_value = True\n\n rule = Rule.objects.create(project=self.project, label=\"my rule\")\n ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)\n event = self.store_event(\n data={\"message\": \"Hello world\", \"level\": \"error\"}, project_id=self.project.id\n )\n key = f\"mail:p:{self.project.id}\"\n backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)\n\n with self.tasks():\n deliver_digest(key)\n\n attachment, text = get_attachment()\n\n assert attachment[\"title\"] == \"Hello world\"\n assert attachment[\"text\"] == \"\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 174, "n_words": 54, "vocab_size": 45, "complexity": 1, "nloc": 16, "token_counts": 150, "n_ast_nodes": 260, "n_identifiers": 32, "random_cut": "def test_digest_enabled(self, digests, mock_func):\n \n backend = RedisBackend()\n digests.digest = backend.digest\n digests.enabled.return_value = True\n\n rule = Rule.objects.create(project=self.project, label=\"my rule\")\n ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)\n event = self.store_event(\n data={\"message\": \"Hello world\", \"level\": \"error\"}, project_id=self.project.id\n )\n key = f\"mail:p:{self.project.id}\"\n backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)\n\n with self.tasks():\n deliver_digest(key)\n\n attachment, text = get_attachment()\n\n assert attachment[\"title\"] == \"Hello world\"\n assert attachment[\"text\"] == \"\"\n", "d_id": 18085, "documentation": { "docstring": "\n Test that with digests enabled, but Slack notification settings\n (and not email settings), we send a Slack notification\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 195738, "commit_id": "bf1cb469061d7ad07bfbf687f5635d9f4ec569dd", "repo": "sympy", "path": "sympy/physics/control/control_plots.py", "file_name": "control_plots.py", "fun_name": "pole_zero_numerical_data", "commit_message": "Allow complex transfer functions in pole-zero plot", "code": "def pole_zero_numerical_data(system):\n \n _check_system(system)\n system = system.doit() # Get the equivalent TransferFunction object.\n\n num_poly = Poly(system.num, system.var).all_coeffs()\n den_poly = Poly(system.den, system.var).all_coeffs()\n\n num_poly = np.array(num_poly, dtype=np.complex128)\n den_poly = np.array(den_poly, dtype=np.complex128)\n\n zeros = np.roots(num_poly)\n poles = np.roots(den_poly)\n\n return zeros, poles\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 37, "vocab_size": 26, "complexity": 1, "nloc": 10, "token_counts": 97, "n_ast_nodes": 157, "n_identifiers": 18, "random_cut": "def pole_zero_numerical_data(system):\n \n _check_system(system)\n 
system = system.doit() # Get the equivalent TransferFunction object.\n\n num_poly = Poly(system.num, system.var).all_coeffs()\n den_poly = Poly(system.den, system.var).all_coeffs()\n\n num_poly = np.array(num_poly, dtype=np.complex128)\n den_poly = np.array(den_poly, dtype=np.complex128)\n\n zeros = np.roots(num_poly)\n poles = np.roots(", "d_id": 47393, "documentation": { "docstring": "\n Returns the numerical data of poles and zeros of the system.\n It is internally used by ``pole_zero_plot`` to get the data\n for plotting poles and zeros. Users can use this data to further\n analyse the dynamics of the system or plot using a different\n backend/plotting-module.\n\n Parameters\n ==========\n\n system : SISOLinearTimeInvariant\n The system for which the pole-zero data is to be computed.\n\n Returns\n =======\n\n tuple : (zeros, poles)\n zeros = Zeros of the system. NumPy array of complex numbers.\n poles = Poles of the system. NumPy array of complex numbers.\n\n Raises\n ======\n\n NotImplementedError\n When a SISO LTI system is not passed.\n\n When time delay terms are present in the system.\n\n ValueError\n When more than one free symbol is present in the system.\n The only variable in the transfer function should be\n the variable of the Laplace transform.\n\n Examples\n ========\n\n >>> from sympy.abc import s\n >>> from sympy.physics.control.lti import TransferFunction\n >>> from sympy.physics.control.control_plots import pole_zero_numerical_data\n >>> tf1 = TransferFunction(s**2 + 1, s**4 + 4*s**3 + 6*s**2 + 5*s + 2, s)\n >>> pole_zero_numerical_data(tf1) # doctest: +SKIP\n ([-0.+1.j 0.-1.j], [-2. +0.j -0.5+0.8660254j -0.5-0.8660254j -1. +0.j ])\n\n See Also\n ========\n\n pole_zero_plot\n\n ", "n_words": 187, "vocab_size": 117, "n_whitespaces": 341, "language": "en" } }, { "id": 222834, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/cygwinccompiler.py", "file_name": "cygwinccompiler.py", "fun_name": "get_versions", "commit_message": "add python 3.10.4 for windows", "code": "def get_versions():\n \n commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']\n return tuple([_find_exe_version(cmd) for cmd in commands])\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 25, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 3, "token_counts": 28, "n_ast_nodes": 51, "n_identifiers": 5, "random_cut": "def get_versions():\n \n commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']\n return t", "d_id": 56767, "documentation": { "docstring": " Try to find out the versions of gcc, ld and dllwrap.\n\n If not possible it returns None for it.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 26, "language": "en" } }, { "id": 215170, "commit_id": "4e3632254fb73210ce3e1954ec507473433018b8", "repo": "salt", "path": "salt/beacons/napalm_beacon.py", "file_name": "napalm_beacon.py", "fun_name": "__virtual__", "commit_message": "Align enhanced logging accross beacons", "code": "def __virtual__():\n \n if salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):\n return __virtualname__\n else:\n err_msg = \"NAPALM is not installed.\"\n log.error(\"Unable to load %s beacon: %s\", __virtualname__, err_msg)\n return False, err_msg\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 26, "vocab_size": 23, 
"complexity": 2, "nloc": 7, "token_counts": 42, "n_ast_nodes": 71, "n_identifiers": 11, "random_cut": "def __virtual__():\n \n if salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):\n return __virtualname__\n else:\n err_msg = \"NAPALM is not installed.\"\n log.error(\"Unable to load %s beacon: %s\", __virtualname__, err_msg)", "d_id": 53868, "documentation": { "docstring": "\n This beacon can only work when running under a regular or a proxy minion, managed through napalm.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 24, "language": "en" } }, { "id": 337188, "commit_id": "2a0c823527694058d410ed6f91b52e7dd9f94ebe", "repo": "diffusers", "path": "examples/community/lpw_stable_diffusion.py", "file_name": "lpw_stable_diffusion.py", "fun_name": "parse_prompt_attention", "commit_message": "[Community Pipelines] Long Prompt Weighting Stable Diffusion Pipelines (#907)\n\n* [Community Pipelines] Long Prompt Weighting\r\n\r\n* Update README.md\r\n\r\n* fix\r\n\r\n* style\r\n\r\n* fix style\r\n\r\n* Update examples/community/README.md\r\n\r\nCo-authored-by: Patrick von Platen ", "code": "def parse_prompt_attention(text):\n \n\n res = []\n round_brackets = []\n square_brackets = []\n\n round_bracket_multiplier = 1.1\n square_bracket_multiplier = 1 / 1.1\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 37, "n_words": 19, "vocab_size": 12, "complexity": 16, "nloc": 38, "token_counts": 299, "n_ast_nodes": 49, "n_identifiers": 7, "random_cut": "def parse_prompt_attention(text):\n \n\n res = []\n round_brackets = []\n square_brackets = []\n\n round_bracket_multiplier = 1.1\n square_bracket_multiplier = 1 / 1.1\n", "d_id": 120971, "documentation": { "docstring": "\n Parses a string with attention tokens and returns a list of pairs: text and its assoicated weight.\n Accepted tokens are:\n (abc) - increases attention to abc by a multiplier of 1.1\n (abc:3.12) - increases attention to abc by a multiplier of 3.12\n [abc] - decreases attention to abc by a multiplier of 1.1\n \\( - literal character '('\n \\[ - literal character '['\n \\) - literal character ')'\n \\] - literal character ']'\n \\\\ - literal character '\\'\n anything else - just text\n >>> parse_prompt_attention('normal text')\n [['normal text', 1.0]]\n >>> parse_prompt_attention('an (important) word')\n [['an ', 1.0], ['important', 1.1], [' word', 1.0]]\n >>> parse_prompt_attention('(unbalanced')\n [['unbalanced', 1.1]]\n >>> parse_prompt_attention('\\(literal\\]')\n [['(literal]', 1.0]]\n >>> parse_prompt_attention('(unnecessary)(parens)')\n [['unnecessaryparens', 1.1]]\n >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')\n [['a ', 1.0],\n ['house', 1.5730000000000004],\n [' ', 1.1],\n ['on', 1.0],\n [' a ', 1.1],\n ['hill', 0.55],\n [', sun, ', 1.1],\n ['sky', 1.4641000000000006],\n ['.', 1.1]]\n ", "n_words": 145, "vocab_size": 83, "n_whitespaces": 268, "language": "en" } }, { "id": 266879, "commit_id": "8b2e6285650ec42ec4a19075a8567047e8304ba2", "repo": "ansible", "path": "lib/ansible/galaxy/dependency_resolution/providers.py", "file_name": "providers.py", "fun_name": "get_dependencies", "commit_message": "galaxy - Clean up type hints and imports.", "code": "def get_dependencies(self, candidate):\n # type: (Candidate) -> list[Candidate]\n r\n # FIXME: If there's several galaxy servers set, there may be a\n # FIXME: situation when the metadata of the same collection\n # FIXME: differs. 
So how do we resolve this case? Priority?\n # FIXME: Taking into account a pinned hash? Exploding on\n # FIXME: any differences?\n # NOTE: The underlying implmentation currently uses first found\n req_map = self._api_proxy.get_collection_dependencies(candidate)\n\n # NOTE: This guard expression MUST perform an early exit only\n # NOTE: after the `get_collection_dependencies()` call because\n # NOTE: internally it polulates the artifact URL of the candidate,\n # NOTE: its SHA hash and the Galaxy API token. These are still\n # NOTE: necessary with `--no-deps` because even with the disabled\n # NOTE: dependency resolution the outer layer will still need to\n # NOTE: know how to download and validate the artifact.\n #\n # NOTE: Virtual candidates should always return dependencies\n # NOTE: because they are ephemeral and non-installable.\n if not self._with_deps and not candidate.is_virtual:\n return []\n\n return [\n self._make_req_from_dict({'name': dep_name, 'version': dep_req})\n for dep_name, dep_req in req_map.items()\n ]\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 364, "n_words": 178, "vocab_size": 125, "complexity": 4, "nloc": 13, "token_counts": 60, "n_ast_nodes": 115, "n_identifiers": 12, "random_cut": "def get_dependencies(self, candidate):\n # type: (Candidate) -> list[Candidate]\n r\n # FIXME: If there's several galaxy servers set, there may be a\n # FIXME: situation when the metadata of the same collection\n # FIXME: differs. So how do we resolve this case? Priority?\n # FIXME: Taking into account a pinned hash? Exploding on\n # FIXME: any differences?\n # NOTE: The underlying implmentation currently uses first found\n req_map = self._api_proxy.get_collection_dependencies(candidate)\n\n # NOTE: This guard expression MUST perform an early exit only\n # NOTE: after the `get_collectio", "d_id": 78638, "documentation": { "docstring": "Get direct dependencies of a candidate.\n\n :returns: A collection of requirements that `candidate` \\\n specifies as its dependencies.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 49, "language": "en" } }, { "id": 246153, "commit_id": "901b264c0c88f39cbfb8b2229e0dc57968882658", "repo": "synapse", "path": "tests/rest/admin/test_user.py", "file_name": "test_user.py", "fun_name": "test_set_displayname", "commit_message": "Add type hints to `tests/rest/admin` (#11851)", "code": "def test_set_displayname(self) -> None:\n \n\n # Modify user\n channel = self.make_request(\n \"PUT\",\n self.url_other_user,\n access_token=self.admin_user_tok,\n content={\"displayname\": \"foobar\"},\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertEqual(\"@user:test\", channel.json_body[\"name\"])\n self.assertEqual(\"foobar\", channel.json_body[\"displayname\"])\n\n # Get user\n channel = self.make_request(\n \"GET\",\n self.url_other_user,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertEqual(\"@user:test\", channel.json_body[\"name\"])\n self.assertEqual(\"foobar\", channel.json_body[\"displayname\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 208, "n_words": 40, "vocab_size": 25, "complexity": 1, "nloc": 21, "token_counts": 142, "n_ast_nodes": 235, "n_identifiers": 14, "random_cut": "def test_set_displayname(self) -> None:\n \n\n # Modify 
user\n channel = self.make_request(\n \"PUT\",\n self.url_other_user,\n access_token=self.admin_user_tok,\n content={\"displayname\": \"foobar\"},\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertEqual(\"@user:test\", channel.json_body[\"name", "d_id": 71047, "documentation": { "docstring": "\n Test setting the displayname of another user.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 321664, "commit_id": "e5340c449f23608803c286da0563b62f58ba25b0", "repo": "qutebrowser", "path": "qutebrowser/browser/webkit/network/networkmanager.py", "file_name": "networkmanager.py", "fun_name": "on_ssl_errors", "commit_message": "Refactor certificate error handling\n\n- Make CertificateErrorWrapper responsible for accepting/rejecting certs\n- Try to avoid dealing with unclear booleans\n- Implement support for deferred errors (#4616) - disabled due to PyQt bug\n- Implement support for Qt 6 API (#7086)", "code": "def on_ssl_errors(self, reply, qt_errors):\n \n errors = certificateerror.CertificateErrorWrapper(reply, qt_errors)\n log.network.debug(\"Certificate errors: {!r}\".format(errors))\n try:\n host_tpl: Optional[urlutils.HostTupleType] = urlutils.host_tuple(\n reply.url())\n except ValueError:\n host_tpl = None\n is_accepted = False\n is_rejected = False\n else:\n assert host_tpl is not None\n is_accepted = errors in self._accepted_ssl_errors[host_tpl]\n is_rejected = errors in self._rejected_ssl_errors[host_tpl]\n\n log.network.debug(\"Already accepted: {} / \"\n \"rejected {}\".format(is_accepted, is_rejected))\n\n if is_rejected:\n return\n elif is_accepted:\n reply.ignoreSslErrors()\n return\n\n abort_on = self._get_abort_signals(reply)\n\n tab = self._get_tab()\n first_party_url = QUrl() if tab is None else tab.data.last_navigation.url\n\n shared.handle_certificate_error(\n request_url=reply.url(),\n first_party_url=first_party_url,\n error=errors,\n abort_on=abort_on,\n )\n\n if errors.certificate_was_accepted():\n if host_tpl is not None:\n self._accepted_ssl_errors[host_tpl].add(errors)\n elif host_tpl is not None:\n self._rejected_ssl_errors[host_tpl].add(errors)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 437, "n_words": 94, "vocab_size": 62, "complexity": 8, "nloc": 35, "token_counts": 220, "n_ast_nodes": 353, "n_identifiers": 37, "random_cut": "def on_ssl_errors(self, reply, qt_errors):\n \n", "d_id": 117847, "documentation": { "docstring": "Decide if SSL errors should be ignored or not.\n\n This slot is called on SSL/TLS errors by the self.sslErrors signal.\n\n Args:\n reply: The QNetworkReply that is encountering the errors.\n qt_errors: A list of errors.\n ", "n_words": 34, "vocab_size": 30, "n_whitespaces": 77, "language": "en" } }, { "id": 171627, "commit_id": "e2df99823758210fb2b7c4aba39e23f3445f7cd3", "repo": "pandas", "path": "pandas/_version.py", "file_name": "_version.py", "fun_name": "render_pep440", "commit_message": "BLD: use nonvendor versioneer (#49924)\n\n* BLD: remove vendored versioneer\r\n\r\n* run vis\r\n\r\n* move config to pyproject.toml\r\n\r\n* add versioneer to deps\r\n\r\n* run pyupgrade\r\n\r\n* fix isort and pylint\r\n\r\n* fix ci\r\n\r\n* fix env", "code": "def render_pep440(pieces):\n \n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += 
f\"{pieces['distance']}.g{pieces['short']}\"\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = f\"0+untagged.{pieces['distance']}.g{pieces['short']}\"\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 142, "n_words": 36, "vocab_size": 20, "complexity": 6, "nloc": 13, "token_counts": 65, "n_ast_nodes": 163, "n_identifiers": 4, "random_cut": "def render_pep440(pieces):\n \n i", "d_id": 40694, "documentation": { "docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n ", "n_words": 37, "vocab_size": 35, "n_whitespaces": 52, "language": "en" } }, { "id": 202099, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/cache/tests_async.py", "file_name": "tests_async.py", "fun_name": "test_aset_many", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "async def test_aset_many(self):\n \n self.assertEqual(await cache.aset_many({\"a\": 1, \"b\": 2}), [])\n self.assertEqual(\n await cache.aset_many({\"a\": 1, \"b\": 2}, timeout=2, version=\"1\"),\n [],\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 69, "n_words": 19, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 61, "n_ast_nodes": 105, "n_identifiers": 7, "random_cut": "async def test_aset_many(self):\n \n self.assertEqual(await cache.aset_many({\"a\": 1, \"b\": 2}), [])\n self.assert", "d_id": 50052, "documentation": { "docstring": "aset_many() does nothing for the dummy cache backend.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 201852, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/bash_completion/tests.py", "file_name": "tests.py", "fun_name": "_user_input", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _user_input(self, input_str):\n \n os.environ[\"COMP_WORDS\"] = input_str\n idx = len(input_str.split(\" \")) - 1 # Index of the last word\n comp_cword = idx + 1 if input_str.endswith(\" \") else idx\n os.environ[\"COMP_CWORD\"] = str(comp_cword)\n sys.argv = input_str.split()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 77, "n_words": 34, "vocab_size": 27, "complexity": 2, "nloc": 6, "token_counts": 63, "n_ast_nodes": 110, "n_identifiers": 13, "random_cut": "def _user_input(self, input_str):\n \n os.environ[\"COMP_WORDS\"] = input_str\n idx = ", "d_id": 50013, "documentation": { "docstring": "\n Set the environment and the list of command line arguments.\n\n This sets the bash variables $COMP_WORDS and $COMP_CWORD. 
The former is\n an array consisting of the individual words in the current command\n line, the latter is the index of the current cursor position, so in\n case a word is completed and the cursor is placed after a whitespace,\n $COMP_CWORD must be incremented by 1:\n\n * 'django-admin start' -> COMP_CWORD=1\n * 'django-admin startproject' -> COMP_CWORD=1\n * 'django-admin startproject ' -> COMP_CWORD=2\n ", "n_words": 80, "vocab_size": 53, "n_whitespaces": 157, "language": "en" } }, { "id": 118671, "commit_id": "dd9084523e365e637443ea351eaaaa25f52d8412", "repo": "streamlit", "path": "lib/streamlit/config.py", "file_name": "config.py", "fun_name": "_check_conflicts", "commit_message": "Report sharing removal (#4260)\n\nThe report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. This commit removes that code to make the remaining code easier to navigate and understand.", "code": "def _check_conflicts() -> None:\n # Node-related conflicts\n\n # When using the Node server, we must always connect to 8501 (this is\n # hard-coded in JS). Otherwise, the browser would decide what port to\n # connect to based on window.location.port, which in dev is going to\n # be (3000)\n\n # Import logger locally to prevent circular references\n from streamlit.logger import get_logger\n\n LOGGER = get_logger(__name__)\n\n if get_option(\"global.developmentMode\"):\n assert _is_unset(\n \"server.port\"\n ), \"server.port does not work when global.developmentMode is true.\"\n\n assert _is_unset(\"browser.serverPort\"), (\n \"browser.serverPort does not work when global.developmentMode is \" \"true.\"\n )\n\n # XSRF conflicts\n if get_option(\"server.enableXsrfProtection\"):\n if not get_option(\"server.enableCORS\") or get_option(\"global.developmentMode\"):\n LOGGER.warning(\n \n )\n\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 229, "n_words": 102, "vocab_size": 74, "complexity": 5, "nloc": 25, "token_counts": 65, "n_ast_nodes": 132, "n_identifiers": 9, "random_cut": "def _check_conflicts() -> None:\n # Node-related conflicts\n\n # When using the Node server, we must always connect to 8501 (this is\n # hard-coded in JS). 
Otherwise, the browser would decide what port to\n # connect to based on window.location.port, which in dev is going to\n # be (3000)\n\n # Import logger locally to prevent circular references\n f", "d_id": 26352, "documentation": { "docstring": "\nWarning: the config option 'server.enableCORS=false' is not compatible with 'server.enableXsrfProtection=true'.\nAs a result, 'server.enableCORS' is being overridden to 'true'.\n\nMore information:\nIn order to protect against CSRF attacks, we send a cookie with each request.\nTo do so, we must specify allowable origins, which places a restriction on\ncross-origin resource sharing.\n\nIf cross origin resource sharing is required, please disable server.enableXsrfProtection.\n ", "n_words": 61, "vocab_size": 53, "n_whitespaces": 66, "language": "en" } }, { "id": 104785, "commit_id": "1904d0c0a3a96330d9b870cdca3e9a3a137f2977", "repo": "datasets", "path": "src/datasets/dataset_dict.py", "file_name": "dataset_dict.py", "fun_name": "num_columns", "commit_message": "Add code examples for DatasetDict (#4245)\n\n* 📝 add code examples for DatasetDict\r\n\r\n* 🖍 apply quentin review", "code": "def num_columns(self) -> Dict[str, int]:\n \n self._check_values_type()\n return {k: dataset.num_columns for k, dataset in self.items()}\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 14, "token_counts": 36, "n_ast_nodes": 58, "n_identifiers": 9, "random_cut": "def num_columns(self) -> Dict[str, int]:\n \n self._check_values_type()\n return {k: dataset.num_columns for k, datase", "d_id": 21967, "documentation": { "docstring": "Number of columns in each split of the dataset.\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\")\n >>> ds.num_columns\n {'test': 2, 'train': 2, 'validation': 2}\n ```\n ", "n_words": 29, "vocab_size": 25, "n_whitespaces": 85, "language": "en" } }, { "id": 156162, "commit_id": "4e5dfe7463028a39a90e026c7fb9220969093ab3", "repo": "dask", "path": "dask/bag/random.py", "file_name": "random.py", "fun_name": "_sample_with_replacement_map_partitions", "commit_message": "Bag: add implementation for reservoir sampling (#7068) (#7636)\n\n - Implement the [L algorithm](https://en.wikipedia.org/wiki/Reservoir_sampling#An_optimal_algorithm) for reservoir sampling without replacement. 
\r\n - Use the **k** reservoir of size 1 strategy for sampling with replacement (see [reference](http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf)) of **k** items", "code": "def _sample_with_replacement_map_partitions(population, k):\n \n\n stream = iter(population)\n e = next(stream)\n reservoir, stream_length = [e for _ in range(k)], 1\n\n w = [rnd.random() for _ in range(k)]\n nxt = [_geometric(wi) for wi in w]\n min_nxt = min(nxt)\n\n for i, e in enumerate(stream, 1):\n if i == min_nxt:\n for j, n in enumerate(nxt):\n if n == min_nxt:\n reservoir[j] = e\n w[j] *= rnd.random()\n nxt[j] += _geometric(w[j])\n min_nxt = min(nxt)\n\n stream_length += 1\n\n return reservoir, stream_length\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 207, "n_words": 72, "vocab_size": 43, "complexity": 8, "nloc": 17, "token_counts": 144, "n_ast_nodes": 224, "n_identifiers": 23, "random_cut": "def _sample_with_replacement_map_partitions(population, k):\n \n\n stream = iter(population)\n e = next(stream)\n reservoir, stream_length = [e for _ in range(k)], 1\n\n w = [rnd.random() for _ in range(k)]\n nxt = [_geometric(wi) for wi in w]\n min_nxt = min(nxt)\n\n for i, e in enumerate(stream, 1):\n if i == min_nxt:\n for j, n in enumerate(nxt):\n if n == min_nxt:\n reservoir[j] = e\n w[j] *= rnd.random()\n nxt[j] += _geometric(w[j])\n min_nxt = min(nxt)\n\n ", "d_id": 36586, "documentation": { "docstring": "\n Reservoir sampling with replacement, the main idea is to use k reservoirs of size 1\n See Section Applications in http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 30, "language": "en" } }, { "id": 175300, "commit_id": "acf7403f9baea3ae1119fc6b4a3298522188bf96", "repo": "cpython", "path": "Lib/enum.py", "file_name": "enum.py", "fun_name": "__setattr__", "commit_message": "bpo-40066: [Enum] update str() and format() output (GH-30582)\n\nUndo rejected PEP-663 changes:\r\n\r\n- restore `repr()` to its 3.10 status\r\n- restore `str()` to its 3.10 status\r\n\r\nNew changes:\r\n\r\n- `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result\r\n- zero-valued flags without a name have a slightly changed repr(), e.g. 
`repr(Color(0)) == ''`\r\n- update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type\r\n- added `_numeric_repr_` to `Flag` to control display of unnamed values\r\n- enums without doc strings have a more comprehensive doc string added\r\n- `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`", "code": "def __setattr__(cls, name, value):\n \n member_map = cls.__dict__.get('_member_map_', {})\n if name in member_map:\n raise AttributeError('cannot reassign member %r' % (name, ))\n super().__setattr__(name, value)\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 61, "n_words": 22, "vocab_size": 22, "complexity": 2, "nloc": 5, "token_counts": 48, "n_ast_nodes": 80, "n_identifiers": 9, "random_cut": "def __setattr__(cls, name, value):\n \n member_map = cls.__dict__.get('_member_map_', {})\n if name in member_map:\n raise AttributeError('cannot reassign member %r' % (name, ))\n super().__s", "d_id": 41589, "documentation": { "docstring": "\n Block attempts to reassign Enum members.\n\n A simple assignment to the class namespace only changes one of the\n several possible ways to get an Enum member from the Enum class,\n resulting in an inconsistent Enumeration.\n ", "n_words": 35, "vocab_size": 28, "n_whitespaces": 71, "language": "en" } }, { "id": 221633, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/configparser.py", "file_name": "configparser.py", "fun_name": "read_file", "commit_message": "add python 3.10.4 for windows", "code": "def read_file(self, f, source=None):\n \n if source is None:\n try:\n source = f.name\n except AttributeError:\n source = ''\n self._read(f, source)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 92, "n_words": 19, "vocab_size": 16, "complexity": 3, "nloc": 7, "token_counts": 38, "n_ast_nodes": 64, "n_identifiers": 7, "random_cut": "def read_file(self, f, source=None):\n \n if source is None:\n try:\n source = f.name\n except AttributeError:\n ", "d_id": 56453, "documentation": { "docstring": "Like read() but the argument must be a file-like object.\n\n The `f' argument must be iterable, returning one line at a time.\n Optional second argument is the `source' specifying the name of the\n file being read. If not given, it is taken from f.name. 
If `f' has no\n `name' attribute, `' is used.\n ", "n_words": 53, "vocab_size": 41, "n_whitespaces": 88, "language": "en" } }, { "id": 281449, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/defi/substack_model.py", "file_name": "substack_model.py", "fun_name": "get_newsletters", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def get_newsletters() -> pd.DataFrame:\n \n\n urls = [\n \"https://defiweekly.substack.com/archive\",\n \"https://newsletter.thedefiant.io/archive\",\n \"https://thedailygwei.substack.com/archive\",\n \"https://todayindefi.substack.com/archive\",\n \"https://newsletter.banklesshq.com/archive\",\n \"https://defislate.substack.com/archive\",\n ]\n\n threads = len(urls)\n newsletters = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:\n for newsletter in executor.map(scrape_substack, urls):\n try:\n newsletters.append(pd.DataFrame(newsletter))\n except KeyError as e:\n console.print(e, \"\\n\")\n continue\n\n df = pd.concat(newsletters, ignore_index=True)\n df.columns = 
[\"Title\", \"Link\", \"Date\"]\n\n df[\"Title\"] = df[\"Title\"].apply(lambda x: \"\".join(i for i in x if ord(i) < 128))\n df[\"Date\"] = df[\"Date\"].apply(\n lambda x: parser.parse(x).strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n df[\"Title\"] = df[\"Title\"].apply(\n lambda x: \"\\n\".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x\n )\n return (\n df[[\"Title\", \"Date\", \"Link\"]]\n .sort_values(by=\"Date\", ascending=False)\n .reset_index(drop=\"index\")\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 286, "n_words": 90, "vocab_size": 72, "complexity": 6, "nloc": 40, "token_counts": 242, "n_ast_nodes": 419, "n_identifiers": 42, "random_cut": "def get_newsletters() -> pd.DataFrame:\n \n\n urls = [\n \"https://defiweekly.substack.com/archive\",\n \"https://newsletter.thedefiant.io/archive\",\n \"https://thedailygwei.substack.com/archive\",\n \"https://todayindefi.substack.com/archive\",\n \"https://newsletter.banklesshq.com/archive\",\n \"https://defislate.substack.com/archive\",\n ]\n\n threads = len(urls)\n newsletters = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:\n for newsletter in executor.map(scrape_substack, urls):\n try:\n newsletters.append(pd.DataFrame(newsletter))\n except KeyError as e:\n console.print(e, \"\\n\")\n continue\n\n df = pd.concat(newsletters, ignore_index=True)\n df.columns = [\"Title\", \"Link\", \"Date\"]\n\n df[\"Title\"] = df[\"Title\"].apply(lambda x: \"\".join(i for i in x if ord(i) < 128))\n df[\"Date\"] = df[\"Date\"].apply(\n lambda x: parser.parse(x).strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n df[\"Title\"] = df[\"Title\"].apply(\n lambda x: \"\\n\".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x\n ", "d_id": 83766, "documentation": { "docstring": "Scrape all substack newsletters from url list.\n [Source: substack.com]\n\n Returns\n -------\n pd.DataFrame\n DataFrame with recent news from most popular DeFi related newsletters.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 44, "language": "en" } }, { "id": 2712, "commit_id": "e272ed2fa4c58e0a89e273a3e85da7d13a85e04c", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/action/get_enum_attribute_action.py", "file_name": "get_enum_attribute_action.py", "fun_name": "_object2proto", "commit_message": "[syft.core.node.common.action] Change syft import absolute -> relative", "code": "def _object2proto(self) -> GetEnumAttributeAction_PB:\n \n\n return GetEnumAttributeAction_PB(\n path=self.path,\n id_at_location=serialize(self.id_at_location),\n address=serialize(self.address),\n msg_id=serialize(self.id),\n )\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 18, "token_counts": 45, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "def _object2proto(self) -> GetEnumAttributeAction_PB:\n \n\n return GetEnumAttri", "d_id": 343, "documentation": { "docstring": "Returns a protobuf serialization of self.\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n :return: returns a protobuf object\n :rtype: GetOrSetPropertyAction_PB\n .. note::\n This method is purely an internal method. 
Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "n_words": 68, "vocab_size": 56, "n_whitespaces": 150, "language": "en" } }, { "id": 309217, "commit_id": "a672dc3437b95734e44cb3f61b3f3c299627bb1a", "repo": "core", "path": "tests/components/seventeentrack/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "test_becomes_delivered_not_shown_notification", "commit_message": "Import persistent notification (part 4) (#63901)", "code": "async def test_becomes_delivered_not_shown_notification(hass):\n \n package = Package(\n tracking_number=\"456\",\n destination_country=206,\n friendly_name=\"friendly name 1\",\n info_text=\"info text 1\",\n location=\"location 1\",\n timestamp=\"2020-08-10 10:32\",\n origin_country=206,\n package_type=2,\n )\n ProfileMock.package_list = [package]\n\n await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED)\n\n assert hass.states.get(\"sensor.seventeentrack_package_456\") is not None\n assert len(hass.states.async_entity_ids()) == 1\n\n package_delivered = Package(\n tracking_number=\"456\",\n destination_country=206,\n friendly_name=\"friendly name 1\",\n info_text=\"info text 1\",\n location=\"location 1\",\n timestamp=\"2020-08-10 10:32\",\n origin_country=206,\n package_type=2,\n status=40,\n )\n ProfileMock.package_list = [package_delivered]\n\n with patch(\n \"homeassistant.components.seventeentrack.sensor.persistent_notification\"\n ) as persistent_notification_mock:\n await _goto_future(hass)\n\n persistent_notification_mock.create.assert_called()\n assert not hass.states.async_entity_ids()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 253, "n_words": 70, "vocab_size": 43, "complexity": 1, "nloc": 33, "token_counts": 159, "n_ast_nodes": 265, "n_identifiers": 27, "random_cut": "async def test_becomes_delivered_not_shown_notification(hass):\n \n package = Package(\n tracking_number=\"456\",\n destination_country=206,\n friendly_name=\"friendly name 1\",\n info_text=\"info text 1\",\n location=\"location 1\",\n timestamp=\"2020-08-10 10:32\",\n origin_country=206,\n package_type=2,\n )\n ProfileMock.package_list = [package]\n\n await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED)\n\n assert hass.states.get(\"sensor.seventeentrack_package_456\") is not None\n assert len(hass.states.async_entity_ids()) == 1\n", "d_id": 107924, "documentation": { "docstring": "Ensure notification is triggered when package becomes delivered.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 259775, "commit_id": "767e9ae7e4fec8bea36c0433ab42f500aacfde64", "repo": "scikit-learn", "path": "sklearn/ensemble/_iforest.py", "file_name": "_iforest.py", "fun_name": "fit", "commit_message": "ENH Optimize runtime for IsolationForest (#23149)", "code": "def fit(self, X, y=None, sample_weight=None):\n \n X = self._validate_data(X, accept_sparse=[\"csc\"])\n if issparse(X):\n # Pre-sort indices to avoid that each individual tree of the\n # ensemble sorts the indices.\n X.sort_indices()\n\n rnd = check_random_state(self.random_state)\n y = rnd.uniform(size=X.shape[0])\n\n # ensure that max_sample is in [1, n_samples]:\n n_samples = X.shape[0]\n\n if self.contamination != \"auto\":\n if not (0.0 < self.contamination <= 0.5):\n raise ValueError(\n \"contamination must be in (0, 0.5], got: %f\" % self.contamination\n )\n\n if isinstance(self.max_samples, 
str):\n if self.max_samples == \"auto\":\n max_samples = min(256, n_samples)\n else:\n raise ValueError(\n \"max_samples (%s) is not supported.\"\n 'Valid choices are: \"auto\", int or'\n \"float\"\n % self.max_samples\n )\n\n elif isinstance(self.max_samples, numbers.Integral):\n if self.max_samples > n_samples:\n warn(\n \"max_samples (%s) is greater than the \"\n \"total number of samples (%s). max_samples \"\n \"will be set to n_samples for estimation.\"\n % (self.max_samples, n_samples)\n )\n max_samples = n_samples\n else:\n max_samples = self.max_samples\n else: # float\n if not 0.0 < self.max_samples <= 1.0:\n raise ValueError(\n \"max_samples must be in (0, 1], got %r\" % self.max_samples\n )\n max_samples = int(self.max_samples * X.shape[0])\n\n self.max_samples_ = max_samples\n max_depth = int(np.ceil(np.log2(max(max_samples, 2))))\n super()._fit(\n X,\n y,\n max_samples,\n max_depth=max_depth,\n sample_weight=sample_weight,\n check_input=False,\n )\n\n if self.contamination == \"auto\":\n # 0.5 plays a special role as described in the original paper.\n # we take the opposite as we consider the opposite of their score.\n self.offset_ = -0.5\n return self\n\n # else, define offset_ wrt contamination parameter\n self.offset_ = np.percentile(self.score_samples(X), 100.0 * self.contamination)\n\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 939, "n_words": 230, "vocab_size": 139, "complexity": 10, "nloc": 54, "token_counts": 318, "n_ast_nodes": 503, "n_identifiers": 38, "random_cut": "def fit(self, X, y=None, sample_weight=None):\n \n X = self._validate_data(X, accept_sparse=[\"csc\"])\n if issparse(X):\n # Pre-sort indices to avoid that each individual tree of the\n # ensemble sorts the indices.\n X.sort_indices()\n\n rnd = check_random_state(self.random_state)\n y = rnd.uniform(size=X.shape[0])\n\n # ensure that max_sample is in [1, n_samples]:\n n_samples = X.shape[0]\n\n if self.contamination != \"auto\":\n if not (0.0 < self.contamination <= 0.5):\n raise ValueError(\n \"contamination must be in (0, 0.5], got: %f\" % self.contamination\n )\n\n if isinstance(self.max_samples, str):\n if self.max_samples == \"auto\":\n max_samples = min(256, n_samples)\n else:\n raise ValueError(\n \"max_samples (%s) is not supported.\"\n 'Valid choices are: \"auto\", int or'\n \"float\"\n % self.max_samples\n )\n\n elif isinstance(self.max_samples, numbers.Integral):\n i", "d_id": 75913, "documentation": { "docstring": "\n Fit estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Use ``dtype=np.float32`` for maximum\n efficiency. Sparse matrices are also supported, use sparse\n ``csc_matrix`` for maximum efficiency.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. 
If None, then samples are equally weighted.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ", "n_words": 66, "vocab_size": 54, "n_whitespaces": 203, "language": "en" } }, { "id": 200279, "commit_id": "6d2bbf80752549276a968fd4af78231c569d55c5", "repo": "sympy", "path": "sympy/testing/runtests.py", "file_name": "runtests.py", "fun_name": "get_sympy_dir", "commit_message": "runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy", "code": "def get_sympy_dir():\n \n this_file = os.path.abspath(__file__)\n sympy_dir = os.path.join(os.path.dirname(this_file), \"..\", \"..\")\n sympy_dir = os.path.normpath(sympy_dir)\n return os.path.normcase(sympy_dir)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 30, "n_words": 15, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 55, "n_ast_nodes": 93, "n_identifiers": 11, "random_cut": "def get_sympy_dir():\n \n this_file = os.path.abspath(__file__)\n sympy_dir = os.path.join(os.path.dirname(this_file), \".", "d_id": 49578, "documentation": { "docstring": "\n Returns the root SymPy directory and set the global value\n indicating whether the system is case sensitive or not.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 29, "language": "en" } }, { "id": 60422, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/scripts/cpp_lint.py", "file_name": "cpp_lint.py", "fun_name": "CheckCaffeRandom", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def CheckCaffeRandom(filename, clean_lines, linenum, error):\n \n line = clean_lines.elided[linenum]\n for function in c_random_function_list:\n ix = line.find(function)\n # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison\n if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and\n line[ix - 1] not in ('_', '.', '>'))):\n error(filename, linenum, 'caffe/random_fn', 2,\n 'Use caffe_rng_rand() (or other caffe_rng_* function) instead of '\n + function +\n ') to ensure results are deterministic for a fixed Caffe seed.')\n\n\nthreading_list = (\n ('asctime(', 'asctime_r('),\n ('ctime(', 'ctime_r('),\n ('getgrgid(', 'getgrgid_r('),\n ('getgrnam(', 'getgrnam_r('),\n ('getlogin(', 'getlogin_r('),\n ('getpwnam(', 'getpwnam_r('),\n ('getpwuid(', 'getpwuid_r('),\n ('gmtime(', 'gmtime_r('),\n ('localtime(', 'localtime_r('),\n ('strtok(', 'strtok_r('),\n ('ttyname(', 'ttyname_r('),\n )\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 215, "n_words": 99, "vocab_size": 86, "complexity": 6, "nloc": 10, "token_counts": 90, "n_ast_nodes": 273, "n_identifiers": 13, "random_cut": "def CheckCaffeRandom(filename, clean_lines, linenum, error):\n \n line = clean_lines.elided[linenum]\n for f", "d_id": 12150, "documentation": { "docstring": "Checks for calls to C random functions (rand, rand_r, random, ...).\n\n Caffe code should (almost) always use the caffe_rng_* functions rather\n than these, as the internal state of these C functions is independent of the\n native Caffe RNG system which should produce deterministic results for a\n fixed Caffe seed set using Caffe::set_random_seed(...).\n\n Args:\n filename: The name of the current file.\n clean_lines: A CleansedLines instance containing the file.\n linenum: The number of the line to 
check.\n error: The function to call with any errors found.\n ", "n_words": 84, "vocab_size": 64, "n_whitespaces": 102, "language": "en" } }, { "id": 42650, "commit_id": "f352ee63a5d09546a7997ba8f2f8702a1ddb4af7", "repo": "airflow", "path": "tests/jobs/test_backfill_job.py", "file_name": "test_backfill_job.py", "fun_name": "test_mapped_dag", "commit_message": "Replaced all days_ago functions with datetime functions (#23237)\n\nCo-authored-by: Dev232001 ", "code": "def test_mapped_dag(self, dag_id, executor_name, session):\n \n # This test needs a real executor to run, so that the `make_list` task can write out the TaskMap\n from airflow.executors.executor_loader import ExecutorLoader\n\n self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py'))\n dag = self.dagbag.get_dag(dag_id)\n\n when = datetime.datetime(2022, 1, 1)\n\n job = BackfillJob(\n dag=dag,\n start_date=when,\n end_date=when,\n donot_pickle=True,\n executor=ExecutorLoader.load_executor(executor_name),\n )\n job.run()\n\n dr = DagRun.find(dag_id=dag.dag_id, execution_date=when, session=session)[0]\n assert dr\n assert dr.state == DagRunState.SUCCESS\n\n # Check that every task has a start and end date\n for ti in dr.task_instances:\n assert ti.state == TaskInstanceState.SUCCESS\n assert ti.start_date is not None\n assert ti.end_date is not None\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 275, "n_words": 89, "vocab_size": 72, "complexity": 2, "nloc": 20, "token_counts": 153, "n_ast_nodes": 233, "n_identifiers": 35, "random_cut": "def test_mapped_dag(self, dag_id, executor_name, session):\n \n # This test needs a real executor to run, so that the `make_list` task can write out the TaskMap\n from airflow.executors.executor_loader import ExecutorLoader\n\n self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py'))\n dag = self.dagbag.get_dag(dag_id)\n\n when = datetime.datetime(2022, 1, 1)\n\n job = BackfillJob(\n dag=dag,\n start_date=when,\n end_date=when,\n donot_pickle=True,\n executor=ExecutorLoader.load_executor(executor_name),\n )\n job.run()\n\n dr = DagRun.find(dag_id=dag.dag_id, execution_date=when, session=session)[0]\n assert dr\n assert dr.state == DagRunState.SUCCESS\n\n # Check that every task has a start and end date\n for ti in dr.task_instances:\n assert ti.state == TaskInstanceState.SUCCESS\n assert ti.start_date is not None\n assert ti.end_date is not Non", "d_id": 7678, "documentation": { "docstring": "\n End-to-end test of a simple mapped dag.\n\n We test with multiple executors as they have different \"execution environments\" -- for instance\n DebugExecutor runs a lot more in the same process than other Executors.\n\n ", "n_words": 33, "vocab_size": 31, "n_whitespaces": 62, "language": "en" } }, { "id": 292272, "commit_id": "3bf2be1765f7a33fbce06cbabeb2e2115f2f07c7", "repo": "core", "path": "tests/components/http/test_init.py", "file_name": "test_init.py", "fun_name": "test_emergency_ssl_certificate_when_invalid", "commit_message": "Startup with an emergency self signed cert if the ssl certificate cannot be loaded (#66707)", "code": "async def test_emergency_ssl_certificate_when_invalid(hass, tmpdir, caplog):\n \n\n cert_path, key_path = await hass.async_add_executor_job(\n _setup_broken_ssl_pem_files, tmpdir\n )\n\n hass.config.safe_mode = True\n assert (\n await async_setup_component(\n hass,\n \"http\",\n {\n \"http\": {\"ssl_certificate\": cert_path, \"ssl_key\": key_path},\n },\n )\n is 
True\n )\n\n await hass.async_start()\n await hass.async_block_till_done()\n assert (\n \"Home Assistant is running in safe mode with an emergency self signed ssl certificate because the configured SSL certificate was not usable\"\n in caplog.text\n )\n\n assert hass.http.site is not None\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 203, "n_words": 69, "vocab_size": 52, "complexity": 1, "nloc": 22, "token_counts": 87, "n_ast_nodes": 145, "n_identifiers": 16, "random_cut": "async def test_emergency_ssl_certificate_when_invalid(hass, tmpdir, caplog):\n \n\n cert_path, key_path = await hass.async_add_executor_job(\n _setup_broken_ssl_pem_files, tmpdir\n )\n\n hass.config.safe_mode = True\n assert (\n await async_setup_component(\n hass,\n \"http\",\n {\n \"http\": {\"ssl_certificate\": cert_path, \"ssl_key\": key_path},\n },\n )\n is True\n )\n\n await hass.async_start()\n await hass.async_block_till_done()\n assert (\n \"Home Assistant is running in safe mode with an emergency self signed ssl certificate because the configured SSL certificate w", "d_id": 91370, "documentation": { "docstring": "Test http can startup with an emergency self signed cert when the current one is broken.", "n_words": 16, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 206811, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/views/debug.py", "file_name": "debug.py", "fun_name": "get_safe_request_meta", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_safe_request_meta(self, request):\n \n if not hasattr(request, \"META\"):\n return {}\n return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 18, "vocab_size": 17, "complexity": 3, "nloc": 4, "token_counts": 45, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def get_safe_request_meta(self, request):\n \n if not hasattr(request, \"META\"):\n return {}\n return {k: self.cleanse_setting(k, v) for k, v in request.M", "d_id": 51721, "documentation": { "docstring": "\n Return a dictionary of request.META with sensitive values redacted.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 245466, "commit_id": "b564ad32895ac4c2c0a18ba0e32c8c5ccb593df4", "repo": "mmdetection", "path": "mmdet/models/data_preprocessors/data_preprocessor.py", "file_name": "data_preprocessor.py", "fun_name": "cuda", "commit_message": "[Feature] Support MultiDataPreprocessor (#8495)\n\n* Support MultiDataPreprocessor\r\n\r\n* Fix some commits\r\n\r\n* Fix a bug\r\n\r\n* Inherit from the BaseDataPreprocessor", "code": "def cuda(self, *args, **kwargs) -> nn.Module:\n \n\n return self.data_preprocessor.cuda(*args, **kwargs)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 7, "token_counts": 29, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def cuda(self, *args, **kwargs) -> nn.Module:\n \n\n return self.data_preprocessor.cuda(*args, **", "d_id": 70802, "documentation": { "docstring": "Overrides this method to set the :attr:`device`\n\n Returns:\n nn.Module: The model itself.\n ", "n_words": 12, "vocab_size": 12, 
"n_whitespaces": 37, "language": "en" } }, { "id": 73478, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/models.py", "file_name": "models.py", "fun_name": "get_cache_attr_name", "commit_message": "Reformat with black", "code": "def get_cache_attr_name(cls):\n \n return \"_{}.{}\".format(cls._meta.app_label, cls._meta.model_name).lower()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 27, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def get_cache_attr_name(cls):\n \n return \"_{}.{}\".format(cls._meta.app_label, cl", "d_id": 16023, "documentation": { "docstring": "\n Returns the name of the attribute that should be used to store\n a reference to the fetched/created object on a request.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 43, "language": "en" } }, { "id": 29046, "commit_id": "0b46c89dfd9e5e22defb45cbd9869403a7817320", "repo": "saleor", "path": "saleor/graphql/product/mutations/products.py", "file_name": "products.py", "fun_name": "get_instance", "commit_message": "Allow to update/delete product variant by providing SKU (#10861)\n\n* Allow to update/delete product variants by providing SKU\r\n\r\n* Review changes\r\n\r\n* Add SKU argument to ProductVariantStocksUpdate/Delete mutations\r\n\r\n* Review fixes\r\n\r\n* CHANGELOG.md update\r\n\r\n* Code readability improvement", "code": "def get_instance(cls, info, **data):\n \n\n object_id = data.get(\"id\")\n object_sku = data.get(\"sku\")\n attributes = data.get(\"attributes\")\n\n if attributes:\n # Prefetches needed by AttributeAssignmentMixin and\n # associate_attribute_values_to_instance\n qs = cls.Meta.model.objects.prefetch_related(\n \"product__product_type__variant_attributes__values\",\n \"product__product_type__attributevariant\",\n )\n else:\n # Use the default queryset.\n qs = models.ProductVariant.objects.all()\n\n if object_id:\n return cls.get_node_or_error(\n info, object_id, only_type=\"ProductVariant\", qs=qs\n )\n elif object_sku:\n instance = qs.filter(sku=object_sku).first()\n if not instance:\n raise ValidationError(\n {\n \"sku\": ValidationError(\n \"Couldn't resolve to a node: %s\" % object_sku,\n code=\"not_found\",\n )\n }\n )\n return instance\n else:\n return cls._meta.model()\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 485, "n_words": 77, "vocab_size": 58, "complexity": 5, "nloc": 29, "token_counts": 140, "n_ast_nodes": 242, "n_identifiers": 25, "random_cut": "def get_instance(cls, info, **data):\n \n\n object_id = data.get(\"id\")\n object_sku = data.get", "d_id": 5191, "documentation": { "docstring": "Prefetch related fields that are needed to process the mutation.\n\n If we are updating an instance and want to update its attributes,\n # prefetch them.\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 46, "language": "en" } }, { "id": 163773, "commit_id": "4248b23371a70b339a2c16b8e5caca9c2e5897f8", "repo": "pandas", "path": "pandas/core/indexes/base.py", "file_name": "base.py", "fun_name": "_can_use_libjoin", "commit_message": "ENH: ExtensionEngine (#45514)", "code": "def _can_use_libjoin(self) -> bool:\n \n if type(self) is Index:\n # excludes EAs\n return isinstance(self.dtype, np.dtype)\n return not is_interval_dtype(self.dtype)\n\n # 
--------------------------------------------------------------------\n # Uncategorized Methods\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 71, "n_words": 22, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 35, "n_ast_nodes": 61, "n_identifiers": 9, "random_cut": "def _can_use_libjoin(self) -> bool:\n \n if type(self) is Index:\n # excludes EAs\n return isinstance(self.dtype, np.dtype)\n return not is_interval_dtype(self.dtype)", "d_id": 39495, "documentation": { "docstring": "\n Whether we can use the fastpaths implement in _libs.join\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 196089, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/free_groups.py", "file_name": "free_groups.py", "fun_name": "sub_syllables", "commit_message": "Updated import locations", "code": "def sub_syllables(self, from_i, to_j):\n \n if not isinstance(from_i, int) or not isinstance(to_j, int):\n raise ValueError(\"both arguments should be integers\")\n group = self.group\n if to_j <= from_i:\n return group.identity\n else:\n r = tuple(self.array_form[from_i: to_j])\n return group.dtype(r)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 113, "n_words": 34, "vocab_size": 30, "complexity": 4, "nloc": 9, "token_counts": 68, "n_ast_nodes": 110, "n_identifiers": 13, "random_cut": "def sub_syllables(self, from_i, to_j):\n \n if not isinstance(from_i, int) or not isinstance(to_j, int):\n raise ValueError(\"both arguments should be integers\")\n group = self.group\n if to_j <= from_i:\n return group.identity\n else:\n r = tuple(self.array_form[from_i: t", "d_id": 47589, "documentation": { "docstring": "\n `sub_syllables` returns the subword of the associative word `self` that\n consists of syllables from positions `from_to` to `to_j`, where\n `from_to` and `to_j` must be positive integers and indexing is done\n with origin 0.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> f, a, b = free_group(\"a, b\")\n >>> w = a**5*b*a**2*b**-4*a\n >>> w.sub_syllables(1, 2)\n b\n >>> w.sub_syllables(3, 3)\n \n\n ", "n_words": 59, "vocab_size": 48, "n_whitespaces": 158, "language": "en" } }, { "id": 160855, "commit_id": "2223a09864e4ccf5206b78684d3db5c853336df9", "repo": "numpy", "path": "numpy/core/_ufunc_config.py", "file_name": "_ufunc_config.py", "fun_name": "seterr", "commit_message": "DOC: Fixup docs for improved scalar floating point warning message", "code": "def seterr(all=None, divide=None, over=None, under=None, invalid=None):\n \n\n pyvals = umath.geterrobj()\n old = geterr()\n\n if divide is None:\n divide = all or old['divide']\n if over is None:\n over = all or old['over']\n if under is None:\n under = all or old['under']\n if invalid is None:\n invalid = all or old['invalid']\n\n maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +\n (_errdict[over] << SHIFT_OVERFLOW) +\n (_errdict[under] << SHIFT_UNDERFLOW) +\n (_errdict[invalid] << SHIFT_INVALID))\n\n pyvals[1] = maskvalue\n umath.seterrobj(pyvals)\n return old\n\n\n@set_module('numpy')", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "@set_module('numpy')", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 180, "n_words": 72, "vocab_size": 39, "complexity": 9, "nloc": 18, "token_counts": 145, 
"n_ast_nodes": 235, "n_identifiers": 19, "random_cut": "def seterr(all=None, divide=None, over=None, under=None, invalid=None):\n \n\n pyvals = umath.geterrobj()\n old = geterr()\n\n if divide is None:\n divide = all or old['divide']\n if over is None:\n over = all or old['over']\n if under is None:\n under = all or old['under']\n if i", "d_id": 38760, "documentation": { "docstring": "\n Set how floating-point errors are handled.\n\n Note that operations on integer scalar types (such as `int16`) are\n handled like floating point, and are affected by these settings.\n\n Parameters\n ----------\n all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Set treatment for all types of floating-point errors at once:\n\n - ignore: Take no action when the exception occurs.\n - warn: Print a `RuntimeWarning` (via the Python `warnings` module).\n - raise: Raise a `FloatingPointError`.\n - call: Call a function specified using the `seterrcall` function.\n - print: Print a warning directly to ``stdout``.\n - log: Record error in a Log object specified by `seterrcall`.\n\n The default is not to change the current behavior.\n divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for division by zero.\n over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for floating-point overflow.\n under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for floating-point underflow.\n invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for invalid floating-point operation.\n\n Returns\n -------\n old_settings : dict\n Dictionary containing the old settings.\n\n See also\n --------\n seterrcall : Set a callback function for the 'call' mode.\n geterr, geterrcall, errstate\n\n Notes\n -----\n The floating-point exceptions are defined in the IEEE 754 standard [1]_:\n\n - Division by zero: infinite result obtained from finite numbers.\n - Overflow: result too large to be expressed.\n - Underflow: result so close to zero that some precision\n was lost.\n - Invalid operation: result is not an expressible number, typically\n indicates that a NaN was produced.\n\n .. 
[1] https://en.wikipedia.org/wiki/IEEE_754\n\n Examples\n --------\n >>> old_settings = np.seterr(all='ignore') #seterr to known value\n >>> np.seterr(over='raise')\n {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}\n >>> np.seterr(**old_settings) # reset to default\n {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}\n\n >>> np.int16(32000) * np.int16(3)\n 30464\n >>> old_settings = np.seterr(all='warn', over='raise')\n >>> np.int16(32000) * np.int16(3)\n Traceback (most recent call last):\n File \"\", line 1, in \n FloatingPointError: overflow encountered in scalar multiply\n\n >>> old_settings = np.seterr(all='print')\n >>> np.geterr()\n {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}\n >>> np.int16(32000) * np.int16(3)\n 30464\n\n ", "n_words": 336, "vocab_size": 195, "n_whitespaces": 577, "language": "en" } }, { "id": 20351, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/formatters/img.py", "file_name": "img.py", "fun_name": "_create_drawables", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _create_drawables(self, tokensource):\n \n lineno = charno = maxcharno = 0\n maxlinelength = linelength = 0\n for ttype, value in tokensource:\n while ttype not in self.styles:\n ttype = ttype.parent\n style = self.styles[ttype]\n # TODO: make sure tab expansion happens earlier in the chain. It\n # really ought to be done on the input, as to do it right here is\n # quite complex.\n value = value.expandtabs(4)\n lines = value.splitlines(True)\n # print lines\n for i, line in enumerate(lines):\n temp = line.rstrip('\\n')\n if temp:\n self._draw_text(\n self._get_text_pos(linelength, lineno),\n temp,\n font = self._get_style_font(style),\n text_fg = self._get_text_color(style),\n text_bg = self._get_text_bg_color(style),\n )\n temp_width, temp_hight = self.fonts.get_text_size(temp)\n linelength += temp_width\n maxlinelength = max(maxlinelength, linelength)\n charno += len(temp)\n maxcharno = max(maxcharno, charno)\n if line.endswith('\\n'):\n # add a line for each extra line in the value\n linelength = 0\n charno = 0\n lineno += 1\n self.maxlinelength = maxlinelength\n self.maxcharno = maxcharno\n self.maxlineno = lineno\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 677, "n_words": 144, "vocab_size": 89, "complexity": 6, "nloc": 31, "token_counts": 197, "n_ast_nodes": 318, "n_identifiers": 37, "random_cut": "def _create_drawables(self, tokensource):\n \n lineno = charno = maxcharno = 0\n maxlinelength = linelength = 0\n for ttype, value in tokensource:\n while ttype not in self.styles:\n ttype = ttype.parent\n style = self.styles[ttype]\n # TODO: make sure tab expansion happens earlier in the chain. 
It\n # really ought to be done on the input, as to do it right here is\n # quite complex.\n value = value.expandtabs(4)\n lines = value.splitlines(True)\n # print lines\n for i, line in enumerate(lines):\n temp = line.rstrip('\\n')\n if temp:\n self._draw_text(\n self._get_text_pos(linelength, lineno),\n temp,\n font = self._get_style_font(style),\n text_fg = self._get_text_color(style),\n text_bg ", "d_id": 3339, "documentation": { "docstring": "\n Create drawables for the token content.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 290030, "commit_id": "1589c06203c0bc9f87adcc97fe34d5c52aaf403a", "repo": "core", "path": "homeassistant/util/dt.py", "file_name": "dt.py", "fun_name": "__monotonic_time_coarse", "commit_message": "Significantly reduce clock_gettime syscalls on platforms with broken vdso (#81257)", "code": "def __monotonic_time_coarse() -> float:\n \n return time.clock_gettime(CLOCK_MONOTONIC_COARSE)\n\n\nmonotonic_time_coarse = time.monotonic\nwith suppress(Exception):\n if (\n platform.system() == \"Linux\"\n and abs(time.monotonic() - __monotonic_time_coarse()) < 1\n ):\n monotonic_time_coarse = __monotonic_time_coarse\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 57, "n_words": 26, "vocab_size": 24, "complexity": 1, "nloc": 12, "token_counts": 14, "n_ast_nodes": 96, "n_identifiers": 12, "random_cut": "def __monotonic_time_coarse() -> float:\n \n return time.clock_gett", "d_id": 89156, "documentation": { "docstring": "Return a monotonic time in seconds.\n\n This is the coarse version of time_monotonic, which is faster but less accurate.\n\n Since many arm64 and 32-bit platforms don't support VDSO with time.monotonic\n because of errata, we can't rely on the kernel to provide a fast\n monotonic time.\n\n https://lore.kernel.org/lkml/20170404171826.25030-1-marc.zyngier@arm.com/\n ", "n_words": 46, "vocab_size": 41, "n_whitespaces": 64, "language": "en" } }, { "id": 31789, "commit_id": "6aae59d0b54f04c13a79f80b708622db8e8a17e4", "repo": "transformers", "path": "tests/test_feature_extraction_common.py", "file_name": "test_feature_extraction_common.py", "fun_name": "prepare_image_inputs", "commit_message": "Compute min_resolution in prepare_image_inputs (#17915)\n\nCo-authored-by: ydshieh ", "code": "def prepare_image_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False):\n \n\n assert not (numpify and torchify), \"You cannot specify both numpy and PyTorch tensors at the same time\"\n\n if equal_resolution:\n image_inputs = []\n for i in range(feature_extract_tester.batch_size):\n image_inputs.append(\n np.random.randint(\n 255,\n size=(\n feature_extract_tester.num_channels,\n feature_extract_tester.max_resolution,\n feature_extract_tester.max_resolution,\n ),\n dtype=np.uint8,\n )\n )\n else:\n image_inputs = []\n\n # To avoid getting image width/height 0\n min_resolution = feature_extract_tester.min_resolution\n if getattr(feature_extract_tester, \"size_divisor\", None):\n # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`\n min_resolution = max(feature_extract_tester.size_divisor, min_resolution)\n\n for i in range(feature_extract_tester.batch_size):\n width, height = np.random.choice(np.arange(min_resolution, feature_extract_tester.max_resolution), 2)\n image_inputs.append(\n np.random.randint(255, size=(feature_extract_tester.num_channels, width, height), 
dtype=np.uint8)\n )\n\n if not numpify and not torchify:\n # PIL expects the channel dimension as last dimension\n image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]\n\n if torchify:\n image_inputs = [torch.from_numpy(x) for x in image_inputs]\n\n return image_inputs\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 487, "n_words": 129, "vocab_size": 87, "complexity": 11, "nloc": 31, "token_counts": 226, "n_ast_nodes": 344, "n_identifiers": 32, "random_cut": "def prepare_image_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False):\n \n\n assert not (numpify and torchify), \"You cannot specify both numpy and PyTorch tensors at the same time\"\n\n if equal_resolution:\n image_inputs = []\n for i in range(feature_ex", "d_id": 5803, "documentation": { "docstring": "This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,\n or a list of PyTorch tensors if one specifies torchify=True.\n ", "n_words": 28, "vocab_size": 18, "n_whitespaces": 34, "language": "en" } }, { "id": 275266, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/optimizer.py", "file_name": "optimizer.py", "fun_name": "_update_step_xla", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _update_step_xla(self, gradient, variable, key):\n \n return self._update_step(gradient, variable)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 32, "n_identifiers": 6, "random_cut": "def _update_step_xla(self, gradient, variable, key):\n \n return self._update_step(gradient, ", "d_id": 81356, "documentation": { "docstring": "A wrapper of `update_step` to enable XLA acceleration.\n\n Due to `tf.function` tracing mechanism, for (gradient, variable) pairs of\n the same shape and dtype, the execution graph always invoke the first\n pair it has seen. Thus, we need a `key` argument to make each\n (gradient, variable) pair unique. 
In additions, XLA cannot understand\n string input, so the key is an integer.\n\n Args:\n gradient: backpropagated gradient of the given variable.\n variable: variable whose value needs to be updated.\n key (int): a unique key that identifies the variable.\n\n Returns:\n An `Operation` that applies the specified gradients.\n ", "n_words": 93, "vocab_size": 73, "n_whitespaces": 185, "language": "en" } }, { "id": 102626, "commit_id": "89f15f591cc3cc3e8ae40e95ffc802f7f2561ece", "repo": "chia-blockchain", "path": "chia/rpc/wallet_rpc_api.py", "file_name": "wallet_rpc_api.py", "fun_name": "log_in", "commit_message": "Merge standalone wallet into main (#9793)\n\n* wallet changes from pac\r\n\r\n* cat changes\r\n\r\n* pool tests\r\n\r\n* pooling tests passing\r\n\r\n* offers\r\n\r\n* lint\r\n\r\n* mempool_mode\r\n\r\n* black\r\n\r\n* linting\r\n\r\n* workflow files\r\n\r\n* flake8\r\n\r\n* more cleanup\r\n\r\n* renamed\r\n\r\n* remove obsolete test, don't cast announcement\r\n\r\n* memos are not only bytes32\r\n\r\n* trade renames\r\n\r\n* fix rpcs, block_record\r\n\r\n* wallet rpc, recompile settlement clvm\r\n\r\n* key derivation\r\n\r\n* clvm tests\r\n\r\n* lgtm issues and wallet peers\r\n\r\n* stash\r\n\r\n* rename\r\n\r\n* mypy linting\r\n\r\n* flake8\r\n\r\n* bad initializer\r\n\r\n* flaky tests\r\n\r\n* Make CAT wallets only create on verified hints (#9651)\r\n\r\n* fix clvm tests\r\n\r\n* return to log lvl warn\r\n\r\n* check puzzle unhardened\r\n\r\n* public key, not bytes. api caching change\r\n\r\n* precommit changes\r\n\r\n* remove unused import\r\n\r\n* mypy ci file, tests\r\n\r\n* ensure balance before creating a tx\r\n\r\n* Remove CAT logic from full node test (#9741)\r\n\r\n* Add confirmations and sleeps for wallet (#9742)\r\n\r\n* use pool executor\r\n\r\n* rever merge mistakes/cleanup\r\n\r\n* Fix trade test flakiness (#9751)\r\n\r\n* remove precommit\r\n\r\n* older version of black\r\n\r\n* lint only in super linter\r\n\r\n* Make announcements in RPC be objects instead of bytes (#9752)\r\n\r\n* Make announcements in RPC be objects instead of bytes\r\n\r\n* Lint\r\n\r\n* misc hint'ish cleanup (#9753)\r\n\r\n* misc hint'ish cleanup\r\n\r\n* unremove some ci bits\r\n\r\n* Use main cached_bls.py\r\n\r\n* Fix bad merge in main_pac (#9774)\r\n\r\n* Fix bad merge at 71da0487b9cd5564453ec24b76f1ac773c272b75\r\n\r\n* Remove unused ignores\r\n\r\n* more unused ignores\r\n\r\n* Fix bad merge at 3b143e705057d6c14e2fb3e00078aceff0552d7e\r\n\r\n* One more byte32.from_hexstr\r\n\r\n* Remove obsolete test\r\n\r\n* remove commented out\r\n\r\n* remove duplicate payment object\r\n\r\n* remove long sync\r\n\r\n* remove unused test, noise\r\n\r\n* memos type\r\n\r\n* bytes32\r\n\r\n* make it clear it's a single state at a time\r\n\r\n* copy over asset ids from pacr\r\n\r\n* file endl linter\r\n\r\n* Update chia/server/ws_connection.py\r\n\r\nCo-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>\r\n\r\nCo-authored-by: Matt Hauff \r\nCo-authored-by: Kyle Altendorf \r\nCo-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>", "code": "async def log_in(self, request):\n \n\n fingerprint = request[\"fingerprint\"]\n if self.service.logged_in_fingerprint == fingerprint:\n return {\"fingerprint\": fingerprint}\n\n await self._stop_wallet()\n started = await self.service._start(fingerprint)\n if started is True:\n return {\"fingerprint\": fingerprint}\n\n return {\"success\": False, \"error\": \"Unknown Error\"}\n", "url": 
"https://github.com/Chia-Network/chia-blockchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 104, "n_words": 33, "vocab_size": 25, "complexity": 3, "nloc": 9, "token_counts": 67, "n_ast_nodes": 120, "n_identifiers": 9, "random_cut": "async def log_in(self, request):\n \n\n fingerprint = request[\"fingerprint\"]\n if self.service.logged_in_fingerprint == fingerprint:\n return {\"fingerpri", "d_id": 21557, "documentation": { "docstring": "\n Logs in the wallet with a specific key.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 85091, "commit_id": "4e4689949438735622bdf669f05d218c671e7e01", "repo": "zulip", "path": "zerver/webhooks/bitbucket2/tests.py", "file_name": "tests.py", "fun_name": "test_bitbucket2_on_push_commits_multiple_committers_with_others", "commit_message": "webhooks: Pick a more reasonable length for short sha.\n\n7 characters are not enough for large projects, so we change\nit to reasonably longer. As an example, The Linux kernel needs\nat least 11 characters of sha in its shortened form to identify\na revision. We pick 11 so it should work for most of the projects.\n\nSigned-off-by: Zixuan James Li ", "code": "def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None:\n commit_info = \"* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\\n\"\n expected_message = f\n self.check_webhook(\n \"push_multiple_committers_with_others\", TOPIC_BRANCH_EVENTS, expected_message\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 24, "n_ast_nodes": 52, "n_identifiers": 6, "random_cut": "def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None:\n commit_info = \"* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-na", "d_id": 17927, "documentation": { "docstring": "Tomasz [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. 
Commits by Tomasz (4), James (3), Brendon (2) and others (1).\\n\\n{commit_info*9}* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))", "n_words": 21, "vocab_size": 20, "n_whitespaces": 20, "language": "en" } }, { "id": 321275, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/mainwindow/tabwidget.py", "file_name": "tabwidget.py", "fun_name": "drawControl", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def drawControl(self, element, opt, p, widget=None):\n \n if element not in [QStyle.ControlElement.CE_TabBarTab, QStyle.ControlElement.CE_TabBarTabShape,\n QStyle.ControlElement.CE_TabBarTabLabel]:\n # Let the real style draw it.\n self._style.drawControl(element, opt, p, widget)\n return\n\n layouts = self._tab_layout(opt)\n if layouts is None:\n log.misc.warning(\"Could not get layouts for tab!\")\n return\n\n if element == QStyle.ControlElement.CE_TabBarTab:\n # We override this so we can control TabBarTabShape/TabBarTabLabel.\n self.drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)\n self.drawControl(QStyle.ControlElement.CE_TabBarTabLabel, opt, p, widget)\n elif element == QStyle.ControlElement.CE_TabBarTabShape:\n p.fillRect(opt.rect, opt.palette.window())\n self._draw_indicator(layouts, opt, p)\n # We use super() rather than self._style here because we don't want\n # any sophisticated drawing.\n super().drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)\n elif element == QStyle.ControlElement.CE_TabBarTabLabel:\n if not opt.icon.isNull() and layouts.icon.isValid():\n self._draw_icon(layouts, opt, p)\n alignment = (config.cache['tabs.title.alignment'] |\n Qt.AlignmentFlag.AlignVCenter | Qt.TextFlag.TextHideMnemonic)\n self._style.drawItemText(p,\n layouts.text,\n int(alignment),\n opt.palette,\n bool(opt.state & QStyle.StateFlag.State_Enabled),\n opt.text,\n QPalette.ColorRole.WindowText)\n else:\n raise ValueError(\"Invalid element {!r}\".format(element))\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 646, "n_words": 122, "vocab_size": 86, "complexity": 8, "nloc": 30, "token_counts": 286, "n_ast_nodes": 435, "n_identifiers": 47, "random_cut": "def drawControl(self, element, opt, p, widget=None):\n \n if element not in [QStyle.ControlElement.CE_TabBarTab, QStyle.ControlElement.CE_TabBarTabShape,\n QStyle.ControlElement.CE_TabBarTabLabel]:\n # Let the real style draw it.\n self._style.drawControl(element, opt, p, widget)\n return\n\n layouts = self._tab_layout(opt)\n if layouts is None:\n log.misc.warning(\"Could not get layouts for tab!\")\n return\n\n if element == QStyle.ControlElement.CE_TabBarTab:\n # We override this so we can control TabBarTabShape/TabBarTabLabel.\n self.drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)\n self.drawControl(QStyle.ControlElement.CE_TabBarTabLabel, opt, p, widget)\n elif element == QStyle.ControlElement.CE_TabBarTabShape:\n p.fillRect(opt.rect, opt.palette.window())\n self._draw_indicator(layouts, opt, p)\n # We use super() rather than self._style here because we don't want\n # any sophisticated drawing.\n super().drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget)\n elif element == QStyle.ControlElement.CE_TabBarTabLabe", "d_id": 117623, "documentation": { "docstring": "Override drawControl to draw odd tabs in a different 
color.\n\n Draws the given element with the provided painter with the style\n options specified by option.\n\n Args:\n element: ControlElement\n opt: QStyleOption\n p: QPainter\n widget: QWidget\n ", "n_words": 34, "vocab_size": 31, "n_whitespaces": 106, "language": "en" } }, { "id": 24033, "commit_id": "c503dc2f9352272615dc3cc11737b833036c6ccc", "repo": "PaddleOCR", "path": "ppocr/modeling/heads/rec_abinet_head.py", "file_name": "rec_abinet_head.py", "fun_name": "_get_mask", "commit_message": "[New Rec] add vitstr and ABINet", "code": "def _get_mask(length, max_length):\n \n length = length.unsqueeze(-1)\n B = paddle.shape(length)[0]\n grid = paddle.arange(0, max_length).unsqueeze(0).tile([B, 1])\n zero_mask = paddle.zeros([B, max_length], dtype='float32')\n inf_mask = paddle.full([B, max_length], '-inf', dtype='float32')\n diag_mask = paddle.diag(\n paddle.full(\n [max_length], '-inf', dtype=paddle.float32),\n offset=0,\n name=None)\n mask = paddle.where(grid >= length, inf_mask, zero_mask)\n mask = mask.unsqueeze(1) + diag_mask\n return mask.unsqueeze(1)\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 48, "vocab_size": 35, "complexity": 1, "nloc": 14, "token_counts": 148, "n_ast_nodes": 230, "n_identifiers": 22, "random_cut": "def _get_mask(length, max_length):\n \n length = length.unsqueeze(-1)\n B = paddle.shape(length)[0]\n", "d_id": 4685, "documentation": { "docstring": "Generate a square mask for the sequence. The masked positions are filled with float('-inf').\n Unmasked positions are filled with float(0.0).\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 30, "language": "en" } }, { "id": 118683, "commit_id": "dd9084523e365e637443ea351eaaaa25f52d8412", "repo": "streamlit", "path": "lib/tests/streamlit/config_test.py", "file_name": "config_test.py", "fun_name": "test_config_options_removed_on_reparse", "commit_message": "Report sharing removal (#4260)\n\nThe report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. 
This commit removes that code to make the remaining code easier to navigate and understand.", "code": "def test_config_options_removed_on_reparse(self):\n \n\n global_config_path = \"/mock/home/folder/.streamlit/config.toml\"\n makedirs_patch = patch(\"streamlit.config.os.makedirs\")\n makedirs_patch.return_value = True\n pathexists_patch = patch(\"streamlit.config.os.path.exists\")\n pathexists_patch.side_effect = lambda path: path == global_config_path\n\n global_config = \n open_patch = patch(\"streamlit.config.open\", mock_open(read_data=global_config))\n\n with open_patch, makedirs_patch, pathexists_patch:\n config.get_config_options()\n\n self.assertEqual(\"dark\", config.get_option(\"theme.base\"))\n self.assertEqual(\"sans serif\", config.get_option(\"theme.font\"))\n\n global_config = \n open_patch = patch(\"streamlit.config.open\", mock_open(read_data=global_config))\n\n with open_patch, makedirs_patch, pathexists_patch:\n config.get_config_options(force_reparse=True)\n\n self.assertEqual(\"dark\", config.get_option(\"theme.base\"))\n self.assertEqual(None, config.get_option(\"theme.font\"))\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 204, "n_words": 52, "vocab_size": 32, "complexity": 1, "nloc": 25, "token_counts": 147, "n_ast_nodes": 268, "n_identifiers": 18, "random_cut": "def test_config_options_removed_on_reparse(self):\n \n\n global_config_path = \"/mock/home/folder/.streamlit/config.toml\"\n makedirs_patch = pat", "d_id": 26358, "documentation": { "docstring": "Test that config options that are removed in a file are also removed\n from our _config_options dict.\n [theme]\n base = \"dark\"\n font = \"sans serif\"\n \n [theme]\n base = \"dark\"\n ", "n_words": 29, "vocab_size": 21, "n_whitespaces": 86, "language": "en" } }, { "id": 163265, "commit_id": "d603d43df2057ecdf74010d9dadc735e37f8f7b5", "repo": "pandas", "path": "pandas/core/indexes/base.py", "file_name": "base.py", "fun_name": "__getitem__", "commit_message": "TYP: Ignore numpy related issues (#45244)", "code": "def __getitem__(self, key):\n \n getitem = self._data.__getitem__\n\n if is_integer(key) or is_float(key):\n # GH#44051 exclude bool, which would return a 2d ndarray\n key = com.cast_scalar_indexer(key, warn_float=True)\n return getitem(key)\n\n if isinstance(key, slice):\n # This case is separated from the conditional above to avoid\n # pessimization com.is_bool_indexer and ndim checks.\n result = getitem(key)\n # Going through simple_new for performance.\n return type(self)._simple_new(result, name=self._name)\n\n if com.is_bool_indexer(key):\n # if we have list[bools, length=1e5] then doing this check+convert\n # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__\n # time below from 3.8 ms to 496 µs\n # if we already have ndarray[bool], the overhead is 1.4 µs or .25%\n key = np.asarray(key, dtype=bool)\n\n result = getitem(key)\n # Because we ruled out integer above, we always get an arraylike here\n if result.ndim > 1:\n deprecate_ndim_indexing(result)\n if hasattr(result, \"_ndarray\"):\n # error: Item \"ndarray[Any, Any]\" of \"Union[ExtensionArray,\n # ndarray[Any, Any]]\" has no attribute \"_ndarray\" [union-attr]\n # i.e. 
NDArrayBackedExtensionArray\n # Unpack to ndarray for MPL compat\n return result._ndarray # type: ignore[union-attr]\n return result\n\n # NB: Using _constructor._simple_new would break if MultiIndex\n # didn't override __getitem__\n return self._constructor._simple_new(result, name=self._name)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 511, "n_words": 178, "vocab_size": 123, "complexity": 7, "nloc": 17, "token_counts": 139, "n_ast_nodes": 236, "n_identifiers": 27, "random_cut": "def __getitem__(self, key):\n \n getitem = self._data.__getitem__\n\n if is_integer(key) or is_float(key):\n # GH#44051 exclude bool, which would return a 2d ndarray\n key = com.cast_scalar_indexer(key, warn_float=True)\n return getitem(key)\n\n if isinstance(key, slice):\n # This case is separated from the conditional above to avoid\n # pessimization com.is_bool_indexer and ndim checks.\n result = getitem(key)\n # Going through simple_new for performance.\n return type(self)._simple_new(result, name=self._name)\n\n if com.is_bool_indexer(key):\n # if we have list[bools, length=1e5] then doing this check+convert\n # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__\n # time below from 3.8 ms to 496 µs\n # if we already have ndarray[bool], the overhead is 1.4 µs or .25%\n key = np.asarray(key, dtype=bool)\n\n result = getitem(key)\n # Because we ruled out integer above, we always get an arraylike here\n if result.ndim > 1:\n deprecate_ndim_indexing(result)\n if hasattr(result, \"_ndarray\"):\n # error: Item \"ndarray[Any, Any]\" of \"Union[ExtensionArray,\n # ndarray[Any, Any]]\" has no attribute \"_ndarray\" [union-attr]\n # i.e. NDArrayBackedExtensionArray\n # Unpack to ndarray for MPL compat\n return result._ndarray # type: ignore[union-attr]\n return result\n\n # NB: Using", "d_id": 39411, "documentation": { "docstring": "\n Override numpy.ndarray's __getitem__ method to work as desired.\n\n This function adds lists and Series as valid boolean indexers\n (ndarrays only supports ndarray with dtype=bool).\n\n If resulting ndim != 1, plain ndarray is returned instead of\n corresponding `Index` subclass.\n\n ", "n_words": 38, "vocab_size": 36, "n_whitespaces": 81, "language": "en" } }, { "id": 67430, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/selling/report/sales_order_analysis/sales_order_analysis.py", "file_name": "sales_order_analysis.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data(conditions, filters):\n\tdata = frappe.db.sql(\n\t\t.format(\n\t\t\tconditions=conditions\n\t\t),\n\t\tfilters,\n\t\tas_dict=1,\n\t)\n\n\treturn data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 5, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 44, "token_counts": 33, "n_ast_nodes": 51, "n_identifiers": 9, "random_cut": "def get_data(conditions, filters):\n\tdata = frappe.db.sql(\n\t\t.format(\n\t\t\tconditions=conditions\n\t\t),\n\t\tfilters,\n\t\t", "d_id": 14521, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\tso.transaction_date as date,\n\t\t\tsoi.delivery_date as delivery_date,\n\t\t\tso.name as sales_order,\n\t\t\tso.status, so.customer, soi.item_code,\n\t\t\tDATEDIFF(CURDATE(), soi.delivery_date) as delay_days,\n\t\t\tIF(so.status in ('Completed','To Bill'), 0, (SELECT delay_days)) as 
delay,\n\t\t\tsoi.qty, soi.delivered_qty,\n\t\t\t(soi.qty - soi.delivered_qty) AS pending_qty,\n\t\t\tIF((SELECT pending_qty) = 0, (TO_SECONDS(Max(dn.posting_date))-TO_SECONDS(so.transaction_date)), 0) as time_taken_to_deliver,\n\t\t\tIFNULL(SUM(sii.qty), 0) as billed_qty,\n\t\t\tsoi.base_amount as amount,\n\t\t\t(soi.delivered_qty * soi.base_rate) as delivered_qty_amount,\n\t\t\t(soi.billed_amt * IFNULL(so.conversion_rate, 1)) as billed_amount,\n\t\t\t(soi.base_amount - (soi.billed_amt * IFNULL(so.conversion_rate, 1))) as pending_amount,\n\t\t\tsoi.warehouse as warehouse,\n\t\t\tso.company, soi.name,\n\t\t\tsoi.description as description\n\t\tFROM\n\t\t\t`tabSales Order` so,\n\t\t\t(`tabSales Order Item` soi\n\t\tLEFT JOIN `tabSales Invoice Item` sii\n\t\t\tON sii.so_detail = soi.name and sii.docstatus = 1)\n\t\tLEFT JOIN `tabDelivery Note Item` dni\n\t\t\ton dni.so_detail = soi.name\n\t\tRIGHT JOIN `tabDelivery Note` dn\n\t\t\ton dni.parent = dn.name and dn.docstatus = 1\n\t\tWHERE\n\t\t\tsoi.parent = so.name\n\t\t\tand so.status not in ('Stopped', 'Closed', 'On Hold')\n\t\t\tand so.docstatus = 1\n\t\t\t{conditions}\n\t\tGROUP BY soi.name\n\t\tORDER BY so.transaction_date ASC, soi.item_code ASC\n\t", "n_words": 146, "vocab_size": 102, "n_whitespaces": 112, "language": "en" } }, { "id": 218391, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getcoroutinelocals", "commit_message": "add python 3.10.4 for windows", "code": "def getcoroutinelocals(coroutine):\n \n frame = getattr(coroutine, \"cr_frame\", None)\n if frame is not None:\n return frame.f_locals\n else:\n return {}\n\n\n###############################################################################\n### Function Signature Object (PEP 362)\n###############################################################################\n\n\n_WrapperDescriptor = type(type.__call__)\n_MethodWrapper = type(all.__call__)\n_ClassMethodWrapper = type(int.__dict__['from_bytes'])\n\n_NonUserDefinedCallables = (_WrapperDescriptor,\n _MethodWrapper,\n _ClassMethodWrapper,\n types.BuiltinFunctionType)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 140, "n_words": 40, "vocab_size": 33, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 118, "n_identifiers": 16, "random_cut": "def getcoroutinelocals(coroutine):\n \n frame = getattr(coroutine, \"cr_frame\", None)\n if frame is not None:\n return frame.f_locals\n else:\n return {}\n\n\n###############################################################################\n### Function Signature Object (PEP 362)\n###################################################", "d_id": 55277, "documentation": { "docstring": "\n Get the mapping of coroutine local variables to their current values.\n\n A dict is returned, with the keys the local variable names and values the\n bound values.", "n_words": 27, "vocab_size": 22, "n_whitespaces": 36, "language": "en" } }, { "id": 272189, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/integration_test/forwardprop_test.py", "file_name": "forwardprop_test.py", "fun_name": "_forward_over_back_hessian", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _forward_over_back_hessian(f, params, use_pfor, dtype=None):\n \n return _vectorize_parameters(\n functools.partial(_hvp, f, params),\n 
params,\n use_pfor=use_pfor,\n dtype=dtype,\n )\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 51, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 39, "n_ast_nodes": 55, "n_identifiers": 9, "random_cut": "def _forward_over_back_hessian(f, params, use_pfor, dtype=None):\n \n return _vectorize_parameters(\n functools.partial(_hvp, f, params),\n params,\n use_pfor=use_pfor,\n dtype=dtype,\n ", "d_id": 80975, "documentation": { "docstring": "Computes the full Hessian matrix for the scalar-valued f(*params).\n\n Args:\n f: A function taking `params` and returning a scalar.\n params: A possibly nested structure of tensors.\n use_pfor: If true, uses `tf.vectorized_map` calls instead of looping.\n dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes\n (e.g. `tf.float32`) matching the structure of `f`'s returns.\n\n Returns:\n A possibly nested structure of matrix slices corresponding to `params`. Each\n slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`)\n in the corresponding element of `params` and `P` is the total number of\n parameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating\n along the second axis.\n ", "n_words": 105, "vocab_size": 73, "n_whitespaces": 166, "language": "en" } }, { "id": 259605, "commit_id": "0c20ba744966d23ede67cffd7c5d2e0d01cd0658", "repo": "scikit-learn", "path": "sklearn/linear_model/_stochastic_gradient.py", "file_name": "_stochastic_gradient.py", "fun_name": "predict_proba", "commit_message": "DEP loss \"log\" in favor of \"log loss\" in SGDClassifier (#23046)\n\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def predict_proba(self, X):\n \n check_is_fitted(self)\n\n # TODO(1.3): Remove \"log\"\n if self.loss in (\"log_loss\", \"log\"):\n return self._predict_proba_lr(X)\n\n elif self.loss == \"modified_huber\":\n binary = len(self.classes_) == 2\n scores = self.decision_function(X)\n\n if binary:\n prob2 = np.ones((scores.shape[0], 2))\n prob = prob2[:, 1]\n else:\n prob = scores\n\n np.clip(scores, -1, 1, prob)\n prob += 1.0\n prob /= 2.0\n\n if binary:\n prob2[:, 0] -= prob\n prob = prob2\n else:\n # the above might assign zero to all classes, which doesn't\n # normalize neatly; work around this to produce uniform\n # probabilities\n prob_sum = prob.sum(axis=1)\n all_zero = prob_sum == 0\n if np.any(all_zero):\n prob[all_zero, :] = 1\n prob_sum[all_zero] = len(self.classes_)\n\n # normalize\n prob /= prob_sum.reshape((prob.shape[0], -1))\n\n return prob\n\n else:\n raise NotImplementedError(\n \"predict_(log_)proba only supported when\"\n \" loss='log_loss' or loss='modified_huber' \"\n \"(%r given)\"\n % self.loss\n )\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 603, "n_words": 125, "vocab_size": 85, "complexity": 6, "nloc": 33, "token_counts": 204, "n_ast_nodes": 328, "n_identifiers": 24, "random_cut": "def predict_proba(self, X):\n \n check_is_fitted(self)\n\n # TODO(1.3): Remove \"log\"\n if self.loss in (\"log_loss\", \"log\"):\n return self._predict_proba_lr(X)\n\n elif self.loss == \"modified_huber\":\n binary = len(self.classes_) == 2\n scores = self.decision_function(X)\n\n if binary:\n prob2 = np.ones((scores.shape[0], 2))\n prob = prob2[:, 1]\n else:\n prob = 
scores\n\n np.clip(scores, -1, 1, prob)\n prob += 1.0\n prob /= 2.0\n\n if binary:\n prob2[:, 0] -= prob\n prob = prob2\n else:\n # the above might assign zero to all classes, which doesn't\n # normalize neatly; work around this to produce uniform\n # probabilities\n prob_sum = prob.sum(axis=1)\n all_zero = prob_sum == 0\n if np.any(all_zero):\n prob[all_zero, :] = 1\n prob_sum[all_zero] = len(self.classes_)\n\n # normalize\n prob /= p", "d_id": 75837, "documentation": { "docstring": "Probability estimates.\n\n This method is only available for log loss and modified Huber loss.\n\n Multiclass probability estimates are derived from binary (one-vs.-rest)\n estimates by simple normalization, as recommended by Zadrozny and\n Elkan.\n\n Binary probability estimates for loss=\"modified_huber\" are given by\n (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions\n it is necessary to perform proper probability calibration by wrapping\n the classifier with\n :class:`~sklearn.calibration.CalibratedClassifierCV` instead.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Input data for prediction.\n\n Returns\n -------\n ndarray of shape (n_samples, n_classes)\n Returns the probability of the sample for each class in the model,\n where classes are ordered as they are in `self.classes_`.\n\n References\n ----------\n Zadrozny and Elkan, \"Transforming classifier scores into multiclass\n probability estimates\", SIGKDD'02,\n https://dl.acm.org/doi/pdf/10.1145/775047.775151\n\n The justification for the formula in the loss=\"modified_huber\"\n case is in the appendix B in:\n http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf\n ", "n_words": 138, "vocab_size": 98, "n_whitespaces": 339, "language": "en" } }, { "id": 26573, "commit_id": "7d2e77c5f235ca60a2bf3ee02f4f9a8b10b03214", "repo": "saleor", "path": "saleor/plugins/openid_connect/utils.py", "file_name": "utils.py", "fun_name": "fetch_jwks", "commit_message": "Make OIDC plugin public (#9406)\n\n* Make OIDC plugin public\r\n\r\n* Add missing dependency package\r\n\r\n* Apply changes after review\r\n\r\n* Update changelog\r\n\r\n* Apply changes after review\r\n\r\n* Add const file", "code": "def fetch_jwks(jwks_url) -> Optional[dict]:\n \n response = None\n try:\n response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT)\n response.raise_for_status()\n jwks = response.json()\n except requests.exceptions.RequestException:\n logger.exception(\"Unable to fetch jwks from %s\", jwks_url)\n raise AuthenticationError(\"Unable to finalize the authentication process.\")\n except json.JSONDecodeError:\n content = response.content if response else \"Unable to find the response\"\n logger.exception(\n \"Unable to decode the response from auth service with jwks. 
\"\n \"Response: %s\",\n content,\n )\n raise AuthenticationError(\"Unable to finalize the authentication process.\")\n keys = jwks.get(\"keys\", [])\n if not keys:\n logger.warning(\"List of JWKS keys is empty\")\n cache.set(JWKS_KEY, keys, JWKS_CACHE_TIME)\n return keys\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 216, "n_words": 86, "vocab_size": 59, "complexity": 5, "nloc": 28, "token_counts": 122, "n_ast_nodes": 210, "n_identifiers": 25, "random_cut": "def fetch_jwks(jwks_url) -> Optional[dict]:\n \n response = None\n try:\n response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT)\n response.raise_for_status()\n jwks = response.json()\n except requests.exceptions.RequestException:\n logger.exception(\"Unable to fetch jwks from %s\", jwks_url)\n raise AuthenticationError(\"Unable to finalize the authentication process.\")\n except json.JSONDecodeError:\n content = response.content if response else \"Unable to find the response\"\n logger.exception(\n \"Unable to decode the response from auth service with jwks. \"\n \"Response: %s\",\n content,\n )\n raise AuthenticationError(\"Unable to finalize the authentication process.\")\n keys = jwks.get(\"keys\", [])\n if", "d_id": 5027, "documentation": { "docstring": "Fetch JSON Web Key Sets from a provider.\n\n Fetched keys will be stored in the cache to the reduced amount of possible\n requests.\n :raises AuthenticationError\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 37, "language": "en" } } ]