Dataset columns (value range for `int64` columns, length range for `string` columns):

| column | dtype | range |
|---|---|---|
| ast_errors | string | lengths 0–3.2k |
| d_id | int64 | 44–121k |
| id | int64 | 70–338k |
| n_whitespaces | int64 | 3–14k |
| path | string | lengths 8–134 |
| n_words | int64 | 4–4.82k |
| n_identifiers | int64 | 1–131 |
| random_cut | string | lengths 16–15.8k |
| commit_message | string | lengths 2–15.3k |
| fun_name | string | lengths 1–84 |
| commit_id | string | lengths 40–40 |
| repo | string | lengths 3–28 |
| file_name | string | lengths 5–79 |
| ast_levels | int64 | 6–31 |
| nloc | int64 | 1–548 |
| url | string | lengths 31–59 |
| complexity | int64 | 1–66 |
| token_counts | int64 | 6–2.13k |
| n_ast_errors | int64 | 0–28 |
| vocab_size | int64 | 4–1.11k |
| n_ast_nodes | int64 | 15–19.2k |
| language | string | 1 class |
| documentation | dict | n/a |
| code | string | lengths 101–62.2k |

Each record below lists its cells in this column order, separated by `|`; multi-line values such as code, commit messages, and docstrings are kept verbatim inside their cells.
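For quick inspection, here is a minimal sketch of reading records with this schema via the Hugging Face `datasets` library; the dataset id below is a placeholder, since the actual Hub id is not stated in this preview:

```python
from datasets import load_dataset

# Placeholder id -- substitute the real Hub id of this dataset.
ds = load_dataset("user/python-commit-functions", split="train")

row = ds[0]
# Scalar metadata columns.
print(row["repo"], row["path"], row["fun_name"], row["complexity"])
# The nested `documentation` dict holds the docstring and its statistics.
print(row["documentation"]["docstring"], row["documentation"]["n_words"])
```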
94,918 | 295,910 | 35 | tests/components/siren/test_init.py | 16 | 10 | async def test_missing_tones_list(hass):
siren = MockSirenEntity(SirenEntityFeature.TONES, ["a", "b"])
siren.hass = hass
with pytest.raises(ValueError):
process_turn_on_params(siren, {"tone": "test"})
| Add EntityFeature enum to Siren (#69585)
Co-authored-by: Franck Nijhof <frenck@frenck.nl> | test_missing_tones_list | a61ac3ddc6d65522dfa1eb599adf73420a9267dc | core | test_init.py | 12 | 5 | https://github.com/home-assistant/core.git | 1 | 43 | 0 | 15 | 80 | Python | {
"docstring": "Test ValueError when setting a tone that is missing from available_tones list.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | async def test_missing_tones_list(hass):
siren = MockSirenEntity(SirenEntityFeature.TONES, ["a", "b"])
siren.hass = hass
with pytest.raises(ValueError):
process_turn_on_params(siren, {"tone": "test"})
|
|
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning | 8,146 | 44,090 | 109 | airflow/cli/commands/task_command.py | 52 | 28 | def task_failed_deps(args):
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id, args.map_index)
dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO, Do we want to print or log this
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print(f"{dep.dep_name}: {dep.reason}")
else:
| Add `--map-index` parameter to task CLI commands (#20980) | task_failed_deps | 8dabce8887f02216c1037be35e80c214edcbadfe | airflow | task_command.py | 14 | 12 | https://github.com/apache/airflow.git | 3 | 88 | 1 | 44 | 180 | Python | {
"docstring": "\n Returns the unmet dependencies for a task instance from the perspective of the\n scheduler (i.e. why a task instance doesn't get scheduled and then queued by the\n scheduler, and then run by an executor).\n >>> airflow tasks failed-deps tutorial sleep 2015-01-01\n Task instance dependencies not met:\n Dagrun Running: Task instance's dagrun did not exist: Unknown reason\n Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks\n to have succeeded, but found 1 non-success(es).\n ",
"language": "en",
"n_whitespaces": 101,
"n_words": 73,
"vocab_size": 59
} | def task_failed_deps(args):
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id, args.map_index)
dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO, Do we want to print or log this
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print(f"{dep.dep_name}: {dep.reason}")
else:
print("Task instance dependencies are all met.")
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning |
54,959 | 217,834 | 100 | python3.10.4/Lib/http/cookies.py | 24 | 9 | def load(self, rawdata):
if isinstance(rawdata, str):
| add python 3.10.4 for windows | load | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | cookies.py | 12 | 7 | https://github.com/XX-net/XX-Net.git | 3 | 42 | 0 | 23 | 70 | Python | {
"docstring": "Load cookies from a string (presumably HTTP_COOKIE) or\n from a dictionary. Loading cookies from a dictionary 'd'\n is equivalent to calling:\n map(Cookie.__setitem__, d.keys(), d.values())\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 24,
"vocab_size": 19
} | def load(self, rawdata):
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
|
|
22,909 | 107,773 | 125 | lib/matplotlib/axis.py | 32 | 12 | def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False):
backup = {name: value for name, value in self._major_tick_kw.items()
if name in ['tick1On', 'tick2On', 'label1On', 'label2On']}
self. | Refactor handling of tick and ticklabel visiblity in Axis.clear()
This is a follow-up to #20826, which makes the exceptions from clearing
more explicit. | _reset_major_tick_kw | 2357c92d87d96d519c8470776e76180e71663d0b | matplotlib | axis.py | 11 | 9 | https://github.com/matplotlib/matplotlib.git | 5 | 87 | 0 | 27 | 150 | Python | {
"docstring": "\n Reset major tick params to defaults.\n\n Shared subplots pre-configure tick and label visibility. To keep this\n beyond an Axis.clear() operation, we may\n *keep_tick_and_label_visibility*.\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 23,
"vocab_size": 22
} | def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False):
backup = {name: value for name, value in self._major_tick_kw.items()
if name in ['tick1On', 'tick2On', 'label1On', 'label2On']}
self._major_tick_kw.clear()
if keep_tick_and_label_visibility:
self._major_tick_kw.update(backup)
self._major_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'major'))
|
|
28,990 | 129,623 | 55 | python/ray/tune/tests/test_integration_comet.py | 13 | 12 | def test_class_variable_to_instance(self):
logger = self.logger
self.assertEqual(logger._to_exclude, logger._exclude_results)
self.assertEqual(logger._to_system, lo | Comet Integration (#20766)
This PR adds a `CometLoggerCallback` to the Tune Integrations, allowing users to log runs from Ray to [Comet](https://www.comet.ml/site/).
Co-authored-by: Michael Cullan <mjcullan@gmail.com>
Co-authored-by: Antoni Baum <antoni.baum@protonmail.com> | test_class_variable_to_instance | 3d79815cd08c1be8e56c245e662f34366523847e | ray | test_integration_comet.py | 8 | 6 | https://github.com/ray-project/ray.git | 1 | 59 | 0 | 13 | 93 | Python | {
"docstring": "Test that class variables get properly assigned to instance\n variables.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 10,
"vocab_size": 10
} | def test_class_variable_to_instance(self):
logger = self.logger
self.assertEqual(logger._to_exclude, logger._exclude_results)
self.assertEqual(logger._to_system, logger._system_results)
self.assertEqual(logger._to_other, logger._other_results)
self.assertEqual(logger._to_episodes, logger._episode_results)
|
|
4,085 | 21,881 | 574 | pipenv/patched/pip/_vendor/chardet/__init__.py | 120 | 37 | def detect_all(byte_str, ignore_threshold=False):
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
raise TypeError(
f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
)
b | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | detect_all | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | __init__.py | 17 | 36 | https://github.com/pypa/pipenv.git | 14 | 219 | 0 | 88 | 372 | Python | {
"docstring": "\n Detect all the possible encodings of the given byte string.\n\n :param byte_str: The byte sequence to examine.\n :type byte_str: ``bytes`` or ``bytearray``\n :param ignore_threshold: Include encodings that are below\n ``UniversalDetector.MINIMUM_THRESHOLD``\n in results.\n :type ignore_threshold: ``bool``\n ",
"language": "en",
"n_whitespaces": 134,
"n_words": 35,
"vocab_size": 28
} | def detect_all(byte_str, ignore_threshold=False):
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
raise TypeError(
f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
)
byte_str = bytearray(byte_str)
detector = UniversalDetector()
detector.feed(byte_str)
detector.close()
if detector.input_state == InputState.HIGH_BYTE:
results = []
probers = []
for prober in detector.charset_probers:
if hasattr(prober, "probers"):
probers.extend(p for p in prober.probers)
else:
probers.append(prober)
for prober in probers:
if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
charset_name = prober.charset_name or ""
lower_charset_name = charset_name.lower()
# Use Windows encoding name instead of ISO-8859 if we saw any
# extra Windows-specific bytes
if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
charset_name = detector.ISO_WIN_MAP.get(
lower_charset_name, charset_name
)
results.append(
{
"encoding": charset_name,
"confidence": prober.get_confidence(),
"language": prober.language,
}
)
if len(results) > 0:
return sorted(results, key=lambda result: -result["confidence"])
return [detector.result]
|
|
54,715 | 217,317 | 184 | python3.10.4/Lib/enum.py | 39 | 10 | def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(
value,
names,
module=module,
qualname=qualname,
type=type,
start=start,
| add python 3.10.4 for windows | __call__ | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | enum.py | 9 | 11 | https://github.com/XX-net/XX-Net.git | 2 | 70 | 0 | 36 | 100 | Python | {
"docstring": "\n Either returns an existing member, or creates a new enum class.\n\n This method is used both when an enum class is given a value to match\n to an enumeration member (i.e. Color(3)) and for the functional API\n (i.e. Color = Enum('Color', names='RED GREEN BLUE')).\n\n When used for the functional API:\n\n `value` will be the name of the new class.\n\n `names` should be either a string of white-space/comma delimited names\n (values will start at `start`), or an iterator/mapping of name, value pairs.\n\n `module` should be set to the module this class is being created in;\n if it is not set, an attempt to find that module will be made, but if\n it fails the class will not be picklable.\n\n `qualname` should be set to the actual location this class can be found\n at in its module; by default it is set to the global scope. If this is\n not correct, unpickling will fail in some circumstances.\n\n `type`, if set, will be mixed in as the first base class.\n ",
"language": "en",
"n_whitespaces": 281,
"n_words": 167,
"vocab_size": 99
} | def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(
value,
names,
module=module,
qualname=qualname,
type=type,
start=start,
)
|
|
7,567 | 42,482 | 166 | nltk/util.py | 62 | 13 | def edges2dot(edges, shapes=None, attr=None):
if not shapes:
| Fix some tests in Wordnet-related DocStrings | edges2dot | 692adaff901dd9daf29400fdf3385130aefbfb2a | nltk | util.py | 17 | 16 | https://github.com/nltk/nltk.git | 8 | 97 | 0 | 39 | 214 | Python | {
"docstring": "\n :param edges: the set (or list) of edges of a directed graph.\n\n :return dot_string: a representation of 'edges' as a string in the DOT\n graph language, which can be converted to an image by the 'dot' program\n from the Graphviz package, or nltk.parse.dependencygraph.dot2img(dot_string).\n\n :param shapes: dictionary of strings that trigger a specified shape.\n :param attr: dictionary with global graph attributes\n\n >>> import nltk\n >>> from nltk.util import edges2dot\n >>> print(edges2dot([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')]))\n digraph G {\n \"A\" -> \"B\";\n \"A\" -> \"C\";\n \"B\" -> \"C\";\n \"C\" -> \"B\";\n }\n <BLANKLINE>\n ",
"language": "en",
"n_whitespaces": 154,
"n_words": 94,
"vocab_size": 70
} | def edges2dot(edges, shapes=None, attr=None):
if not shapes:
shapes = dict()
if not attr:
attr = dict()
dot_string = "digraph G {\n"
for pair in attr.items():
dot_string += f"{pair[0]} = {pair[1]};\n"
for edge in edges:
for shape in shapes.items():
for node in range(2):
if shape[0] in repr(edge[node]):
dot_string += f'"{edge[node]}" [shape = {shape[1]}];\n'
dot_string += f'"{edge[0]}" -> "{edge[1]}";\n'
dot_string += "}\n"
return dot_string
|
|
3,341 | 20,356 | 92 | pipenv/patched/notpip/_vendor/pygments/formatters/img.py | 23 | 10 | def _draw_line_numbers(self):
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
| check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4 | _draw_line_numbers | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | img.py | 11 | 7 | https://github.com/pypa/pipenv.git | 4 | 49 | 0 | 21 | 80 | Python | {
"docstring": "\n Create drawables for the line numbers.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 6
} | def _draw_line_numbers(self):
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
|
|
46,249 | 189,904 | 197 | manim/cli/cfg/group.py | 58 | 21 | def export(ctx, directory):
directory_path = Path(directory)
if directory_path.absolute == Path.cwd().absolute:
console.print(
,
style="red bold",
end="",
)
proc | Migrate from os.path to pathlib in SVGMobject and other locations (#2687)
* fixed style
* fixed changes
* Update group.py
* Remove extra `Path` call
Co-authored-by: ad_chaos <90276965+Kiran-Raj-Dev@users.noreply.github.com>
* Remove unused context manager
Sorry, just committing here myself so that the PR can be reviewed and merged. This is the only thing left to alter so thought I might as well do it myself.
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Use `with_suffix`
* Remove extra `Path` calls
Co-authored-by: ad_chaos <90276965+Kiran-Raj-Dev@users.noreply.github.com>
Co-authored-by: Darylgolden <darylgolden@gmail.com>
Co-authored-by: Raghav Goel <kilacoda@gmail.com>
Co-authored-by: Raghav Goel <raghavgd2h@gmail.com>
Co-authored-by: ad_chaos <90276965+Kiran-Raj-Dev@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> | export | a20f8aeb6ccd30d6b9d5c34285c3a718b0f5a59b | manim | group.py | 13 | 23 | https://github.com/ManimCommunity/manim.git | 4 | 126 | 0 | 43 | 234 | Python | {
"docstring": "You are reading the config from the same directory you are exporting to.\nThis means that the exported config will overwrite the config for this directory.\nAre you sure you want to continue? (y/n)",
"language": "en",
"n_whitespaces": 31,
"n_words": 34,
"vocab_size": 26
} | def export(ctx, directory):
directory_path = Path(directory)
if directory_path.absolute == Path.cwd().absolute:
console.print(
,
style="red bold",
end="",
)
proceed = input().lower() == "y"
else:
proceed = True
if proceed:
if not directory_path.is_dir():
console.print(f"Creating folder: {directory}.", style="red bold")
directory_path.mkdir(parents=True)
ctx.invoke(write)
from_path = Path.cwd() / "manim.cfg"
to_path = directory_path / "manim.cfg"
console.print(f"Exported final Config at {from_path} to {to_path}.")
else:
console.print("Aborted...", style="red bold")
|
|
23,770 | 109,834 | 153 | lib/matplotlib/backend_managers.py | 40 | 13 | def update_keymap(self, name, key):
if name not in self._tools:
raise KeyError(f'{name!r} not in Tools')
| Add tests for ToolManager | update_keymap | 0d6ee255831adae452af355c025497c0f07aa296 | matplotlib | backend_managers.py | 15 | 11 | https://github.com/matplotlib/matplotlib.git | 5 | 70 | 0 | 31 | 135 | Python | {
"docstring": "\n Set the keymap to associate with the specified tool.\n\n Parameters\n ----------\n name : str\n Name of the Tool.\n key : str or list of str\n Keys to associate with the tool.\n ",
"language": "en",
"n_whitespaces": 96,
"n_words": 31,
"vocab_size": 20
} | def update_keymap(self, name, key):
if name not in self._tools:
raise KeyError(f'{name!r} not in Tools')
self._remove_keys(name)
if isinstance(key, str):
key = [key]
for k in key:
if k in self._keys:
_api.warn_external(
f'Key {k} changed from {self._keys[k]} to {name}')
self._keys[k] = name
|
|
24,602 | 112,161 | 145 | nni/retiarii/oneshot/pytorch/supermodule/differentiable.py | 34 | 12 | def named_parameters(self, *args, **kwargs):
arch = kwargs.pop('arch', False)
for name, p in super().named_parameters(*args, **kwargs):
if any(name == par_name for par_name in self._arch_parameter_names):
if arch:
| Valuechoice oneshot lightning (#4602) | named_parameters | 14d2966b9e91ae16dcc39de8f41017a75cec8ff9 | nni | differentiable.py | 14 | 9 | https://github.com/microsoft/nni.git | 6 | 71 | 0 | 22 | 117 | Python | {
"docstring": "Named parameters excluding architecture parameters.",
"language": "en",
"n_whitespaces": 4,
"n_words": 5,
"vocab_size": 5
} | def named_parameters(self, *args, **kwargs):
arch = kwargs.pop('arch', False)
for name, p in super().named_parameters(*args, **kwargs):
if any(name == par_name for par_name in self._arch_parameter_names):
if arch:
yield name, p
else:
if not arch:
yield name, p
|
|
26,788 | 120,182 | 66 | jax/_src/util.py | 49 | 10 | def unzip3(xyzs):
# Note: we deliberately don't use zip(*xyzs) because it is lazily evaluated,
# is too permissive about inputs, and does not guarantee a length-3 output.
xs = []
ys = []
zs = []
for x, y, z in xyzs:
xs.append(x)
ys.append | Comment on implementation of unzip2 & unzip3 | unzip3 | 72470dee3a5181c8bfe0f0a4725564efbef80f92 | jax | util.py | 9 | 9 | https://github.com/google/jax.git | 2 | 60 | 0 | 43 | 101 | Python | {
"docstring": "Unzip sequence of length-3 tuples into three tuples.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def unzip3(xyzs):
# Note: we deliberately don't use zip(*xyzs) because it is lazily evaluated,
# is too permissive about inputs, and does not guarantee a length-3 output.
xs = []
ys = []
zs = []
for x, y, z in xyzs:
xs.append(x)
ys.append(y)
zs.append(z)
return tuple(xs), tuple(ys), tuple(zs)
|
|
17,609 | 83,182 | 158 | zerver/tests/test_subs.py | 60 | 18 | def test_json_get_subscribers_for_guest_user(self) -> None:
guest_user = self.example_user("polonius")
never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed
# A guest user can only see never subscribed streams that are web-public.
# For Polonius, the only web-public stream that he is not subscribed at
# this point is Rome.
self.assert_length(never_subscribed, 1)
web_public_stream_id = never_subscribed[0]["stream_id"]
result = self.client_get(f"/json/streams/{web_public_stream_id}/members")
self.assert_json_success(result)
result_dict = result.json()
self.assertIn("subscribers", result_dict)
self.assertIsInstance(result_dict["subscribers"], list)
    self.assertG | docs: Consistently hyphenate “web-public”.
In English, compound adjectives should essentially always be
hyphenated. This makes them easier to parse, especially for users who
might not recognize that the words “web public” go together as a
phrase.
Signed-off-by: Anders Kaseorg <anders@zulip.com> | test_json_get_subscribers_for_guest_user | 90e202cd38d00945c81da4730d39e3f5c5b1e8b1 | zulip | test_subs.py | 11 | 15 | https://github.com/zulip/zulip.git | 1 | 98 | 0 | 50 | 172 | Python | {
"docstring": "\n Guest users should have access to subscribers of web-public streams, even\n if they aren't subscribed or have never subscribed to that stream.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 22,
"vocab_size": 19
} | def test_json_get_subscribers_for_guest_user(self) -> None:
guest_user = self.example_user("polonius")
never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed
# A guest user can only see never subscribed streams that are web-public.
# For Polonius, the only web-public stream that he is not subscribed at
# this point is Rome.
self.assert_length(never_subscribed, 1)
web_public_stream_id = never_subscribed[0]["stream_id"]
result = self.client_get(f"/json/streams/{web_public_stream_id}/members")
self.assert_json_success(result)
result_dict = result.json()
self.assertIn("subscribers", result_dict)
self.assertIsInstance(result_dict["subscribers"], list)
self.assertGreater(len(result_dict["subscribers"]), 0)
|
|
57,798 | 226,118 | 402 | packages/python/chart-studio/chart_studio/plotly/chunked_requests/chunked_request.py | 70 | 16 | def _reconnect(self):
if not self._isconnected():
try:
self._connec | switch to black .22 | _reconnect | 43e3a4011080911901176aab919c0ecf5046ddd3 | plotly.py | chunked_request.py | 17 | 17 | https://github.com/plotly/plotly.py.git | 6 | 95 | 0 | 53 | 166 | Python | {
"docstring": "Connect if disconnected.\n Retry self.maxtries times with delays\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 8,
"vocab_size": 8
} | def _reconnect(self):
if not self._isconnected():
try:
self._connect()
except http_client.socket.error as e:
# Attempt to reconnect if the connection was refused
if e.errno == 61 or e.errno == 10061:
# errno 61 is the "Connection Refused" error
time.sleep(self._delay)
self._delay += self._delay # fibonacii delays
self._tries += 1
if self._tries < self.maxtries:
self._reconnect()
else:
self._reset_retries()
raise e
else:
# Unknown scenario
raise e
# Reconnect worked - reset _closed
self._closed = False
|
|
54,450 | 216,173 | 30 | salt/modules/cp.py | 14 | 6 | def list_master(saltenv=None, prefix=""):
if not saltenv:
| fixes saltstack/salt#61562 cp functions derive saltenv from config | list_master | 2bd6323ef5f87d871891a59917ee96f44ef55e75 | salt | cp.py | 11 | 4 | https://github.com/saltstack/salt.git | 3 | 35 | 0 | 14 | 63 | Python | {
"docstring": "\n .. versionchanged:: 3005\n ``saltenv`` will use value from config if not explicitly set\n\n List all of the files stored on the master\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.list_master\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 30,
"vocab_size": 28
} | def list_master(saltenv=None, prefix=""):
if not saltenv:
saltenv = __opts__["saltenv"] or "base"
return _client().file_list(saltenv, prefix)
|
|
1,492 | 8,732 | 83 | tests/ludwig/utils/test_tokenizers.py | 47 | 22 | def test_bert_hf_tokenizer_parity(tmpdir, pretrained_model_name_or_path):
from ludwig.utils.tokenizers import get_hf_tokenizer, HFTokenizer
    inputs = "Hello, ``I'm'' ónë of 1,205,000 sentences!"
hf_tokenizer = HFTokenizer(pretrained_model_name_or_path)
torchtext_tokenizer = get_hf_tokenizer(pretrained_model_name_or_path)
# Ensure that the tokenizer is scriptable
tokenizer_path = os.path.join(tmpdir, "tokenizer.pt")
torch.jit.script(torchtext_tokenizer).save(tokenizer_path)
torchtext_tokenizer = tor | [TorchScript] Add user-defined HF Bert tokenizers (#2733)
* first working set
* wip todo: add never_split kwarg
* adds new never_split kwarg
* clean up
* get tests passing
* updated py38 tests
* pr revisions
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* logging > logger
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> | test_bert_hf_tokenizer_parity | f3fbfbbe7e4c772d60dbc4374811d3a959699f2b | ludwig | test_tokenizers.py | 10 | 11 | https://github.com/ludwig-ai/ludwig.git | 1 | 84 | 0 | 38 | 140 | Python | {
"docstring": "Tests the BERTTokenizer implementation.\n\n Asserts both tokens and token IDs are the same by initializing the BERTTokenizer as a standalone tokenizer and as a\n HF tokenizer.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 26,
"vocab_size": 20
} | def test_bert_hf_tokenizer_parity(tmpdir, pretrained_model_name_or_path):
from ludwig.utils.tokenizers import get_hf_tokenizer, HFTokenizer
    inputs = "Hello, ``I'm'' ónë of 1,205,000 sentences!"
hf_tokenizer = HFTokenizer(pretrained_model_name_or_path)
torchtext_tokenizer = get_hf_tokenizer(pretrained_model_name_or_path)
# Ensure that the tokenizer is scriptable
tokenizer_path = os.path.join(tmpdir, "tokenizer.pt")
torch.jit.script(torchtext_tokenizer).save(tokenizer_path)
torchtext_tokenizer = torch.jit.load(tokenizer_path)
token_ids_expected = hf_tokenizer(inputs)
token_ids = torchtext_tokenizer(inputs)
assert token_ids_expected == token_ids
|
|
13,723 | 64,790 | 20 | erpnext/accounts/doctype/bank_reconciliation_tool/bank_reconciliation_tool.py | 27 | 6 | def get_pe_matching_query(amount_condition, account_from_to, transaction):
# get matching payment entries query
if transaction.deposit > 0:
currency_field = "paid_to_account_currency | style: format code with black | get_pe_matching_query | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | bank_reconciliation_tool.py | 10 | 28 | https://github.com/frappe/erpnext.git | 2 | 27 | 0 | 23 | 60 | Python | {
"docstring": "\n\tSELECT\n\t\t(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END\n\t\t+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END\n\t\t+ 1 ) AS rank,\n\t\t'Payment Entry' as doctype,\n\t\tname,\n\t\tpaid_amount,\n\t\treference_no,\n\t\treference_date,\n\t\tparty,\n\t\tparty_type,\n\t\tposting_date,\n\t\t{currency_field}\n\tFROM\n\t\t`tabPayment Entry`\n\tWHERE\n\t\tpaid_amount {amount_condition} %(amount)s\n\t\tAND docstatus = 1\n\t\tAND payment_type IN (%(payment_type)s, 'Internal Transfer')\n\t\tAND ifnull(clearance_date, '') = \"\"\n\t\tAND {account_from_to} = %(bank_account)s\n\t",
"language": "en",
"n_whitespaces": 48,
"n_words": 68,
"vocab_size": 50
} | def get_pe_matching_query(amount_condition, account_from_to, transaction):
# get matching payment entries query
if transaction.deposit > 0:
currency_field = "paid_to_account_currency as currency"
else:
currency_field = "paid_from_account_currency as currency"
return f
|
|
50,325 | 203,351 | 334 | django/contrib/admin/checks.py | 60 | 23 | def _check_list_display_links(self, obj):
from django.contrib.admin.options import ModelAdmin
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be(
"a list, a tuple, or None",
option="list_display_links",
obj=obj,
id="admin.E110",
)
# Check only if ModelAdmin.get_list_display() isn't overridden.
elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:
return list(
chain.from_iterable(
self._check_list_display_links_item(
| Refs #33476 -- Reformatted code with Black. | _check_list_display_links | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | checks.py | 16 | 21 | https://github.com/django/django.git | 5 | 107 | 0 | 50 | 168 | Python | {
"docstring": "Check that list_display_links is a unique subset of list_display.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _check_list_display_links(self, obj):
from django.contrib.admin.options import ModelAdmin
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be(
"a list, a tuple, or None",
option="list_display_links",
obj=obj,
id="admin.E110",
)
# Check only if ModelAdmin.get_list_display() isn't overridden.
elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:
return list(
chain.from_iterable(
self._check_list_display_links_item(
obj, field_name, "list_display_links[%d]" % index
)
for index, field_name in enumerate(obj.list_display_links)
)
)
return []
|
|
89,351 | 290,233 | 131 | homeassistant/components/zha/core/channels/lighting.py | 40 | 7 | def min_mireds(self) -> int:
min_mireds = self.cluster.get("color_temp_physical_min", self.MIN_MIREDS)
if min_mireds == 0:
self.warning(
" | Fix invalid min and max color temp in bad ZHA light devices (#81604)
* Fix ZHA default color temps
* update test | min_mireds | 83c6a7e18b1b0e4d5a302e304f117dee11d3aa51 | core | lighting.py | 10 | 10 | https://github.com/home-assistant/core.git | 2 | 45 | 0 | 35 | 76 | Python | {
"docstring": "Return the coldest color_temp that this channel supports.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def min_mireds(self) -> int:
min_mireds = self.cluster.get("color_temp_physical_min", self.MIN_MIREDS)
if min_mireds == 0:
self.warning(
"[Min mireds is 0, setting to %s] Please open an issue on the quirks repo to have this device corrected",
self.MIN_MIREDS,
)
min_mireds = self.MIN_MIREDS
return min_mireds
|
|
44,547 | 184,318 | 128 | src/textual/app.py | 35 | 15 | def pop_screen(self) -> Screen:
screen_stack = self._screen_stack
if len(screen_stack) <= 1:
raise ScreenStackError(
"Can't pop screen; there must be at least one screen on the stack"
)
screen = screen_stack.pop()
screen.post_me | prototype screens api | pop_screen | ff55dafb8638f6674f3662aa526a5fc35a007b24 | textual | app.py | 10 | 16 | https://github.com/Textualize/textual.git | 2 | 69 | 0 | 32 | 117 | Python | {
"docstring": "Pop the current screen from the stack, and switch to the previous screen.\n\n Returns:\n Screen: The screen that was replaced.\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 20,
"vocab_size": 17
} | def pop_screen(self) -> Screen:
screen_stack = self._screen_stack
if len(screen_stack) <= 1:
raise ScreenStackError(
"Can't pop screen; there must be at least one screen on the stack"
)
screen = screen_stack.pop()
screen.post_message_no_wait(events.ScreenSuspend(self))
self.screen._screen_resized(self.size)
self.screen.post_message_no_wait(events.ScreenResume(self))
return screen
|
|
71,115 | 246,234 | 221 | tests/handlers/test_appservice.py | 39 | 25 | def test_notify_interested_services_ephemeral(self):
interested_service = self._mkservice(is_interested=True)
services = [interested_service]
self.mock_store.get_app_services.return_value = services
self.mock_store.get_type_stream_id_for_appservice.return_v | Send to-device messages to application services (#11215)
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> | test_notify_interested_services_ephemeral | 64ec45fc1b0856dc7daacca7d3ab75d50bd89f84 | synapse | test_appservice.py | 11 | 22 | https://github.com/matrix-org/synapse.git | 1 | 119 | 0 | 27 | 192 | Python | {
"docstring": "\n Test sending ephemeral events to the appservice handler are scheduled\n to be pushed out to interested appservices, and that the stream ID is\n updated accordingly.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 25,
"vocab_size": 22
} | def test_notify_interested_services_ephemeral(self):
interested_service = self._mkservice(is_interested=True)
services = [interested_service]
self.mock_store.get_app_services.return_value = services
self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable(
579
)
event = Mock(event_id="event_1")
self.event_source.sources.receipt.get_new_events_as.return_value = (
make_awaitable(([event], None))
)
self.handler.notify_interested_services_ephemeral(
"receipt_key", 580, ["@fakerecipient:example.com"]
)
self.mock_scheduler.enqueue_for_appservice.assert_called_once_with(
interested_service, ephemeral=[event]
)
self.mock_store.set_appservice_stream_type_pos.assert_called_once_with(
interested_service,
"read_receipt",
580,
)
|
|
12,732 | 61,868 | 425 | .venv/lib/python3.8/site-packages/pip/_vendor/distlib/compat.py | 74 | 23 | def convert(self, value):
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.con | upd; format | convert | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | compat.py | 15 | 22 | https://github.com/jindongwang/transferlearning.git | 10 | 161 | 0 | 40 | 256 | Python | {
"docstring": "\n Convert values to an appropriate type. dicts, lists and tuples are\n replaced by their converting alternatives. Strings are checked to\n see if they have a conversion format and are converted if they do.\n ",
"language": "en",
"n_whitespaces": 78,
"n_words": 33,
"vocab_size": 27
} | def convert(self, value):
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, string_types):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
|
|
43,706 | 181,983 | 60 | tests/test_css_parse.py | 30 | 16 | def test_parse_transition(duration, parsed_duration):
css = f
stylesheet = Stylesheet()
stylesheet.parse(css)
rule = stylesheet.rules[0].styles
assert len(stylesheet.rules) == 1
assert len(stylesheet.rule | Stop parsing time as scalar | test_parse_transition | 644fdc7ed181a22773556236e83fb5343efe8fd5 | textual | test_css_parse.py | 12 | 13 | https://github.com/Textualize/textual.git | 1 | 80 | 0 | 24 | 130 | Python | {
"docstring": "#some-widget {{\n transition: offset {duration} in_out_cubic;\n }}\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 7,
"vocab_size": 7
} | def test_parse_transition(duration, parsed_duration):
css = f
stylesheet = Stylesheet()
stylesheet.parse(css)
rule = stylesheet.rules[0].styles
assert len(stylesheet.rules) == 1
assert len(stylesheet.rules[0].errors) == 0
assert rule.transitions == {
"offset": Transition(duration=parsed_duration, easing="in_out_cubic", delay=0.0)
}
|
|
71,652 | 247,396 | 129 | tests/rest/media/v1/test_html_preview.py | 29 | 6 | def test_meta_charset(self) -> None:
encodings = _get_html_media_encodings(
b,
"text/html",
)
self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
# A less well-form | Add type hints to `tests/rest` (#12146)
* Add type hints to `tests/rest`
* newsfile
* change import from `SigningKey` | test_meta_charset | 7e91107be1a4287873266e588a3c5b415279f4c8 | synapse | test_html_preview.py | 9 | 22 | https://github.com/matrix-org/synapse.git | 1 | 62 | 0 | 19 | 111 | Python | {
"docstring": "A character encoding is found via the meta tag.\n <html>\n <head><meta charset=\"ascii\">\n </head>\n </html>\n \n <html>\n <head>< meta charset = ascii>\n </head>\n </html>\n ",
"language": "en",
"n_whitespaces": 93,
"n_words": 22,
"vocab_size": 18
} | def test_meta_charset(self) -> None:
encodings = _get_html_media_encodings(
b,
"text/html",
)
self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
# A less well-formed version.
encodings = _get_html_media_encodings(
b,
"text/html",
)
self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
|
|
15,407 | 70,182 | 285 | glances/amps_list.py | 69 | 17 | def _build_amps_list(self, amp_value, processlist):
ret = []
try:
# Search in both cmdline and name (for kernel thread, see #1261)
for p in processlist:
if (re.search(amp_value.regex(), p['name']) is not None) or (
p['cmdline'] is not None
and p['cmdline'] != []
and re.search(amp_value.regex(), ' '.join(p['cmdline'])) is not None
):
| AMP: regex with special chars #2152 | _build_amps_list | 1aa5596cc25fbd74cac65c5e4d6b16bd90091138 | glances | amps_list.py | 19 | 15 | https://github.com/nicolargo/glances.git | 7 | 134 | 0 | 57 | 228 | Python | {
"docstring": "Return the AMPS process list according to the amp_value\n\n Search application monitored processes by a regular expression\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 17,
"vocab_size": 16
} | def _build_amps_list(self, amp_value, processlist):
ret = []
try:
# Search in both cmdline and name (for kernel thread, see #1261)
for p in processlist:
if (re.search(amp_value.regex(), p['name']) is not None) or (
p['cmdline'] is not None
and p['cmdline'] != []
and re.search(amp_value.regex(), ' '.join(p['cmdline'])) is not None
):
ret.append(
{'pid': p['pid'], 'cpu_percent': p['cpu_percent'], 'memory_percent': p['memory_percent']}
)
except (TypeError, KeyError) as e:
logger.debug("Can not build AMPS list ({})".format(e))
return ret
|
|
80,526 | 270,689 | 25 | keras/engine/base_layer.py | 10 | 5 | def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument
return inputs
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | call | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | base_layer.py | 6 | 2 | https://github.com/keras-team/keras.git | 1 | 16 | 0 | 10 | 27 | Python | {
"docstring": "This is where the layer's logic lives.\n\n The `call()` method may not create state (except in its first invocation,\n wrapping the creation of variables or other resources in `tf.init_scope()`).\n It is recommended to create state in `__init__()`, or the `build()` method\n that is called automatically before `call()` executes the first time.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n The first positional `inputs` argument is subject to special rules:\n - `inputs` must be explicitly passed. A layer cannot have zero\n arguments, and `inputs` cannot be provided via the default value\n of a keyword argument.\n - NumPy array or Python scalar values in `inputs` get cast as tensors.\n - Keras mask metadata is only collected from `inputs`.\n - Layers are built (`build(input_shape)` method)\n using shape info from `inputs` only.\n - `input_spec` compatibility is only checked against `inputs`.\n - Mixed precision input casting is only applied to `inputs`.\n If a layer has tensor arguments in `*args` or `**kwargs`, their\n casting behavior in mixed precision should be handled manually.\n - The SavedModel input specification is generated using `inputs` only.\n - Integration with various ecosystem packages like TFMOT, TFLite,\n TF.js, etc is only supported for `inputs` and not for tensors in\n positional and keyword arguments.\n *args: Additional positional arguments. May contain tensors, although\n this is not recommended, for the reasons above.\n **kwargs: Additional keyword arguments. May contain tensors, although\n this is not recommended, for the reasons above.\n The following optional keyword arguments are reserved:\n - `training`: Boolean scalar tensor of Python boolean indicating\n whether the `call` is meant for training or inference.\n - `mask`: Boolean input mask. If the layer's `call()` method takes a\n `mask` argument, its default value will be set to the mask generated\n for `inputs` by the previous layer (if `input` did come from a layer\n that generated a corresponding mask, i.e. if it came from a Keras\n layer with masking support).\n\n Returns:\n A tensor or list/tuple of tensors.\n ",
"language": "en",
"n_whitespaces": 714,
"n_words": 319,
"vocab_size": 177
} | def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument
return inputs
|
|
25,676 | 116,155 | 494 | tests/unit/test_executor.py | 144 | 31 | def test_use_predictor_with_view(self, mock_handler):
# set integration data
df = pd.DataFrame([
{'a': 1, 'b': 'one'},
{'a': 2, 'b': 'two'},
{'a': 1, 'b': 'three'},
])
self.set_handler(mock_handler, name='pg', tables={'tasks': df})
view_name = 'vtasks'
# --- create view ---
ret = self.command_executor.execute_command(parse_sql(
f'create view {view_name} (select * from pg (select * from tasks))',
dialect='mindsdb')
)
assert ret.error_code is None
# --- use predictor ---
predicted_value = 3.14
predictor = {
'name': 'task_model',
'predict': 'p',
'dtypes': {
'p': dtype.float,
'a': dtype.integer,
'b': dtype.categorical
},
'predicted_value': predicted_value
}
self.set_predictor(predictor)
ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))
assert ret.error_code is None
# native query was called
assert mock_handler().native_query.mock_calls[0].args[0] == 'select * from tasks'
# check predictor call
# model was called
assert self.mock_model_interface.predict.mock_calls[0].args[0] == 'task_model'
# input = one row whit a==2
when_data = self.mock_model_interface.predict.mock_calls[0].args[1]
assert len(when_data) == 1
assert when_data[0]['a'] == 2
# check prediction
assert ret.data[0][0] = | executor tests | test_use_predictor_with_view | 02a831997cdffafca7cb160eb1938e72020ee049 | mindsdb | test_executor.py | 13 | 39 | https://github.com/mindsdb/mindsdb.git | 1 | 254 | 0 | 90 | 442 | Python | {
"docstring": "\n select task_model.p \n from views.{view_name}\n join mindsdb.task_model\n where {view_name}.a = 2\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 10,
"vocab_size": 10
} | def test_use_predictor_with_view(self, mock_handler):
# set integration data
df = pd.DataFrame([
{'a': 1, 'b': 'one'},
{'a': 2, 'b': 'two'},
{'a': 1, 'b': 'three'},
])
self.set_handler(mock_handler, name='pg', tables={'tasks': df})
view_name = 'vtasks'
# --- create view ---
ret = self.command_executor.execute_command(parse_sql(
f'create view {view_name} (select * from pg (select * from tasks))',
dialect='mindsdb')
)
assert ret.error_code is None
# --- use predictor ---
predicted_value = 3.14
predictor = {
'name': 'task_model',
'predict': 'p',
'dtypes': {
'p': dtype.float,
'a': dtype.integer,
'b': dtype.categorical
},
'predicted_value': predicted_value
}
self.set_predictor(predictor)
ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))
assert ret.error_code is None
# native query was called
assert mock_handler().native_query.mock_calls[0].args[0] == 'select * from tasks'
# check predictor call
# model was called
assert self.mock_model_interface.predict.mock_calls[0].args[0] == 'task_model'
# input = one row whit a==2
when_data = self.mock_model_interface.predict.mock_calls[0].args[1]
assert len(when_data) == 1
assert when_data[0]['a'] == 2
# check prediction
assert ret.data[0][0] == predicted_value
assert len(ret.data) == 1
|
|
28,017 | 125,895 | 421 | rllib/connectors/tests/test_agent.py | 114 | 31 | def test_vr_connector_shift_by_one(self):
view_rq_dict = {
"state": ViewRequirement("obs"),
"next_state": ViewRequirement(
"obs", shift=1, used_for_compute_actions=False
),
"prev_state": ViewRequirement("obs", shift=-1),
}
obs_arrs = np.arange(10)[:, None] + 1
config = PPOConfig().to_dict()
ctx = ConnectorContext(
view_requirements=view_rq_dict, config=config, is_policy_recurrent=True
)
c = ViewRequirementAgentConnector(ctx)
# keep a running list of observations
obs_list = []
for t, obs in enumerate(obs_arrs):
# t=0 is the next state of t=-1
data = AgentConnectorDataType(
0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1}
)
process | [RLlib] Implemented ViewRequirementConnector (#26998) | test_vr_connector_shift_by_one | 8ddcf89096e5631c6b6e0d04dc094b458a15c9f9 | ray | test_agent.py | 15 | 26 | https://github.com/ray-project/ray.git | 3 | 187 | 0 | 87 | 308 | Python | {
"docstring": "Test that the ViewRequirementConnector can handle shift by one correctly and\n can ignore future referencing view_requirements to respect causality",
"language": "en",
"n_whitespaces": 25,
"n_words": 19,
"vocab_size": 18
} | def test_vr_connector_shift_by_one(self):
view_rq_dict = {
"state": ViewRequirement("obs"),
"next_state": ViewRequirement(
"obs", shift=1, used_for_compute_actions=False
),
"prev_state": ViewRequirement("obs", shift=-1),
}
obs_arrs = np.arange(10)[:, None] + 1
config = PPOConfig().to_dict()
ctx = ConnectorContext(
view_requirements=view_rq_dict, config=config, is_policy_recurrent=True
)
c = ViewRequirementAgentConnector(ctx)
# keep a running list of observations
obs_list = []
for t, obs in enumerate(obs_arrs):
# t=0 is the next state of t=-1
data = AgentConnectorDataType(
0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1}
)
processed = c([data]) # env.reset() for t == -1 else env.step()
for_action = processed[0].data.for_action
# add cur obs to the list
obs_list.append(obs)
if t == 0:
check(for_action["prev_state"], for_action["state"])
else:
# prev state should be equal to the prev time step obs
check(for_action["prev_state"], obs_list[-2][None])
|
|
18,743 | 91,232 | 127 | tools/flake8_plugin.py | 85 | 19 | def adapt_error(cls, e):
return e._replace(message=e.message.format(*e.vars))[:4]
error = namedtuple("error", "lineno col message type vars")
Error = partial(partial, error, message="", type=SentryCheck, vars=())
S001 = Error(
message="S001: Avoid us | Revert "ref: simplify and type flake8 plugin (#35645)" (#35651) | adapt_error | 8b9bcdc92d8ff23ec9f44d90d14348d9464d476b | sentry | flake8_plugin.py | 13 | 2 | https://github.com/getsentry/sentry.git | 1 | 31 | 0 | 75 | 227 | Python | {
"docstring": "Adapts the extended error namedtuple to be compatible with Flake8.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def adapt_error(cls, e):
return e._replace(message=e.message.format(*e.vars))[:4]
error = namedtuple("error", "lineno col message type vars")
Error = partial(partial, error, message="", type=SentryCheck, vars=())
S001 = Error(
message="S001: Avoid using the {} mock call as it is "
"confusing and prone to causing invalid test "
"behavior."
)
S001.methods = {
"not_called",
"called_once",
"called_once_with",
}
S002 = Error(message="S002: print functions or statements are not allowed.")
S003 = Error(message="S003: Use ``from sentry.utils import json`` instead.")
S003.modules = {"json", "simplejson"}
S003.names = {
"load",
"loads",
"dump",
"dumps",
"JSONEncoder",
"JSONDecodeError",
"_default_encoder",
}
|
|
80,335 | 269,925 | 43 | keras/callbacks.py | 11 | 6 | def on_train_begin(self, logs=None):
logs = self._process_logs(logs)
for callback in self.callbacks:
callback.on_train_begin(logs)
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | on_train_begin | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | callbacks.py | 9 | 4 | https://github.com/keras-team/keras.git | 2 | 31 | 0 | 11 | 51 | Python | {
"docstring": "Calls the `on_train_begin` methods of its callbacks.\n\n Args:\n logs: Dict. Currently, no data is passed via this argument\n for this method, but that may change in the future.\n ",
"language": "en",
"n_whitespaces": 66,
"n_words": 28,
"vocab_size": 26
} | def on_train_begin(self, logs=None):
logs = self._process_logs(logs)
for callback in self.callbacks:
callback.on_train_begin(logs)
|
|
76,068 | 260,093 | 80 | sklearn/utils/tests/test_param_validation.py | 40 | 11 | def test_decorate_validated_function():
decorated_function = deprecated()(_func)
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
decorated_function(1, 2, c=3)
# outer decorator does not inte | MNT Param validation: do not expose internal values in error msg (#23459)
* allow to not expose internal valid params in error msg
* ensure deprecated and internal do not overlap
* deprecated and internal must be subsets of options
* black | test_decorate_validated_function | 122876e9ab1ab494b4bf0ca3360d5a1527caf2e7 | scikit-learn | test_param_validation.py | 13 | 7 | https://github.com/scikit-learn/scikit-learn.git | 1 | 70 | 0 | 29 | 123 | Python | {
"docstring": "Check that validate_params functions can be decorated",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def test_decorate_validated_function():
decorated_function = deprecated()(_func)
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
decorated_function(1, 2, c=3)
# outer decorator does not interfer with validation
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
with pytest.raises(ValueError, match=r"The 'c' parameter of _func must be"):
decorated_function(1, 2, c="wrong")
|
|
74,253 | 253,816 | 48 | d2l/jax.py | 19 | 16 | def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
axes.set_xlabel(xlabel), axes.set_ylabel(ylabel)
axes.set_xscale(xscale), axes.set_yscale(yscale)
axes.set_xlim(xlim), axes.set_ylim(ylim)
if legend:
axes.legend(legend)
axes.grid()
| [Jax] Add calculus | set_axes | 7487da3edb1a68af60104e0290216f0849a8765c | d2l-en | jax.py | 9 | 7 | https://github.com/d2l-ai/d2l-en.git | 2 | 73 | 0 | 19 | 111 | Python | {
"docstring": "Set the axes for matplotlib.\n\n Defined in :numref:`sec_calculus`",
"language": "en",
"n_whitespaces": 10,
"n_words": 8,
"vocab_size": 8
} | def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
axes.set_xlabel(xlabel), axes.set_ylabel(ylabel)
axes.set_xscale(xscale), axes.set_yscale(yscale)
axes.set_xlim(xlim), axes.set_ylim(ylim)
if legend:
axes.legend(legend)
axes.grid()
|
|
73,504 | 250,551 | 397 | mitmproxy/addonmanager.py | 91 | 27 | def register(self, addon):
api_changes = {
# mitmproxy 6 -> mitmproxy 7
"clientconnect": "client_connected",
"clientdisconnect": "client_disconnected",
"serverconnect": "server_connect and server_connected",
"serverdisconnect": "server_disconnected",
}
for a in traverse([addon]):
for old, new in api_changes.items():
if hasattr(a, old):
ctx.log.warn(f"The {old} event has been removed, use {new} instead. "
f"For more details, see https://docs.mitmproxy.or | Rename new async helper functions.
async_trigger -> trigger_event
invoke_addon -> invoke_addon_sync (API breakage)
async_invoke_addon -> invoke_addon | register | ee4999e8e4380f7b67faef92f04c361deffba412 | mitmproxy | addonmanager.py | 16 | 26 | https://github.com/mitmproxy/mitmproxy.git | 7 | 164 | 0 | 68 | 283 | Python | {
"docstring": "\n Register an addon, call its load event, and then register all its\n sub-addons. This should be used by addons that dynamically manage\n addons.\n\n If the calling addon is already running, it should follow with\n running and configure events. Must be called within a current\n context.\n ",
"language": "en",
"n_whitespaces": 119,
"n_words": 45,
"vocab_size": 41
} | def register(self, addon):
api_changes = {
# mitmproxy 6 -> mitmproxy 7
"clientconnect": "client_connected",
"clientdisconnect": "client_disconnected",
"serverconnect": "server_connect and server_connected",
"serverdisconnect": "server_disconnected",
}
for a in traverse([addon]):
for old, new in api_changes.items():
if hasattr(a, old):
ctx.log.warn(f"The {old} event has been removed, use {new} instead. "
f"For more details, see https://docs.mitmproxy.org/stable/addons-events/.")
name = _get_name(a)
if name in self.lookup:
raise exceptions.AddonManagerError(
"An addon called '%s' already exists." % name
)
l = Loader(self.master)
self.invoke_addon_sync(addon, LoadHook(l))
for a in traverse([addon]):
name = _get_name(a)
self.lookup[name] = a
for a in traverse([addon]):
self.master.commands.collect_commands(a)
self.master.options.process_deferred()
return addon
|
|
@method_decorator(never_cache, name='dispatch') | 45,927 | 188,689 | 292 | apps/authentication/views/login.py | 74 | 41 | def get_context_data(self, **kwargs):
from tickets.models import Ticket
from tickets.const import TICKET_DETAIL_URL
ticket_id = self.request.session.get("auth_ticket_id")
if not ticket_id:
| fix: login confirm bug (#7914)
Co-authored-by: feng626 <1304903146@qq.com> | get_context_data | 08ff8fa285575b8ca5ee187d297d807bf197a161 | jumpserver | login.py | 15 | 26 | https://github.com/jumpserver/jumpserver.git | 4 | 180 | 1 | 52 | 320 | Python | {
"docstring": "Wait for <b>{}</b> confirm, You also can copy link to her/him <br/>\n Don't close this page",
"language": "en",
"n_whitespaces": 32,
"n_words": 16,
"vocab_size": 16
} | def get_context_data(self, **kwargs):
from tickets.models import Ticket
from tickets.const import TICKET_DETAIL_URL
ticket_id = self.request.session.get("auth_ticket_id")
if not ticket_id:
ticket = None
else:
ticket = Ticket.all().filter(pk=ticket_id).first()
context = super().get_context_data(**kwargs)
if ticket:
timestamp_created = datetime.datetime.timestamp(ticket.date_created)
ticket_detail_url = TICKET_DETAIL_URL.format(id=ticket_id, type=ticket.type)
assignees = ticket.current_node.first().ticket_assignees.all()
assignees_display = ', '.join([str(i.assignee) for i in assignees])
msg = _().format(assignees_display)
else:
timestamp_created = 0
ticket_detail_url = ''
msg = _("No ticket found")
context.update({
"msg": msg,
"timestamp": timestamp_created,
"ticket_detail_url": ticket_detail_url
})
return context
@method_decorator(never_cache, name='dispatch') |
5,748 | 31,457 | 319 | src/transformers/modeling_tf_utils.py | 137 | 29 | def tf_shard_checkpoint(weights, max_shard_size="10GB"):
max_shard_size = convert_file_size_to_int(max_shard_size)
sharded_state_dicts = []
current_block = []
current_block_size = 0
total_size = 0
for item in weights:
weight_size = item.numpy().size * dtype_byte_size(item.dtype)
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
sharded_state_dicts.append(current_block)
current_block = []
current_block_size = 0
current_block.append(item)
current_block_size += weight_size
total_size += weight_size
# Add the last block
sharded_state_dicts.append(current_block)
# If we only have one shard, we return it
if len(sharded_state_dicts) == 1:
return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
weight_map = {}
shards = {}
for idx, shard in enumerate(sharded_state_dicts):
shard_file = TF2_WEIGHTS_NAME. | TF Sharded (#17713)
* initial commit
* update modeeling tf utils
* quality
* clean and update args
* update
* remove potential bug
* code quality
* update
* update max shard
* update tests for sharding from pretrained
* fix remaining test
* make style
* h5py if tf available
* update and fix test
* fix test
* style
* modified push to hub to support shard for TF
* quick fix
* update code
* merge branch main and style
* Apply suggestions from code review
Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com>
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
* update based on reviews
* update doc
* update and style
* Apply suggestions from code review
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
* Update based on reviews
* fix typo
* style
Co-authored-by: Joao Gante <joaofranciscocardosogante@gmail.com>
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> | tf_shard_checkpoint | 7cced021fa8ddc59f0f77384300760d34545394e | transformers | modeling_tf_utils.py | 14 | 29 | https://github.com/huggingface/transformers.git | 6 | 181 | 0 | 83 | 324 | Python | {
"docstring": "\n Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a\n given size.\n\n The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no\n optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the\n limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],\n [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].\n\n <Tip warning={true}>\n\n If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will\n have a size greater than `max_shard_size`.\n\n </Tip>\n\n Args:\n weights (`Dict[str, tf.RessourceVariable]`): The list of tf.RessourceVariable of a model to save.\n max_shard_size (`int` or `str`, *optional*, defaults to `\"10GB\"`):\n The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit\n (like `\"5MB\"`).\n ",
"language": "en",
"n_whitespaces": 231,
"n_words": 158,
"vocab_size": 105
} | def tf_shard_checkpoint(weights, max_shard_size="10GB"):
max_shard_size = convert_file_size_to_int(max_shard_size)
sharded_state_dicts = []
current_block = []
current_block_size = 0
total_size = 0
for item in weights:
weight_size = item.numpy().size * dtype_byte_size(item.dtype)
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
sharded_state_dicts.append(current_block)
current_block = []
current_block_size = 0
current_block.append(item)
current_block_size += weight_size
total_size += weight_size
# Add the last block
sharded_state_dicts.append(current_block)
# If we only have one shard, we return it
if len(sharded_state_dicts) == 1:
return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
weight_map = {}
shards = {}
for idx, shard in enumerate(sharded_state_dicts):
shard_file = TF2_WEIGHTS_NAME.replace(".h5", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5")
shards[shard_file] = shard
for weight in shard:
weight_name = weight.name
weight_map[weight_name] = shard_file
# Add the metadata
metadata = {"total_size": total_size}
index = {"metadata": metadata, "weight_map": weight_map}
return shards, index
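
A framework-free sketch of the same greedy splitting policy, assuming weights are reduced to their byte sizes (the extra guard here avoids an empty leading shard, a small tweak over the code above):

def greedy_shard(sizes, max_shard_size):
    # sizes: bytes per weight, in state-dict order; returns index lists per shard
    shards, current, current_size = [], [], 0
    for i, size in enumerate(sizes):
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = [], 0
        current.append(i)
        current_size += size
    shards.append(current)  # the last, possibly partial, block
    return shards

assert greedy_shard([6, 6, 2, 6, 2, 2], 10) == [[0], [1, 2], [3, 4, 5]]  # matches the docstring example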
|
|
47,761 | 196,261 | 72 | sympy/geometry/curve.py | 22 | 14 | def scale(self, x=1, y=1, pt=None):
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
fx, | Updated import locations | scale | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | curve.py | 17 | 6 | https://github.com/sympy/sympy.git | 2 | 85 | 0 | 20 | 130 | Python | {
"docstring": "Override GeometryEntity.scale since Curve is not made up of Points.\n\n Returns\n =======\n\n Curve :\n returns scaled curve.\n\n Examples\n ========\n\n >>> from sympy import Curve\n >>> from sympy.abc import x\n >>> Curve((x, x), (x, 0, 1)).scale(2)\n Curve((2*x, x), (x, 0, 1))\n\n ",
"language": "en",
"n_whitespaces": 121,
"n_words": 40,
"vocab_size": 31
} | def scale(self, x=1, y=1, pt=None):
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
fx, fy = self.functions
return self.func((fx*x, fy*y), self.limits)
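
A sketch of the pt branch, which composes translate-scale-translate to scale about a point (the exact printed form may differ slightly across SymPy versions):

>>> from sympy import Curve, Point
>>> from sympy.abc import x
>>> Curve((x, x), (x, 0, 1)).scale(2, 3)
Curve((2*x, 3*x), (x, 0, 1))
>>> Curve((x, x), (x, 0, 1)).scale(2, pt=Point(1, 0))
Curve((2*x - 1, x), (x, 0, 1))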
|
|
48,536 | 197,428 | 740 | sympy/physics/vector/frame.py | 217 | 58 | def orient_body_fixed(self, parent, angles, rotation_order):
_check_frame(parent)
amounts = list(angles)
for i, v in enumerate(amounts):
if not isinstance(v, Vector):
amounts[i] = sympify(v)
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
# make sure XYZ => 123
rot_order = translate(str(rotation_order), 'XYZxyz', '123123')
if rot_order not in approved_orders:
raise TypeError('The rotation order is not a valid order.')
parent_orient_body = []
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Body orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient_body = (self._rot(a1, amounts[0]) *
self._rot(a2, amounts[1]) *
self._rot(a3, amounts[2]))
self._dcm(parent, parent_orient_body)
try:
from sympy.polys.polyerrors import CoercionFailed
from sympy.physics.vector.functions import kinematic_equations
q1, q2, q3 = amounts
u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy)
templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],
'body', rot_order)
templist = [expand(i) for i in templist]
td = solve(templist, [u1, u2, u3])
u1 = expand(td[u1])
u2 = expand(td[u2])
u3 = expand(td[u3])
wvec = u1 * self.x + u2 * self.y + u3 * self.z
            # NOTE : SymPy 1.7 removed the call to simplify() that occurred
# inside the solve() function, so this restores the pre-1.7
# behavior. See:
# https://github.com/sympy/sympy/issues/23140
# and
# https://github.com/sympy/sympy/issues/23130
wvec = wvec.simplify()
except (CoercionFailed, AssertionError):
wvec = self._w_diff_dcm(parent)
self._ang_vel_dict.update({parent: wvec})
parent._ang_vel_dict.update({self: -wvec})
self._var_dict = {}
| Restores pre-1.7 simplify behavior for orient_body_fixed() | orient_body_fixed | 5afe37b3dee65554d7e4592c20f922cb551fde31 | sympy | frame.py | 12 | 40 | https://github.com/sympy/sympy.git | 7 | 394 | 0 | 155 | 626 | Python | {
"docstring": "Rotates this reference frame relative to the parent reference frame\n by right hand rotating through three successive body fixed simple axis\n rotations. Each subsequent axis of rotation is about the \"body fixed\"\n unit vectors of a new intermediate reference frame. This type of\n rotation is also referred to rotating through the `Euler and Tait-Bryan\n Angles`_.\n\n .. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles\n\n Parameters\n ==========\n\n parent : ReferenceFrame\n Reference frame that this reference frame will be rotated relative\n to.\n angles : 3-tuple of sympifiable\n Three angles in radians used for the successive rotations.\n rotation_order : 3 character string or 3 digit integer\n Order of the rotations about each intermediate reference frames'\n unit vectors. The Euler rotation about the X, Z', X'' axes can be\n specified by the strings ``'XZX'``, ``'131'``, or the integer\n ``131``. There are 12 unique valid rotation orders (6 Euler and 6\n Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx,\n and yxz.\n\n Warns\n ======\n\n UserWarning\n If the orientation creates a kinematic loop.\n\n Examples\n ========\n\n Setup variables for the examples:\n\n >>> from sympy import symbols\n >>> from sympy.physics.vector import ReferenceFrame\n >>> q1, q2, q3 = symbols('q1, q2, q3')\n >>> N = ReferenceFrame('N')\n >>> B = ReferenceFrame('B')\n >>> B1 = ReferenceFrame('B1')\n >>> B2 = ReferenceFrame('B2')\n >>> B3 = ReferenceFrame('B3')\n\n For example, a classic Euler Angle rotation can be done by:\n\n >>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX')\n >>> B.dcm(N)\n Matrix([\n [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],\n [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],\n [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])\n\n This rotates reference frame B relative to reference frame N through\n ``q1`` about ``N.x``, then rotates B again through ``q2`` about\n ``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to\n three successive ``orient_axis()`` calls:\n\n >>> B1.orient_axis(N, N.x, q1)\n >>> B2.orient_axis(B1, B1.y, q2)\n >>> B3.orient_axis(B2, B2.x, q3)\n >>> B3.dcm(N)\n Matrix([\n [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],\n [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],\n [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])\n\n Acceptable rotation orders are of length 3, expressed in as a string\n ``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis\n twice in a row are prohibited.\n\n >>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ')\n >>> B.orient_body_fixed(N, (q1, q2, 0), '121')\n >>> B.orient_body_fixed(N, (q1, q2, q3), 123)\n\n ",
"language": "en",
"n_whitespaces": 954,
"n_words": 365,
"vocab_size": 213
} | def orient_body_fixed(self, parent, angles, rotation_order):
_check_frame(parent)
amounts = list(angles)
for i, v in enumerate(amounts):
if not isinstance(v, Vector):
amounts[i] = sympify(v)
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
# make sure XYZ => 123
rot_order = translate(str(rotation_order), 'XYZxyz', '123123')
if rot_order not in approved_orders:
raise TypeError('The rotation order is not a valid order.')
parent_orient_body = []
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Body orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient_body = (self._rot(a1, amounts[0]) *
self._rot(a2, amounts[1]) *
self._rot(a3, amounts[2]))
self._dcm(parent, parent_orient_body)
try:
from sympy.polys.polyerrors import CoercionFailed
from sympy.physics.vector.functions import kinematic_equations
q1, q2, q3 = amounts
u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy)
templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],
'body', rot_order)
templist = [expand(i) for i in templist]
td = solve(templist, [u1, u2, u3])
u1 = expand(td[u1])
u2 = expand(td[u2])
u3 = expand(td[u3])
wvec = u1 * self.x + u2 * self.y + u3 * self.z
# NOTE : SymPy 1.7 removed the call to simplify() that occured
# inside the solve() function, so this restores the pre-1.7
# behavior. See:
# https://github.com/sympy/sympy/issues/23140
# and
# https://github.com/sympy/sympy/issues/23130
wvec = wvec.simplify()
except (CoercionFailed, AssertionError):
wvec = self._w_diff_dcm(parent)
self._ang_vel_dict.update({parent: wvec})
parent._ang_vel_dict.update({self: -wvec})
self._var_dict = {}
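
A short sketch of what the kinematic-equations branch provides: with time-dependent angles, the angular velocity computed above is retrievable directly (assuming the standard dynamicsymbols workflow):

>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> q1, q2, q3 = dynamicsymbols('q1 q2 q3')
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.orient_body_fixed(N, (q1, q2, q3), 'ZXZ')
>>> w = B.ang_vel_in(N)   # built from the solved (and simplified) kinematic equations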
|
|
14,124 | 66,175 | 42 | erpnext/hr/doctype/leave_block_list/leave_block_list.py | 66 | 18 | def get_applicable_block_lists(employee=None, company=None, all_lists=False):
block_lists = []
if not employee:
employee = frappe.db.get_value("Employee", {"user_id": frappe.session.user})
if not employee:
return []
if not company:
company = frappe.db.get_value("Employee", employee, "company")
def add_block_list(block_list):
if block_list:
if all_lists or not is_user_in_allow_list(block_list):
block_lists.append(block_list)
# per department
department = frappe.db.get_value("Employee", employee, "department")
if department:
block_list = frappe.db.get_value("Department", department, "leave_block_list")
add_block_list(block_list)
# global
for block_list in frappe.db.sql_list( | style: format code with black | get_applicable_block_lists | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | leave_block_list.py | 14 | 20 | https://github.com/frappe/erpnext.git | 6 | 132 | 0 | 43 | 257 | Python | {
"docstring": "select name from `tabLeave Block List`\n\t\twhere applies_to_all_departments=1 and company=%s",
"language": "en",
"n_whitespaces": 8,
"n_words": 10,
"vocab_size": 10
} | def get_applicable_block_lists(employee=None, company=None, all_lists=False):
block_lists = []
if not employee:
employee = frappe.db.get_value("Employee", {"user_id": frappe.session.user})
if not employee:
return []
if not company:
company = frappe.db.get_value("Employee", employee, "company")
def add_block_list(block_list):
if block_list:
if all_lists or not is_user_in_allow_list(block_list):
block_lists.append(block_list)
# per department
department = frappe.db.get_value("Employee", employee, "department")
if department:
block_list = frappe.db.get_value("Department", department, "leave_block_list")
add_block_list(block_list)
# global
for block_list in frappe.db.sql_list(
		"""select name from `tabLeave Block List`
		where applies_to_all_departments=1 and company=%s""",
company,
):
add_block_list(block_list)
return list(set(block_lists))
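
A hypothetical call, assuming a Frappe request context and made-up record names; both arguments fall back to the session user's Employee and its company when omitted:

# names of all Leave Block Lists applying to this employee, de-duplicated
block_lists = get_applicable_block_lists(employee="HR-EMP-00001", company="Wind Power LLC")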
|
|
25,164 | 114,363 | 79 | mindsdb/integrations/libs/storage_handler.py | 20 | 9 | def _setup_connection(self):
# noqa
cur = self.connection.cursor()
if ('store',) not in lis | feat: add docs, improve base class signatures | _setup_connection | 27a34a6a706a06e1241671d29c8cab93d77a19c1 | mindsdb | storage_handler.py | 11 | 6 | https://github.com/mindsdb/mindsdb.git | 2 | 45 | 0 | 20 | 83 | Python | {
"docstring": " Checks that a key-value table exists, otherwise creates it. create table store (key text, value text)",
"language": "en",
"n_whitespaces": 16,
"n_words": 16,
"vocab_size": 15
} | def _setup_connection(self):
# noqa
cur = self.connection.cursor()
if ('store',) not in list(cur.execute("SELECT name FROM sqlite_master WHERE type='table';")):
            cur.execute("create table store (key text, value text)")
self.internal_registry.commit()
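
The same check-then-create pattern as a self-contained sqlite3 sketch, detached from the handler class:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
tables = list(cur.execute("SELECT name FROM sqlite_master WHERE type='table';"))
if ("store",) not in tables:
    cur.execute("create table store (key text, value text)")
    conn.commit()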
|
|
36,574 | 156,129 | 214 | dask/optimization.py | 64 | 20 | def cull(dsk, keys):
if not isinstance(keys, (lis | absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889 | cull | cccb9d8d8e33a891396b1275c2448c352ef40c27 | dask | optimization.py | 15 | 19 | https://github.com/dask/dask.git | 6 | 121 | 0 | 44 | 193 | Python | {
"docstring": "Return new dask with only the tasks required to calculate keys.\n\n In other words, remove unnecessary tasks from dask.\n ``keys`` may be a single key or list of keys.\n\n Examples\n --------\n >>> def inc(x):\n ... return x + 1\n\n >>> def add(x, y):\n ... return x + y\n\n >>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}\n >>> dsk, dependencies = cull(d, 'out')\n >>> dsk # doctest: +ELLIPSIS\n {'out': (<function add at ...>, 'x', 10), 'x': 1}\n >>> dependencies # doctest: +ELLIPSIS\n {'out': ['x'], 'x': []}\n\n Returns\n -------\n dsk: culled dask graph\n dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate\n other optimizations, notably fuse.\n ",
"language": "en",
"n_whitespaces": 277,
"n_words": 109,
"vocab_size": 86
} | def cull(dsk, keys):
if not isinstance(keys, (list, set)):
keys = [keys]
seen = set()
dependencies = dict()
out = {}
work = list(set(flatten(keys)))
while work:
new_work = []
for k in work:
dependencies_k = get_dependencies(dsk, k, as_list=True) # fuse needs lists
out[k] = dsk[k]
dependencies[k] = dependencies_k
for d in dependencies_k:
if d not in seen:
seen.add(d)
new_work.append(d)
work = new_work
return out, dependencies
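
A quick usage sketch showing an unreachable branch being pruned (toy inc as in the docstring):

from dask.optimization import cull

def inc(x):
    return x + 1

d = {"a": 1, "b": (inc, "a"), "c": (inc, "b"), "dead": (inc, "c")}
dsk, deps = cull(d, "b")
assert dsk == {"b": (inc, "a"), "a": 1}    # "c" and "dead" are culled
assert deps == {"b": ["a"], "a": []}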
|
|
34,339 | 148,815 | 92 | freqtrade/exchange/exchange.py | 23 | 11 | def fill_leverage_tiers(self) -> None:
leverage_tiers = self.load_leverage_tiers()
for pair, tiers in leverage_tiers.items():
tiers = []
| freqtrade.exchange edited load_leverage_tiers | fill_leverage_tiers | 41d8330fbc95224020a046bd46eea6252374ee15 | freqtrade | exchange.py | 13 | 11 | https://github.com/freqtrade/freqtrade.git | 3 | 54 | 0 | 17 | 89 | Python | {
"docstring": "\n Assigns property _leverage_tiers to a dictionary of information about the leverage\n allowed on each pair\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 15
} | def fill_leverage_tiers(self) -> None:
leverage_tiers = self.load_leverage_tiers()
for pair, tiers in leverage_tiers.items():
            pair_tiers = []  # separate list so the tiers loop variable is not clobbered
            for tier in tiers:
                pair_tiers.append(self.parse_leverage_tier(tier))
            self._leverage_tiers[pair] = pair_tiers
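
A hypothetical usage sketch (the exchange construction is elided, and _leverage_tiers is a private attribute, so this is illustrative only):

exchange.fill_leverage_tiers()
btc_tiers = exchange._leverage_tiers.get("BTC/USDT:USDT", [])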
|
|
47,889 | 196,389 | 57 | sympy/matrices/expressions/kronecker.py | 21 | 7 | def kronecker_product(*matrices):
if not matrices: | Moved imports to higher level | kronecker_product | 59d22b6bb7287613d598611027f640d068ca5748 | sympy | kronecker.py | 13 | 8 | https://github.com/sympy/sympy.git | 3 | 46 | 0 | 19 | 82 | Python | {
"docstring": "\n The Kronecker product of two or more arguments.\n\n This computes the explicit Kronecker product for subclasses of\n ``MatrixBase`` i.e. explicit matrices. Otherwise, a symbolic\n ``KroneckerProduct`` object is returned.\n\n\n Examples\n ========\n\n For ``MatrixSymbol`` arguments a ``KroneckerProduct`` object is returned.\n Elements of this matrix can be obtained by indexing, or for MatrixSymbols\n with known dimension the explicit matrix can be obtained with\n ``.as_explicit()``\n\n >>> from sympy import kronecker_product, MatrixSymbol\n >>> A = MatrixSymbol('A', 2, 2)\n >>> B = MatrixSymbol('B', 2, 2)\n >>> kronecker_product(A)\n A\n >>> kronecker_product(A, B)\n KroneckerProduct(A, B)\n >>> kronecker_product(A, B)[0, 1]\n A[0, 0]*B[0, 1]\n >>> kronecker_product(A, B).as_explicit()\n Matrix([\n [A[0, 0]*B[0, 0], A[0, 0]*B[0, 1], A[0, 1]*B[0, 0], A[0, 1]*B[0, 1]],\n [A[0, 0]*B[1, 0], A[0, 0]*B[1, 1], A[0, 1]*B[1, 0], A[0, 1]*B[1, 1]],\n [A[1, 0]*B[0, 0], A[1, 0]*B[0, 1], A[1, 1]*B[0, 0], A[1, 1]*B[0, 1]],\n [A[1, 0]*B[1, 0], A[1, 0]*B[1, 1], A[1, 1]*B[1, 0], A[1, 1]*B[1, 1]]])\n\n For explicit matrices the Kronecker product is returned as a Matrix\n\n >>> from sympy import Matrix, kronecker_product\n >>> sigma_x = Matrix([\n ... [0, 1],\n ... [1, 0]])\n ...\n >>> Isigma_y = Matrix([\n ... [0, 1],\n ... [-1, 0]])\n ...\n >>> kronecker_product(sigma_x, Isigma_y)\n Matrix([\n [ 0, 0, 0, 1],\n [ 0, 0, -1, 0],\n [ 0, 1, 0, 0],\n [-1, 0, 0, 0]])\n\n See Also\n ========\n KroneckerProduct\n\n ",
"language": "en",
"n_whitespaces": 371,
"n_words": 212,
"vocab_size": 97
} | def kronecker_product(*matrices):
if not matrices:
raise TypeError("Empty Kronecker product is undefined")
validate(*matrices)
if len(matrices) == 1:
return matrices[0]
else:
return KroneckerProduct(*matrices).doit()
|
|
50,364 | 203,419 | 97 | django/contrib/admin/options.py | 33 | 18 | def _get_obj_does_not_exist_redirect(self, request, opts, object_id):
msg = _("%(name)s with ID โ%(key)sโ doesnโt exist. Perhaps it was deleted?") % {
"name": opts.verbose_name,
"key": unquote(object_id),
}
self.message_user(request, | Refs #33476 -- Reformatted code with Black. | _get_obj_does_not_exist_redirect | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | options.py | 11 | 8 | https://github.com/django/django.git | 1 | 65 | 0 | 32 | 106 | Python | {
"docstring": "\n Create a message informing the user that the object doesn't exist\n and return a redirect to the admin index page.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 17
} | def _get_obj_does_not_exist_redirect(self, request, opts, object_id):
msg = _("%(name)s with ID โ%(key)sโ doesnโt exist. Perhaps it was deleted?") % {
"name": opts.verbose_name,
"key": unquote(object_id),
}
self.message_user(request, msg, messages.WARNING)
url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(url)
|
|
55,829 | 219,816 | 999 | python3.10.4/Lib/_pydecimal.py | 183 | 19 | def compare_total(self, other, context=None):
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
retu | add python 3.10.4 for windows | compare_total | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _pydecimal.py | 14 | 57 | https://github.com/XX-net/XX-Net.git | 27 | 242 | 0 | 61 | 391 | Python | {
"docstring": "Compares self to other using the abstract representations.\n\n This is not like the standard compare, which use their numerical\n value. Note that a total ordering is defined for all possible abstract\n representations.\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 32,
"vocab_size": 28
} | def compare_total(self, other, context=None):
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
|
|
69,581 | 241,553 | 161 | pytorch_lightning/utilities/enums.py | 61 | 11 | def detect_current_mode(cls) -> _FaultTolerantMode:
env_value = os.getenv("PL_FAULT_TOLERANT_TRAINING", "0").lower()
# the int values are kept for backwards compatibility, but long-term we want to keep only the strings
if env_value in ("0", "disabled"):
return _FaultT | Add typing for utilities/enums.py (#11298) | detect_current_mode | a610e043d797ca0bae1ce186829fece79077407a | lightning | enums.py | 11 | 12 | https://github.com/Lightning-AI/lightning.git | 4 | 66 | 0 | 52 | 122 | Python | {
"docstring": "This classmethod detects if `Fault Tolerant` is activated and maps its value to `_FaultTolerantMode`.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 14
} | def detect_current_mode(cls) -> _FaultTolerantMode:
env_value = os.getenv("PL_FAULT_TOLERANT_TRAINING", "0").lower()
# the int values are kept for backwards compatibility, but long-term we want to keep only the strings
if env_value in ("0", "disabled"):
return _FaultTolerantMode.DISABLED
elif env_value in ("1", "automatic"):
return _FaultTolerantMode.AUTOMATIC
elif env_value in ("2", "manual"):
return _FaultTolerantMode.MANUAL
raise MisconfigurationException(
"The environment flag `PL_FAULT_TOLERANT_TRAINING` should be either 'disabled', 'automatic', or 'manual'."
)
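
A small usage sketch, assuming the (private) enum is importable and the variable is set before detection runs:

import os

os.environ["PL_FAULT_TOLERANT_TRAINING"] = "automatic"
assert _FaultTolerantMode.detect_current_mode() is _FaultTolerantMode.AUTOMATIC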
|
|
@keras_export("keras.preprocessing.image.random_channel_shift") | 81,449 | 275,711 | 71 | keras/preprocessing/image.py | 41 | 16 | def apply_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + intensity, min_x, max_x) for x_channel in x
]
x = np.stack(channel_imag | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | apply_channel_shift | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | image.py | 10 | 9 | https://github.com/keras-team/keras.git | 2 | 89 | 1 | 29 | 144 | Python | {
"docstring": "Performs a channel shift.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n Returns:\n Numpy image tensor.\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 28,
"vocab_size": 26
} | def apply_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + intensity, min_x, max_x) for x_channel in x
]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
@keras_export("keras.preprocessing.image.random_channel_shift") |
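
A quick NumPy check of the clip-to-input-range behaviour, assuming a channels-first image to match channel_axis=0:

import numpy as np

img = np.random.uniform(0.0, 255.0, size=(3, 8, 8))
out = apply_channel_shift(img, intensity=20.0, channel_axis=0)
assert out.shape == img.shape
assert out.min() >= img.min() and out.max() <= img.max()  # clipped to the input range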
118,377 | 323,132 | 79 | paddlenlp/trainer/trainer_base.py | 22 | 7 | def _nested_gather(self, tensors, name=None):
if tensors is None:
return
if self.ar | [Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)
* add some datasets for finetune.
* support fine tune for all tastks.
* add trainer prototype.
* init verison for paddlenlp trainer.
* refine trainer.
* update for some details.
* support multi-cards training evaluation.
* support load from ckpt.
* support for export inference model.
* first version of trainer.
* seq cls support clue.
* trainer support for token classification and question answersing tasks.
* fix as reviews.
Co-authored-by: Zeyu Chen <chenzeyu01@baidu.com> | _nested_gather | 44a290e94d1becd1f09fddc3d873f9e19c9d6919 | PaddleNLP | trainer_base.py | 10 | 6 | https://github.com/PaddlePaddle/PaddleNLP.git | 3 | 36 | 0 | 18 | 60 | Python | {
"docstring": "\n Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before\n concatenating them to `gathered`\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 17
} | def _nested_gather(self, tensors, name=None):
if tensors is None:
return
if self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate. |
|
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False]) | 76,234 | 260,411 | 595 | sklearn/linear_model/_glm/tests/test_glm.py | 269 | 37 | def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
# solver=solver, # only lbfgs available
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
model.fit(X, y)
# FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
# for the wide/fat case with n_features > n_samples. Most current GLM solvers do
# NOT return the minimum norm solution with fit_intercept=True.
rtol = 5e-5
if n_samples > n_features:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
assert_allclose(model.predict(X), y, rtol=1e-6)
if fit_intercept:
# But it is not the mi | TST tight tests for GLMs (#23619)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> | test_glm_regression_unpenalized | 9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f | scikit-learn | test_glm.py | 15 | 31 | https://github.com/scikit-learn/scikit-learn.git | 4 | 241 | 1 | 170 | 413 | Python | {
"docstring": "Test that unpenalized GLM converges for all solvers to correct solution.\n\n We work with a simple constructed data set with known solution.\n Note: This checks the minimum norm solution for wide X, i.e.\n n_samples < n_features:\n min ||w||_2 subject to w = argmin deviance(X, y, w)\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 46,
"vocab_size": 42
} | def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
# solver=solver, # only lbfgs available
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
model.fit(X, y)
# FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
# for the wide/fat case with n_features > n_samples. Most current GLM solvers do
# NOT return the minimum norm solution with fit_intercept=True.
rtol = 5e-5
if n_samples > n_features:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
assert_allclose(model.predict(X), y, rtol=1e-6)
if fit_intercept:
# But it is not the minimum norm solution. Otherwise the norms would be
# equal.
norm_solution = np.linalg.norm(np.r_[intercept, coef])
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
assert norm_model > (1 + 1e-12) * norm_solution
# See https://github.com/scikit-learn/scikit-learn/issues/23670.
# Note: Even adding a tiny penalty does not give the minimal norm solution.
# XXX: We could have naively expected LBFGS to find the minimal norm
# solution by adding a very small penalty. Even that fails for a reason we
# do not properly understand at this point.
else:
# When `fit_intercept=False`, LBFGS naturally converges to the minimum norm
# solution on this problem.
# XXX: Do we have any theoretical guarantees why this should be the case?
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False]) |
42,392 | 177,486 | 55 | networkx/algorithms/bipartite/redundancy.py | 33 | 11 | def _node_redundancy(G, v):
n = len(G[v])
overlap = sum(
1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v} | Minor Python 2 cleanup (#6219)
Python3 cleanup
Use dict.keys() for set operations rather than explicitly
creating sets. | _node_redundancy | 1f033118f2e0cca12c6e2375708dc92197b62da6 | networkx | redundancy.py | 15 | 6 | https://github.com/networkx/networkx.git | 3 | 79 | 0 | 29 | 121 | Python | {
"docstring": "Returns the redundancy of the node `v` in the bipartite graph `G`.\n\n If `G` is a graph with `n` nodes, the redundancy of a node is the ratio\n of the \"overlap\" of `v` to the maximum possible overlap of `v`\n according to its degree. The overlap of `v` is the number of pairs of\n neighbors that have mutual neighbors themselves, other than `v`.\n\n `v` must have at least two neighbors in `G`.\n\n ",
"language": "en",
"n_whitespaces": 90,
"n_words": 72,
"vocab_size": 41
} | def _node_redundancy(G, v):
n = len(G[v])
overlap = sum(
1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v}
)
return (2 * overlap) / (n * (n - 1))
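
Through the public wrapper, every node of a 4-cycle has redundancy 1.0, since each pair of neighbors shares the opposite node:

import networkx as nx

G = nx.cycle_graph(4)                       # a bipartite 4-cycle
rc = nx.bipartite.node_redundancy(G)
assert all(v == 1.0 for v in rc.values())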
|
|
49,063 | 198,893 | 958 | sympy/physics/continuum_mechanics/truss.py | 199 | 45 | def solve(self):
count_reaction_loads = 0
for node in self._nodes:
if node in list(self._supports):
if self._supports[node[0]]=='pinned':
count_reaction_loads += 2
elif self._supports[node[0]]=='roller':
count_reaction_loads += 1
coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))]
load_matrix = zeros(2*len(self.nodes), 1)
load_matrix_row = 0
for node in self._nodes:
if node[0] in list(self._loads):
for load in self._loads[node[0]]:
if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'):
load_matrix[load_matrix_row] -= load[0]*math.cos(pi*load[1]/180)
load_matrix[load_matrix_row + 1] -= load[0]*math.sin(pi*load[1]/180)
load_matrix_row += 2
cols = 0
row = 0
for node in self._nodes:
if node[0] in list(self._supports):
if self._supports[node[0]]=='pinned':
coefficients_matrix[row][cols] += 1
coefficients_matrix[row+1][cols+1] += 1
cols += 2
elif self._supports[node[0]]=='roller':
coefficients_matrix[row+1][cols] += 1
cols += 1
row += 2
for member in list(self._members):
start = self._members[member][0]
end = self._members[member][1]
length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2)
start_index = self._node_labels.index(start)
end_index = self._node_labels.index(end)
horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length
vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length
horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length
| solve method added for the truss class | solve | af847114b9138a321933574fc3c3ec73af8b3459 | sympy | truss.py | 20 | 61 | https://github.com/sympy/sympy.git | 22 | 749 | 0 | 80 | 1,153 | Python | {
"docstring": "\n This method solves for all reaction forces of all supports and all internal forces\n of all the members in the truss, provided the Truss is solvable.\n\n A Truss is solvable if the following condition is met,\n\n 2n >= r + m\n\n Where n is the number of nodes, r is the number of reaction forces, where each pinned\n support has 2 reaction forces and each roller has 1, and m is the number of members.\n\n The given condition is derived from the fact that a system of equations is solvable\n only when the number of variables is lesser than or equal to the number of equations.\n Equilibrium Equations in x and y directions give two equations per node giving 2n number\n equations. The number of variables is simply the sum of the number of reaction forces and\n member forces.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> t = Truss()\n >>> t.add_node(\"node_1\", 0, 0)\n >>> t.add_node(\"node_2\", 6, 0)\n >>> t.add_node(\"node_3\", 2, 2)\n >>> t.add_node(\"node_4\", 2, 0)\n >>> t.add_member(\"member_1\", \"node_1\", \"node_4\")\n >>> t.add_member(\"member_2\", \"node_2\", \"node_4\")\n >>> t.add_member(\"member_3\", \"node_1\", \"node_3\")\n >>> t.add_member(\"member_4\", \"node_2\", \"node_3\")\n >>> t.add_member(\"member_5\", \"node_3\", \"node_4\")\n >>> t.apply_load(\"node_4\", magnitude=10, direction=270)\n >>> t.apply_support(\"node_1\", type=\"pinned\")\n >>> t.apply_support(\"node_2\", type=\"roller\")\n >>> t.solve()\n >>> t.reaction_loads\n {'R_node_1_x': 1.83697019872103e-15, 'R_node_1_y': 6.66666666666667, 'R_node_2_y': 3.33333333333333}\n >>> t.internal_forces\n {'member_1': 6.66666666666666, 'member_2': 6.66666666666667, 'member_3': -6.66666666666667*sqrt(2), 'member_4': -3.33333333333333*sqrt(5), 'member_5': 10.0}\n ",
"language": "en",
"n_whitespaces": 450,
"n_words": 218,
"vocab_size": 128
} | def solve(self):
count_reaction_loads = 0
for node in self._nodes:
if node in list(self._supports):
if self._supports[node[0]]=='pinned':
count_reaction_loads += 2
elif self._supports[node[0]]=='roller':
count_reaction_loads += 1
coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))]
load_matrix = zeros(2*len(self.nodes), 1)
load_matrix_row = 0
for node in self._nodes:
if node[0] in list(self._loads):
for load in self._loads[node[0]]:
if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'):
load_matrix[load_matrix_row] -= load[0]*math.cos(pi*load[1]/180)
load_matrix[load_matrix_row + 1] -= load[0]*math.sin(pi*load[1]/180)
load_matrix_row += 2
cols = 0
row = 0
for node in self._nodes:
if node[0] in list(self._supports):
if self._supports[node[0]]=='pinned':
coefficients_matrix[row][cols] += 1
coefficients_matrix[row+1][cols+1] += 1
cols += 2
elif self._supports[node[0]]=='roller':
coefficients_matrix[row+1][cols] += 1
cols += 1
row += 2
for member in list(self._members):
start = self._members[member][0]
end = self._members[member][1]
length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2)
start_index = self._node_labels.index(start)
end_index = self._node_labels.index(end)
horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length
vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length
horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length
vertical_component_end = (self._node_coordinates[start][1]-self._node_coordinates[end][1])/length
coefficients_matrix[start_index*2][cols] += horizontal_component_start
coefficients_matrix[start_index*2+1][cols] += vertical_component_start
coefficients_matrix[end_index*2][cols] += horizontal_component_end
coefficients_matrix[end_index*2+1][cols] += vertical_component_end
cols += 1
forces_matrix = (Matrix(coefficients_matrix)**-1)*load_matrix
self._reaction_loads = {}
i = 0
for node in self._nodes:
if node[0] in list(self._supports):
if self._supports[node[0]]=='pinned':
self._reaction_loads['R_'+str(node[0])+'_x'] = forces_matrix[i]
self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i+1]
i += 2
elif self._supports[node[0]]=='roller':
self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i]
i += 1
for member in list(self._members):
self._internal_forces[member] = forces_matrix[i]
i += 1
return
|
|
48,565 | 197,463 | 319 | sympy/polys/galoistools.py | 127 | 28 | def gf_edf_zassenhaus(f, n, p, K):
factors = [f]
if gf_degree(f) <= n:
return factors
N = gf_degree(f) // n
if p != 2:
b = gf_frobenius_monomial_base(f, p, K)
t = [K.one, K.zero]
while len(factors) < N:
if p == 2:
h = r = t
for i in range(n - 1):
r = gf_pow_mod(r, 2, f, p, K)
h = gf_add(h, r, p, K)
g = gf_gcd(f, h, p, K)
t += [K.zero, K.zero]
else:
r = gf_random(2 * n - 1, p, K)
h = _gf_pow_pnm1d2(r, n, f, b, p, K)
g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)
if g != [K.one] and g != f:
factors = gf_edf_zassenhaus(g, n, p, K) \
+ gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K)
return _sort_factors(factors, multiple=False)
| Improve `gf_edf_zassenhaus()`.
For the case p == 2, we use Algorithm 3.4.8 of [Cohen93], instead of the current procedure.
The current algorithm was failing to terminate on at least one known case (factoring cyclotomic_poly(17) mod 2).
A simple bugfix would have been to change the iteration to `for i in range(n - 1):`
when computing the polynomial `h` (`Tr` in Geddes), but Alg 3.4.8 is thought to
be better in practice. | gf_edf_zassenhaus | d8bc197a19c0f4ea76c088da6f1655f1586cd700 | sympy | galoistools.py | 16 | 24 | https://github.com/sympy/sympy.git | 8 | 247 | 0 | 67 | 352 | Python | {
"docstring": "\n Cantor-Zassenhaus: Probabilistic Equal Degree Factorization\n\n Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and\n an integer ``n``, such that ``n`` divides ``deg(f)``, returns all\n irreducible factors ``f_1,...,f_d`` of ``f``, each of degree ``n``.\n EDF procedure gives complete factorization over Galois fields.\n\n Consider the square-free polynomial ``f = x**3 + x**2 + x + 1`` in\n ``GF(5)[x]``. Let's compute its irreducible factors of degree one::\n\n >>> from sympy.polys.domains import ZZ\n >>> from sympy.polys.galoistools import gf_edf_zassenhaus\n\n >>> gf_edf_zassenhaus([1,1,1,1], 1, 5, ZZ)\n [[1, 1], [1, 2], [1, 3]]\n\n References\n ==========\n\n .. [1] [Gathen99]_\n .. [2] [Geddes92]_\n .. [3] [Cohen93]_\n\n ",
"language": "en",
"n_whitespaces": 160,
"n_words": 96,
"vocab_size": 79
} | def gf_edf_zassenhaus(f, n, p, K):
factors = [f]
if gf_degree(f) <= n:
return factors
N = gf_degree(f) // n
if p != 2:
b = gf_frobenius_monomial_base(f, p, K)
t = [K.one, K.zero]
while len(factors) < N:
if p == 2:
h = r = t
for i in range(n - 1):
r = gf_pow_mod(r, 2, f, p, K)
h = gf_add(h, r, p, K)
g = gf_gcd(f, h, p, K)
t += [K.zero, K.zero]
else:
r = gf_random(2 * n - 1, p, K)
h = _gf_pow_pnm1d2(r, n, f, b, p, K)
g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)
if g != [K.one] and g != f:
factors = gf_edf_zassenhaus(g, n, p, K) \
+ gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K)
return _sort_factors(factors, multiple=False)
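
A sketch exercising the reworked p == 2 branch on the very case named in the commit message: cyclotomic_poly(17) mod 2 has all-one coefficients and splits into two irreducible degree-8 factors, since 2 has multiplicative order 8 modulo 17.

from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_edf_zassenhaus

f = [1] * 17   # x**16 + x**15 + ... + 1, i.e. cyclotomic_poly(17) reduced mod 2
factors = gf_edf_zassenhaus(f, 8, 2, ZZ)
assert len(factors) == 2 and all(len(g) == 9 for g in factors)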
|
|
41,753 | 176,187 | 168 | networkx/linalg/algebraicconnectivity.py | 89 | 23 | def _tracemin_fiedler(L, X, normalized, tol, method):
import n | Use scipy.sparse array datastructure (#5139)
* Step 1: use sparse arrays in nx.to_scipy_sparse_matrix.
Seems like a reasonable place to start.
nx.to_scipy_sparse_matrix is one of the primary interfaces to
scipy.sparse from within NetworkX.
* 1: Use np.outer instead of mult col/row vectors
Fix two instances in modularitymatrix where a new 2D array was being
created via an outer product of two "vectors".
In the matrix case, this was a row vector * a column vector. In the
array case this can be disambiguated by being explicit with np.outer.
* Update _transition_matrix in laplacianmatrix module
- A few instances of matrix multiplication operator
- Add np.newaxis + transpose to get shape right for broadcasting
- Explicitly convert e.g. sp.sparse.spdiags to a csr_array.
* Update directed_combinitorial_laplacian w/ sparse array.
- Wrap spdiags in csr_array and update matmul operators.
* Rm matrix-specific code from lgc and hmn modules
- Replace .A call with appropriate array semantics
- wrap sparse.diags in csr_array.
* Change hits to use sparse array semantics.
- Replace * with @
- Remove superfluous calls to flatten.
* Update sparse matrix usage in layout module.
- Simplify lil.getrowview call
- Wrap spdiags in csr_array.
* lil_matrix -> lil_array in graphmatrix.py.
* WIP: Start working on algebraic connectivity module.
* Incorporate auth mat varname feedback.
* Revert 1D slice and comment for 1D sparse future.
* Add TODOs: rm csr_array wrapper around spdiags etc.
* WIP: cleanup algebraicconn: tracemin_fiedler.
* Typo.
* Finish reviewing algebraicconnectivity.
* Convert bethe_hessian matrix to use sparse arrays.
* WIP: update laplacian.
Update undirected laplacian functions.
* WIP: laplacian - add comment about _transition_matrix return types.
* Finish laplacianmatrix review.
* Update attrmatrix.
* Switch to official laplacian function.
* Update pagerank to use sparse array.
* Switch bipartite matrix to sparse arrays.
* Check from_scipy_sparse_matrix works with arrays.
Modifies test suite.
* Apply changes from review.
* Fix failing docstring tests.
* Fix missing axis for in-place multiplication.
* Use scipy==1.8rc2
* Use matrix multiplication
* Fix PyPy CI
* [MRG] Create plot_subgraphs.py example (#5165)
* Create plot_subgraphs.py
https://github.com/networkx/networkx/issues/4220
* Update plot_subgraphs.py
black
* Update plot_subgraphs.py
lint plus font_size
* Update plot_subgraphs.py
added more plots
* Update plot_subgraphs.py
removed plots from the unit test and added comments
* Update plot_subgraphs.py
lint
* Update plot_subgraphs.py
typos fixed
* Update plot_subgraphs.py
added nodes to the plot of the edges removed that was commented out for whatever reason
* Update plot_subgraphs.py
revert the latest commit - the line was commented out for a reason - it's broken
* Update plot_subgraphs.py
fixed node color issue
* Update plot_subgraphs.py
format fix
* Update plot_subgraphs.py
forgot to draw the nodes... now fixed
* Fix sphinx warnings about heading length.
* Update examples/algorithms/plot_subgraphs.py
* Update examples/algorithms/plot_subgraphs.py
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
Co-authored-by: Dan Schult <dschult@colgate.edu>
* Add traveling salesman problem to example gallery (#4874)
Adds an example of the using Christofides to solve the TSP problem to the example galery.
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037)
* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges()
* Resolved Requested Changes
* Revert changes to degree docstrings.
* Update comments in example.
* Apply wording to edges method in all graph classes.
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
* Compatibility updates from testing with numpy/scipy/pytest rc's (#5226)
* Rm deprecated scipy subpkg access.
* Use recwarn fixture in place of deprecated pytest pattern.
* Rm unnecessary try/except from tests.
* Replace internal `close` fn with `math.isclose`. (#5224)
* Replace internal close fn with math.isclose.
* Fix lines in docstring examples.
* Fix Python 3.10 deprecation warning w/ int div. (#5231)
* Touchups and suggestions for subgraph gallery example (#5225)
* Simplify construction of G with edges rm'd
* Rm unused graph attribute.
* Shorten categorization by node type.
* Simplify node coloring.
* Simplify isomorphism check.
* Rm unit test.
* Rm redundant plotting of each subgraph.
* Use new package name (#5234)
* Allowing None edges in weight function of bidirectional Dijkstra (#5232)
* added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None.
* changed syntax for better readability and code duplicate avoidance
Co-authored-by: Hohmann, Nikolas <nikolas.hohmann@tu-darmstadt.de>
* Add an FAQ about assigning issues. (#5182)
* Add FAQ about assigning issues.
* Add note about linking issues from new PRs.
* Update dev deps (#5243)
* Update minor doc issues with tex notation (#5244)
* Add FutureWarnings to fns that return sparse matrices
- biadjacency_matrix.
- bethe_hessian_matrix.
- incidence_matrix.
- laplacian functions.
- modularity_matrix functions.
- adjacency_matrix.
* Add to_scipy_sparse_array and use it everywhere.
Add a new conversion function to preserve array semantics internally
while not altering behavior for users.
Also adds FutureWarning to to_scipy_sparse_matrix.
* Add from_scipy_sparse_array. Supercedes from_scipy_sparse_matrix.
* Handle deprecations in separate PR.
* Fix docstring examples.
Co-authored-by: Mridul Seth <mail@mriduls.com>
Co-authored-by: Jarrod Millman <jarrod.millman@gmail.com>
Co-authored-by: Andrew Knyazev <andrew.knyazev@ucdenver.edu>
Co-authored-by: Dan Schult <dschult@colgate.edu>
Co-authored-by: eskountis <56514439+eskountis@users.noreply.github.com>
Co-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com>
Co-authored-by: NikHoh <nikhoh@web.de>
Co-authored-by: Hohmann, Nikolas <nikolas.hohmann@tu-darmstadt.de>
Co-authored-by: Sultan Orazbayev <contact@econpoint.com>
Co-authored-by: Mridul Seth <mail@mriduls.com> | _tracemin_fiedler | 5dfd57af2a141a013ae3753e160180b82bec9469 | networkx | algebraicconnectivity.py | 14 | 42 | https://github.com/networkx/networkx.git | 7 | 412 | 0 | 61 | 178 | Python | {
"docstring": "Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.\n\n The Fiedler vector of a connected undirected graph is the eigenvector\n corresponding to the second smallest eigenvalue of the Laplacian matrix\n of the graph. This function starts with the Laplacian L, not the Graph.\n\n Parameters\n ----------\n L : Laplacian of a possibly weighted or normalized, but undirected graph\n\n X : Initial guess for a solution. Usually a matrix of random numbers.\n This function allows more than one column in X to identify more than\n one eigenvector if desired.\n\n normalized : bool\n Whether the normalized Laplacian matrix is used.\n\n tol : float\n Tolerance of relative residual in eigenvalue computation.\n Warning: There is no limit on number of iterations.\n\n method : string\n Should be 'tracemin_pcg' or 'tracemin_lu'.\n Otherwise exception is raised.\n\n Returns\n -------\n sigma, X : Two NumPy arrays of floats.\n The lowest eigenvalues and corresponding eigenvectors of L.\n The size of input X determines the size of these outputs.\n As this is for Fiedler vectors, the zero eigenvalue (and\n constant eigenvector) are avoided.\n ",
"language": "en",
"n_whitespaces": 291,
"n_words": 172,
"vocab_size": 108
} | def _tracemin_fiedler(L, X, normalized, tol, method):
import numpy as np
import scipy as sp
import scipy.linalg # call as sp.linalg
import scipy.linalg.blas # call as sp.linalg.blas
import scipy.sparse # call as sp.sparse
n = X.shape[0]
if normalized:
# Form the normalized Laplacian matrix and determine the eigenvector of
# its nullspace.
e = np.sqrt(L.diagonal())
# TODO: rm csr_array wrapper when spdiags array creation becomes available
D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format="csr"))
L = D @ L @ D
e *= 1.0 / np.linalg.norm(e, 2)
if normalized:
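
In practice this routine is reached via the public spectral API; a hedged sketch:

import networkx as nx

G = nx.path_graph(10)
fv = nx.fiedler_vector(G, method="tracemin_pcg")
# For a path graph the Fiedler vector is monotone along the path, so it
# changes sign exactly once -- the classic spectral-ordering property.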
|
|
@pytest.mark.skipif(
not os.environ.get("OPENAI_API_KEY", None),
reason="Please export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
) | 75,229 | 258,374 | 98 | test/nodes/test_prompt_node.py | 55 | 22 | def test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml(tmp_path):
with open(tmp_path / "tmp_config_with_prompt_template.yml", "w") as tmp_file:
tmp_file.write(
f
)
pipeline = Pipeline.load_from_yam | feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667)
Co-authored-by: ZanSara <sarazanzo94@gmail.com> | test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml | 9ebf164cfdfb320503b7161493420c1b0ec577a3 | haystack | test_prompt_node.py | 13 | 43 | https://github.com/deepset-ai/haystack.git | 1 | 78 | 1 | 50 | 181 | Python | {
"docstring": "\n version: ignore\n components:\n - name: pmodel\n type: PromptModel\n params:\n model_name_or_path: google/flan-t5-small\n model_kwargs:\n torch_dtype: torch.bfloat16\n - name: question_generation_template\n type: PromptTemplate\n params:\n name: question-generation-new\n prompt_text: \"Given the context please generate a question. Context: $documents; Question:\"\n - name: p1\n params:\n model_name_or_path: pmodel\n default_prompt_template: question_generation_template\n output_variable: questions\n type: PromptNode\n - name: p2\n params:\n model_name_or_path: pmodel\n default_prompt_template: question-answering\n type: PromptNode\n pipelines:\n - name: query\n nodes:\n - name: p1\n inputs:\n - Query\n - name: p2\n inputs:\n - p1\n ",
"language": "en",
"n_whitespaces": 523,
"n_words": 72,
"vocab_size": 40
} | def test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml(tmp_path):
with open(tmp_path / "tmp_config_with_prompt_template.yml", "w") as tmp_file:
tmp_file.write(
f
)
pipeline = Pipeline.load_from_yaml(path=tmp_path / "tmp_config_with_prompt_template.yml")
result = pipeline.run(query="not relevant", documents=[Document("Berlin is an amazing city.")])
assert "Berlin" in result["results"][0]
assert len(result["meta"]["invocation_context"]) > 0
@pytest.mark.skipif(
not os.environ.get("OPENAI_API_KEY", None),
reason="Please export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
) |
19,851 | 100,362 | 595 | lib/utils.py | 109 | 33 | def _download_model(self):
self.logger.info("Downloading model: '%s' from: %s", self._model_name, self._url_download)
for attempt in range(self._retries):
try:
downloaded_size = | Update code to support Tensorflow versions up to 2.8 (#1213)
* Update maximum tf version in setup + requirements
* - bump max version of tf version in launcher
- standardise tf version check
* update keras get_custom_objects for tf>2.6
* bugfix: force black text in GUI file dialogs (linux)
* dssim loss - Move to stock tf.ssim function
* Update optimizer imports for compatibility
* fix logging for tf2.8
* Fix GUI graphing for TF2.8
* update tests
* bump requirements.txt versions
* Remove limit on nvidia-ml-py
* Graphing bugfixes
- Prevent live graph from displaying if data not yet available
* bugfix: Live graph. Collect loss labels correctly
* fix: live graph - swallow inconsistent loss errors
* Bugfix: Prevent live graph from clearing during training
* Fix graphing for AMD | _download_model | c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf | faceswap | utils.py | 17 | 26 | https://github.com/deepfakes/faceswap.git | 5 | 220 | 0 | 89 | 366 | Python | {
"docstring": " Download the model zip from github to the cache folder. ",
"language": "en",
"n_whitespaces": 11,
"n_words": 10,
"vocab_size": 9
} | def _download_model(self):
self.logger.info("Downloading model: '%s' from: %s", self._model_name, self._url_download)
for attempt in range(self._retries):
try:
downloaded_size = self._url_partial_size
req = urllib.request.Request(self._url_download)
if downloaded_size != 0:
req.add_header("Range", f"bytes={downloaded_size}-")
with urllib.request.urlopen(req, timeout=10) as response:
self.logger.debug("header info: {%s}", response.info())
self.logger.debug("Return Code: %s", response.getcode())
self._write_zipfile(response, downloaded_size)
break
except (socket_error, socket_timeout,
urllib.error.HTTPError, urllib.error.URLError) as err:
if attempt + 1 < self._retries:
self.logger.warning("Error downloading model (%s). Retrying %s of %s...",
str(err), attempt + 2, self._retries)
else:
self.logger.error("Failed to download model. Exiting. (Error: '%s', URL: "
"'%s')", str(err), self._url_download)
self.logger.info("You can try running again to resume the download.")
self.logger.info("Alternatively, you can manually download the model from: %s "
"and unzip the contents to: %s",
self._url_download, self._cache_dir)
sys.exit(1)
|
|
76,333 | 260,546 | 36 | sklearn/manifold/_locally_linear.py | 8 | 7 | def fit_transform(self, X, y=None):
self._validate_params()
self._fit_transform(X)
| MAINT Use _validate_params in LocallyLinearEmbedding (#23938)
Co-authored-by: jeremiedbb <jeremiedbb@yahoo.fr> | fit_transform | ceeda362402bfc978bcc93d02481fe28e21a07ad | scikit-learn | _locally_linear.py | 7 | 4 | https://github.com/scikit-learn/scikit-learn.git | 1 | 27 | 0 | 8 | 45 | Python | {
"docstring": "Compute the embedding vectors for data X and transform X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training set.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n Returns the instance itself.\n ",
"language": "en",
"n_whitespaces": 134,
"n_words": 45,
"vocab_size": 37
} | def fit_transform(self, X, y=None):
self._validate_params()
self._fit_transform(X)
return self.embedding_
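
A hedged end-to-end usage sketch:

from sklearn.datasets import load_digits
from sklearn.manifold import LocallyLinearEmbedding

X, _ = load_digits(return_X_y=True)
embedding = LocallyLinearEmbedding(n_components=2).fit_transform(X[:100])
print(embedding.shape)   # (100, 2)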
|
|
5,345 | 30,144 | 169 | tests/types/test_song.py | 84 | 20 | def test_song_from_data_dump():
# Loads from str
song = Song.from_data_dump(
)
assert song.name == "Ropes"
assert song.artists == ["Dirty Palm", "Chandler Jewels"]
assert song.album_name == "Ropes"
assert song.album_artist == "Dirty Palm"
assert song.genres == ["gaming edm", "melbourne bounce international"]
assert song.disc_number == 1
assert song.duration == 188
assert song.year == 2021
assert song.date == "2021-10-28"
assert song.track_n | v4 init | test_song_from_data_dump | fa2ad657482aca9dc628e6d7062b8badf2706bb6 | spotify-downloader | test_song.py | 9 | 47 | https://github.com/spotDL/spotify-downloader.git | 1 | 119 | 0 | 50 | 207 | Python | {
"docstring": "\n Tests if Song.from_data_dump() works correctly.\n \n {\n \"name\": \"Ropes\",\n \"artists\": [\"Dirty Palm\", \"Chandler Jewels\"],\n \"album_name\": \"Ropes\",\n \"album_artist\": \"Dirty Palm\",\n \"genres\": [\"gaming edm\", \"melbourne bounce international\"],\n \"disc_number\": 1,\n \"duration\": 188,\n \"year\": 2021,\n \"date\": \"2021-10-28\",\n \"track_number\": 1,\n \"tracks_count\": 1,\n \"isrc\": \"GB2LD2110301\",\n \"song_id\": \"1t2qKa8K72IBC8yQlhD9bU\",\n \"cover_url\": \"https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332\",\n \"explicit\": false,\n \"download_url\": null,\n \"artist\" : \"Dirty Palm\",\n \"disc_count\": 1,\n \"copyright\": \"\",\n \"publisher\": \"\",\n \"url\": \"https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU\"\n }\n ",
"language": "en",
"n_whitespaces": 319,
"n_words": 59,
"vocab_size": 51
} | def test_song_from_data_dump():
# Loads from str
song = Song.from_data_dump(
)
assert song.name == "Ropes"
assert song.artists == ["Dirty Palm", "Chandler Jewels"]
assert song.album_name == "Ropes"
assert song.album_artist == "Dirty Palm"
assert song.genres == ["gaming edm", "melbourne bounce international"]
assert song.disc_number == 1
assert song.duration == 188
assert song.year == 2021
assert song.date == "2021-10-28"
assert song.track_number == 1
assert song.tracks_count == 1
assert song.isrc == "GB2LD2110301"
assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU"
assert (
song.cover_url
== "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"
)
assert song.explicit == False
assert song.download_url == None
|
|
77,125 | 262,100 | 482 | TTS/tts/models/vits.py | 73 | 36 | def test_run(self) -> Tuple[Dict, Dict]:
print(" | > Synthesizing test sentences.")
test_audios = {}
test_figures = {}
test_sentences = self.config.test_sentences
for idx, s_info in enumerate(test_sentences):
try:
aux_inputs = self.get_aux_input_from_test_sentences(s_info)
wav, alignment, _, _ = synthesis(
self,
aux_inputs["text"],
self.config,
"cuda" in str(next(self.parameters()).device),
ap,
speaker_id=aux_inputs["speaker_id"],
d_vector=aux_inputs["d_vector"],
style_wav=aux_inputs["style_wav"],
language_id=aux_inputs["language_id"],
language_name=aux_inputs["language_name"],
enable_eos_bos_chars=self.config.enable_eos_bos_chars,
use_griffin_lim=True,
do_trim_silence=False,
).values()
test_audios["{}-audio".format(idx)] = wav
test_figures["{}-alignment".format(idx)] = plot_alignment(alignment.T, output_fig=False)
except: # pylint: disable=bare-except
print(" !! Error creating Test Sentence -", idx)
return test_figures, test_audios
| Update VITS for the new API | test_run | ea965a5683c56a39570b4cc91e86cd2bb9799308 | TTS | vits.py | 22 | 35 | https://github.com/coqui-ai/TTS.git | 3 | 190 | 0 | 63 | 304 | Python | {
"docstring": "Generic test run for `tts` models used by `Trainer`.\n\n You can override this for a different behaviour.\n\n Returns:\n Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.\n ",
"language": "en",
"n_whitespaces": 61,
"n_words": 29,
"vocab_size": 27
} | def test_run(self) -> Tuple[Dict, Dict]:
print(" | > Synthesizing test sentences.")
test_audios = {}
test_figures = {}
test_sentences = self.config.test_sentences
for idx, s_info in enumerate(test_sentences):
try:
aux_inputs = self.get_aux_input_from_test_sentences(s_info)
wav, alignment, _, _ = synthesis(
self,
aux_inputs["text"],
self.config,
"cuda" in str(next(self.parameters()).device),
ap,
speaker_id=aux_inputs["speaker_id"],
d_vector=aux_inputs["d_vector"],
style_wav=aux_inputs["style_wav"],
language_id=aux_inputs["language_id"],
language_name=aux_inputs["language_name"],
enable_eos_bos_chars=self.config.enable_eos_bos_chars,
use_griffin_lim=True,
do_trim_silence=False,
).values()
test_audios["{}-audio".format(idx)] = wav
test_figures["{}-alignment".format(idx)] = plot_alignment(alignment.T, output_fig=False)
except: # pylint: disable=bare-except
print(" !! Error creating Test Sentence -", idx)
return test_figures, test_audios
|
|
56,469 | 221,674 | 298 | python3.10.4/Lib/configparser.py | 60 | 19 | def read_dict(self, dictionary, source='<dict>'):
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
| add python 3.10.4 for windows | read_dict | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | configparser.py | 14 | 18 | https://github.com/XX-net/XX-Net.git | 9 | 141 | 0 | 42 | 222 | Python | {
"docstring": "Read configuration from a dictionary.\n\n Keys are section names, values are dictionaries with keys and values\n that should be present in the section. If the used dictionary type\n preserves order, sections and their keys will be added in order.\n\n All types held in the dictionary are converted to strings during\n reading, including section names, option names and keys.\n\n Optional second argument is the `source' specifying the name of the\n dictionary being read.\n ",
"language": "en",
"n_whitespaces": 128,
"n_words": 72,
"vocab_size": 54
} | def read_dict(self, dictionary, source='<dict>'):
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
|
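A short usage sketch for the read_dict API documented above, assuming only the standard library; the section and key names are illustrative:

import configparser

# Build a parser from a plain dictionary; section order is preserved
# because dicts keep insertion order in modern Python.
parser = configparser.ConfigParser()
parser.read_dict(
    {
        "server": {"host": "localhost", "port": 8080},
        "client": {"retries": 3},
    },
    source="<defaults>",
)
assert parser.get("server", "host") == "localhost"
assert parser.getint("client", "retries") == 3  # values were stringified on read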
|
54,501 | 216,287 | 382 | salt/channel/client.py | 67 | 22 | def send(self, load, tries=3, timeout=60, raw=False):
_try = 1
while True:
try:
if self.crypt == "clear":
log.trace("ReqChannel send clear load=%r", load)
ret = yield self._uncrypted_transfer(load, timeout=timeout)
else:
log.trace("ReqChannel send crypt load=%r", load)
ret = yield self._crypted_transfer(
load, timeout=timeout, raw=raw
)
break
except Exception as exc:
log.error("Failed to send msg %r", dir(exc))
if _try == tries:
raise #salt.exceptions.SaltClientError("Connection to master lost")
else:
_try += 1
continue
raise salt.ext.tornado.gen.Return(ret)
| Move retries to channel | send | 25e7a51c729cca539778c53f0858d6070e7d76cb | salt | client.py | 17 | 21 | https://github.com/saltstack/salt.git | 5 | 125 | 0 | 49 | 206 | Python | {
"docstring": "\n Send a request, return a future which will complete when we send the message\n\n :param dict load: A load to send across the wire\n :param int tries: The number of times to make before failure\n :param int timeout: The number of seconds on a response before failing\n ",
"language": "en",
"n_whitespaces": 83,
"n_words": 47,
"vocab_size": 35
} | def send(self, load, tries=3, timeout=60, raw=False):
_try = 1
while True:
try:
if self.crypt == "clear":
log.trace("ReqChannel send clear load=%r", load)
ret = yield self._uncrypted_transfer(load, timeout=timeout)
else:
log.trace("ReqChannel send crypt load=%r", load)
ret = yield self._crypted_transfer(
load, timeout=timeout, raw=raw
)
break
except Exception as exc:
log.error("Failed to send msg %r", dir(exc))
if _try == tries:
raise #salt.exceptions.SaltClientError("Connection to master lost")
else:
_try += 1
continue
raise salt.ext.tornado.gen.Return(ret)
|
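The send() coroutine above retries a fixed number of times before re-raising. A minimal synchronous sketch of the same retry pattern; the names below are illustrative, not part of Salt's API:

import logging

log = logging.getLogger(__name__)

def send_with_retries(transfer, load, tries=3):
    # Re-attempt the transfer up to `tries` times, re-raising the
    # last exception once the retry budget is exhausted.
    attempt = 1
    while True:
        try:
            return transfer(load)
        except Exception as exc:
            log.error("Failed to send msg %r", exc)
            if attempt == tries:
                raise
            attempt += 1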
|
50,438 | 203,542 | 83 | django/contrib/admin/utils.py | 25 | 13 | def get_fields_from_path(model, path):
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.app | Refs #33476 -- Reformatted code with Black. | get_fields_from_path | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | utils.py | 14 | 10 | https://github.com/django/django.git | 3 | 58 | 0 | 20 | 96 | Python | {
"docstring": "Return list of Fields given path relative to model.\n\n e.g. (ModelX, \"user__groups__name\") -> [\n <django.db.models.fields.related.ForeignKey object at 0x...>,\n <django.db.models.fields.related.ManyToManyField object at 0x...>,\n <django.db.models.fields.CharField object at 0x...>,\n ]\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 27,
"vocab_size": 21
} | def get_fields_from_path(model, path):
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
|
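The docstring's mapping (user__groups__name -> [ForeignKey, ManyToMany, CharField]) is just repeated field lookups that re-anchor on the related model after each hop. A framework-free sketch of that traversal over a toy schema; every name here is made up, not Django API:

LOOKUP_SEP = "__"

# Toy schema: each "model" maps field name -> (field, related_model_or_None).
GROUP = {"name": ("CharField:name", None)}
USER = {"groups": ("ManyToMany:groups", GROUP)}
MODEL_X = {"user": ("ForeignKey:user", USER)}

def fields_from_path(model, path):
    fields = []
    parent = model
    for piece in path.split(LOOKUP_SEP):
        field, related = parent[piece]
        fields.append(field)
        parent = related or parent
    return fields

assert fields_from_path(MODEL_X, "user__groups__name") == [
    "ForeignKey:user", "ManyToMany:groups", "CharField:name"
]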
|
56,788 | 222,870 | 310 | python3.10.4/Lib/distutils/dist.py | 92 | 23 | def find_config_files(self):
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
if self.want_user_cfg:
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
| add python 3.10.4 for windows | find_config_files | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | dist.py | 13 | 21 | https://github.com/XX-net/XX-Net.git | 7 | 150 | 0 | 61 | 267 | Python | {
"docstring": "Find as many configuration files as should be processed for this\n platform, and return a list of filenames in the order in which they\n should be parsed. The filenames returned are guaranteed to exist\n (modulo nasty race conditions).\n\n There are three possible config files: distutils.cfg in the\n Distutils installation directory (ie. where the top-level\n Distutils __inst__.py file lives), a file in the user's home\n directory named .pydistutils.cfg on Unix and pydistutils.cfg\n on Windows/Mac; and setup.cfg in the current directory.\n\n The file in the user's home directory can be disabled with the\n --no-user-cfg option.\n ",
"language": "en",
"n_whitespaces": 171,
"n_words": 93,
"vocab_size": 64
} | def find_config_files(self):
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
if self.want_user_cfg:
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
if DEBUG:
self.announce("using config files: %s" % ', '.join(files))
return files
|
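The three lookup locations can be mirrored in a short sketch. The per-user filename switch follows the code above; the system-file location passed in is an assumption, since the original derives it from the installed distutils package:

import os

def candidate_config_files(pkg_dir, want_user_cfg=True):
    # Order matters: system file, then per-user file, then local setup.cfg;
    # later files override earlier ones when parsed in sequence.
    user_name = ".pydistutils.cfg" if os.name == "posix" else "pydistutils.cfg"
    candidates = [os.path.join(pkg_dir, "distutils.cfg")]
    if want_user_cfg:
        candidates.append(os.path.join(os.path.expanduser("~"), user_name))
    candidates.append("setup.cfg")
    return [p for p in candidates if os.path.isfile(p)]

print(candidate_config_files(pkg_dir="."))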
|
10,339 | 51,516 | 87 | modules/image/Image_gan/gan/stgan_bald/processor.py | 42 | 15 | def get_save_image_name(org_im_path, output_dir, num):
    # name prefix of original image
org_im_name = os.path.split(org_im_path)[-1]
im_prefix = os.path.splitext(org_im_name)[0]
ext = '.png'
# save image path
save_im_path = os.path.join(output_dir, im_prefix + ext)
if os.path.exists(sav | update stgan_bald (#2022) | get_save_image_name | 02d7e5514b0da9a7ebabb004533b274056c954e2 | PaddleHub | processor.py | 14 | 9 | https://github.com/PaddlePaddle/PaddleHub.git | 2 | 85 | 0 | 28 | 137 | Python | {
"docstring": "\n Get save image name from source image path.\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 7
} | def get_save_image_name(org_im_path, output_dir, num):
    # name prefix of original image
org_im_name = os.path.split(org_im_path)[-1]
im_prefix = os.path.splitext(org_im_name)[0]
ext = '.png'
# save image path
save_im_path = os.path.join(output_dir, im_prefix + ext)
if os.path.exists(save_im_path):
save_im_path = os.path.join(
output_dir, im_prefix + str(num) + ext)
return save_im_path
|
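The collision rule above (append num only when the plain name is taken) in isolation, as a small reusable helper under the same assumptions:

import os

def unique_save_path(org_im_path, output_dir, num, ext=".png"):
    # Reuse the source file's stem; disambiguate with `num` on collision.
    stem = os.path.splitext(os.path.split(org_im_path)[-1])[0]
    path = os.path.join(output_dir, stem + ext)
    if os.path.exists(path):
        path = os.path.join(output_dir, stem + str(num) + ext)
    return path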
|
24,007 | 110,265 | 310 | lib/matplotlib/colors.py | 175 | 24 | def rgb_to_hsv(arr):
arr = np.asarray(arr)
# check length of the last dimension, should be _some_ sort of rgb
if arr.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {} was found.".format(arr.shape))
in_shape = arr.shape
arr = np.array(
arr, copy=False,
dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints.
ndmin=2, # In case input was 1D.
)
out = np.zeros_like(arr)
arr_max = arr.max(-1)
ipos = arr_max > 0
delta = arr.ptp(-1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[..., 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[..., 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[..., 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[..., 0] = (out[..., 0] / 6.0) % 1.0
out[..., 1] = s
out[..., 2] = ar | DOC: improve grammar and consistency | rgb_to_hsv | 9b6abd0b4933811e0a45c2535ab8fd107db65dd9 | matplotlib | colors.py | 13 | 28 | https://github.com/matplotlib/matplotlib.git | 2 | 308 | 0 | 95 | 452 | Python | {
"docstring": "\n Convert float RGB values (in the range [0, 1]), in a numpy array to HSV\n values.\n\n Parameters\n ----------\n arr : (..., 3) array-like\n All values must be in the range [0, 1]\n\n Returns\n -------\n (..., 3) ndarray\n Colors converted to HSV values in range [0, 1]\n ",
"language": "en",
"n_whitespaces": 86,
"n_words": 46,
"vocab_size": 32
} | def rgb_to_hsv(arr):
arr = np.asarray(arr)
# check length of the last dimension, should be _some_ sort of rgb
if arr.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {} was found.".format(arr.shape))
in_shape = arr.shape
arr = np.array(
arr, copy=False,
dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints.
ndmin=2, # In case input was 1D.
)
out = np.zeros_like(arr)
arr_max = arr.max(-1)
ipos = arr_max > 0
delta = arr.ptp(-1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[..., 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[..., 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[..., 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[..., 0] = (out[..., 0] / 6.0) % 1.0
out[..., 1] = s
out[..., 2] = arr_max
return out.reshape(in_shape)
|
|
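A quick scalar check of the conversion rules against the standard library; the vectorized code above applies the same hue/saturation/value arithmetic per element of the trailing axis:

import colorsys

r, g, b = 0.2, 0.4, 0.4
h, s, v = colorsys.rgb_to_hsv(r, g, b)
# hue: cyan-ish -> 0.5; saturation: delta/max = 0.2/0.4; value: max = 0.4
assert abs(h - 0.5) < 1e-12
assert abs(s - 0.5) < 1e-12
assert abs(v - 0.4) < 1e-12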
8,969 | 46,726 | 382 | tests/jobs/test_scheduler_job.py | 64 | 38 | def test_scheduler_verify_pool_full(self, dag_maker, configs):
with conf_vars(configs):
with dag_maker(dag_id='test_scheduler_verify_pool_full'):
BashOperator(
task_id='dummy',
pool='test_scheduler_verify_pool_full',
bash_command='echo hi',
)
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
session.flush()
self.scheduler_job = SchedulerJob(executor=self.null_exec)
self.scheduler_job.processor_agent = mock.MagicMock()
# Create 2 dagruns, which will create 2 task instances.
dr = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
)
self.scheduler_job._schedule_dag_run(dr, session)
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.RUNNING)
self.scheduler_job._schedule_dag_run(dr, session)
session.flush()
task_instances_list = self.scheduler_job._executable_task_instances_to_queued(
max_tis=32, session=session
)
assert len(task_instances_list) == 1
| Add dag-processor cli command (#22305) | test_scheduler_verify_pool_full | f5f11aefea775448105098b59c4657fa1809fb94 | airflow | test_scheduler_job.py | 13 | 25 | https://github.com/apache/airflow.git | 1 | 173 | 0 | 49 | 285 | Python | {
"docstring": "\n Test task instances not queued when pool is full\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def test_scheduler_verify_pool_full(self, dag_maker, configs):
with conf_vars(configs):
with dag_maker(dag_id='test_scheduler_verify_pool_full'):
BashOperator(
task_id='dummy',
pool='test_scheduler_verify_pool_full',
bash_command='echo hi',
)
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
session.flush()
self.scheduler_job = SchedulerJob(executor=self.null_exec)
self.scheduler_job.processor_agent = mock.MagicMock()
# Create 2 dagruns, which will create 2 task instances.
dr = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
)
self.scheduler_job._schedule_dag_run(dr, session)
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.RUNNING)
self.scheduler_job._schedule_dag_run(dr, session)
session.flush()
task_instances_list = self.scheduler_job._executable_task_instances_to_queued(
max_tis=32, session=session
)
assert len(task_instances_list) == 1
|
|
4,481 | 22,868 | 215 | VoiceAssistant/Project_Basic_struct/textRead.py | 74 | 21 | def ms_word():
# TODO : Take location input from the user
try:
speak("Enter the document's location - ")
| VoiceAssistant
This is Voice Assistant coded using Python which can do the following: -
1. Speak Text entered by User.
2. Search anything on Google.
3. Search anything on Wikipedia.
4. Read an MS Word(docx) document.
5. Read a book(PDF).
6. Can be used as a Dictator. | ms_word | 39c49e07066b2a53e176d555af6a7bf8aabb8a9c | Python | textRead.py | 12 | 16 | https://github.com/geekcomputers/Python.git | 3 | 86 | 0 | 57 | 166 | Python | {
"docstring": "[Print and speak out a ms_word docx file as specified in the path]\r\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 13,
"vocab_size": 13
} | def ms_word():
# TODO : Take location input from the user
try:
speak("Enter the document's location - ")
location = input("Enter the document's location - ")
file_loc = doubleslash(location)
doc = docx.Document(file_loc)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
#print(fullText)
doc_file = '\n'.join(fullText)
print(doc_file)
speak(doc_file)
except Exception as exp:
#print(exp)
print(f"ERROR - {exp}")
        print(Fore.YELLOW + "I couldn't locate the file!\nIf you didn't specify the extension of the file, please specify it.")
return "None"
|
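A compact, dependency-checked version of the docx-reading step; python-docx is assumed to be installed, and the path is hypothetical:

import docx  # pip install python-docx

def read_docx(path):
    # Join paragraph texts into one printable string.
    document = docx.Document(path)
    return "\n".join(paragraph.text for paragraph in document.paragraphs)

# Example (hypothetical path):
# print(read_docx(r"C:\docs\notes.docx"))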
|
41,792 | 176,252 | 369 | networkx/readwrite/json_graph/tree.py | 151 | 18 | def tree_data(G, root, attrs=None, ident="id", children="children"):
if G.number_of_nodes() != G.number_of_edges() + 1:
raise TypeError("G is not a tree.")
if not G.is_directed():
raise TypeError("G is not directed.")
| Add exception for unconnected graph (#5287) | tree_data | cceb43d15e1d01476c8c15ff273399dee0e3b1aa | networkx | tree.py | 11 | 31 | https://github.com/networkx/networkx.git | 6 | 167 | 0 | 104 | 247 | Python | {
"docstring": "Returns data in tree format that is suitable for JSON serialization\n and use in Javascript documents.\n\n Parameters\n ----------\n G : NetworkX graph\n G must be an oriented tree\n\n root : node\n The root of the tree\n\n attrs : dict\n A dictionary that contains two keys 'id' and 'children'. The\n corresponding values provide the attribute names for storing\n NetworkX-internal graph data. The values should be unique. Default\n value: :samp:`dict(id='id', children='children')`.\n\n If some user-defined graph data use these attribute names as data keys,\n they may be silently dropped.\n\n .. deprecated:: 2.6\n\n The `attrs` keyword argument is replaced by `ident` and `children`\n and will be removed in networkx 3.0\n\n ident : string\n Attribute name for storing NetworkX-internal graph data. `ident` must\n have a different value than `children`. The default is 'id'.\n\n children : string\n Attribute name for storing NetworkX-internal graph data. `children`\n must have a different value than `ident`. The default is 'children'.\n\n Returns\n -------\n data : dict\n A dictionary with node-link formatted data.\n\n Raises\n ------\n NetworkXError\n If `children` and `ident` attributes are identical.\n\n Examples\n --------\n >>> from networkx.readwrite import json_graph\n >>> G = nx.DiGraph([(1, 2)])\n >>> data = json_graph.tree_data(G, root=1)\n\n To serialize with json\n\n >>> import json\n >>> s = json.dumps(data)\n\n Notes\n -----\n Node attributes are stored in this format but keys\n for attributes must be strings if you want to serialize with JSON.\n\n Graph and edge attributes are not stored.\n\n See Also\n --------\n tree_graph, node_link_data, adjacency_data\n ",
"language": "en",
"n_whitespaces": 450,
"n_words": 235,
"vocab_size": 139
} | def tree_data(G, root, attrs=None, ident="id", children="children"):
if G.number_of_nodes() != G.number_of_edges() + 1:
raise TypeError("G is not a tree.")
if not G.is_directed():
raise TypeError("G is not directed.")
if not nx.is_weakly_connected(G):
raise TypeError("G is not weakly connected.")
# NOTE: to be removed in 3.0
if attrs is not None:
import warnings
msg = (
"\nThe `attrs` keyword argument of tree_data is deprecated\n"
"and will be removed in networkx 3.0.\n"
"It is replaced with explicit `ident` and `children` "
"keyword arguments.\n"
"To make this warning go away and ensure usage is forward\n"
"compatible, replace `attrs` with `ident` and `children,\n"
"for example:\n\n"
" >>> tree_data(G, root, attrs={'id': 'foo', 'children': 'bar'})\n\n"
"should instead be written as\n\n"
" >>> tree_data(G, root, ident='foo', children='bar')\n\n"
"The default values of 'id' and 'children' will not change."
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
ident = attrs["id"]
children = attrs["children"]
if ident == children:
raise nx.NetworkXError("The values for `id` and `children` must be different.")
|
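A round trip through the serializer, following the docstring's own example; tree_graph is the documented inverse in the same module:

import json
import networkx as nx
from networkx.readwrite import json_graph

G = nx.DiGraph([(1, 2), (1, 3)])
data = json_graph.tree_data(G, root=1)
s = json.dumps(data)            # serializable with plain json
H = json_graph.tree_graph(data)
assert set(H.edges()) == {(1, 2), (1, 3)}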
|
24,166 | 110,450 | 150 | lib/mpl_toolkits/mplot3d/tests/test_axes3d.py | 87 | 14 | def test_mutating_input_arrays_y_and_z(fig_test, fig_ref):
ax1 = fig_test.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax1.plot(x, y, z, 'o-')
ax1.set_ylim([0, 4])
ax1.set_zlim([0, 4])
fig_test.draw_without_rendering()
# mutate y,z to get a nontrivial line
y[:] = [1, 2, 3]
z[:] = [1, 2, 3]
# draw the same plot without mutating x and y
ax2 = fig_ref.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax2.plot(x, y, z, 'o-')
ax2.set_ylim([0, 4])
ax2.set_zlim([0, 4])
fig_test.draw_without_rendering()
| Test that plot results aren't affected by mutating input arrays | test_mutating_input_arrays_y_and_z | 7a1df7830f7685a99291d90c5e79bfc5e7876f31 | matplotlib | test_axes3d.py | 10 | 19 | https://github.com/matplotlib/matplotlib.git | 1 | 208 | 0 | 46 | 277 | Python | {
"docstring": "\n Test to see if the `z` axis does not get mutated\n after a call to `Axes3D.plot`\n\n test cases came from GH#8990\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 21,
"vocab_size": 20
} | def test_mutating_input_arrays_y_and_z(fig_test, fig_ref):
ax1 = fig_test.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax1.plot(x, y, z, 'o-')
ax1.set_ylim([0, 4])
ax1.set_zlim([0, 4])
fig_test.draw_without_rendering()
# mutate y,z to get a nontrivial line
y[:] = [1, 2, 3]
z[:] = [1, 2, 3]
# draw the same plot without mutating x and y
ax2 = fig_ref.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax2.plot(x, y, z, 'o-')
ax2.set_ylim([0, 4])
ax2.set_zlim([0, 4])
fig_test.draw_without_rendering()
|
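The regression above exists because Axes3D.plot must snapshot its inputs; a minimal illustration of the defensive-copy idea with plain NumPy:

import numpy as np

y = [0.0, 0.0, 0.0]
snapshot = np.array(y, copy=True)  # decoupled from the caller's list
y[:] = [1, 2, 3]                   # later mutation...
assert snapshot.tolist() == [0.0, 0.0, 0.0]  # ...does not leak in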
|
16,431 | 75,623 | 184 | wagtail/search/tests/elasticsearch_common_tests.py | 40 | 20 | def test_search_with_hyphen(self):
book = models.Book.objects.create(
title="Harry Potter and the Half-Blood Prince",
publication_date=date(2009, 7, 15),
number_of_pages=607,
)
index = self.backend.get_index_for_model(models.Book)
index.add_item(book)
index.refresh()
results = self.backend.search("Half-Blood", models.Book)
self.assertUnsortedListEqual(
[r.title for r in results],
[
"Harry Potter and the Half-Blood Prince",
],
)
| Reformat with black | test_search_with_hyphen | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | elasticsearch_common_tests.py | 11 | 16 | https://github.com/wagtail/wagtail.git | 2 | 93 | 0 | 32 | 148 | Python | {
"docstring": "\n This tests that punctuation characters are treated the same\n way in both indexing and querying.\n\n See: https://github.com/wagtail/wagtail/issues/937\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 17,
"vocab_size": 17
} | def test_search_with_hyphen(self):
book = models.Book.objects.create(
title="Harry Potter and the Half-Blood Prince",
publication_date=date(2009, 7, 15),
number_of_pages=607,
)
index = self.backend.get_index_for_model(models.Book)
index.add_item(book)
index.refresh()
results = self.backend.search("Half-Blood", models.Book)
self.assertUnsortedListEqual(
[r.title for r in results],
[
"Harry Potter and the Half-Blood Prince",
],
)
|
|
12,738 | 61,879 | 344 | .venv/lib/python3.8/site-packages/pip/_vendor/distlib/compat.py | 58 | 21 | def resolve(self, s):
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
| upd; format | resolve | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | compat.py | 15 | 18 | https://github.com/jindongwang/transferlearning.git | 4 | 114 | 0 | 38 | 189 | Python | {
"docstring": "\n Resolve strings to objects using standard import and attribute\n syntax.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 10,
"vocab_size": 10
} | def resolve(self, s):
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
|
|
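Usage sketch for the dotted-name resolver above, assuming the instance's importer behaves like importlib.import_module:

import importlib

def resolve(s, importer=importlib.import_module):
    # Walk 'pkg.mod.attr' left to right: import as far as possible,
    # then fall back to attribute access, importing deeper on misses.
    name = s.split(".")
    used = name.pop(0)
    found = importer(used)
    for frag in name:
        used += "." + frag
        try:
            found = getattr(found, frag)
        except AttributeError:
            importer(used)
            found = getattr(found, frag)
    return found

assert resolve("os.path.join") is __import__("os").path.join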
76,867 | 261,569 | 38 | examples/ensemble/plot_gradient_boosting_oob.py | 19 | 16 | def heldout_score(clf, X_test, y_test):
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
| DOC Fix FutureWarning in ensemble/plot_gradient_boosting_oob.py (#24948) | heldout_score | 2c1581c32e641e535305647eb57a1787bcf803f0 | scikit-learn | plot_gradient_boosting_oob.py | 12 | 5 | https://github.com/scikit-learn/scikit-learn.git | 2 | 59 | 0 | 17 | 91 | Python | {
"docstring": "compute deviance scores on ``X_test`` and ``y_test``.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def heldout_score(clf, X_test, y_test):
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = binomial_deviance(y_test, y_pred.ravel())
return score
|
|
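The binomial_deviance helper is defined elsewhere in that example script; one standard, numerically stable formulation is written out below so the scoring idea is runnable on its own (the exact convention used upstream may differ):

import numpy as np

def binomial_deviance(y_true, raw_score):
    # Deviance for labels in {0, 1} against raw decision scores, using the
    # GBM convention 2 * mean(log(1 + exp(-2 * y * f))) with y in {-1, +1}.
    y_signed = 2.0 * y_true - 1.0
    return 2.0 * np.mean(np.logaddexp(0.0, -2.0 * y_signed * raw_score))

y = np.array([0, 1, 1, 0])
f = np.array([-1.2, 0.8, 2.0, -0.5])
print(binomial_deviance(y, f))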
@pytest.mark.parametrize(
"search, expected_names",
(
("", ["The best juices", "The best beers", "The worst beers"]),
("best", ["The best juices", "The best beers"]),
("worst", ["The worst beers"]),
("average", []),
),
) | 5,214 | 29,299 | 130 | saleor/graphql/product/tests/queries/test_product_types_query.py | 72 | 17 | def test_product_types_query_ids_not_exists(user_api_client, category):
query = NOT_EXISTS_IDS_COLLECTIONS_QUERY
variables = {"filter": {"ids": ["fTEJRuFHU6fd2RU=", "2XwnQNNhwCdEjhP="]}}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response, ignore_errors=True)
message_error = '{"ids": [{"message": "Invalid ID specified.", "code": ""}]}'
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == message_error
assert content["data"]["productTypes"] is None
QUERY_FILTER_PRODUCT_TYPES =
@pytest.mark.parametrize(
"search, expected_names",
(
("", ["The best juices", "The best beers", "The worst beers"]),
("best", ["The best juices", "The best beers"]),
("worst", ["The wor | Split test_product.py and test_variant.py into multiple files (#11173)
* Split test_product.py into multiple files
* Split test_variant.py into multiple files | test_product_types_query_ids_not_exists | d90be220d6b687d08153934a51354011a3cb5ca1 | saleor | test_product_types_query.py | 12 | 9 | https://github.com/saleor/saleor.git | 1 | 81 | 1 | 52 | 234 | Python | {
"docstring": "\n query($filters: ProductTypeFilterInput) {\n productTypes(first: 10, filter: $filters) {\n edges {\n node {\n name\n }\n }\n }\n }\n",
"language": "en",
"n_whitespaces": 76,
"n_words": 17,
"vocab_size": 11
} | def test_product_types_query_ids_not_exists(user_api_client, category):
query = NOT_EXISTS_IDS_COLLECTIONS_QUERY
variables = {"filter": {"ids": ["fTEJRuFHU6fd2RU=", "2XwnQNNhwCdEjhP="]}}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response, ignore_errors=True)
message_error = '{"ids": [{"message": "Invalid ID specified.", "code": ""}]}'
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == message_error
assert content["data"]["productTypes"] is None
QUERY_FILTER_PRODUCT_TYPES = """
    query($filters: ProductTypeFilterInput) {
        productTypes(first: 10, filter: $filters) {
            edges {
                node {
                    name
                }
            }
        }
    }
"""
@pytest.mark.parametrize(
"search, expected_names",
(
("", ["The best juices", "The best beers", "The worst beers"]),
("best", ["The best juices", "The best beers"]),
("worst", ["The worst beers"]),
("average", []),
),
) |
21,945 | 104,721 | 384 | datasets/hans/hans.py | 90 | 11 | def _generate_examples(self, filepath):
for idx, line in enumerate(open(filepath, "r", encoding="utf-8")):
if idx == 0:
continue # skip header
line = line.strip()
split_line = line.split("\t")
# Examples not marked with a three out of five consensus are marked with
# "-" and should not be used in standard evaluations.
if split_line[0] == "-":
continue
# Works for both splits even though dev has some extra human labels.
yield idx, {
"premise": split_line[5],
"hypothesis": split_line[6],
"label": split_line[0],
"binary_parse_premise": split_line[1],
"binary_parse_hypothesis": split_line[2],
| Make HANS dataset streamable (#4155)
* Make HANS dataset streamable
* Fix tags | _generate_examples | 0060f4c7d3f8e4fb7a3694a925ca3b7f44e1f2ea | datasets | hans.py | 12 | 20 | https://github.com/huggingface/datasets.git | 4 | 132 | 0 | 76 | 223 | Python | {
"docstring": "Generate hans examples.\n\n Args:\n filepath: a string\n\n Yields:\n dictionaries containing \"premise\", \"hypothesis\" and \"label\" strings\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 15,
"vocab_size": 15
} | def _generate_examples(self, filepath):
for idx, line in enumerate(open(filepath, "r", encoding="utf-8")):
if idx == 0:
continue # skip header
line = line.strip()
split_line = line.split("\t")
# Examples not marked with a three out of five consensus are marked with
# "-" and should not be used in standard evaluations.
if split_line[0] == "-":
continue
# Works for both splits even though dev has some extra human labels.
yield idx, {
"premise": split_line[5],
"hypothesis": split_line[6],
"label": split_line[0],
"binary_parse_premise": split_line[1],
"binary_parse_hypothesis": split_line[2],
"parse_premise": split_line[3],
"parse_hypothesis": split_line[4],
"heuristic": split_line[8],
"subcase": split_line[9],
"template": split_line[10],
}
|
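The generator above is plain TSV parsing with a consensus filter; a tiny standalone sketch over an in-memory file, with column positions matching the label/premise/hypothesis indices used above (the toy row content is invented):

import io

TSV = (
    "gold_label\tp1\tp2\tp3\tp4\tpremise\thypothesis\n"
    "entailment\t(a)\t(b)\t(c)\t(d)\tA dog runs.\tAn animal moves.\n"
    "-\t(a)\t(b)\t(c)\t(d)\tskipped\tskipped\n"
)

def examples(fp):
    for idx, line in enumerate(fp):
        if idx == 0:
            continue  # skip header
        cols = line.strip().split("\t")
        if cols[0] == "-":
            continue  # no three-of-five annotator consensus
        yield {"label": cols[0], "premise": cols[5], "hypothesis": cols[6]}

print(list(examples(io.StringIO(TSV))))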
|
87,754 | 288,598 | 241 | homeassistant/components/light/__init__.py | 72 | 15 | def _light_internal_color_mode(self) -> str:
if (color_mode := self.color_mode) is None:
# Backwards compatibility for color_mode added in 2021.4
# Add warning in 2021.6, remove in 2021.10
supported = self._light_internal_supported_color_modes
if ColorMode.HS in supported and self.hs_color is not None:
return ColorMode.HS
if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None:
return ColorMode.COLOR_TEMP | Use Kelvin as the preferred color temperature unit (#79591)
* Use Kelvin as the preferred white temperature unit
* Update homekit
* Adjust tests | _light_internal_color_mode | 47d0598e75487f63901931875f69f802a477df13 | core | __init__.py | 10 | 14 | https://github.com/home-assistant/core.git | 9 | 95 | 0 | 38 | 150 | Python | {
"docstring": "Return the color mode of the light with backwards compatibility.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def _light_internal_color_mode(self) -> str:
if (color_mode := self.color_mode) is None:
# Backwards compatibility for color_mode added in 2021.4
# Add warning in 2021.6, remove in 2021.10
supported = self._light_internal_supported_color_modes
if ColorMode.HS in supported and self.hs_color is not None:
return ColorMode.HS
if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None:
return ColorMode.COLOR_TEMP
if ColorMode.BRIGHTNESS in supported and self.brightness is not None:
return ColorMode.BRIGHTNESS
if ColorMode.ONOFF in supported:
return ColorMode.ONOFF
return ColorMode.UNKNOWN
return color_mode
|
|
39,609 | 164,815 | 30 | pandas/plotting/_core.py | 13 | 7 | def kde(self, bw_method=None, ind=None, **kwargs):
| DOC: fix URLs, formatting and typos (#45920) | kde | 1b5338e95917a8b94a9f7b2e1881442dd663c02d | pandas | _core.py | 9 | 2 | https://github.com/pandas-dev/pandas.git | 1 | 35 | 0 | 13 | 59 | Python | {
"docstring": "\n Generate Kernel Density Estimate plot using Gaussian kernels.\n\n In statistics, `kernel density estimation`_ (KDE) is a non-parametric\n way to estimate the probability density function (PDF) of a random\n variable. This function uses Gaussian kernels and includes automatic\n bandwidth determination.\n\n .. _kernel density estimation:\n https://en.wikipedia.org/wiki/Kernel_density_estimation\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable.\n If None (default), 'scott' is used.\n See :class:`scipy.stats.gaussian_kde` for more information.\n ind : NumPy array or int, optional\n Evaluation points for the estimated PDF. If None (default),\n 1000 equally spaced points are used. If `ind` is a NumPy array, the\n KDE is evaluated at the points passed. If `ind` is an integer,\n `ind` number of equally spaced points are used.\n **kwargs\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or numpy.ndarray of them\n\n See Also\n --------\n scipy.stats.gaussian_kde : Representation of a kernel-density\n estimate using Gaussian kernels. This is the function used\n internally to estimate the PDF.\n\n Examples\n --------\n Given a Series of points randomly sampled from an unknown\n distribution, estimate its PDF using KDE with automatic\n bandwidth determination and plot the results, evaluating them at\n 1000 equally spaced points (default):\n\n .. plot::\n :context: close-figs\n\n >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])\n >>> ax = s.plot.kde()\n\n A scalar bandwidth can be specified. Using a small bandwidth value can\n lead to over-fitting, while using a large bandwidth value may result\n in under-fitting:\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(bw_method=0.3)\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(bw_method=3)\n\n Finally, the `ind` parameter determines the evaluation points for the\n plot of the estimated PDF:\n\n .. plot::\n :context: close-figs\n\n >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])\n\n For DataFrame, it works in the same way:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],\n ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],\n ... })\n >>> ax = df.plot.kde()\n\n A scalar bandwidth can be specified. Using a small bandwidth value can\n lead to over-fitting, while using a large bandwidth value may result\n in under-fitting:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(bw_method=0.3)\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(bw_method=3)\n\n Finally, the `ind` parameter determines the evaluation points for the\n plot of the estimated PDF:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])\n ",
"language": "en",
"n_whitespaces": 1083,
"n_words": 399,
"vocab_size": 184
} | def kde(self, bw_method=None, ind=None, **kwargs):
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
|
|
@pytest.fixture | 5,005 | 26,447 | 21 | saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py | 10 | 8 | def subscription_invoice_requested_webhook(subscription_webhook):
return subscription_webhook(
INVOICE_REQUESTED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.INVOICE_REQUESTED
)
INVOICE_DELETED_SUBSCRIPTION_QUERY =
@pytest | Add Webhook payload via graphql subscriptions (#9394)
* Add PoC of webhook subscriptions
* add async webhooks subscription payloads feature
* remove unneeded file
* add translations subscription handling, fixes after review
* remove todo
* add descriptions
* add descriptions, move subsrciption_payloads.py
* refactor
* fix imports, add changelog
* check_document_is_single_subscription refactor
Co-authored-by: Maciej Korycinski <maciej@mirumee.com>
Co-authored-by: Marcin Gฤbala <5421321+maarcingebala@users.noreply.github.com> | subscription_invoice_requested_webhook | aca6418d6c36956bc1ab530e6ef7e146ec9df90c | saleor | fixtures.py | 8 | 4 | https://github.com/saleor/saleor.git | 1 | 14 | 1 | 10 | 36 | Python | {
"docstring": "\n subscription{\n event{\n ...on InvoiceDeleted{\n invoice{\n id\n }\n }\n }\n }\n",
"language": "en",
"n_whitespaces": 69,
"n_words": 10,
"vocab_size": 7
} | def subscription_invoice_requested_webhook(subscription_webhook):
return subscription_webhook(
INVOICE_REQUESTED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.INVOICE_REQUESTED
)
INVOICE_DELETED_SUBSCRIPTION_QUERY = """
    subscription{
      event{
        ...on InvoiceDeleted{
          invoice{
            id
          }
        }
      }
    }
"""
@pytest.fixture |
46,647 | 191,522 | 146 | tests/unit_tests/prompts/test_prompt.py | 45 | 11 | def test_prompt_from_examples_valid() -> None:
template =
input_variables = ["question"]
example_separator = "\n\n"
prefix =
suffix =
examples = [
,
,
]
prompt_from_examples = PromptTemplate.from_examples(
examples,
suffix,
input_variables,
example_separator=example_separator,
prefix=prefix,
)
prompt_from_template = PromptTemplate(
input_variables=input_variables, template=template
)
assert prompt_from_examples.template == prompt_from_template.template
assert prompt_from_examples.input_variables == prompt_from_template.input_variables
| add few shot example (#148) | test_prompt_from_examples_valid | c02eb199b6587aeeb50fbb083693572bd2f030cc | langchain | test_prompt.py | 9 | 32 | https://github.com/hwchase17/langchain.git | 1 | 81 | 0 | 34 | 143 | Python | {
"docstring": "Test prompt can be successfully constructed from examples.Test Prompt:\n\nQuestion: who are you?\nAnswer: foo\n\nQuestion: what are you?\nAnswer: bar\n\nQuestion: {question}\nAnswer:Test Prompt:Question: {question}\\nAnswer:Question: who are you?\\nAnswer: fooQuestion: what are you?\\nAnswer: bar",
"language": "en",
"n_whitespaces": 27,
"n_words": 34,
"vocab_size": 23
} | def test_prompt_from_examples_valid() -> None:
template =
input_variables = ["question"]
example_separator = "\n\n"
prefix =
suffix =
examples = [
,
,
]
prompt_from_examples = PromptTemplate.from_examples(
examples,
suffix,
input_variables,
example_separator=example_separator,
prefix=prefix,
)
prompt_from_template = PromptTemplate(
input_variables=input_variables, template=template
)
assert prompt_from_examples.template == prompt_from_template.template
assert prompt_from_examples.input_variables == prompt_from_template.input_variables
|
|
13,149 | 63,105 | 455 | .venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py | 163 | 13 | def compatible_platforms(provided, required):
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not | upd; format | compatible_platforms | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | __init__.py | 16 | 22 | https://github.com/jindongwang/transferlearning.git | 14 | 168 | 0 | 95 | 281 | Python | {
"docstring": "Can code for the `provided` platform run on the `required` platform?\n\n Returns true if either platform is ``None``, or the platforms are equal.\n\n XXX Needs compatibility checks for Linux and other unixy OSes.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 33,
"vocab_size": 29
} | def compatible_platforms(provided, required):
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
|
|
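The Mac branch keys off platform strings like 'macosx-10.3-fat'. A sketch of that matching logic with an explicit regex; macosVersionString is module state in the original, so its pattern below is an assumption:

import re

# Assumed shape of the module-level pattern in pkg_resources.
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")

def mac_compatible(provided, required):
    prov = macosVersionString.match(provided)
    req = macosVersionString.match(required)
    if not (prov and req):
        return provided == required
    # Same major version and machine type, and the host's minor
    # OS version must be at least the egg's.
    return (prov.group(1), prov.group(3)) == (req.group(1), req.group(3)) \
        and int(prov.group(2)) <= int(req.group(2))

assert mac_compatible("macosx-10.3-fat", "macosx-10.5-fat")
assert not mac_compatible("macosx-10.6-fat", "macosx-10.5-fat")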
41,989 | 176,590 | 422 | networkx/algorithms/shortest_paths/weighted.py | 125 | 24 | def find_negative_cycle(G, source, weight="weight"):
weight = _weight_function(G, weight)
pred = {source: []}
v = _inner_bellman_ford(G, [source], weight, pred=pred)
if v is None:
raise nx.NetworkXError("No negative cycles detected.")
# negative cycle detected... find it
neg_cycle = []
stack = [(v, list(pred[v]))]
seen = {v}
while stack:
node, preds = stack[-1]
if v in preds:
# found the cycle
neg_cycle.extend([node, v])
neg_cycle = list(reversed(neg_cycle))
return neg_cycle
if preds:
nbr = preds.pop()
if nbr not in seen:
stack.append((nbr, list(pred[nbr])))
neg_cycle.append(node)
seen.add(nbr)
else:
stack.pop()
if neg_cycle:
neg_c | Corrected the documentation of find_negative_cycle() solving issue #5610 (#5613)
* issue
* Update branchings.py
* Update weakly_connected.py | find_negative_cycle | ec2e239764c92adf3b1abcf12817198a878d8772 | networkx | weighted.py | 17 | 31 | https://github.com/networkx/networkx.git | 9 | 221 | 0 | 83 | 358 | Python | {
"docstring": "Returns a cycle with negative total weight if it exists.\n\n Bellman-Ford is used to find shortest_paths. That algorithm\n stops if there exists a negative cycle. This algorithm\n picks up from there and returns the found negative cycle.\n\n The cycle consists of a list of nodes in the cycle order. The last\n node equals the first to make it a cycle.\n You can look up the edge weights in the original graph. In the case\n of multigraphs the relevant edge is the minimal weight edge between\n the nodes in the 2-tuple.\n\n If the graph has no negative cycle, a NetworkXError is raised.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source: node label\n The search for the negative cycle will start from this node.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)])\n >>> nx.find_negative_cycle(G, 0)\n [4, 0, 1, 4]\n\n Returns\n -------\n cycle : list\n A list of nodes in the order of the cycle found. The last node\n equals the first to indicate a cycle.\n\n Raises\n ------\n NetworkXError\n If no negative cycle is found.\n ",
"language": "en",
"n_whitespaces": 464,
"n_words": 285,
"vocab_size": 144
} | def find_negative_cycle(G, source, weight="weight"):
weight = _weight_function(G, weight)
pred = {source: []}
v = _inner_bellman_ford(G, [source], weight, pred=pred)
if v is None:
raise nx.NetworkXError("No negative cycles detected.")
# negative cycle detected... find it
neg_cycle = []
stack = [(v, list(pred[v]))]
seen = {v}
while stack:
node, preds = stack[-1]
if v in preds:
# found the cycle
neg_cycle.extend([node, v])
neg_cycle = list(reversed(neg_cycle))
return neg_cycle
if preds:
nbr = preds.pop()
if nbr not in seen:
stack.append((nbr, list(pred[nbr])))
neg_cycle.append(node)
seen.add(nbr)
else:
stack.pop()
if neg_cycle:
neg_cycle.pop()
else:
if v in G[v] and weight(G, v, v) < 0:
return [v, v]
# should not reach here
raise nx.NetworkXError("Negative cycle is detected but not found")
# should not get here...
msg = "negative cycle detected but not identified"
raise nx.NetworkXUnbounded(msg)
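Following the docstring's example, with an extra check that the returned walk really closes on itself and has negative total weight:

import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from(
    [(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)]
)
cycle = nx.find_negative_cycle(G, 0)
assert cycle[0] == cycle[-1]  # closed walk
weights = nx.get_edge_attributes(G, "weight")
total = sum(weights[(u, v)] for u, v in zip(cycle, cycle[1:]))
assert total < 0              # genuinely negative
print(cycle)  # e.g. [4, 0, 1, 4]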
|
|
70,030 | 243,427 | 133 | src/PIL/ImageOps.py | 61 | 26 | def expand(image, border=0, fill=0):
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
color = _color(fill, | Use getpalette() in ImageOps | expand | 279ddf4ce6c76498ac29df2552a3023b9aaa76c1 | Pillow | ImageOps.py | 13 | 16 | https://github.com/python-pillow/Pillow.git | 5 | 149 | 0 | 45 | 230 | Python | {
"docstring": "\n Add border to the image\n\n :param image: The image to expand.\n :param border: Border width, in pixels.\n :param fill: Pixel fill value (a color value). Default is 0 (black).\n :return: An image.\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 32,
"vocab_size": 28
} | def expand(image, border=0, fill=0):
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
color = _color(fill, image.mode)
if image.mode == "P" and image.palette:
palette = ImagePalette.ImagePalette(palette=image.getpalette())
if isinstance(color, tuple):
color = palette.getcolor(color)
else:
palette = None
out = Image.new(image.mode, (width, height), color)
if palette:
out.putpalette(palette.palette)
out.paste(image, (left, top))
return out
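Typical call, using a solid border color; Pillow is assumed to be installed and the input image is synthetic:

from PIL import Image, ImageOps

im = Image.new("RGB", (64, 64), (255, 0, 0))
framed = ImageOps.expand(im, border=10, fill=(0, 0, 0))
assert framed.size == (84, 84)  # 64 + 10 px on each side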
|
|
18,085 | 86,210 | 174 | tests/sentry/integrations/slack/notifications/test_issue_alert.py | 54 | 32 | def test_digest_enabled(self, digests, mock_func):
backend = RedisBackend()
digests.digest = backend.digest
digests.enabled.return_value = True
rule = Rule.objects.create(project=self.project, label="my rule")
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
key = f"mail:p:{self.project.id}"
backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)
with self.tasks():
deliver_digest(key)
attachment, text = get_attachment()
assert attachment["title"] == "Hello world"
assert attachment["text"] == ""
| feat(workflow): Set project ownership fallthrough default false (#39305) | test_digest_enabled | 210295c5ed1d7286ae808b15d14f6e83356af16e | sentry | test_issue_alert.py | 12 | 16 | https://github.com/getsentry/sentry.git | 1 | 150 | 0 | 45 | 260 | Python | {
"docstring": "\n Test that with digests enabled, but Slack notification settings\n (and not email settings), we send a Slack notification\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 16
} | def test_digest_enabled(self, digests, mock_func):
backend = RedisBackend()
digests.digest = backend.digest
digests.enabled.return_value = True
rule = Rule.objects.create(project=self.project, label="my rule")
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
key = f"mail:p:{self.project.id}"
backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)
with self.tasks():
deliver_digest(key)
attachment, text = get_attachment()
assert attachment["title"] == "Hello world"
assert attachment["text"] == ""
|
|
47,393 | 195,738 | 68 | sympy/physics/control/control_plots.py | 37 | 18 | def pole_zero_numerical_data(system):
_check_system(system)
system = system.doit() # Get the equivalent TransferFunction object.
num_poly = Poly(system.num, system.var).all_coeffs()
den_poly = Poly(system.den, system.var).all_coeffs()
num_poly = np.array(num_poly, dtype=np.complex128)
den_poly = np.array(den_poly, dtype=np.complex128)
zeros = np.roots(num_poly)
poles = np.roots( | Allow complex transfer functions in pole-zero plot | pole_zero_numerical_data | bf1cb469061d7ad07bfbf687f5635d9f4ec569dd | sympy | control_plots.py | 11 | 10 | https://github.com/sympy/sympy.git | 1 | 97 | 0 | 26 | 157 | Python | {
"docstring": "\n Returns the numerical data of poles and zeros of the system.\n It is internally used by ``pole_zero_plot`` to get the data\n for plotting poles and zeros. Users can use this data to further\n analyse the dynamics of the system or plot using a different\n backend/plotting-module.\n\n Parameters\n ==========\n\n system : SISOLinearTimeInvariant\n The system for which the pole-zero data is to be computed.\n\n Returns\n =======\n\n tuple : (zeros, poles)\n zeros = Zeros of the system. NumPy array of complex numbers.\n poles = Poles of the system. NumPy array of complex numbers.\n\n Raises\n ======\n\n NotImplementedError\n When a SISO LTI system is not passed.\n\n When time delay terms are present in the system.\n\n ValueError\n When more than one free symbol is present in the system.\n The only variable in the transfer function should be\n the variable of the Laplace transform.\n\n Examples\n ========\n\n >>> from sympy.abc import s\n >>> from sympy.physics.control.lti import TransferFunction\n >>> from sympy.physics.control.control_plots import pole_zero_numerical_data\n >>> tf1 = TransferFunction(s**2 + 1, s**4 + 4*s**3 + 6*s**2 + 5*s + 2, s)\n >>> pole_zero_numerical_data(tf1) # doctest: +SKIP\n ([-0.+1.j 0.-1.j], [-2. +0.j -0.5+0.8660254j -0.5-0.8660254j -1. +0.j ])\n\n See Also\n ========\n\n pole_zero_plot\n\n ",
"language": "en",
"n_whitespaces": 341,
"n_words": 187,
"vocab_size": 117
} | def pole_zero_numerical_data(system):
_check_system(system)
system = system.doit() # Get the equivalent TransferFunction object.
num_poly = Poly(system.num, system.var).all_coeffs()
den_poly = Poly(system.den, system.var).all_coeffs()
num_poly = np.array(num_poly, dtype=np.complex128)
den_poly = np.array(den_poly, dtype=np.complex128)
zeros = np.roots(num_poly)
poles = np.roots(den_poly)
return zeros, poles
|
|
56,767 | 222,834 | 25 | python3.10.4/Lib/distutils/cygwinccompiler.py | 16 | 5 | def get_versions():
commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
return t | add python 3.10.4 for windows | get_versions | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | cygwinccompiler.py | 10 | 3 | https://github.com/XX-net/XX-Net.git | 2 | 28 | 0 | 16 | 51 | Python | {
"docstring": " Try to find out the versions of gcc, ld and dllwrap.\n\n If not possible it returns None for it.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 19,
"vocab_size": 19
} | def get_versions():
commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
return tuple([_find_exe_version(cmd) for cmd in commands])
|
|
53,868 | 215,170 | 63 | salt/beacons/napalm_beacon.py | 26 | 11 | def __virtual__():
if salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
return __virtualname__
else:
err_msg = "NAPALM is not installed."
log.error("Unable to load %s beacon: %s", __virtualname__, err_msg) | Align enhanced logging accross beacons | __virtual__ | 4e3632254fb73210ce3e1954ec507473433018b8 | salt | napalm_beacon.py | 11 | 7 | https://github.com/saltstack/salt.git | 2 | 42 | 0 | 23 | 71 | Python | {
"docstring": "\n This beacon can only work when running under a regular or a proxy minion, managed through napalm.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 17,
"vocab_size": 16
} | def __virtual__():
if salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
return __virtualname__
else:
err_msg = "NAPALM is not installed."
log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
return False, err_msg
|
|
120,971 | 337,188 | 37 | examples/community/lpw_stable_diffusion.py | 19 | 7 | def parse_prompt_attention(text):
res = []
round_brackets = []
square_brackets = []
round_bracket_multiplier = 1.1
square_bracket_multiplier = 1 / 1.1
| [Community Pipelines] Long Prompt Weighting Stable Diffusion Pipelines (#907)
* [Community Pipelines] Long Prompt Weighting
* Update README.md
* fix
* style
* fix style
* Update examples/community/README.md
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com> | parse_prompt_attention | 2a0c823527694058d410ed6f91b52e7dd9f94ebe | diffusers | lpw_stable_diffusion.py | 7 | 38 | https://github.com/huggingface/diffusers.git | 16 | 299 | 0 | 12 | 49 | Python | {
"docstring": "\n Parses a string with attention tokens and returns a list of pairs: text and its assoicated weight.\n Accepted tokens are:\n (abc) - increases attention to abc by a multiplier of 1.1\n (abc:3.12) - increases attention to abc by a multiplier of 3.12\n [abc] - decreases attention to abc by a multiplier of 1.1\n \\( - literal character '('\n \\[ - literal character '['\n \\) - literal character ')'\n \\] - literal character ']'\n \\\\ - literal character '\\'\n anything else - just text\n >>> parse_prompt_attention('normal text')\n [['normal text', 1.0]]\n >>> parse_prompt_attention('an (important) word')\n [['an ', 1.0], ['important', 1.1], [' word', 1.0]]\n >>> parse_prompt_attention('(unbalanced')\n [['unbalanced', 1.1]]\n >>> parse_prompt_attention('\\(literal\\]')\n [['(literal]', 1.0]]\n >>> parse_prompt_attention('(unnecessary)(parens)')\n [['unnecessaryparens', 1.1]]\n >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')\n [['a ', 1.0],\n ['house', 1.5730000000000004],\n [' ', 1.1],\n ['on', 1.0],\n [' a ', 1.1],\n ['hill', 0.55],\n [', sun, ', 1.1],\n ['sky', 1.4641000000000006],\n ['.', 1.1]]\n ",
"language": "en",
"n_whitespaces": 268,
"n_words": 145,
"vocab_size": 83
} | def parse_prompt_attention(text):
res = []
round_brackets = []
square_brackets = []
round_bracket_multiplier = 1.1
square_bracket_multiplier = 1 / 1.1
|
|
78,638 | 266,879 | 364 | lib/ansible/galaxy/dependency_resolution/providers.py | 178 | 12 | def get_dependencies(self, candidate):
# type: (Candidate) -> list[Candidate]
r
# FIXME: If there's several galaxy servers set, there may be a
# FIXME: situation when the metadata of the same collection
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
        # NOTE: The underlying implementation currently uses first found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
# NOTE: after the `get_collectio | galaxy - Clean up type hints and imports. | get_dependencies | 8b2e6285650ec42ec4a19075a8567047e8304ba2 | ansible | providers.py | 11 | 13 | https://github.com/ansible/ansible.git | 4 | 60 | 0 | 125 | 115 | Python | {
"docstring": "Get direct dependencies of a candidate.\n\n :returns: A collection of requirements that `candidate` \\\n specifies as its dependencies.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 18,
"vocab_size": 17
} | def get_dependencies(self, candidate):
# type: (Candidate) -> list[Candidate]
r
# FIXME: If there's several galaxy servers set, there may be a
# FIXME: situation when the metadata of the same collection
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
        # NOTE: The underlying implementation currently uses first found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
# NOTE: after the `get_collection_dependencies()` call because
        # NOTE: internally it populates the artifact URL of the candidate,
# NOTE: its SHA hash and the Galaxy API token. These are still
# NOTE: necessary with `--no-deps` because even with the disabled
# NOTE: dependency resolution the outer layer will still need to
# NOTE: know how to download and validate the artifact.
#
# NOTE: Virtual candidates should always return dependencies
# NOTE: because they are ephemeral and non-installable.
if not self._with_deps and not candidate.is_virtual:
return []
return [
self._make_req_from_dict({'name': dep_name, 'version': dep_req})
for dep_name, dep_req in req_map.items()
]
|
|
71,047 | 246,153 | 208 | tests/rest/admin/test_user.py | 40 | 14 | def test_set_displayname(self) -> None:
# Modify user
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"displayname": "foobar"},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name | Add type hints to `tests/rest/admin` (#11851) | test_set_displayname | 901b264c0c88f39cbfb8b2229e0dc57968882658 | synapse | test_user.py | 12 | 21 | https://github.com/matrix-org/synapse.git | 1 | 142 | 0 | 25 | 235 | Python | {
"docstring": "\n Test setting the displayname of another user.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def test_set_displayname(self) -> None:
# Modify user
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"displayname": "foobar"},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
|
|
117,847 | 321,664 | 437 | qutebrowser/browser/webkit/network/networkmanager.py | 94 | 37 | def on_ssl_errors(self, reply, qt_errors):
| Refactor certificate error handling
- Make CertificateErrorWrapper responsible for accepting/rejecting certs
- Try to avoid dealing with unclear booleans
- Implement support for deferred errors (#4616) - disabled due to PyQt bug
- Implement support for Qt 6 API (#7086) | on_ssl_errors | e5340c449f23608803c286da0563b62f58ba25b0 | qutebrowser | networkmanager.py | 13 | 35 | https://github.com/qutebrowser/qutebrowser.git | 8 | 220 | 0 | 62 | 353 | Python | {
"docstring": "Decide if SSL errors should be ignored or not.\n\n This slot is called on SSL/TLS errors by the self.sslErrors signal.\n\n Args:\n reply: The QNetworkReply that is encountering the errors.\n qt_errors: A list of errors.\n ",
"language": "en",
"n_whitespaces": 77,
"n_words": 34,
"vocab_size": 30
} | def on_ssl_errors(self, reply, qt_errors):
errors = certificateerror.CertificateErrorWrapper(reply, qt_errors)
log.network.debug("Certificate errors: {!r}".format(errors))
try:
host_tpl: Optional[urlutils.HostTupleType] = urlutils.host_tuple(
reply.url())
except ValueError:
host_tpl = None
is_accepted = False
is_rejected = False
else:
assert host_tpl is not None
is_accepted = errors in self._accepted_ssl_errors[host_tpl]
is_rejected = errors in self._rejected_ssl_errors[host_tpl]
log.network.debug("Already accepted: {} / "
"rejected {}".format(is_accepted, is_rejected))
if is_rejected:
return
elif is_accepted:
reply.ignoreSslErrors()
return
abort_on = self._get_abort_signals(reply)
tab = self._get_tab()
first_party_url = QUrl() if tab is None else tab.data.last_navigation.url
shared.handle_certificate_error(
request_url=reply.url(),
first_party_url=first_party_url,
error=errors,
abort_on=abort_on,
)
if errors.certificate_was_accepted():
if host_tpl is not None:
self._accepted_ssl_errors[host_tpl].add(errors)
elif host_tpl is not None:
self._rejected_ssl_errors[host_tpl].add(errors)
|
|
40,694 | 171,627 | 142 | pandas/_version.py | 36 | 4 | def render_pep440(pieces):
i | BLD: use nonvendor versioneer (#49924)
* BLD: remove vendored versioneer
* run vis
* move config to pyproject.toml
* add versioneer to deps
* run pyupgrade
* fix isort and pylint
* fix ci
* fix env | render_pep440 | e2df99823758210fb2b7c4aba39e23f3445f7cd3 | pandas | _version.py | 14 | 13 | https://github.com/pandas-dev/pandas.git | 6 | 65 | 0 | 20 | 163 | Python | {
"docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 37,
"vocab_size": 35
} | def render_pep440(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += f"{pieces['distance']}.g{pieces['short']}"
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}"
if pieces["dirty"]:
rendered += ".dirty"
return rendered
|
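Worked examples of the rendering rules above, assuming render_pep440 as defined in this record is in scope. plus_or_dot is not shown here; the one-liner below follows its documented versioneer behavior ("." if the tag already carries a "+", else "+") and is included only so the examples run standalone.

def plus_or_dot(pieces):
    return "." if "+" in (pieces.get("closest-tag") or "") else "+"

pieces = {"closest-tag": "1.5.0", "distance": 0, "dirty": False, "short": "abc1234"}
print(render_pep440(pieces))   # 1.5.0 (exact tag, clean tree)

pieces = {"closest-tag": "1.5.0", "distance": 3, "dirty": True, "short": "abc1234"}
print(render_pep440(pieces))   # 1.5.0+3.gabc1234.dirty

pieces = {"closest-tag": None, "distance": 7, "dirty": False, "short": "abc1234"}
print(render_pep440(pieces))   # 0+untagged.7.gabc1234 (exception #1: no tags)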
|
50,052 | 202,099 | 69 | tests/cache/tests_async.py | 19 | 7 | async def test_aset_many(self):
self.assertEqual(await cache.aset_many({"a": 1, "b": 2}), [])
self.assert | Refs #33476 -- Reformatted code with Black. | test_aset_many | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests_async.py | 13 | 6 | https://github.com/django/django.git | 1 | 61 | 0 | 16 | 105 | Python | {
"docstring": "aset_many() does nothing for the dummy cache backend.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | async def test_aset_many(self):
self.assertEqual(await cache.aset_many({"a": 1, "b": 2}), [])
self.assertEqual(
await cache.aset_many({"a": 1, "b": 2}, timeout=2, version="1"),
[],
)
|
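For contrast with the dummy backend's no-op, a sketch of the same async API against a real backend (locmem), where aset_many actually stores values. Settings are configured inline so this runs outside a Django project; it assumes Django 4.0+ for the async cache methods.

import asyncio
from django.conf import settings

settings.configure(
    CACHES={"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
)

from django.core.cache import cache

async def main():
    # aset_many returns the list of keys that failed to be set.
    print(await cache.aset_many({"a": 1, "b": 2}, timeout=60))  # [] -> all stored
    print(await cache.aget_many(["a", "b"]))                    # {'a': 1, 'b': 2}

asyncio.run(main())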
|
50,013 | 201,852 | 77 | tests/bash_completion/tests.py | 34 | 13 | def _user_input(self, input_str):
os.environ["COMP_WORDS"] = input_str
idx = | Refs #33476 -- Reformatted code with Black. | _user_input | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 12 | 6 | https://github.com/django/django.git | 2 | 63 | 0 | 27 | 110 | Python | {
"docstring": "\n Set the environment and the list of command line arguments.\n\n This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is\n an array consisting of the individual words in the current command\n line, the latter is the index of the current cursor position, so in\n case a word is completed and the cursor is placed after a whitespace,\n $COMP_CWORD must be incremented by 1:\n\n * 'django-admin start' -> COMP_CWORD=1\n * 'django-admin startproject' -> COMP_CWORD=1\n * 'django-admin startproject ' -> COMP_CWORD=2\n ",
"language": "en",
"n_whitespaces": 157,
"n_words": 80,
"vocab_size": 53
} | def _user_input(self, input_str):
os.environ["COMP_WORDS"] = input_str
idx = len(input_str.split(" ")) - 1 # Index of the last word
comp_cword = idx + 1 if input_str.endswith(" ") else idx
os.environ["COMP_CWORD"] = str(comp_cword)
sys.argv = input_str.split()
|
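A standalone check of the COMP_CWORD rule spelled out in the docstring: a trailing space means the cursor sits after the last word, so the index advances by one. Note this helper uses a whitespace-collapsing split() so the trailing-space case reproduces the docstring's examples; it is an illustration of the rule, not a copy of the implementation above.

def comp_cword(input_str):
    idx = len(input_str.split()) - 1          # index of the last real word
    return idx + 1 if input_str.endswith(" ") else idx

assert comp_cword("django-admin start") == 1
assert comp_cword("django-admin startproject") == 1
assert comp_cword("django-admin startproject ") == 2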
|
26,352 | 118,671 | 229 | lib/streamlit/config.py | 102 | 9 | def _check_conflicts() -> None:
# Node-related conflicts
# When using the Node server, we must always connect to 8501 (this is
# hard-coded in JS). Otherwise, the browser would decide what port to
# connect to based on window.location.port, which in dev is going to
# be (3000)
# Import logger locally to prevent circular references
f | Report sharing removal (#4260)
The report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. This commit removes that code to make the remaining code easier to navigate and understand. | _check_conflicts | dd9084523e365e637443ea351eaaaa25f52d8412 | streamlit | config.py | 12 | 25 | https://github.com/streamlit/streamlit.git | 5 | 65 | 0 | 74 | 132 | Python | {
"docstring": "\nWarning: the config option 'server.enableCORS=false' is not compatible with 'server.enableXsrfProtection=true'.\nAs a result, 'server.enableCORS' is being overridden to 'true'.\n\nMore information:\nIn order to protect against CSRF attacks, we send a cookie with each request.\nTo do so, we must specify allowable origins, which places a restriction on\ncross-origin resource sharing.\n\nIf cross origin resource sharing is required, please disable server.enableXsrfProtection.\n ",
"language": "en",
"n_whitespaces": 66,
"n_words": 61,
"vocab_size": 53
} | def _check_conflicts() -> None:
# Node-related conflicts
# When using the Node server, we must always connect to 8501 (this is
# hard-coded in JS). Otherwise, the browser would decide what port to
# connect to based on window.location.port, which in dev is going to
# be (3000)
# Import logger locally to prevent circular references
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
if get_option("global.developmentMode"):
assert _is_unset(
"server.port"
), "server.port does not work when global.developmentMode is true."
assert _is_unset("browser.serverPort"), (
"browser.serverPort does not work when global.developmentMode is " "true."
)
# XSRF conflicts
if get_option("server.enableXsrfProtection"):
if not get_option("server.enableCORS") or get_option("global.developmentMode"):
LOGGER.warning(
)
|
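The message passed to LOGGER.warning is the CORS/XSRF text shown in this record's documentation field. A minimal generic sketch of the same "detect conflicting options, warn, and override" pattern, using a plain dict in place of Streamlit's config machinery (names are illustrative):

import logging

def check_conflicts(options):
    log = logging.getLogger(__name__)
    if options.get("server.enableXsrfProtection"):
        if not options.get("server.enableCORS") or options.get("global.developmentMode"):
            log.warning(
                "'server.enableCORS=false' is incompatible with "
                "'server.enableXsrfProtection=true'; overriding enableCORS to true."
            )
            options["server.enableCORS"] = True

opts = {"server.enableXsrfProtection": True, "server.enableCORS": False}
check_conflicts(opts)
print(opts["server.enableCORS"])  # True -> the conflicting value was overridden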
|
21,967 | 104,785 | 35 | src/datasets/dataset_dict.py | 14 | 9 | def num_columns(self) -> Dict[str, int]:
self._check_values_type()
return {k: dataset.num_columns for k, datase | Add code examples for DatasetDict (#4245)
* add code examples for DatasetDict
* apply quentin review | num_columns | 1904d0c0a3a96330d9b870cdca3e9a3a137f2977 | datasets | dataset_dict.py | 9 | 14 | https://github.com/huggingface/datasets.git | 2 | 36 | 0 | 14 | 58 | Python | {
"docstring": "Number of columns in each split of the dataset.\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\")\n >>> ds.num_columns\n {'test': 2, 'train': 2, 'validation': 2}\n ```\n ",
"language": "en",
"n_whitespaces": 85,
"n_words": 29,
"vocab_size": 25
} | def num_columns(self) -> Dict[str, int]:
self._check_values_type()
return {k: dataset.num_columns for k, dataset in self.items()}
|
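The property is a per-split aggregation, as the docstring's example shows. Run through the public API (this downloads a small dataset, so network access is assumed):

from datasets import load_dataset

ds = load_dataset("rotten_tomatoes")
print(ds.num_columns)                           # {'train': 2, 'validation': 2, 'test': 2}
print({k: d.num_rows for k, d in ds.items()})   # same comprehension pattern, other stat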
|
36,586 | 156,162 | 207 | dask/bag/random.py | 72 | 23 | def _sample_with_replacement_map_partitions(population, k):
stream = iter(population)
e = next(stream)
reservoir, stream_length = [e for _ in range(k)], 1
w = [rnd.random() for _ in range(k)]
nxt = [_geometric(wi) for wi in w]
min_nxt = min(nxt)
for i, e in enumerate(stream, 1):
if i == min_nxt:
for j, n in enumerate(nxt):
if n == min_nxt:
reservoir[j] = e
w[j] *= rnd.random()
nxt[j] += _geometric(w[j])
min_nxt = min(nxt)
| Bag: add implementation for reservoir sampling (#7068) (#7636)
- Implement the [L algorithm](https://en.wikipedia.org/wiki/Reservoir_sampling#An_optimal_algorithm) for reservoir sampling without replacement.
- Use the **k** reservoir of size 1 strategy for sampling with replacement (see [reference](http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf)) of **k** items | _sample_with_replacement_map_partitions | 4e5dfe7463028a39a90e026c7fb9220969093ab3 | dask | random.py | 17 | 17 | https://github.com/dask/dask.git | 8 | 144 | 0 | 43 | 224 | Python | {
"docstring": "\n Reservoir sampling with replacement, the main idea is to use k reservoirs of size 1\n See Section Applications in http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 20,
"vocab_size": 20
} | def _sample_with_replacement_map_partitions(population, k):
stream = iter(population)
e = next(stream)
reservoir, stream_length = [e for _ in range(k)], 1
w = [rnd.random() for _ in range(k)]
nxt = [_geometric(wi) for wi in w]
min_nxt = min(nxt)
for i, e in enumerate(stream, 1):
if i == min_nxt:
for j, n in enumerate(nxt):
if n == min_nxt:
reservoir[j] = e
w[j] *= rnd.random()
nxt[j] += _geometric(w[j])
min_nxt = min(nxt)
stream_length += 1
return reservoir, stream_length
|
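A self-contained run of the k-reservoirs-of-size-1 sampler above, assuming the function from this record is pasted alongside. _geometric is not included in the record; the inverse-transform sampler below (a geometric variate with success probability p) is an assumption standing in for dask's helper, and the module-level `rnd` alias matches the one the function uses.

import math
import random as rnd

def _geometric(p):
    # Assumed helper: number of trials until first success with probability p.
    return int(math.log(rnd.random()) / math.log(1.0 - p)) + 1

sample, n_seen = _sample_with_replacement_map_partitions(range(1000), k=5)
print(sample)   # 5 elements drawn with replacement from the stream
print(n_seen)   # 1000: the stream length, used later to merge partitions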
|
41,589 | 175,300 | 61 | Lib/enum.py | 22 | 9 | def __setattr__(cls, name, value):
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('cannot reassign member %r' % (name, ))
super().__s | bpo-40066: [Enum] update str() and format() output (GH-30582)
Undo rejected PEP-663 changes:
- restore `repr()` to its 3.10 status
- restore `str()` to its 3.10 status
New changes:
- `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result
- zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == '<Color: 0>'`
- update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type
- added `_numeric_repr_` to `Flag` to control display of unnamed values
- enums without doc strings have a more comprehensive doc string added
- `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum` | __setattr__ | acf7403f9baea3ae1119fc6b4a3298522188bf96 | cpython | enum.py | 11 | 5 | https://github.com/python/cpython.git | 2 | 48 | 0 | 22 | 80 | Python | {
"docstring": "\n Block attempts to reassign Enum members.\n\n A simple assignment to the class namespace only changes one of the\n several possible ways to get an Enum member from the Enum class,\n resulting in an inconsistent Enumeration.\n ",
"language": "en",
"n_whitespaces": 71,
"n_words": 35,
"vocab_size": 28
} | def __setattr__(cls, name, value):
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('cannot reassign member %r' % (name, ))
super().__setattr__(name, value)
|
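Demonstrating the guard above through the public Enum API: reassigning a member through the class raises, while non-member class attributes remain settable.

from enum import Enum

class Color(Enum):
    RED = 1
    GREEN = 2

try:
    Color.RED = 3              # blocked by the metaclass __setattr__ above
except AttributeError as exc:
    print(exc)                 # cannot reassign member 'RED'

Color.palette = "warm"         # not a member name, so assignment is allowed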
|
56,453 | 221,633 | 92 | python3.10.4/Lib/configparser.py | 19 | 7 | def read_file(self, f, source=None):
if source is None:
try:
source = f.name
except AttributeError:
| add python 3.10.4 for windows | read_file | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | configparser.py | 12 | 7 | https://github.com/XX-net/XX-Net.git | 3 | 38 | 0 | 16 | 64 | Python | {
"docstring": "Like read() but the argument must be a file-like object.\n\n The `f' argument must be iterable, returning one line at a time.\n Optional second argument is the `source' specifying the name of the\n file being read. If not given, it is taken from f.name. If `f' has no\n `name' attribute, `<???>' is used.\n ",
"language": "en",
"n_whitespaces": 88,
"n_words": 53,
"vocab_size": 41
} | def read_file(self, f, source=None):
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
|
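Typical use of read_file: any iterable of lines works, so an in-memory StringIO is a common way to parse config text — and since StringIO has no `name` attribute, the source falls back to '<???>' exactly as in the except branch above.

import configparser
import io

parser = configparser.ConfigParser()
parser.read_file(io.StringIO("[server]\nport = 8080\n"))
print(parser.get("server", "port"))   # -> 8080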
|
83,766 | 281,449 | 286 | gamestonk_terminal/cryptocurrency/defi/substack_model.py | 90 | 42 | def get_newsletters() -> pd.DataFrame:
urls = [
"https://defiweekly.substack.com/archive",
"https://newsletter.thedefiant.io/archive",
"https://thedailygwei.substack.com/archive",
"https://todayindefi.substack.com/archive",
"https://newsletter.banklesshq.com/archive",
"https://defislate.substack.com/archive",
]
threads = len(urls)
newsletters = []
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
for newsletter in executor.map(scrape_substack, urls):
try:
newsletters.append(pd.DataFrame(newsletter))
except KeyError as e:
console.print(e, "\n")
continue
df = pd.concat(newsletters, ignore_index=True)
df.columns = ["Title", "Link", "Date"]
df["Title"] = df["Title"].apply(lambda x: "".join(i for i in x if ord(i) < 128))
df["Date"] = df["Date"].apply(
lambda x: parser.parse(x).strftime("%Y-%m-%d %H:%M:%S")
)
df["Title"] = df["Title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x
| Terminal Wide Rich (#1161)
* My idea for how we handle Rich moving forward
* remove independent consoles
* FIxed pylint issues
* add a few vars
* Switched print to console
* More transitions
* Changed more prints
* Replaced all prints
* Fixing tabulate
* Finished replace tabulate
* Finished removing rich from Tabulate
* add Panel around menu
* add GST watermark under feature flag
* Fixed 46 tests
* Delete test_screener[False].yaml
* Delete test_screener[True].yaml
* Fixed the rest of the tests
* add help and source color vars and use rgb
* rich on stocks/options
* update rich on disc, dps, sia
* rich in gov, ins and scr menus
* ba and ca menus with rich
* Fixed import issue
* Fixed some tests
* removed termcolor
* Removed prettytable
* add rich to remaining stocks menus
* FIxed linting issue
* Added James' changes
* Updated dependencies
* Add rich to cryptocurrency menu
* refactor economy and forex
* refactor etf with rich
* refactor mfunds
* refactor rich rest
* not specify style so default color works well on any background
* Fixing mypy issues
* Updated tests
* More test fixes
* James' test fixes
* Updating tests : stocks/screener - fix cassettes using BR
* Updating tests : crypto
* Updating tests : disable DEBUG_MODE
* Updating tests : stocks/fa/yfinance
* minor fixes that escape
* Improve the rich table function (that replaces tabulate :D )
* Fixed bad code
* delete rogue file + dcf fix + NoConsole
* sia mypy
* fuck you linter
* fuck you linter pt 2
* skip hehe
* i hate the black linter
* ubuntu mypy attempt
* Update : rich_config + gtff
* Updating tests : conftest
* Updating tests : stocks
* Update : rich_config
* Updating : rich_config
* make panel configurable for Theodore :b
* colors update
* Merged
* Updating : rich_config + feature_flags
* Updating : rich_config
* Updating tests : stocks
* Updating : feature_flags
Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt>
Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
Co-authored-by: james <jmaslek11@gmail.com>
Co-authored-by: jose-donato <zmcdonato@gmail.com> | get_newsletters | 82747072c511beb1b2672846ae2ee4aec53eb562 | OpenBBTerminal | substack_model.py | 15 | 40 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 6 | 242 | 0 | 72 | 419 | Python | {
"docstring": "Scrape all substack newsletters from url list.\n [Source: substack.com]\n\n Returns\n -------\n pd.DataFrame\n DataFrame with recent news from most popular DeFi related newsletters.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 22,
"vocab_size": 21
} | def get_newsletters() -> pd.DataFrame:
urls = [
"https://defiweekly.substack.com/archive",
"https://newsletter.thedefiant.io/archive",
"https://thedailygwei.substack.com/archive",
"https://todayindefi.substack.com/archive",
"https://newsletter.banklesshq.com/archive",
"https://defislate.substack.com/archive",
]
threads = len(urls)
newsletters = []
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
for newsletter in executor.map(scrape_substack, urls):
try:
newsletters.append(pd.DataFrame(newsletter))
except KeyError as e:
console.print(e, "\n")
continue
df = pd.concat(newsletters, ignore_index=True)
df.columns = ["Title", "Link", "Date"]
df["Title"] = df["Title"].apply(lambda x: "".join(i for i in x if ord(i) < 128))
df["Date"] = df["Date"].apply(
lambda x: parser.parse(x).strftime("%Y-%m-%d %H:%M:%S")
)
df["Title"] = df["Title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x
)
return (
df[["Title", "Date", "Link"]]
.sort_values(by="Date", ascending=False)
.reset_index(drop="index")
)
|
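The concurrency pattern above, reduced to a runnable sketch: executor.map applies a worker to each URL on its own thread and yields results in input order. fetch_title stands in for scrape_substack and is hypothetical; a real scraper would make a network call where the placeholder return is.

import concurrent.futures

def fetch_title(url):
    # Placeholder for real scraping; shaped like one newsletter record.
    return {"Title": [f"post from {url}"], "Link": [url], "Date": ["2022-01-01 10:00"]}

urls = ["https://a.example/archive", "https://b.example/archive"]
with concurrent.futures.ThreadPoolExecutor(max_workers=len(urls)) as executor:
    for result in executor.map(fetch_title, urls):
        print(result["Title"])   # results arrive in input order, one per thread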
|
343 | 2,712 | 76 | packages/syft/src/syft/core/node/common/action/get_enum_attribute_action.py | 11 | 9 | def _object2proto(self) -> GetEnumAttributeAction_PB:
return GetEnumAttri | [syft.core.node.common.action] Change syft import absolute -> relative | _object2proto | e272ed2fa4c58e0a89e273a3e85da7d13a85e04c | PySyft | get_enum_attribute_action.py | 11 | 18 | https://github.com/OpenMined/PySyft.git | 1 | 45 | 0 | 11 | 70 | Python | {
"docstring": "Returns a protobuf serialization of self.\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n :return: returns a protobuf object\n :rtype: GetOrSetPropertyAction_PB\n .. note::\n This method is purely an internal method. Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ",
"language": "en",
"n_whitespaces": 150,
"n_words": 68,
"vocab_size": 56
} | def _object2proto(self) -> GetEnumAttributeAction_PB:
return GetEnumAttributeAction_PB(
path=self.path,
id_at_location=serialize(self.id_at_location),
address=serialize(self.address),
msg_id=serialize(self.id),
)
|
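A schematic stand-in for the object-to-protobuf step: each field is converted to its wire form and packed into a message object. A dataclass substitutes for the generated GetEnumAttributeAction_PB class here, and serialize is a placeholder for syft's serializer — both are assumptions for illustration.

from dataclasses import dataclass

@dataclass
class GetEnumAttributeActionMsg:      # hypothetical stand-in for the _PB class
    path: str
    id_at_location: bytes
    address: bytes
    msg_id: bytes

def serialize(value) -> bytes:        # placeholder for syft's serialize()
    return repr(value).encode()

msg = GetEnumAttributeActionMsg(
    path="syft.lib.SomeEnum.MEMBER",
    id_at_location=serialize("uid-1"),
    address=serialize("node-address"),
    msg_id=serialize("uid-2"),
)
print(msg)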
|
107,924 | 309,217 | 253 | tests/components/seventeentrack/test_sensor.py | 70 | 27 | async def test_becomes_delivered_not_shown_notification(hass):
package = Package(
tracking_number="456",
destination_country=206,
friendly_name="friendly name 1",
info_text="info text 1",
location="location 1",
timestamp="2020-08-10 10:32",
origin_country=206,
package_type=2,
)
ProfileMock.package_list = [package]
await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
assert len(hass.states.async_entity_ids()) == 1
| Import persistent notification (part 4) (#63901) | test_becomes_delivered_not_shown_notification | a672dc3437b95734e44cb3f61b3f3c299627bb1a | core | test_sensor.py | 11 | 33 | https://github.com/home-assistant/core.git | 1 | 159 | 0 | 43 | 265 | Python | {
"docstring": "Ensure notification is triggered when package becomes delivered.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | async def test_becomes_delivered_not_shown_notification(hass):
package = Package(
tracking_number="456",
destination_country=206,
friendly_name="friendly name 1",
info_text="info text 1",
location="location 1",
timestamp="2020-08-10 10:32",
origin_country=206,
package_type=2,
)
ProfileMock.package_list = [package]
await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
assert len(hass.states.async_entity_ids()) == 1
package_delivered = Package(
tracking_number="456",
destination_country=206,
friendly_name="friendly name 1",
info_text="info text 1",
location="location 1",
timestamp="2020-08-10 10:32",
origin_country=206,
package_type=2,
status=40,
)
ProfileMock.package_list = [package_delivered]
with patch(
"homeassistant.components.seventeentrack.sensor.persistent_notification"
) as persistent_notification_mock:
await _goto_future(hass)
persistent_notification_mock.create.assert_called()
assert not hass.states.async_entity_ids()
|
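The patch-and-verify idiom used above, reduced to a runnable sketch with a toy function; the patched target here is builtins.print, purely for illustration of the same assert_called flow.

from unittest import mock

def notify_delivered():
    print("package delivered")      # stands in for persistent_notification.create

with mock.patch("builtins.print") as mocked_print:
    notify_delivered()
    mocked_print.assert_called()    # passes: the patched name was invoked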
|
75,913 | 259,775 | 939 | sklearn/ensemble/_iforest.py | 230 | 38 | def fit(self, X, y=None, sample_weight=None):
X = self._validate_data(X, accept_sparse=["csc"])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if self.contamination != "auto":
if not (0.0 < self.contamination <= 0.5):
raise ValueError(
"contamination must be in (0, 0.5], got: %f" % self.contamination
)
if isinstance(self.max_samples, str):
if self.max_samples == "auto":
max_samples = min(256, n_samples)
else:
raise ValueError(
"max_samples (%s) is not supported."
'Valid choices are: "auto", int or'
"float"
% self.max_samples
)
elif isinstance(self.max_samples, numbers.Integral):
i | ENH Optimize runtime for IsolationForest (#23149) | fit | 767e9ae7e4fec8bea36c0433ab42f500aacfde64 | scikit-learn | _iforest.py | 15 | 54 | https://github.com/scikit-learn/scikit-learn.git | 10 | 318 | 0 | 139 | 503 | Python | {
"docstring": "\n Fit estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Use ``dtype=np.float32`` for maximum\n efficiency. Sparse matrices are also supported, use sparse\n ``csc_matrix`` for maximum efficiency.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If None, then samples are equally weighted.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ",
"language": "en",
"n_whitespaces": 203,
"n_words": 66,
"vocab_size": 54
} | def fit(self, X, y=None, sample_weight=None):
X = self._validate_data(X, accept_sparse=["csc"])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if self.contamination != "auto":
if not (0.0 < self.contamination <= 0.5):
raise ValueError(
"contamination must be in (0, 0.5], got: %f" % self.contamination
)
if isinstance(self.max_samples, str):
if self.max_samples == "auto":
max_samples = min(256, n_samples)
else:
raise ValueError(
"max_samples (%s) is not supported."
'Valid choices are: "auto", int or'
"float"
% self.max_samples
)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn(
"max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples)
)
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not 0.0 < self.max_samples <= 1.0:
raise ValueError(
"max_samples must be in (0, 1], got %r" % self.max_samples
)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(
X,
y,
max_samples,
max_depth=max_depth,
sample_weight=sample_weight,
check_input=False,
)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# else, define offset_ wrt contamination parameter
self.offset_ = np.percentile(self.score_samples(X), 100.0 * self.contamination)
return self
|
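Typical use of the estimator whose fit() is shown above, through scikit-learn's public API; the data is synthetic and the exact predictions depend on the seed.

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 2))                       # inliers around the origin
X_outliers = rng.uniform(low=-6, high=6, size=(10, 2))

clf = IsolationForest(random_state=0).fit(X)        # fit() returns self, as above
print(clf.predict(X_outliers))                      # mostly -1: flagged anomalous
print(clf.score_samples(X_outliers[:3]))            # lower score = more anomalous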