complexity (int64, 1-56) | n_identifiers (int64, 1-114) | code (string, 19-12.7k chars) | path (string, 8-134 chars) | n_ast_nodes (int64, 12-2.35k) | ast_errors (string, 0-4.01k chars) | repo (string, 3-28 chars) | documentation (dict) | n_words (int64, 2-866) | language (string, 1 class) | vocab_size (int64, 2-323) | commit_id (string, 40 chars) | file_name (string, 5-79 chars) | id (int64, 243-338k) | nloc (int64, 1-228) | token_counts (int64, 5-1.4k) | fun_name (string, 1-77 chars) | url (string, 31-60 chars) | commit_message (string, 3-15.3k chars) | n_whitespaces (int64, 1-3.23k) | n_ast_errors (int64, 0-20) | d_id (int64, 74-121k) | ast_levels (int64, 4-29)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
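The rows below reproduce the dataset preview verbatim. As a quick orientation, here is a minimal sketch of how records with this schema could be loaded and filtered, assuming the table is backed by a Hugging Face `datasets` repository; the dataset ID used is a hypothetical placeholder, and the column and `documentation` sub-field names come directly from the schema above.

```python
# A minimal sketch, assuming the preview above comes from a Hugging Face `datasets`
# dataset; "user/python-functions-corpus" is a placeholder ID, not the real name.
from datasets import load_dataset

ds = load_dataset("user/python-functions-corpus", split="train")

# Each record mirrors the columns above: the stripped-out docstring lives in the
# `documentation` dict, while complexity and AST metrics accompany the raw `code`.
simple = ds.filter(lambda row: row["complexity"] <= 2 and row["n_ast_errors"] == 0)
for row in simple.select(range(3)):
    print(row["fun_name"], row["repo"], row["documentation"]["n_words"])
```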
1 | 2 | def minzoom(self):
return self["minzoom"]
| packages/python/plotly/plotly/graph_objs/layout/mapbox/_layer.py | 22 | plotly.py | {
"docstring": "\n Sets the minimum zoom level (mapbox.layer.minzoom). At zoom\n levels less than the minzoom, the layer will be hidden.\n\n The 'minzoom' property is a number and may be specified as:\n - An int or float in the interval [0, 24]\n\n Returns\n -------\n int|float\n ",
"language": "en",
"n_whitespaces": 101,
"n_words": 42,
"vocab_size": 37
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _layer.py | 232,037 | 2 | 11 | minzoom | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 63,481 | 7 |
|
16 | 27 | def telescopic(L, R, limits):
(i, a, b) = limits
if L.is_Add or R.is_Add:
return None
# We want to solve(L.subs(i, i + m) + R, m)
# First we try a simple match since this does things that
# solve doesn't do, e.g. solve(f(k+m)-f(k), m) fails
k = Wild("k")
sol = (-R).match(L.subs(i, i + k))
s = None
if sol and k in sol:
s = sol[k]
if not (s.is_Integer and L.subs(i, i + s) == -R):
# sometimes match fail(f(x+2).match(-f(x+k))->{k: -2 - 2x}))
s = None
# But there are things that match doesn't do that solve
# can do, e.g. determine that 1/(x + m) = 1/(1 - x) when m = 1
if s is None:
m = Dummy('m')
try:
from sympy.solvers.solvers import solve
sol = solve(L.subs(i, i + m) + R, m) or []
except NotImplementedError:
return None
sol = [si for si in sol if si.is_Integer and
(L.subs(i, i + si) + R).expand().is_zero]
if len(sol) != 1:
return None
s = sol[0]
if s < 0:
return telescopic_direct(R, L, abs(s), (i, a, b))
elif s > 0:
return telescopic_direct(L, R, s, (i, a, b))
| sympy/concrete/summations.py | 374 | sympy | {
"docstring": "\n Tries to perform the summation using the telescopic property.\n\n Return None if not possible.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 14,
"vocab_size": 13
} | 189 | Python | 104 | f757f3daae6e11ea0cfb7dadc133274d8d74315f | summations.py | 196,771 | 27 | 242 | telescopic | https://github.com/sympy/sympy.git | Reordered imports 2 | 391 | 0 | 48,161 | 19 |
|
1 | 2 | def startarrowsize(self):
return self["startarrowsize"]
| packages/python/plotly/plotly/graph_objs/layout/_annotation.py | 22 | plotly.py | {
"docstring": "\n Sets the size of the start annotation arrow head, relative to\n `arrowwidth`. A value of 1 (default) gives a head about 3x as\n wide as the line.\n\n The 'startarrowsize' property is a number and may be specified as:\n - An int or float in the interval [0.3, inf]\n\n Returns\n -------\n int|float\n ",
"language": "en",
"n_whitespaces": 117,
"n_words": 51,
"vocab_size": 45
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _annotation.py | 230,902 | 2 | 11 | startarrowsize | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 62,575 | 7 |
|
4 | 12 | def get_dependencies(self, candidate):
# type: (Candidate) -> list[Candidate]
r
# FIXME: If there's several galaxy servers set, there may be a
# FIXME: situation when the metadata of the same collection
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
# NOTE: The underlying implmentation currently uses first found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
# NOTE: after the `get_collection_dependencies()` call because
# NOTE: internally it polulates the artifact URL of the candidate,
# NOTE: its SHA hash and the Galaxy API token. These are still
# NOTE: necessary with `--no-deps` because even with the disabled
# NOTE: dependency resolution the outer layer will still need to
# NOTE: know how to download and validate the artifact.
#
# NOTE: Virtual candidates should always return dependencies
# NOTE: because they are ephemeral and non-installable.
if not self._with_deps and not candidate.is_virtual:
return []
return [
self._make_req_from_dict({'name': dep_name, 'version': dep_req})
for dep_name, dep_req in req_map.items()
]
| lib/ansible/galaxy/dependency_resolution/providers.py | 115 | ansible | {
"docstring": "Get direct dependencies of a candidate.\n\n :returns: A collection of requirements that `candidate` \\\n specifies as its dependencies.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 18,
"vocab_size": 17
} | 178 | Python | 125 | 8b2e6285650ec42ec4a19075a8567047e8304ba2 | providers.py | 266,879 | 13 | 60 | get_dependencies | https://github.com/ansible/ansible.git | galaxy - Clean up type hints and imports. | 364 | 0 | 78,638 | 11 |
|
3 | 7 | def active_loop_name(self) -> Optional[Text]:
if not self.active_loop or self.active_loop.name == SHOULD_NOT_BE_SET:
return None
return self.active_loop.name
| rasa/shared/core/trackers.py | 54 | rasa | {
"docstring": "Get the name of the currently active loop.\n\n Returns: `None` if no active loop or the name of the currently active loop.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 22,
"vocab_size": 13
} | 15 | Python | 13 | e798bf049f036a5865c14d4343ed8a833864aabe | trackers.py | 159,564 | 8 | 33 | active_loop_name | https://github.com/RasaHQ/rasa.git | convert TrackerActiveLoop to a dataclass | 47 | 0 | 38,336 | 9 |
|
1 | 6 | def get_lr(self) -> List:
return [self.config.lr_disc, self.config.lr_gen]
| TTS/tts/models/vits.py | 36 | TTS | {
"docstring": "Set the initial learning rates for each optimizer.\n\n Returns:\n List: learning rates for each optimizer.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 15,
"vocab_size": 10
} | 7 | Python | 7 | 00c7600103ee34ac50506af88f1b34b713f849e7 | vits.py | 262,246 | 7 | 22 | get_lr | https://github.com/coqui-ai/TTS.git | Update Vits model API | 21 | 0 | 77,157 | 8 |
|
6 | 9 | def _convert_args_to_cli(vargs):
args = ['cleanup']
for option in ('exclude_strings', 'remove_images'):
if vargs.get(option):
args.append('--{}={}'.format(option.replace('_', '-'), ' '.join(vargs.get(option))))
for option in ('file_pattern', 'image_prune', 'process_isolation_executable', 'grace_period'):
if vargs.get(option) is True:
args.append('--{}'.format(option.replace('_', '-')))
elif vargs.get(option) not in (None, ''):
args.append('--{}={}'.format(option.replace('_', '-'), vargs.get(option)))
return args
| awx/main/tasks/receptor.py | 251 | awx | {
"docstring": "\n For the ansible-runner worker cleanup command\n converts the dictionary (parsed argparse variables) used for python interface\n into a string of CLI options, which has to be used on execution nodes.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 30,
"vocab_size": 28
} | 40 | Python | 31 | a4a3ba65d736045733cb49430d7076b73aec23bb | receptor.py | 80,333 | 11 | 141 | _convert_args_to_cli | https://github.com/ansible/awx.git | Refactored tasks.py to a package
--- Added 3 new sub-package : awx.main.tasks.system , awx.main.tasks.jobs , awx.main.tasks.receptor
--- Modified the functional tests and unit tests accordingly | 109 | 0 | 17,051 | 17 |
|
1 | 11 | def get_normal_vector(self) -> np.ndarray:
p0, p1, p2 = self.tip.get_start_anchors()[:3]
return normalize(np.cross(p2 - p1, p1 - p0))
| manim/mobject/geometry/line.py | 69 | manim | {
"docstring": "Returns the normal of a vector.\n\n Examples\n --------\n ::\n\n >>> np.round(Arrow().get_normal_vector()) + 0. # add 0. to avoid negative 0 in output\n array([ 0., 0., -1.])\n ",
"language": "en",
"n_whitespaces": 77,
"n_words": 26,
"vocab_size": 24
} | 16 | Python | 14 | e040bcacd38378386749db18aeba575b93f4ebca | line.py | 189,683 | 12 | 43 | get_normal_vector | https://github.com/ManimCommunity/manim.git | Improved structure of the :mod:`.mobject` module (#2476)
* group graphing and update its references
* group text and update its references
* group opengl and update its references
* group three_d and update its references
* group geometry and update (most) references
* move some chaning.py + updater files into animation
* refactor arc.py
* refactor line.py
* refactor polygram.py
* refactor tips.py
* black + isort
* import new files in __init__.py
* refactor places where geometry was used
* black + isort again
* remove unused imports
* update reference.rst
* add descriptions to files
* fix circular imports
* forgot ArrowTip
* fix tests
* fix doctests
* satisfy mypy?
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix ALL merge conflicts
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* one VMobject import slipped through
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* re-add imports to `manim/opengl/__init__.py`
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix reference manual
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* ignore unknown directive type
* fix arrow tip imports in docstrings
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at> | 37 | 0 | 46,164 | 10 |
|
4 | 19 | def get_loan_wise_pledges(filters):
loan_wise_unpledges = {}
current_pledges = {}
conditions = ""
if filters.get("company"):
conditions = "AND company = %(company)s"
unpledges = frappe.db.sql(
.format(
conditions=conditions
),
filters,
as_dict=1,
)
for unpledge in unpledges:
loan_wise_unpledges.setdefault((unpledge.loan, unpledge.loan_security), unpledge.qty)
pledges = frappe.db.sql(
.format(
conditions=conditions
),
filters,
as_dict=1,
)
for security in pledges:
current_pledges.setdefault((security.loan, security.loan_security), security.qty)
current_pledges[(security.loan, security.loan_security)] -= loan_wise_unpledges.get(
(security.loan, security.loan_security), 0.0
)
return current_pledges
| erpnext/loan_management/report/loan_interest_report/loan_interest_report.py | 236 | erpnext | {
"docstring": "\n\t\tSELECT up.loan, u.loan_security, sum(u.qty) as qty\n\t\tFROM `tabLoan Security Unpledge` up, `tabUnpledge` u\n\t\tWHERE u.parent = up.name\n\t\tAND up.status = 'Approved'\n\t\t{conditions}\n\t\tGROUP BY up.loan, u.loan_security\n\t\n\t\tSELECT lp.loan, p.loan_security, sum(p.qty) as qty\n\t\tFROM `tabLoan Security Pledge` lp, `tabPledge`p\n\t\tWHERE p.parent = lp.name\n\t\tAND lp.status = 'Pledged'\n\t\t{conditions}\n\t\tGROUP BY lp.loan, p.loan_security\n\t",
"language": "en",
"n_whitespaces": 39,
"n_words": 51,
"vocab_size": 35
} | 61 | Python | 41 | 494bd9ef78313436f0424b918f200dab8fc7c20b | loan_interest_report.py | 66,352 | 42 | 154 | get_loan_wise_pledges | https://github.com/frappe/erpnext.git | style: format code with black | 33 | 0 | 14,173 | 12 |
|
6 | 5 | def _check_multi_class(multi_class, solver, n_classes):
if multi_class == "auto":
if solver in ("liblinear", "newton-cholesky"):
multi_class = "ovr"
elif n_classes > 2:
multi_class = "multinomial"
else:
multi_class = "ovr"
if multi_class == "multinomial" and solver in ("liblinear", "newton-cholesky"):
raise ValueError("Solver %s does not support a multinomial backend." % solver)
return multi_class
| sklearn/linear_model/_logistic.py | 118 | scikit-learn | {
"docstring": "Computes the multi class type, either \"multinomial\" or \"ovr\".\n\n For `n_classes` > 2 and a solver that supports it, returns \"multinomial\".\n For all other cases, in particular binary classification, return \"ovr\".\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 31,
"vocab_size": 29
} | 49 | Python | 33 | bb080aa690364d84d11232c73dc8db2f0dde3578 | _logistic.py | 261,494 | 11 | 62 | _check_multi_class | https://github.com/scikit-learn/scikit-learn.git | ENH add newton-cholesky solver to LogisticRegression (#24767) | 122 | 0 | 76,838 | 12 |
|
5 | 31 | def get_data(filters, mode_of_payments):
data = []
conditions = get_conditions(filters)
entry = frappe.db.sql(
% (conditions),
as_dict=1,
)
branch_wise_entries, gross_pay = prepare_data(entry)
branches = frappe.db.sql_list(
% (conditions)
)
total_row = {"total": 0, "branch": "Total"}
for branch in branches:
total = 0
row = {"branch": branch}
for mode in mode_of_payments:
if branch_wise_entries.get(branch).get(mode):
row[mode] = branch_wise_entries.get(branch).get(mode)
total += branch_wise_entries.get(branch).get(mode)
row["total"] = total
data.append(row)
total_row = get_total_based_on_mode_of_payment(data, mode_of_payments)
total_deductions = gross_pay - total_row.get("total")
report_summary = []
if data:
data.append(total_row)
data.append({})
data.append({"branch": "<b>Total Gross Pay</b>", mode_of_payments[0]: gross_pay})
data.append({"branch": "<b>Total Deductions</b>", mode_of_payments[0]: total_deductions})
data.append({"branch": "<b>Total Net Pay</b>", mode_of_payments[0]: total_row.get("total")})
currency = erpnext.get_company_currency(filters.company)
report_summary = get_report_summary(
gross_pay, total_deductions, total_row.get("total"), currency
)
return data, total_row, report_summary
| erpnext/payroll/report/salary_payments_based_on_payment_mode/salary_payments_based_on_payment_mode.py | 448 | erpnext | {
"docstring": "\n\t\tselect branch, mode_of_payment, sum(net_pay) as net_pay, sum(gross_pay) as gross_pay\n\t\tfrom `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t\tgroup by branch, mode_of_payment\n\t\t\n\t\tselect distinct branch from `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t",
"language": "en",
"n_whitespaces": 28,
"n_words": 34,
"vocab_size": 22
} | 107 | Python | 71 | 494bd9ef78313436f0424b918f200dab8fc7c20b | salary_payments_based_on_payment_mode.py | 66,953 | 45 | 270 | get_data | https://github.com/frappe/erpnext.git | style: format code with black | 72 | 0 | 14,387 | 16 |
|
1 | 2 | def attribute_rule(allowed_attrs):
| wagtail/core/whitelist.py | 13 | wagtail | {
"docstring": "\n Generator for functions that can be used as entries in Whitelister.element_rules.\n These functions accept a tag, and modify its attributes by looking each attribute\n up in the 'allowed_attrs' dict defined here:\n * if the lookup fails, drop the attribute\n * if the lookup returns a callable, replace the attribute with the result of calling\n it - e.g. {'title': uppercase} will replace 'title' with the result of uppercasing\n the title. If the callable returns None, the attribute is dropped\n * if the lookup returns a truthy value, keep the attribute; if falsy, drop it\n ",
"language": "en",
"n_whitespaces": 125,
"n_words": 93,
"vocab_size": 60
} | 2 | Python | 2 | d10f15e55806c6944827d801cd9c2d53f5da4186 | whitelist.py | 74,706 | 3 | 10 | attribute_rule | https://github.com/wagtail/wagtail.git | Reformat with black | 5 | 0 | 16,302 | 6 |
|
2 | 11 | def get_region_to_control_producer(self) -> KafkaProducer:
if self._publisher is None:
config = settings.KAFKA_TOPICS.get(settings.KAFKA_REGION_TO_CONTROL)
self._publisher = KafkaProducer(
kafka_config.get_kafka_producer_cluster_options(config["cluster"])
)
| src/sentry/region_to_control/producer.py | 73 | sentry | {
"docstring": "\n Creates, if necessary, an arroyo.KafkaProducer client configured for region to control communication and returns\n it, caching it for future calls. Installs an exit handler to close the worker thread processes.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 30,
"vocab_size": 27
} | 16 | Python | 14 | fe07466a1449a5ae60526528ce7bf9399b59b47d | producer.py | 87,145 | 13 | 53 | get_region_to_control_producer | https://github.com/getsentry/sentry.git | chore(hybrid-cloud): Extract region to control silo into service abstraction (#40353)
1. Use the `silo_mode_delegator` to make the silo conditional sensitive
logic of region to control processing like other services that need to
be conditional based on deployment.
2. Leverage the lifecycle management offered by the
`DelegatedBySiloMode` to stop arroyo kafka producer between tests or
after test failures (rather than requiring explicit test fixture clean
up, it's now 'implicit' to the lifecycle of the mocks introduced at the
top level).
3. Add default mocks for the region to control kafka producer so that
most tests do not require kafka running (also improves performance
significantly). There is still the integration test that uses the real
producer.
4. *Attempt* to fix ModuleDeadlock error with more granular importing. I
could not reproduce this issue locally, unfortunately, so this is a best
effort attempt to reduce any circular import possibilities.
Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com> | 78 | 0 | 18,234 | 14 |
|
2 | 6 | def dict_from_cookiejar(cj):
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
| .venv/lib/python3.8/site-packages/pip/_vendor/requests/utils.py | 45 | transferlearning | {
"docstring": "Returns a key/value dictionary from a CookieJar.\n\n :param cj: CookieJar object to extract cookies from.\n :rtype: dict\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 17,
"vocab_size": 16
} | 14 | Python | 12 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | utils.py | 63,636 | 5 | 27 | dict_from_cookiejar | https://github.com/jindongwang/transferlearning.git | upd; format | 33 | 0 | 13,432 | 10 |
|
4 | 18 | def galois_group(T, max_tries=30, randomize=False):
r
from sympy.combinatorics.named_groups import CyclicGroup
gg = {
3: _galois_group_degree_3,
4: _galois_group_degree_4,
5: _galois_group_degree_5,
}
max_supported = max(gg.keys())
n = T.degree()
if n > max_supported:
raise ValueError(f"Only polynomials up to degree {max_supported} are supported.")
if n < 1:
raise ValueError("Constant polynomial has no Galois group.")
if n < 3:
return (CyclicGroup(n), n == 1)
return gg[n](T, max_tries=max_tries, randomize=randomize)
| sympy/polys/numberfields/galoisgroups.py | 171 | sympy | {
"docstring": "\n Compute the Galois group for polynomials *T* up to degree 5.\n\n Parameters\n ==========\n\n T : Poly\n Irreducible, monic polynomial over :ref:`ZZ`, whose Galois group\n is to be determined.\n max_tries : int, default 30\n Make at most this many attempts in those steps that involve\n generating Tschirnhausen transformations.\n randomize : bool, default False\n If ``True``, then use random coefficients when generating Tschirnhausen\n transformations. Otherwise try transformations in a fixed order,\n starting with small coefficients and degrees and working upward.\n\n Returns\n =======\n\n Pair ``(PermutationGroup, bool)``\n The first element is the Galois group, and the second says whether the\n group is contained in the alternating group $A_n$ ($n$ the degree of\n *T*).\n\n Raises\n ======\n\n ValueError\n if *T* is of an unsupported degree.\n\n MaxTriesException\n if could not complete before exceeding *max_tries* in those steps\n that involve generating Tschirnhausen transformations.\n\n ",
"language": "en",
"n_whitespaces": 269,
"n_words": 135,
"vocab_size": 98
} | 62 | Python | 50 | d3c0fc825c4a80904a1fb9a2092137c3d9e0c3fe | galoisgroups.py | 195,681 | 52 | 109 | galois_group | https://github.com/sympy/sympy.git | Add a `galois_group()` function | 133 | 0 | 47,364 | 11 |
|
2 | 12 | def seek(self, pos, whence=SEEK_SET):
if isinstance(pos, float):
raise TypeError('an integer is required')
self._checkClosed()
return os.lseek(self._fd, pos, whence)
| python3.10.4/Lib/_pyio.py | 69 | XX-Net | {
"docstring": "Move to new file position.\n\n Argument offset is a byte count. Optional argument whence defaults to\n SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values\n are SEEK_CUR or 1 (move relative to current position, positive or negative),\n and SEEK_END or 2 (move relative to end of file, usually negative, although\n many platforms allow seeking beyond the end of a file).\n\n Note that not all file objects are seekable.\n ",
"language": "en",
"n_whitespaces": 124,
"n_words": 74,
"vocab_size": 58
} | 17 | Python | 16 | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | _pyio.py | 219,892 | 5 | 43 | seek | https://github.com/XX-net/XX-Net.git | add python 3.10.4 for windows | 56 | 0 | 55,884 | 10 |
|
8 | 19 | def deploy_dask_func(deployer, axis, f_to_deploy, f_args, f_kwargs, *args, **kwargs):
result = deployer(axis, f_to_deploy, f_args, f_kwargs, *args, **kwargs)
ip = get_ip()
if isinstance(result, pandas.DataFrame):
return result, len(result), len(result.columns), ip
elif all(isinstance(r, pandas.DataFrame) for r in result):
return [i for r in result for i in [r, len(r), len(r.columns), ip]]
else:
return [i for r in result for i in [r, None, None, ip]]
| modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py | 192 | modin | {
"docstring": "\n Execute a function on an axis partition in a worker process.\n\n This is ALWAYS called on either ``PandasDataframeAxisPartition.deploy_axis_func``\n or ``PandasDataframeAxisPartition.deploy_func_between_two_axis_partitions``, which both\n serve to deploy another dataframe function on a Dask worker process.\n\n Parameters\n ----------\n deployer : callable\n A `PandasDataFrameAxisPartition.deploy_*` method that will call `deploy_f`.\n axis : {0, 1}\n The axis to perform the function along.\n f_to_deploy : callable or RayObjectID\n The function to deploy.\n f_args : list or tuple\n Positional arguments to pass to ``f_to_deploy``.\n f_kwargs : dict\n Keyword arguments to pass to ``f_to_deploy``.\n *args : list\n Positional arguments to pass to ``func``.\n **kwargs : dict\n Keyword arguments to pass to ``func``.\n\n Returns\n -------\n list\n The result of the function ``func`` and metadata for it.\n ",
"language": "en",
"n_whitespaces": 224,
"n_words": 116,
"vocab_size": 69
} | 61 | Python | 36 | d6d503ac7c3028d871c34d9e99e925ddb0746df6 | virtual_partition.py | 154,492 | 9 | 136 | deploy_dask_func | https://github.com/modin-project/modin.git | FIX-#4597: Refactor Partition handling of func, args, kwargs (#4715)
Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Jonathan Shi <jhshi@ponder.io> | 100 | 0 | 36,015 | 14 |
|
3 | 11 | def get_used_airflow_sources() -> Path:
current_sources = search_upwards_for_airflow_sources_root(Path.cwd())
if current_sources is None:
current_sources = get_installation_airflow_sources()
if current_sources is None:
warn_non_editable()
sys.exit(1)
return current_sources
@lru_cache(maxsize=None) | dev/breeze/src/airflow_breeze/utils/path_utils.py | 88 | @lru_cache(maxsize=None) | airflow | {
"docstring": "\n Retrieves the Root of used Airflow Sources which we operate on. Those are either Airflow sources found\n upwards in directory tree or sources where Breeze was installed from.\n :return: the Path for Airflow sources we use.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 36,
"vocab_size": 30
} | 23 | Python | 15 | bca849b4586c7446438f959b62903da4b997b9ea | path_utils.py | 46,862 | 13 | 43 | get_used_airflow_sources | https://github.com/apache/airflow.git | Switch to `pipx` as the only installation Breeze2 method (#22740)
Switching Breeze2 to only use `pipx` for installation of Breeze2
due to problems it might cause for autocompletion if entrypoint
is not avaiable on PATH. | 70 | 1 | 9,023 | 11 |
1 | 2 | def require_spacy_model(model):
| tests/utils.py | 13 | datasets | {
"docstring": "\n Decorator marking a test that requires a spacy model.\n\n These tests are skipped when they aren't installed.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 17,
"vocab_size": 16
} | 2 | Python | 2 | 0d9c12ad5155c6d505e70813a07c0aecd7120405 | utils.py | 105,894 | 3 | 10 | require_spacy_model | https://github.com/huggingface/datasets.git | Make torch.Tensor and spacy models cacheable (#5191)
* Make torch.Tensor and spacy models cacheable
* Use newest models
* Address comments
* Small optim | 5 | 0 | 22,215 | 6 |
|
6 | 28 | def tutte_polynomial(G):
r
import sympy
x = sympy.Symbol("x")
y = sympy.Symbol("y")
stack = deque()
stack.append(nx.MultiGraph(G))
polynomial = 0
while stack:
G = stack.pop()
bridges = set(nx.bridges(G))
e = None
for i in G.edges:
if (i[0], i[1]) not in bridges and i[0] != i[1]:
e = i
break
if not e:
loops = list(nx.selfloop_edges(G, keys=True))
polynomial += x ** len(bridges) * y ** len(loops)
else:
# deletion-contraction
C = nx.contracted_edge(G, e, self_loops=True)
C.remove_edge(e[0], e[0])
G.remove_edge(*e)
stack.append(G)
stack.append(C)
return sympy.simplify(polynomial)
| networkx/algorithms/polynomials.py | 314 | networkx | {
"docstring": "Returns the Tutte polynomial of `G`\n \n This function computes the Tutte polynomial via an iterative version of\n the deletion-contraction algorithm.\n\n The Tutte polynomial `T_G(x, y)` is a fundamental graph polynomial invariant in\n two variables. It encodes a wide array of information related to the\n edge-connectivity of a graph; \"Many problems about graphs can be reduced to\n problems of finding and evaluating the Tutte polynomial at certain values\" [1]_.\n In fact, every deletion-contraction-expressible feature of a graph is a\n specialization of the Tutte polynomial [2]_ (see Notes for examples).\n\n There are several equivalent definitions; here are three:\n\n Def 1 (rank-nullity expansion): For `G` an undirected graph, `n(G)` the\n number of vertices of `G`, `E` the edge set of `G`, `V` the vertex set of\n `G`, and `c(A)` the number of connected components of the graph with vertex\n set `V` and edge set `A` [3]_:\n\n .. math::\n\n T_G(x, y) = \\sum_{A \\in E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)}\n\n Def 2 (spanning tree expansion): Let `G` be an undirected graph, `T` a spanning\n tree of `G`, and `E` the edge set of `G`. Let `E` have an arbitrary strict\n linear order `L`. Let `B_e` be the unique minimal nonempty edge cut of\n $E \\setminus T \\cup {e}$. An edge `e` is internally active with respect to\n `T` and `L` if `e` is the least edge in `B_e` according to the linear order\n `L`. The internal activity of `T` (denoted `i(T)`) is the number of edges\n in $E \\setminus T$ that are internally active with respect to `T` and `L`.\n Let `P_e` be the unique path in $T \\cup {e}$ whose source and target vertex\n are the same. An edge `e` is externally active with respect to `T` and `L`\n if `e` is the least edge in `P_e` according to the linear order `L`. The\n external activity of `T` (denoted `e(T)`) is the number of edges in\n $E \\setminus T$ that are externally active with respect to `T` and `L`.\n Then [4]_ [5]_:\n\n .. math::\n\n T_G(x, y) = \\sum_{T \\text{ a spanning tree of } G} x^{i(T)} y^{e(T)}\n\n Def 3 (deletion-contraction recurrence): For `G` an undirected graph, `G-e`\n the graph obtained from `G` by deleting edge `e`, `G/e` the graph obtained\n from `G` by contracting edge `e`, `k(G)` the number of cut-edges of `G`,\n and `l(G)` the number of self-loops of `G`:\n\n .. 
math::\n T_G(x, y) = \\begin{cases}\n \t x^{k(G)} y^{l(G)}, & \\text{if all edges are cut-edges or self-loops} \\\\\n T_{G-e}(x, y) + T_{G/e}(x, y), & \\text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop}\n \\end{cases}\n\n Parameters\n ----------\n G : NetworkX graph\n\n Returns\n -------\n instance of `sympy.core.add.Add`\n A Sympy expression representing the Tutte polynomial for `G`.\n\n Examples\n --------\n >>> C = nx.cycle_graph(5)\n >>> nx.tutte_polynomial(C)\n x**4 + x**3 + x**2 + x + y\n\n >>> D = nx.diamond_graph()\n >>> nx.tutte_polynomial(D)\n x**3 + 2*x**2 + 2*x*y + x + y**2 + y\n\n Notes\n -----\n Some specializations of the Tutte polynomial:\n\n - `T_G(1, 1)` counts the number of spanning trees of `G`\n - `T_G(1, 2)` counts the number of connected spanning subgraphs of `G`\n - `T_G(2, 1)` counts the number of spanning forests in `G`\n - `T_G(0, 2)` counts the number of strong orientations of `G`\n - `T_G(2, 0)` counts the number of acyclic orientations of `G`\n\n Edge contraction is defined and deletion-contraction is introduced in [6]_.\n Combinatorial meaning of the coefficients is introduced in [7]_.\n Universality, properties, and applications are discussed in [8]_.\n\n Practically, up-front computation of the Tutte polynomial may be useful when\n users wish to repeatedly calculate edge-connectivity-related information\n about one or more graphs.\n\n References\n ----------\n .. [1] M. Brandt,\n \"The Tutte Polynomial.\"\n Talking About Combinatorial Objects Seminar, 2015\n https://math.berkeley.edu/~brandtm/talks/tutte.pdf\n .. [2] A. Björklund, T. Husfeldt, P. Kaski, M. Koivisto,\n \"Computing the Tutte polynomial in vertex-exponential time\"\n 49th Annual IEEE Symposium on Foundations of Computer Science, 2008\n https://ieeexplore.ieee.org/abstract/document/4691000\n .. [3] Y. Shi, M. Dehmer, X. Li, I. Gutman,\n \"Graph Polynomials,\" p. 14\n .. [4] Y. Shi, M. Dehmer, X. Li, I. Gutman,\n \"Graph Polynomials,\" p. 46\n .. [5] A. Nešetril, J. Goodall,\n \"Graph invariants, homomorphisms, and the Tutte polynomial\"\n https://iuuk.mff.cuni.cz/~andrew/Tutte.pdf\n .. [6] D. B. West,\n \"Introduction to Graph Theory,\" p. 84\n .. [7] G. Coutinho,\n \"A brief introduction to the Tutte polynomial\"\n Structural Analysis of Complex Networks, 2011\n https://homepages.dcc.ufmg.br/~gabriel/seminars/coutinho_tuttepolynomial_seminar.pdf\n .. [8] J. A. Ellis-Monaghan, C. Merino,\n \"Graph polynomials and their applications I: The Tutte polynomial\"\n Structural Analysis of Complex Networks, 2011\n https://arxiv.org/pdf/0803.3079.pdf\n ",
"language": "en",
"n_whitespaces": 1105,
"n_words": 732,
"vocab_size": 354
} | 78 | Python | 59 | f11068c0115ede0c7b631f771c10be7efd0b950b | polynomials.py | 176,426 | 142 | 195 | tutte_polynomial | https://github.com/networkx/networkx.git | Add Tutte polynomial (#5265)
Add a new polynomial module to algorithms for characteristic polynomials.
Adds the Tutte polynomial, which is computed and ultimate represented as a
sympy expression.
Co-authored-by: Dan Schult <dschult@colgate.edu>
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> | 275 | 0 | 41,889 | 15 |
|
1 | 7 | def idxmax(self, **kwargs): # noqa: PR02
return DataFrameDefault.register(pandas.DataFrame.idxmax)(self, **kwargs)
| modin/core/storage_formats/base/query_compiler.py | 44 | modin | {
"docstring": "\n Get position of the first occurrence of the maximum for each row or column.\n\n Parameters\n ----------\n axis : {0, 1}\n skipna : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n One-column QueryCompiler with index labels of the specified axis,\n where each row contains position of the maximum element for the\n corresponding row or column.\n ",
"language": "en",
"n_whitespaces": 177,
"n_words": 62,
"vocab_size": 43
} | 9 | Python | 9 | 57e29bc5d82348006c5170ef9ac0a9eedcd9acf9 | query_compiler.py | 153,822 | 2 | 26 | idxmax | https://github.com/modin-project/modin.git | REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514)
Co-authored-by: Rehan Sohail Durrani <rdurrani@berkeley.edu>
Signed-off-by: jeffreykennethli <jkli@ponder.io> | 24 | 0 | 35,637 | 10 |
|
1 | 5 | def get_mapped_pr_records():
return frappe._dict(
frappe.db.sql(
)
)
| erpnext/buying/report/procurement_tracker/procurement_tracker.py | 32 | erpnext | {
"docstring": "\n\t\tSELECT\n\t\t\tpr_item.purchase_order_item,\n\t\t\tpr.posting_date\n\t\tFROM `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item\n\t\tWHERE\n\t\t\tpr.docstatus=1\n\t\t\tAND pr.name=pr_item.parent\n\t\t\tAND pr_item.purchase_order_item IS NOT NULL\n\t\t\tAND pr.status not in (\"Closed\",\"Completed\",\"Cancelled\")\n\t\t",
"language": "en",
"n_whitespaces": 17,
"n_words": 25,
"vocab_size": 22
} | 7 | Python | 6 | 494bd9ef78313436f0424b918f200dab8fc7c20b | procurement_tracker.py | 65,569 | 16 | 18 | get_mapped_pr_records | https://github.com/frappe/erpnext.git | style: format code with black | 2 | 0 | 13,945 | 10 |
|
1 | 15 | def calc_mean_std(feat, eps=1e-5):
size = feat.size()
assert len(size) == 4, 'The input feature should be 4D tensor.'
b, c = size[:2]
feat_var = feat.view(b, c, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(b, c, 1, 1)
feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)
return feat_mean, feat_std
| modules/codeformer/codeformer_arch.py | 168 | stable-diffusion-webui | {
"docstring": "Calculate mean and std for adaptive_instance_normalization.\n\n Args:\n feat (Tensor): 4D tensor.\n eps (float): A small value added to the variance to avoid\n divide-by-zero. Default: 1e-5.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 25,
"vocab_size": 24
} | 45 | Python | 34 | 6a9b33c848281cb02f38764e4f91ef767f5e3edd | codeformer_arch.py | 152,171 | 8 | 112 | calc_mean_std | https://github.com/AUTOMATIC1111/stable-diffusion-webui.git | codeformer support | 69 | 0 | 35,175 | 13 |
|
4 | 17 | def _lg_directed(G, create_using=None):
L = nx.empty_graph(0, create_using, default=G.__class__)
# Create a graph specific edge function.
get_edges = partial(G.edges, keys=True) if G.is_multigraph() else G.edges
for from_node in get_edges():
# from_node is: (u,v) or (u,v,key)
L.add_node(from_node)
for to_node in get_edges(from_node[1]):
L.add_edge(from_node, to_node)
return L
| networkx/generators/line.py | 128 | networkx | {
"docstring": "Returns the line graph L of the (multi)digraph G.\n\n Edges in G appear as nodes in L, represented as tuples of the form (u,v)\n or (u,v,key) if G is a multidigraph. A node in L corresponding to the edge\n (u,v) is connected to every node corresponding to an edge (v,w).\n\n Parameters\n ----------\n G : digraph\n A directed graph or directed multigraph.\n create_using : NetworkX graph constructor, optional\n Graph type to create. If graph instance, then cleared before populated.\n Default is to use the same graph class as `G`.\n\n ",
"language": "en",
"n_whitespaces": 131,
"n_words": 88,
"vocab_size": 58
} | 42 | Python | 36 | e308b80f17264b89acf8defe185c71c6656d5105 | line.py | 176,348 | 8 | 82 | _lg_directed | https://github.com/networkx/networkx.git | MAINT: Remove unnecessary helper functions, use inbuilt methods for line graph generator (#5327)
* MAINT: Remove unnecessary helper functions, use inbuilt methods
* Use multigraph key to create node, add tests for multi(di)graphs | 92 | 0 | 41,851 | 11 |
|
3 | 5 | def id_for_label(self, id_, index="0"):
if id_ and self.add_id_index:
id_ = "%s_%s" % (id_, index)
return id_
| django/forms/widgets.py | 51 | django | {
"docstring": "\n Use an incremented id for each option where the main widget\n references the zero index.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 14
} | 16 | Python | 14 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | widgets.py | 206,039 | 4 | 30 | id_for_label | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 48 | 0 | 51,334 | 10 |
|
14 | 23 | def validate_snuba() -> None:
if not settings.DEBUG:
return
has_all_snuba_required_backends = (
settings.SENTRY_SEARCH
in (
"sentry.search.snuba.EventsDatasetSnubaSearchBackend",
"sentry.utils.services.ServiceDelegator",
)
and settings.SENTRY_TAGSTORE == "sentry.tagstore.snuba.SnubaTagStorage"
and
# TODO(mattrobenolt): Remove ServiceDelegator check
settings.SENTRY_TSDB
in ("sentry.tsdb.redissnuba.RedisSnubaTSDB", "sentry.utils.services.ServiceDelegator")
)
eventstream_is_snuba = (
settings.SENTRY_EVENTSTREAM == "sentry.eventstream.snuba.SnubaEventStream"
or settings.SENTRY_EVENTSTREAM == "sentry.eventstream.kafka.KafkaEventStream"
)
# All good here, it doesn't matter what else is going on
if has_all_snuba_required_backends and eventstream_is_snuba:
return
from sentry.features import requires_snuba as snuba_features
snuba_enabled_features = set()
for feature in snuba_features:
if settings.SENTRY_FEATURES.get(feature, False):
snuba_enabled_features.add(feature)
if snuba_enabled_features and not eventstream_is_snuba:
from .importer import ConfigurationError
show_big_error(
% "\n".join(snuba_enabled_features)
)
raise ConfigurationError("Cannot continue without Snuba configured.")
if not eventstream_is_snuba:
from .importer import ConfigurationError
show_big_error(
% (
settings.SENTRY_SEARCH,
settings.SENTRY_TAGSTORE,
settings.SENTRY_TSDB,
settings.SENTRY_EVENTSTREAM,
)
)
raise ConfigurationError("Cannot continue without Snuba configured correctly.")
if eventstream_is_snuba and not has_all_snuba_required_backends:
show_big_error(
% (
settings.SENTRY_SEARCH,
settings.SENTRY_TAGSTORE,
settings.SENTRY_TSDB,
settings.SENTRY_EVENTSTREAM,
)
)
| src/sentry/runner/initializer.py | 333 | sentry | {
"docstring": "\n Make sure everything related to Snuba is in sync.\n\n This covers a few cases:\n\n * When you have features related to Snuba, you must also\n have Snuba fully configured correctly to continue.\n * If you have Snuba specific search/tagstore/tsdb backends,\n you must also have a Snuba compatible eventstream backend\n otherwise no data will be written into Snuba.\n * If you only have Snuba related eventstream, yell that you\n probably want the other backends otherwise things are weird.\n \nYou have features enabled which require Snuba,\nbut you don't have any Snuba compatible configuration.\n\nFeatures you have enabled:\n%s\n\nSee: https://github.com/getsentry/snuba#sentry--snuba\n\nIt appears that you are requiring Snuba,\nbut your SENTRY_EVENTSTREAM is not compatible.\n\nCurrent settings:\n\nSENTRY_SEARCH = %r\nSENTRY_TAGSTORE = %r\nSENTRY_TSDB = %r\nSENTRY_EVENTSTREAM = %r\n\nSee: https://github.com/getsentry/snuba#sentry--snuba\nYou are using a Snuba compatible eventstream\nwithout configuring search/tagstore/tsdb also to use Snuba.\nThis is probably not what you want.\n\nCurrent settings:\n\nSENTRY_SEARCH = %r\nSENTRY_TAGSTORE = %r\nSENTRY_TSDB = %r\nSENTRY_EVENTSTREAM = %r\n\nSee: https://github.com/getsentry/snuba#sentry--snuba",
"language": "en",
"n_whitespaces": 182,
"n_words": 165,
"vocab_size": 86
} | 133 | Python | 77 | 2f6716c264bbd916c2773edb8b75cf2e9b26c51b | initializer.py | 85,629 | 98 | 194 | validate_snuba | https://github.com/getsentry/sentry.git | ref: type devserver startup (#38598)
I noticed `sentry devserver 127.0.0.1` produced this error and decided
to prevent it using typing:
```console
$ sentry devserver 127.0.0.1
INFO:The Sentry runner will report development issues to Sentry.io. Use SENTRY_DEVENV_NO_REPORT to avoid reporting issues.
16:33:40 [WARNING] sentry.utils.geo: settings.GEOIP_PATH_MMDB not configured.
/Users/armenzg/code/sentry/src/sentry/runner/initializer.py:571: DeprecatedSettingWarning: The SENTRY_URL_PREFIX setting is deprecated. Please use SENTRY_OPTIONS['system.url-prefix'] instead.
warnings.warn(DeprecatedSettingWarning(old, "SENTRY_OPTIONS['%s']" % new))
16:33:41 [INFO] sentry.plugins.github: apps-not-configured
16:33:41 [INFO] sentry.runner: We have reported the error below to Sentry
/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/sentry_sdk/worker.py:123: ResourceWarning: unclosed <ssl.SSLSocket fd=6, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('192.168.0.14', 58764), raddr=('34.120.195.249', 443)>
callback = self._queue.get()
ResourceWarning: Enable tracemalloc to get the object allocation traceback
Traceback (most recent call last):
File "/Users/armenzg/code/sentry/.venv/bin/sentry", line 33, in <module>
sys.exit(load_entry_point('sentry', 'console_scripts', 'sentry')())
File "/Users/armenzg/code/sentry/src/sentry/runner/__init__.py", line 186, in main
raise e
File "/Users/armenzg/code/sentry/src/sentry/runner/__init__.py", line 178, in main
func(**kwargs)
File "/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/decorators.py", line 26, in new_func
return f(get_current_context(), *args, **kwargs)
File "/Users/armenzg/code/sentry/src/sentry/runner/decorators.py", line 69, in inner
return ctx.invoke(f, *args, **kwargs)
File "/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/decorators.py", line 26, in new_func
return f(get_current_context(), *args, **kwargs)
File "/Users/armenzg/code/sentry/src/sentry/runner/decorators.py", line 29, in inner
return ctx.invoke(f, *args, **kwargs)
File "/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/Users/armenzg/code/sentry/src/sentry/runner/commands/devserver.py", line 215, in devserver
port = port + 1
TypeError: unsupported operand type(s) for +: 'NoneType' and 'int
``` | 580 | 0 | 18,018 | 13 |
|
2 | 8 | def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
| src/datasets/features/image.py | 86 | datasets | {
"docstring": "If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 13
} | 23 | Python | 23 | 3804442bb7cfcb9d52044d92688115cfdc69c2da | image.py | 104,578 | 11 | 48 | flatten | https://github.com/huggingface/datasets.git | Fix flatten of complex feature types (#3723)
* Flatten Translation and TranslationVariableLanguages
* Add tests
* Style
* Flatten for decodable features
* Fix flatten for non-dict types
* Add test
* Descriptive message in flatten for Audio feature
* Small refactor
* Add flatten to features
* Update table_flatten
* Revert changes in Dataset.flatten_/flatten
* Apply Quentin's suggestions from code review
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
* Improve table_flatten docstring
* Fix tests
* Add nested test
* Minor fix
* Remove comment
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> | 125 | 0 | 21,903 | 12 |
|
1 | 9 | def dot(self, other):
from dask.array.routines import tensordot
return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))
| dask/array/core.py | 66 | dask | {
"docstring": "Dot product of self and other.\n\n Refer to :func:`dask.array.tensordot` for full documentation.\n\n See Also\n --------\n dask.array.dot : equivalent function\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 19,
"vocab_size": 19
} | 16 | Python | 15 | 2820bae493a49cb1d0a6e376985c5473b8f04fa8 | core.py | 156,733 | 3 | 45 | dot | https://github.com/dask/dask.git | Don't include docs in ``Array`` methods, just refer to module docs (#9244)
Co-authored-by: James Bourbeau <jrbourbeau@users.noreply.github.com> | 37 | 0 | 36,743 | 12 |
|
1 | 3 | def sort(self) -> None:
raise NotImplementedError()
| tools/sort/sort_methods.py | 23 | faceswap | {
"docstring": " Override for method specific logic for sorting the loaded statistics\n\n The scored list :attr:`_result` should be sorted in place\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 19,
"vocab_size": 18
} | 6 | Python | 6 | 98d01760e469fd2108eed8d0b0a1ba6297c3177c | sort_methods.py | 101,615 | 6 | 12 | sort | https://github.com/deepfakes/faceswap.git | Overhaul sort:
- Standardize image data reading and writing
- Optimize loading (just one pass required)
- Make all sort groups binnable (to greater or lesser results)
- Add sort by pitch
- Deprecate multiple options
- linting, docs + locales | 20 | 0 | 21,023 | 7 |
|
6 | 14 | def losses(self):
collected_losses = []
for layer in self._flatten_layers():
# If any eager losses are present, we assume the model to be part of
# an eager training loop (either a custom one or the one used when
# `run_eagerly=True`) and so we always return just the eager losses.
if layer._eager_losses:
# Filter placeholder losses that may have been added by revived
# layers. (see base_layer_utils for details).
if (
layer._eager_losses[0]
is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER
):
collected_losses.extend(layer._eager_losses)
else:
collected_losses.extend(layer._losses)
for regularizer in layer._callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
collected_losses.append(loss_tensor)
return collected_losses
| keras/engine/base_layer.py | 140 | keras | {
"docstring": "List of losses added using the `add_loss()` API.\n\n Variable regularization tensors are created when this property is\n accessed, so it is eager safe: accessing `losses` under a\n `tf.GradientTape` will propagate gradients back to the corresponding\n variables.\n\n Examples:\n\n >>> class MyLayer(tf.keras.layers.Layer):\n ... def call(self, inputs):\n ... self.add_loss(tf.abs(tf.reduce_mean(inputs)))\n ... return inputs\n >>> l = MyLayer()\n >>> l(np.ones((10, 1)))\n >>> l.losses\n [1.0]\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> x = tf.keras.layers.Dense(10)(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Activity regularization.\n >>> len(model.losses)\n 0\n >>> model.add_loss(tf.abs(tf.reduce_mean(x)))\n >>> len(model.losses)\n 1\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones')\n >>> x = d(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Weight regularization.\n >>> model.add_loss(lambda: tf.reduce_mean(d.kernel))\n >>> model.losses\n [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>]\n\n Returns:\n A list of tensors.\n ",
"language": "en",
"n_whitespaces": 385,
"n_words": 128,
"vocab_size": 83
} | 93 | Python | 71 | fa6d9107a498f7c2403ff28c7b389a1a0c5cc083 | base_layer.py | 277,252 | 16 | 83 | losses | https://github.com/keras-team/keras.git | reduct too long lines | 369 | 0 | 81,916 | 14 |
|
2 | 11 | def store_rendered_templates(store, signal, sender, template, context, **kwargs):
store.setdefault("templates", []).append(template)
if "context" not in store:
store["context"] = ContextList()
store["context"].append(copy(context))
| django/test/client.py | 96 | django | {
"docstring": "\n Store templates and contexts that are rendered.\n\n The context is copied so that it is an accurate representation at the time\n of rendering.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 23,
"vocab_size": 21
} | 18 | Python | 18 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | client.py | 206,346 | 5 | 57 | store_rendered_templates | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 37 | 0 | 51,498 | 10 |
|
2 | 7 | def test_unicode_idval(self) -> None:
values = [
("", r""),
("ascii", r"ascii"),
("ação", r"a\xe7\xe3o"),
("josé@blah.com", r"jos\xe9@blah.com"),
(
r"δοκ.ιμή@παράδειγμα.δοκιμή",
r"\u03b4\u03bf\u03ba.\u03b9\u03bc\u03ae@\u03c0\u03b1\u03c1\u03ac\u03b4\u03b5\u03b9\u03b3"
r"\u03bc\u03b1.\u03b4\u03bf\u03ba\u03b9\u03bc\u03ae",
),
]
for val, expected in values:
assert (
IdMaker([], [], None, None, None, None)._idval(val, "a", 6) == expected
)
| testing/python/metafunc.py | 135 | pytest | {
"docstring": "Test that Unicode strings outside the ASCII character set get\n escaped, using byte escapes if they're in that range or unicode\n escapes if they're not.\n\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 25,
"vocab_size": 21
} | 39 | Python | 35 | b21b008118fc8cf65b4bcd9b059f1cd704e05c68 | metafunc.py | 190,666 | 21 | 88 | test_unicode_idval | https://github.com/pytest-dev/pytest.git | Refactor idmaker functions into class IdMaker
This commit only refactors, it does not change or add functionality yet. Public
API is retained. Reason or refactoring:
User provided parameter IDs (e.g. Metafunc.parametrize(ids=...)) had so far
only been used to calculate a unique test ID for each test invocation. That
test ID was a joined string where each parameter contributed some partial ID.
We're soon going to reuse functionality to generate parameter keys for
reorder_items and FixtureDef cache. We will be interested in the partial
IDs, and only if they originate from explicit user information. Refactoring
makes logic and data accessible for reuse, and increases cohesion in general. | 215 | 0 | 46,373 | 14 |
|
2 | 7 | def _has_webengine(self) -> bool:
try:
import qutebrowser.qt.webenginewidgets # pylint: disable=unused-import
except ImportError:
return False
return True
| qutebrowser/config/configfiles.py | 40 | qutebrowser | {
"docstring": "Check if QtWebEngine is available.\n\n Note that it's too early to use objects.backend here...\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 14,
"vocab_size": 14
} | 16 | Python | 15 | 218f490484066660dd4e899da600b252f7edd468 | configfiles.py | 321,750 | 10 | 23 | _has_webengine | https://github.com/qutebrowser/qutebrowser.git | Warn on QtWebEngine downgrade and Qt 5 -> 6 upgrade | 67 | 0 | 117,884 | 8 |
|
2 | 15 | def tag_resource(self, resource_ids, tags, resource_type="instance"):
request = TagResourcesRequest()
request.set_Tags(tags)
request.set_ResourceType(resource_type)
request.set_ResourceIds(resource_ids)
response = self._send_request(request)
if response is not None:
logging.info("instance %s create tag successfully.", resource_ids)
else:
logging.error("instance %s create tag failed.", resource_ids)
| python/ray/autoscaler/_private/aliyun/utils.py | 117 | ray | {
"docstring": "Create and bind tags to specified ECS resources.\n\n :param resource_ids: The IDs of N resources.\n :param tags: The tags of the resource.\n :param resource_type: The type of the resource.\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 29,
"vocab_size": 19
} | 32 | Python | 26 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | utils.py | 130,357 | 10 | 69 | tag_resource | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 110 | 0 | 29,243 | 11 |
|
3 | 22 | async def async_close_cover(self, **kwargs):
await mqtt.async_publish(
self.hass,
self._config.get(CONF_COMMAND_TOPIC),
self._config[CONF_PAYLOAD_CLOSE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
# Optimistically assume that cover has changed state.
self._state = STATE_CLOSED
if self._config.get(CONF_GET_POSITION_TOPIC):
self._position = self.find_percentage_in_range(
self._config[CONF_POSITION_CLOSED], COVER_PAYLOAD
)
self.async_write_ha_state()
| homeassistant/components/mqtt/cover.py | 150 | core | {
"docstring": "Move the cover down.\n\n This method is a coroutine.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 9,
"vocab_size": 9
} | 35 | Python | 32 | d0c4f0fec4216e4193da716001b5e13e1e3f2106 | cover.py | 308,401 | 16 | 98 | async_close_cover | https://github.com/home-assistant/core.git | Add mqtt encoding support for publishing (#62739)
* encoding support for mqtt publishing - todo tests
* signature allows None values for qos and retain
* common test for mqtt publishing encoding
* better test with command templates
* more tests
* fix tests alarm control panel+tests light basic
* tests light json and template
* add tests vacuum and fix tests light_template | 222 | 0 | 107,158 | 14 |
|
4 | 9 | def links(self):
header = self.headers.get("link")
resolved_links = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get("rel") or link.get("url")
resolved_links[key] = link
return resolved_links
| pipenv/patched/pip/_vendor/requests/models.py | 100 | pipenv | {
"docstring": "Returns the parsed header links of the response, if any.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | 27 | Python | 21 | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | models.py | 22,098 | 9 | 57 | links | https://github.com/pypa/pipenv.git | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | 114 | 0 | 4,177 | 14 |
|
1 | 8 | def text(self, body):
text_proto = TextProto()
text_proto.body = clean_text(body)
return self.dg._enqueue("text", text_proto)
| lib/streamlit/elements/text.py | 55 | streamlit | {
"docstring": "Write fixed-width and preformatted text.\n\n Parameters\n ----------\n body : str\n The string to display.\n\n Example\n -------\n >>> st.text('This is some text.')\n\n ",
"language": "en",
"n_whitespaces": 81,
"n_words": 21,
"vocab_size": 21
} | 12 | Python | 11 | 72703b38029f9358a0ec7ca5ed875a6b438ece19 | text.py | 118,743 | 4 | 32 | text | https://github.com/streamlit/streamlit.git | Replace static apps with live Cloud apps (#4317)
Co-authored-by: kajarenc <kajarenc@gmail.com> | 40 | 0 | 26,400 | 8 |
|
1 | 2 | def baseratio(self):
return self["baseratio"]
| packages/python/plotly/plotly/graph_objs/_funnelarea.py | 22 | plotly.py | {
"docstring": "\n Sets the ratio between bottom length and maximum top length.\n\n The 'baseratio' property is a number and may be specified as:\n - An int or float in the interval [0, 1]\n\n Returns\n -------\n int|float\n ",
"language": "en",
"n_whitespaces": 86,
"n_words": 34,
"vocab_size": 32
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _funnelarea.py | 226,825 | 2 | 11 | baseratio | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 58,498 | 7 |
|
1 | 16 | def test_sum_distinct_aggregate(self):
authors = Author.objects.filter(book__in=[self.b5, self.b6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum("age"))
self.assertEqual(age_sum["age__sum"], 103)
| tests/aggregation/tests.py | 132 | django | {
"docstring": "\n Sum on a distinct() QuerySet should aggregate only the distinct items.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 11
} | 26 | Python | 24 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | tests.py | 200,894 | 7 | 79 | test_sum_distinct_aggregate | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 82 | 0 | 49,822 | 11 |
|
2 | 3 | def test_episodes_unit(self):
self.batch_id = 0
| rllib/utils/replay_buffers/tests/test_reservoir_buffer.py | 21 | ray | {
"docstring": "Tests adding, sampling, get-/set state, and eviction with\n experiences stored by timesteps.",
"language": "en",
"n_whitespaces": 18,
"n_words": 12,
"vocab_size": 12
} | 5 | Python | 5 | acf2bf9b2fa9f6cac8c599ec1eea6a9d5249905f | test_reservoir_buffer.py | 126,148 | 14 | 104 | test_episodes_unit | https://github.com/ray-project/ray.git | [RLlib] Get rid of all these deprecation warnings. (#27085) | 19 | 0 | 28,072 | 7 |
|
1 | 2 | def test_presubmit_shortcircuit(ray_start_1_cpu):
| python/ray/util/dask/tests/test_dask_callback.py | 13 | ray | {
"docstring": "\n Test that presubmit return short-circuits task submission, and that task's\n result is set to the presubmit return value.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 18,
"vocab_size": 15
} | 2 | Python | 2 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | test_dask_callback.py | 133,142 | 8 | 43 | test_presubmit_shortcircuit | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 5 | 0 | 29,941 | 6 |
|
3 | 17 | def _check_prepopulated_fields_value(self, obj, val, label):
if not isinstance(val, (list, tuple)):
return must_be("a list or tuple", option=label, obj=obj, id="admin.E029")
else:
return list(
chain.from_iterable(
self._check_prepopulated_fields_value_item(
obj, subfield_name, "%s[%r]" % (label, index)
)
for index, subfield_name in enumerate(val)
)
)
| django/contrib/admin/checks.py | 120 | django | {
"docstring": "Check a value of `prepopulated_fields` dictionary, i.e. it's an\n iterable of existing fields.",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 12
} | 37 | Python | 33 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | checks.py | 203,345 | 12 | 78 | _check_prepopulated_fields_value | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 201 | 0 | 50,319 | 16 |
|
5 | 31 | def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r
requires_backends(cls, "pyctcdecode")
from pyctcdecode import BeamSearchDecoderCTC
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
if os.path.isdir(pretrained_model_name_or_path):
decoder = BeamSearchDecoderCTC.load_from_dir(pretrained_model_name_or_path)
else:
# BeamSearchDecoderCTC has no auto class
kwargs.pop("_from_auto", None)
# make sure that only relevant filenames are downloaded
language_model_filenames = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*")
alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
allow_regex = [language_model_filenames, alphabet_filename]
decoder = BeamSearchDecoderCTC.load_from_hf_hub(
pretrained_model_name_or_path, allow_regex=allow_regex, **kwargs
)
# set language model attributes
for attribute in ["alpha", "beta", "unk_score_offset", "score_boundary"]:
value = kwargs.pop(attribute, None)
if value is not None:
cls._set_language_model_attribute(decoder, attribute, value)
# make sure that decoder's alphabet and tokenizer's vocab match in content
missing_decoder_tokens = cls.get_missing_alphabet_tokens(decoder, tokenizer)
if len(missing_decoder_tokens) > 0:
raise ValueError(
f"The tokens {missing_decoder_tokens} are defined in the tokenizer's "
"vocabulary, but not in the decoder's alphabet. "
f"Make sure to include {missing_decoder_tokens} in the decoder's alphabet."
)
return cls(feature_extractor=feature_extractor, tokenizer=tokenizer, decoder=decoder)
| src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py | 321 | transformers | {
"docstring": "\n Instantiate a [`Wav2Vec2ProcessorWithLM`] from a pretrained Wav2Vec2 processor.\n\n <Tip>\n\n This class method is simply calling Wav2Vec2FeatureExtractor's\n [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`], Wav2Vec2CTCTokenizer's\n [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`], and\n [`pyctcdecode.BeamSearchDecoderCTC.load_from_hf_hub`].\n\n Please refer to the docstrings of the methods above for more information.\n\n </Tip>\n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n This can be either:\n\n - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on\n huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or\n namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.\n - a path to a *directory* containing a feature extractor file saved using the\n [`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`.\n - a path or url to a saved feature extractor JSON *file*, e.g.,\n `./my_model_directory/preprocessor_config.json`.\n **kwargs\n Additional keyword arguments passed along to both [`SequenceFeatureExtractor`] and\n [`PreTrainedTokenizer`]\n ",
"language": "en",
"n_whitespaces": 375,
"n_words": 124,
"vocab_size": 89
} | 137 | Python | 100 | efb35a4107478f7d2ebcf56572c0967e68536e15 | processing_wav2vec2_with_lm.py | 33,998 | 56 | 194 | from_pretrained | https://github.com/huggingface/transformers.git | [Wav2Vec2ProcessorWithLM] improve decoder downlaod (#15040) | 445 | 0 | 6,183 | 12 |
|
2 | 8 | async def follower_loop(self):
try:
await self._connect_to_leaders()
except Exception as e:
logger.error("Exception occurred in follower loop: ")
logger.exception(e)
| freqtrade/rpc/replicate/__init__.py | 60 | freqtrade | {
"docstring": "\n Main follower coroutine\n\n This starts all of the leader connection coros\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 11,
"vocab_size": 11
} | 17 | Python | 17 | 9f6bba40af1a407f190a89f5c0c8b4e3f528ba46 | __init__.py | 150,412 | 6 | 31 | follower_loop | https://github.com/freqtrade/freqtrade.git | initial concept for replicate, basic leader and follower logic | 71 | 0 | 34,736 | 11 |
|
1 | 4 | def coverage_ratio(self) -> float:
return self._coverage_ratio
| scripts/convert.py | 22 | faceswap | {
"docstring": " float: The coverage ratio that the model was trained at. ",
"language": "en",
"n_whitespaces": 11,
"n_words": 10,
"vocab_size": 10
} | 6 | Python | 6 | 1022651eb8a7741014f5d2ec7cbfe882120dfa5f | convert.py | 101,373 | 3 | 12 | coverage_ratio | https://github.com/deepfakes/faceswap.git | Bugfix: convert - Gif Writer
- Fix non-launch error on Gif Writer
- convert plugins - linting
- convert/fs_media/preview/queue_manager - typing
- Change convert items from dict to Dataclass | 20 | 0 | 20,788 | 6 |
|
1 | 6 | def test_standard_get_document_model_string(self):
del settings.WAGTAILDOCS_DOCUMENT_MODEL
self.assertEqual(get_document_model_string(), "wagtaildocs.Document")
| wagtail/documents/tests/test_models.py | 37 | wagtail | {
"docstring": "Test get_document_model_string with no WAGTAILDOCS_DOCUMENT_MODEL",
"language": "en",
"n_whitespaces": 4,
"n_words": 5,
"vocab_size": 5
} | 6 | Python | 6 | d10f15e55806c6944827d801cd9c2d53f5da4186 | test_models.py | 74,850 | 3 | 20 | test_standard_get_document_model_string | https://github.com/wagtail/wagtail.git | Reformat with black | 27 | 0 | 16,328 | 9 |
|
1 | 4 | def get_denominations() -> Dict[DENOMINATION, float]:
return {
"Trillions": 1_000_000_000_000,
"Billions": 1_000_000_000,
"Millions": 1_000_000,
"Thousands": 1_000,
"Units": 1,
}
| openbb_terminal/helpers_denomination.py | 61 | OpenBBTerminal | {
"docstring": "Gets all supported denominations and their lower bound value\n\n Returns:\n Dict[DENOMINATION, int]: All supported denominations and their lower bound value\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 20,
"vocab_size": 13
} | 18 | Python | 18 | 07c08df84e2af99be4ee32ab276128cafb9e7986 | helpers_denomination.py | 285,833 | 13 | 35 | get_denominations | https://github.com/OpenBB-finance/OpenBBTerminal.git | Bug/2583 (#2671)
* #2583 [CT] Add and use denomination helper
* #2583 [CT] Fix Yahoo Finance denomination
* #2583 [CT] Fix typings for dict
* #2583 [CT] Add YF model get financials tests
* #2583 [CT] Fix stubbed currency
* #2583 [CT] Add test coverage for denomination helpers
* #2583 [CT] Fix YF view not exporting raw data
Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt>
Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> | 62 | 0 | 85,447 | 8 |
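A small sketch of how such a mapping might be used to rescale a raw figure; it assumes only the `get_denominations` helper shown above:

    def scale_to(value: float, denomination: str) -> float:
        # Divide by the denomination's lower bound, e.g. 2_500_000_000 -> 2.5 "Billions".
        return value / get_denominations()[denomination]

    scale_to(2_500_000_000, "Billions")  # 2.5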
|
5 | 10 | def update_parent_account_names(accounts):
name_to_account_map = {}
for d in accounts:
if d.account_number:
account_name = d.account_number + " - " + d.account_name
else:
account_name = d.account_name
name_to_account_map[d.name] = account_name
for account in accounts:
if account.parent_account:
account["parent_account_name"] = name_to_account_map.get(account.parent_account)
return accounts
| erpnext/accounts/report/consolidated_financial_statement/consolidated_financial_statement.py | 118 | erpnext | {
"docstring": "Update parent_account_name in accounts list.\n\n\tparent_name is `name` of parent account which could have other prefix\n\tof account_number and suffix of company abbr. This function adds key called\n\t`parent_account_name` which does not have such prefix/suffix.\n\t",
"language": "en",
"n_whitespaces": 31,
"n_words": 35,
"vocab_size": 31
} | 38 | Python | 25 | 494bd9ef78313436f0424b918f200dab8fc7c20b | consolidated_financial_statement.py | 65,199 | 12 | 71 | update_parent_account_names | https://github.com/frappe/erpnext.git | style: format code with black | 26 | 0 | 13,822 | 13 |
|
12 | 15 | def test_cluster_interrupt_searcher(start_connected_cluster, tmpdir, searcher):
cluster = start_connected_cluster
dirpath = str(tmpdir)
local_checkpoint_dir = os.path.join(dirpath, "experiment")
from ray.tune import register_trainable
register_trainable("trainable", MyTrainableClass)
| python/ray/tune/tests/test_cluster_searcher.py | 72 | ray | {
"docstring": "Tests restoration of HyperOptSearch experiment on cluster shutdown\n with actual interrupt.\n\n Restoration should restore both state of trials\n and previous search algorithm (HyperOptSearch) state.\n This is an end-to-end test.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 29,
"vocab_size": 28
} | 20 | Python | 18 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | test_cluster_searcher.py | 132,452 | 60 | 313 | test_cluster_interrupt_searcher | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 38 | 0 | 29,762 | 9 |
|
1 | 3 | def __invert__(self):
return NotAny(self)
| .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 21 | transferlearning | {
"docstring": "\n Implementation of ~ operator - returns :class:`NotAny`\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | 4 | Python | 4 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | pyparsing.py | 63,387 | 2 | 11 | __invert__ | https://github.com/jindongwang/transferlearning.git | upd; format | 18 | 0 | 13,282 | 7 |
|
1 | 2 | def solidity(self):
return self["solidity"]
| packages/python/plotly/plotly/graph_objs/bar/marker/_pattern.py | 22 | plotly.py | {
"docstring": "\n Sets the solidity of the pattern fill. Solidity is roughly the\n fraction of the area filled by the pattern. Solidity of 0 shows\n only the background color without pattern and solidty of 1\n shows only the foreground color without pattern.\n\n The 'solidity' property is a number and may be specified as:\n - An int or float in the interval [0, 1]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n ",
"language": "en",
"n_whitespaces": 157,
"n_words": 75,
"vocab_size": 52
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _pattern.py | 228,784 | 2 | 11 | solidity | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 60,457 | 7 |
|
5 | 13 | def extract_data(self, response):
try:
data = response.json()
except ValueError as e: # If there was no json to parse
data = {}
if response.text or response.status_code not in (200, 202, 204):
text = response.text
if len(text) > 1024:
text = text[:1024] + '... <<< Truncated >>> ...'
log.debug("Unable to parse JSON response ({0.status_code}): {1} - '{2}'".format(response, e, text))
return data
| awxkit/awxkit/api/pages/page.py | 137 | awx | {
"docstring": "Takes a `requests.Response` and returns a data dict.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 7
} | 60 | Python | 50 | 68a44529b6b77d2d43d7099b654560bfd8bbf518 | page.py | 81,964 | 11 | 83 | extract_data | https://github.com/ansible/awx.git | Register pages for the Instance peers and install bundle endpoints
This includes exposing a new interface for Page objects, Page.bytes,
to return the full bytestring contents of the response. | 186 | 0 | 17,284 | 16 |
|
1 | 9 | def add_to_apply_calls(self, func, *args, **kwargs):
return PandasOnPythonDataframePartition(
self._data.copy(),
call_queue=self.call_queue + [(func, args, kwargs)],
)
| modin/core/execution/python/implementations/pandas_on_python/partitioning/partition.py | 63 | modin | {
"docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnPythonDataframePartition\n New ``PandasOnPythonDataframePartition`` object with extended call queue.\n ",
"language": "en",
"n_whitespaces": 167,
"n_words": 52,
"vocab_size": 34
} | 14 | Python | 14 | 4ec7f6347903f9133c65ebc5b6e0e15553b98577 | partition.py | 153,874 | 5 | 42 | add_to_apply_calls | https://github.com/modin-project/modin.git | REFACTOR-#4530: Standardize access to physical data in partitions (#4563)
Signed-off-by: Alexey Prutskov <lehaprutskov@gmail.com> | 57 | 0 | 35,677 | 11 |
|
1 | 6 | def callback_data(self) -> JSONData:
return json.loads(self.data["callback_id"])
| src/sentry/integrations/slack/requests/action.py | 36 | sentry | {
"docstring": "\n We store certain data in ``callback_id`` as JSON. It's a bit hacky, but\n it's the simplest way to store state without saving it on the Sentry\n side.\n\n Data included in this field:\n - issue: the ID of the corresponding Issue\n - orig_response_url: URL from the original message we received\n - is_message: did the original message have a 'message' type\n ",
"language": "en",
"n_whitespaces": 128,
"n_words": 59,
"vocab_size": 47
} | 6 | Python | 6 | 10fbaf4b856f85879611d50b714fa47eb4a358c3 | action.py | 88,279 | 12 | 20 | callback_data | https://github.com/getsentry/sentry.git | ref: add src/sentry/utils/json.py to mypy.ini (#41133)
first commit I sorted some of the mypy files (separated out to make the
diff of the second commit easier to follow) | 20 | 0 | 18,370 | 9 |
|
8 | 42 | def test_overlap_first(business_client, setup_before_upload, show_overlap_first):
c = business_client
config = dict(
title='test_overlap_first',
is_published=True,
maximum_annotations=1,
show_overlap_first=show_overlap_first,
sampling="Uniform sampling",
label_config=
)
project = make_project(config, business_client.user)
annotation_result = json.dumps([{
'from_name': 'text_class',
'to_name': 'text',
'type': 'choices',
'value': {'choices': ['class_A']}
}])
num_tasks = 1000
overlap_cohort_percentage = 1
# set up tasks overlap
setup_after_upload = True
if setup_before_upload:
r = c.patch(
f'/api/projects/{project.id}/',
data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}),
content_type='application/json'
)
assert r.status_code == 200
setup_after_upload = False
# create tasks
tasks = []
for i in range(num_tasks):
tasks.append({'data': {'text': f'this is {str(i)}'}})
r = business_client.post(
f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json')
assert r.status_code == 201
if setup_after_upload:
r = c.patch(
f'/api/projects/{project.id}/',
data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}),
content_type='application/json'
)
assert r.status_code == 200
expected_tasks_with_overlap = int(overlap_cohort_percentage / 100. * num_tasks)
assert Task.objects.filter(Q(project_id=project.id) & Q(overlap__gt=1)).count() == expected_tasks_with_overlap
| label_studio/tests/test_next_task.py | 474 | label-studio | {
"docstring": "\n <View>\n <Text name=\"text\" value=\"$text\"></Text>\n <Choices name=\"text_class\" choice=\"single\">\n <Choice value=\"class_A\"></Choice>\n <Choice value=\"class_B\"></Choice>\n </Choices>\n </View>",
"language": "en",
"n_whitespaces": 104,
"n_words": 13,
"vocab_size": 12
} | 122 | Python | 84 | 35125cca12ba1e8703c4284894e4e2db44ce7009 | test_next_task.py | 177,582 | 63 | 396 | test_overlap_first | https://github.com/heartexlabs/label-studio.git | fix: DEV-1348: Fix _rearrange_overlap_cohort filter condition for overlap bulk update with concurrent import (#1844)
* [fix] Rearrange overlap depending in annotations count
* Fix next task test for not random overlap assignment
* Delete unused method
* Rename rearrange method to have back compatibility
* Refactor to Q_finished_annotations from tasks.models
* Fix filter for tasks with max annotations
* Change filter for tasks with max annotations
* Change project stats recalculation condition
* Fix rearrange during import from storage
* Change _rearrange_overlap_cohort filter condition
* Switching to bulk_update in _rearrange_overlap_cohort
* Stylize code
* Add is_labeled on import
* Fix tests
* Fix tests
* Fix tests more
Co-authored-by: nik <nik@heartex.net>
Co-authored-by: Sergei Ivashchenko <triklozoid@gmail.com>
Co-authored-by: niklub <lubimov.nicolas@gmail.com>
Co-authored-by: Max Tkachenko <makseq@gmail.com> | 377 | 0 | 42,449 | 17 |
|
5 | 18 | def get_variant(template, args=None, variant=None, manufacturer=None, manufacturer_part_no=None):
item_template = frappe.get_doc("Item", template)
if item_template.variant_based_on == "Manufacturer" and manufacturer:
return make_variant_based_on_manufacturer(item_template, manufacturer, manufacturer_part_no)
else:
if isinstance(args, str):
args = json.loads(args)
if not args:
frappe.throw(_("Please specify at least one attribute in the Attributes table"))
return find_variant(template, args, variant)
| erpnext/controllers/item_variant.py | 143 | erpnext | {
"docstring": "Validates Attributes and their Values, then looks for an exactly\n\tmatching Item Variant\n\n\t:param item: Template Item\n\t:param args: A dictionary with \"Attribute\" as key and \"Attribute Value\" as value\n\t",
"language": "en",
"n_whitespaces": 26,
"n_words": 30,
"vocab_size": 26
} | 44 | Python | 40 | 494bd9ef78313436f0424b918f200dab8fc7c20b | item_variant.py | 65,637 | 10 | 90 | get_variant | https://github.com/frappe/erpnext.git | style: format code with black | 34 | 0 | 13,965 | 15 |
|
1 | 30 | def replaceHTMLEntity(t):
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form ``/* ... */``"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form ``<!-- ... -->``"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form ``// ... (to end of line)``"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment")
"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
javaStyleComment = cppStyleComment
"Same as :class:`cppStyleComment`"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form ``# ... (to end of line)``"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')
+ Optional(Word(" \t")
+ ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem")
commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList")
# some other useful expressions - using lower-case class name since we are really using this as a namespace | .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 347 | transferlearning | {
"docstring": "Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or\nquoted strings, separated by commas.\n\nThis expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.\n",
"language": "en",
"n_whitespaces": 31,
"n_words": 34,
"vocab_size": 31
} | 141 | Python | 91 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | pyparsing.py | 63,296 | 2 | 15 | replaceHTMLEntity | https://github.com/jindongwang/transferlearning.git | upd; format | 207 | 0 | 13,236 | 21 |
|
1 | 7 | def test_reading_jsonl_dataset_should_be_successful(tasks_base_path):
dataset = JsonlDataset(tasks_base_path / "jsonl/train.jsonl")
assert len(dataset.sentences) == 5
assert dataset.sentences[0].to_tagged_string() == "This is New <B-LOC> Berlin <I-LOC>"
assert dataset.sentences[1].to_tagged_string() == "This is New <B-LOC> Berlin <I-LOC> ."
assert dataset.sentences[2].to_tagged_string() == "This is New <B-LOC> Berlin <I-LOC> . <I-LOC>"
assert (
dataset.sentences[3].to_tagged_string()
== "EU <B-ORG> rejects German <B-MISC> call to boycott British <B-MISC> lamb <I-MISC> ."
)
| tests/test_datasets.py | 133 | flair | {
"docstring": "\n Tests reading a JsonlDataset containing multiple tagged entries\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 8
} | 59 | Python | 37 | a3120b5179f51308d4c0c1f4865873debb566bbd | test_datasets.py | 214,487 | 10 | 77 | test_reading_jsonl_dataset_should_be_successful | https://github.com/flairNLP/flair.git | refactor: :recycle: make label_type configurable for Jsonl corpora | 97 | 0 | 53,743 | 11 |
|
1 | 8 | def rands(nchars) -> str:
return "".join(np.random.choice(RANDS_CHARS, nchars))
| pandas/_testing/_random.py | 42 | pandas | {
"docstring": "\n Generate one random byte string.\n\n See `rands_array` if you want to create an array of random strings.\n\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 17,
"vocab_size": 16
} | 7 | Python | 7 | f538568afc2c76c2d738d32e3544cf9fe6742960 | _random.py | 167,582 | 8 | 24 | rands | https://github.com/pandas-dev/pandas.git | TYP: misc return type annotations (#47558) | 13 | 0 | 40,041 | 10 |
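A self-contained sketch of the helper, assuming `RANDS_CHARS` is, as in pandas' testing utilities, an array of single ASCII letters and digits:

    import string
    import numpy as np

    RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))

    def rands(nchars) -> str:
        # Draw nchars random characters and join them into one string.
        return "".join(np.random.choice(RANDS_CHARS, nchars))

    rands(8)   # e.g. 'aZ3kQ9xb'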
|
1 | 8 | def test_get_action(self):
action_name = "delete_selected"
self.assertEqual(self.site.get_action(action_name), delete_selected)
self.site.disable_action(action_name)
self.assertEqual(self.site.get_action(action_name), delete_selected)
| tests/admin_views/test_adminsite.py | 79 | django | {
"docstring": "AdminSite.get_action() returns an action even if it's disabled.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | 10 | Python | 8 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | test_adminsite.py | 207,506 | 5 | 47 | test_get_action | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 45 | 0 | 51,992 | 10 |
|
1 | 2 | def arrowsize(self):
return self["arrowsize"]
| packages/python/plotly/plotly/graph_objs/layout/_annotation.py | 22 | plotly.py | {
"docstring": "\n Sets the size of the end annotation arrow head, relative to\n `arrowwidth`. A value of 1 (default) gives a head about 3x as\n wide as the line.\n\n The 'arrowsize' property is a number and may be specified as:\n - An int or float in the interval [0.3, inf]\n\n Returns\n -------\n int|float\n ",
"language": "en",
"n_whitespaces": 117,
"n_words": 51,
"vocab_size": 45
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _annotation.py | 230,881 | 2 | 11 | arrowsize | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 62,554 | 7 |
|
39 | 80 | def get_basic_details(args, item, overwrite_warehouse=True):
if not item:
item = frappe.get_doc("Item", args.get("item_code"))
if item.variant_of:
item.update_template_tables()
item_defaults = get_item_defaults(item.name, args.company)
item_group_defaults = get_item_group_defaults(item.name, args.company)
brand_defaults = get_brand_defaults(item.name, args.company)
defaults = frappe._dict(
{
"item_defaults": item_defaults,
"item_group_defaults": item_group_defaults,
"brand_defaults": brand_defaults,
}
)
warehouse = get_item_warehouse(item, args, overwrite_warehouse, defaults)
if args.get("doctype") == "Material Request" and not args.get("material_request_type"):
args["material_request_type"] = frappe.db.get_value(
"Material Request", args.get("name"), "material_request_type", cache=True
)
expense_account = None
if args.get("doctype") == "Purchase Invoice" and item.is_fixed_asset:
from erpnext.assets.doctype.asset_category.asset_category import get_asset_category_account
expense_account = get_asset_category_account(
fieldname="fixed_asset_account", item=args.item_code, company=args.company
)
# Set the UOM to the Default Sales UOM or Default Purchase UOM if configured in the Item Master
if not args.get("uom"):
if args.get("doctype") in sales_doctypes:
args.uom = item.sales_uom if item.sales_uom else item.stock_uom
elif (args.get("doctype") in ["Purchase Order", "Purchase Receipt", "Purchase Invoice"]) or (
args.get("doctype") == "Material Request" and args.get("material_request_type") == "Purchase"
):
args.uom = item.purchase_uom if item.purchase_uom else item.stock_uom
else:
args.uom = item.stock_uom
if args.get("batch_no") and item.name != frappe.get_cached_value(
"Batch", args.get("batch_no"), "item"
):
args["batch_no"] = ""
out = frappe._dict(
{
"item_code": item.name,
"item_name": item.item_name,
"description": cstr(item.description).strip(),
"image": cstr(item.image).strip(),
"warehouse": warehouse,
"income_account": get_default_income_account(
args, item_defaults, item_group_defaults, brand_defaults
),
"expense_account": expense_account
or get_default_expense_account(args, item_defaults, item_group_defaults, brand_defaults),
"discount_account": get_default_discount_account(args, item_defaults),
"cost_center": get_default_cost_center(
args, item_defaults, item_group_defaults, brand_defaults
),
"has_serial_no": item.has_serial_no,
"has_batch_no": item.has_batch_no,
"batch_no": args.get("batch_no"),
"uom": args.uom,
"min_order_qty": flt(item.min_order_qty) if args.doctype == "Material Request" else "",
"qty": flt(args.qty) or 1.0,
"stock_qty": flt(args.qty) or 1.0,
"price_list_rate": 0.0,
"base_price_list_rate": 0.0,
"rate": 0.0,
"base_rate": 0.0,
"amount": 0.0,
"base_amount": 0.0,
"net_rate": 0.0,
"net_amount": 0.0,
"discount_percentage": 0.0,
"discount_amount": 0.0,
"supplier": get_default_supplier(args, item_defaults, item_group_defaults, brand_defaults),
"update_stock": args.get("update_stock")
if args.get("doctype") in ["Sales Invoice", "Purchase Invoice"]
else 0,
"delivered_by_supplier": item.delivered_by_supplier
if args.get("doctype") in ["Sales Order", "Sales Invoice"]
else 0,
"is_fixed_asset": item.is_fixed_asset,
"last_purchase_rate": item.last_purchase_rate
if args.get("doctype") in ["Purchase Order"]
else 0,
"transaction_date": args.get("transaction_date"),
"against_blanket_order": args.get("against_blanket_order"),
"bom_no": item.get("default_bom"),
"weight_per_unit": args.get("weight_per_unit") or item.get("weight_per_unit"),
"weight_uom": args.get("weight_uom") or item.get("weight_uom"),
"grant_commission": item.get("grant_commission"),
}
)
if item.get("enable_deferred_revenue") or item.get("enable_deferred_expense"):
out.update(calculate_service_end_date(args, item))
# calculate conversion factor
if item.stock_uom == args.uom:
out.conversion_factor = 1.0
else:
out.conversion_factor = args.conversion_factor or get_conversion_factor(item.name, args.uom).get(
"conversion_factor"
)
args.conversion_factor = out.conversion_factor
out.stock_qty = out.qty * out.conversion_factor
args.stock_qty = out.stock_qty
# calculate last purchase rate
if args.get("doctype") in purchase_doctypes:
from erpnext.buying.doctype.purchase_order.purchase_order import item_last_purchase_rate
out.last_purchase_rate = item_last_purchase_rate(
args.name, args.conversion_rate, item.name, out.conversion_factor
)
# if default specified in item is for another company, fetch from company
for d in [
["Account", "income_account", "default_income_account"],
["Account", "expense_account", "default_expense_account"],
["Cost Center", "cost_center", "cost_center"],
["Warehouse", "warehouse", ""],
]:
if not out[d[1]]:
out[d[1]] = frappe.get_cached_value("Company", args.company, d[2]) if d[2] else None
for fieldname in ("item_name", "item_group", "brand", "stock_uom"):
out[fieldname] = item.get(fieldname)
if args.get("manufacturer"):
part_no = get_item_manufacturer_part_no(args.get("item_code"), args.get("manufacturer"))
if part_no:
out["manufacturer_part_no"] = part_no
else:
out["manufacturer_part_no"] = None
out["manufacturer"] = None
else:
data = frappe.get_value(
"Item", item.name, ["default_item_manufacturer", "default_manufacturer_part_no"], as_dict=1
)
if data:
out.update(
{
"manufacturer": data.default_item_manufacturer,
"manufacturer_part_no": data.default_manufacturer_part_no,
}
)
child_doctype = args.doctype + " Item"
meta = frappe.get_meta(child_doctype)
if meta.get_field("barcode"):
update_barcode_value(out)
if out.get("weight_per_unit"):
out["total_weight"] = out.weight_per_unit * out.stock_qty
return out
| erpnext/stock/get_item_details.py | 1,809 | erpnext | {
"docstring": "\n\t:param args: {\n\t \"item_code\": \"\",\n\t \"warehouse\": None,\n\t \"customer\": \"\",\n\t \"conversion_rate\": 1.0,\n\t \"selling_price_list\": None,\n\t \"price_list_currency\": None,\n\t \"price_list_uom_dependant\": None,\n\t \"plc_conversion_rate\": 1.0,\n\t \"doctype\": \"\",\n\t \"name\": \"\",\n\t \"supplier\": None,\n\t \"transaction_date\": None,\n\t \"conversion_rate\": 1.0,\n\t \"buying_price_list\": None,\n\t \"is_subcontracted\": \"Yes\" / \"No\",\n\t \"ignore_pricing_rule\": 0/1\n\t \"project\": \"\",\n\t barcode: \"\",\n\t serial_no: \"\",\n\t currency: \"\",\n\t update_stock: \"\",\n\t price_list: \"\",\n\t company: \"\",\n\t order_type: \"\",\n\t is_pos: \"\",\n\t project: \"\",\n\t qty: \"\",\n\t stock_qty: \"\",\n\t conversion_factor: \"\",\n\t against_blanket_order: 0/1\n\t }\n\t:param item: `item_code` of Item object\n\t:return: frappe._dict\n\t",
"language": "en",
"n_whitespaces": 528,
"n_words": 74,
"vocab_size": 47
} | 468 | Python | 274 | 494bd9ef78313436f0424b918f200dab8fc7c20b | get_item_details.py | 67,797 | 142 | 1,097 | get_basic_details | https://github.com/frappe/erpnext.git | style: format code with black | 322 | 0 | 14,620 | 15 |
|
1 | 4 | def outer_size(self) -> Size:
return self._size
| src/textual/widget.py | 22 | textual | {
"docstring": "The size of the widget (including padding and border).",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | 6 | Python | 6 | 0ba3ffb1718bdd01a5136fd1bc30e8ed58e6a47c | widget.py | 184,066 | 3 | 12 | outer_size | https://github.com/Textualize/textual.git | size properties | 20 | 0 | 44,455 | 6 |
|
2 | 54 | def test_retina_sepbn_head_loss(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
sampler=dict(type='PseudoSampler'
), # Focal loss should use PseudoSampler
allowed_border=-1,
pos_weight=-1,
debug=False))
anchor_head = RetinaSepBNHead(
num_classes=4, num_ins=5, in_channels=1, train_cfg=cfg)
# Anchor head expects a multiple levels of features per image
feats = []
for i in range(len(anchor_head.prior_generator.strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
cls_scores, bbox_preds = anchor_head.forward(tuple(feats))
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
| tests/test_models/test_dense_heads/test_retina_sepBN_head.py | 592 | mmdetection | {
"docstring": "Tests RetinaSepBN head loss when truth is empty and non-empty.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 216 | Python | 136 | 665b55f6768dd0c2c32f8e73cd3069eddc1677b0 | test_retina_sepBN_head.py | 245,233 | 51 | 364 | test_retina_sepbn_head_loss | https://github.com/open-mmlab/mmdetection.git | [Refactor] Refactor NAS-FPN and anchor-free | 929 | 0 | 70,717 | 16 |
|
2 | 10 | def transpose(self) -> Tuple[int, int]:
if self.transpose_method is not None:
# Safety: `transpose` takes an int rather than e.g. an IntEnum.
# self.transpose_method is set above to be a value in
# EXIF_TRANSPOSE_MAPPINGS, and that only contains correct values.
with self.image:
self.image = self.image.transpose(self.transpose_method) # type: ignore[arg-type]
self.width, self.height = self.image.size
self.transpose_method = None
# We don't need EXIF any more
self.image.info["exif"] = None
return self.image.size
| synapse/rest/media/v1/thumbnailer.py | 125 | synapse | {
"docstring": "Transpose the image using its EXIF Orientation tag\n\n Returns:\n A tuple containing the new image size in pixels as (width, height).\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 21,
"vocab_size": 19
} | 66 | Python | 53 | 5949ab86f8db0ef3dac2063e42210030f17786fb | thumbnailer.py | 248,471 | 13 | 74 | transpose | https://github.com/matrix-org/synapse.git | Fix potential thumbnail memory leaks. (#12932) | 191 | 0 | 72,299 | 13 |
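For context, a hedged alternative: this is not what the snippet above does internally, but Pillow exposes the same EXIF-orientation handling directly. The file path is a placeholder:

    from PIL import Image, ImageOps

    with Image.open("photo.jpg") as im:          # "photo.jpg" is a placeholder path
        im = ImageOps.exif_transpose(im)         # applies the EXIF Orientation tag, if any
        print(im.size)                           # (width, height) after transposition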
|
1 | 12 | def test_get_command_line(self):
mock_context = MagicMock()
mock_context.parent.command_path = "streamlit"
with patch("click.get_current_context", return_value=mock_context):
with patch("click.get_os_args", return_value=["os_arg1", "os_arg2"]):
result = cli._get_command_line_as_string()
self.assertEqual("streamlit os_arg1 os_arg2", result)
| lib/tests/streamlit/cli_test.py | 108 | streamlit | {
"docstring": "Test that _get_command_line_as_string correctly concatenates values\n from click.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 8,
"vocab_size": 8
} | 22 | Python | 19 | 5f39da13c0c551533a6d313dd0e2f6f9f0f9a5ac | cli_test.py | 118,707 | 7 | 57 | test_get_command_line | https://github.com/streamlit/streamlit.git | Get rid of preheated script runs (#4259)
* Get rid of preheated script runs
When a streamlit server is first started, we currently trigger a run of the
script defining an app and save the resulting deltas so that the very first
page load of an app can be more or less instantaneous.
This optimization is currently not too helpful given how streamlit is used in
practice today (it was originally added to make long-running jobs started
via `streamlit run` feel fast, but people generally don't use streamlit to kick
off long-running computations). Furthermore, we'll soon be adding some features
that won't play nicely with the optimization. In particular, the upcoming
`st.user` feature interacts with script preheats weirdly as the information
required to populate `st.user` doesn't exist in a preheat run.
Given complications that will arise in the near-future as well as the fact that
the optimization itself is a vestigial one, it seems like it's time to remove
preheated script runs.
* Rework cli_smoke_tests to no longer rely on script preheats
* Try making tests less timing sensitive
* Revert an unintended change in an e2e test script
* Replace `%` usage with an f-string | 91 | 0 | 26,370 | 14 |
|
18 | 42 | def get_positions_from_labels(self, row_loc, col_loc):
from modin.pandas.indexing import (
is_boolean_array,
is_list_like,
is_range_like,
boolean_mask_to_numeric,
)
lookups = []
for axis, axis_loc in enumerate((row_loc, col_loc)):
if is_scalar(axis_loc):
axis_loc = np.array([axis_loc])
if isinstance(axis_loc, slice) or is_range_like(axis_loc):
if isinstance(axis_loc, slice) and axis_loc == slice(None):
axis_lookup = axis_loc
else:
axis_labels = self.get_axis(axis)
# `slice_indexer` returns a fully-defined numeric slice for a non-fully-defined labels-based slice
axis_lookup = axis_labels.slice_indexer(
axis_loc.start, axis_loc.stop, axis_loc.step
)
# Converting negative indices to their actual positions:
axis_lookup = pandas.RangeIndex(
start=(
axis_lookup.start
if axis_lookup.start >= 0
else axis_lookup.start + len(axis_labels)
),
stop=(
axis_lookup.stop
if axis_lookup.stop >= 0
else axis_lookup.stop + len(axis_labels)
),
step=axis_lookup.step,
)
elif self.has_multiindex(axis):
# `Index.get_locs` raises an IndexError by itself if missing labels were provided,
# we don't have to do missing-check for the received `axis_lookup`.
if isinstance(axis_loc, pandas.MultiIndex):
axis_lookup = self.get_axis(axis).get_indexer_for(axis_loc)
else:
axis_lookup = self.get_axis(axis).get_locs(axis_loc)
elif is_boolean_array(axis_loc):
axis_lookup = boolean_mask_to_numeric(axis_loc)
else:
axis_labels = self.get_axis(axis)
if is_list_like(axis_loc) and not isinstance(
axis_loc, (np.ndarray, pandas.Index)
):
# `Index.get_indexer_for` works much faster with numpy arrays than with python lists,
# so although we lose some time here on converting to numpy, `Index.get_indexer_for`
# speedup covers the loss that we gain here.
axis_loc = np.array(axis_loc, dtype=axis_labels.dtype)
axis_lookup = axis_labels.get_indexer_for(axis_loc)
# `Index.get_indexer_for` sets -1 value for missing labels, we have to verify whether
# there are any -1 in the received indexer to raise a KeyError here.
missing_mask = axis_lookup == -1
if missing_mask.any():
missing_labels = (
axis_loc[missing_mask]
if is_list_like(axis_loc)
# If `axis_loc` is not a list-like then we can't select certain
# labels that are missing and so printing the whole indexer
else axis_loc
)
raise KeyError(missing_labels)
if isinstance(axis_lookup, pandas.Index) and not is_range_like(axis_lookup):
axis_lookup = axis_lookup.values
lookups.append(axis_lookup)
return lookups
| modin/core/storage_formats/base/query_compiler.py | 557 | modin | {
"docstring": "\n Compute index and column positions from their respective locators.\n\n Inputs to this method are arguments the the pandas user could pass to loc.\n This function will compute the corresponding index and column positions\n that the user could equivalently pass to iloc.\n\n Parameters\n ----------\n row_loc : scalar, slice, list, array or tuple\n Row locator.\n col_loc : scalar, slice, list, array or tuple\n Columns locator.\n\n Returns\n -------\n row_lookup : slice(None) if full axis grab, pandas.RangeIndex if repetition is detected, numpy.ndarray otherwise\n List of index labels.\n col_lookup : slice(None) if full axis grab, pandas.RangeIndex if repetition is detected, numpy.ndarray otherwise\n List of columns labels.\n\n Notes\n -----\n Usage of `slice(None)` as a resulting lookup is a hack to pass information about\n full-axis grab without computing actual indices that triggers lazy computations.\n Ideally, this API should get rid of using slices as indexers and either use a\n common ``Indexer`` object or range and ``np.ndarray`` only.\n ",
"language": "en",
"n_whitespaces": 328,
"n_words": 150,
"vocab_size": 98
} | 274 | Python | 160 | dc7abf04518230d102bb5272c5ebf9fe20092338 | query_compiler.py | 155,388 | 58 | 353 | get_positions_from_labels | https://github.com/modin-project/modin.git | REFACTOR-#5202: Pass loc arguments to query compiler. (#5305)
Some Modin implementations may prefer to take rows and columns by label rather than by position.
Signed-off-by: mvashishtha <mahesh@ponder.io> | 1,449 | 0 | 36,372 | 21 |
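The core label-to-position step above relies on `pandas.Index.get_indexer_for`, which marks missing labels with -1; a small standalone sketch:

    import numpy as np
    import pandas as pd

    labels = pd.Index(["a", "b", "c", "d"])
    lookup = labels.get_indexer_for(np.array(["b", "d"]))    # array([1, 3])
    missing = labels.get_indexer_for(np.array(["b", "z"]))   # array([ 1, -1]) -> "z" is missing
    (missing == -1).any()                                    # True, so a KeyError would be raised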
|
1 | 5 | def test_get_stored_cert_serials(certutil, populate_store):
serials = certutil.get_stored_cert_serials("TrustedPublisher")
assert "5be1cc5d51b78dbd49a0b7c00d44806d" in serials
| tests/pytests/functional/modules/test_win_certutil.py | 38 | salt | {
"docstring": "\n Test get_stored_cert_serials with a certificate we put in\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 8
} | 10 | Python | 9 | a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857 | test_win_certutil.py | 215,907 | 3 | 20 | test_get_stored_cert_serials | https://github.com/saltstack/salt.git | Add tests, fix state module | 19 | 0 | 54,240 | 9 |
|
15 | 13 | def _generate_sparse6_bytes(G, nodes, header):
n = len(G)
if n >= 2**36:
raise ValueError(
"sparse6 is only defined if number of nodes is less " "than 2 ** 36"
)
if header:
yield b">>sparse6<<"
yield b":"
for d in n_to_data(n):
yield str.encode(chr(d + 63))
k = 1
while 1 << k < n:
k += 1
| networkx/readwrite/sparse6.py | 122 | networkx | {
"docstring": "Yield bytes in the sparse6 encoding of a graph.\n\n `G` is an undirected simple graph. `nodes` is the list of nodes for\n which the node-induced subgraph will be encoded; if `nodes` is the\n list of all nodes in the graph, the entire graph will be\n encoded. `header` is a Boolean that specifies whether to generate\n the header ``b'>>sparse6<<'`` before the remaining data.\n\n This function generates `bytes` objects in the following order:\n\n 1. the header (if requested),\n 2. the encoding of the number of nodes,\n 3. each character, one-at-a-time, in the encoding of the requested\n node-induced subgraph,\n 4. a newline character.\n\n This function raises :exc:`ValueError` if the graph is too large for\n the graph6 format (that is, greater than ``2 ** 36`` nodes).\n\n ",
"language": "en",
"n_whitespaces": 167,
"n_words": 122,
"vocab_size": 78
} | 55 | Python | 44 | f6755ffa00211b523c6c0bec5398bc6c3c43c8b1 | sparse6.py | 176,499 | 49 | 393 | _generate_sparse6_bytes | https://github.com/networkx/networkx.git | Update black (#5438)
* CI: sync up black dev requirements version with precommit
* Run black
Co-authored-by: Jarrod Millman <jarrod.millman@gmail.com> | 125 | 0 | 41,938 | 13 |
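This private generator backs networkx's public sparse6 writers; a hedged usage sketch (the public function name is my recollection of the same module's API):

    import networkx as nx

    G = nx.path_graph(4)
    data = nx.to_sparse6_bytes(G)   # b'>>sparse6<<:...' - header, ':', encoded node count, edges, newline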
|
3 | 29 | def load_linnerud(*, return_X_y=False, as_frame=False):
data_filename = "linnerud_exercise.csv"
target_filename = "linnerud_physiological.csv"
# Read header and data
with _open_text(DATA_MODULE, data_filename) as f:
header_exercise = f.readline().split()
f.seek(0) # reset file obj
data_exercise = np.loadtxt(f, skiprows=1)
with _open_text(DATA_MODULE, target_filename) as f:
header_physiological = f.readline().split()
f.seek(0) # reset file obj
data_physiological = np.loadtxt(f, skiprows=1)
fdescr = load_descr("linnerud.rst")
frame = None
if as_frame:
(frame, data_exercise, data_physiological) = _convert_data_dataframe(
"load_linnerud",
data_exercise,
data_physiological,
header_exercise,
header_physiological,
)
if return_X_y:
return data_exercise, data_physiological
return Bunch(
data=data_exercise,
feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
frame=frame,
DESCR=fdescr,
data_filename=data_filename,
target_filename=target_filename,
data_module=DATA_MODULE,
)
| sklearn/datasets/_base.py | 284 | scikit-learn | {
"docstring": "Load and return the physical exercise Linnerud dataset.\n\n This dataset is suitable for multi-output regression tasks.\n\n ============== ============================\n Samples total 20\n Dimensionality 3 (for both data and target)\n Features integer\n Targets integer\n ============== ============================\n\n Read more in the :ref:`User Guide <linnerrud_dataset>`.\n\n Parameters\n ----------\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object.\n See below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.18\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric, string or categorical). The target is\n a pandas DataFrame or Series depending on the number of target columns.\n If `return_X_y` is True, then (`data`, `target`) will be pandas\n DataFrames or Series as described below.\n\n .. versionadded:: 0.23\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : {ndarray, dataframe} of shape (20, 3)\n The data matrix. If `as_frame=True`, `data` will be a pandas\n DataFrame.\n target: {ndarray, dataframe} of shape (20, 3)\n The regression targets. If `as_frame=True`, `target` will be\n a pandas DataFrame.\n feature_names: list\n The names of the dataset columns.\n target_names: list\n The names of the target columns.\n frame: DataFrame of shape (20, 6)\n Only present when `as_frame=True`. DataFrame with `data` and\n `target`.\n\n .. versionadded:: 0.23\n DESCR: str\n The full description of the dataset.\n data_filename: str\n The path to the location of the data.\n target_filename: str\n The path to the location of the target.\n\n .. versionadded:: 0.20\n\n (data, target) : tuple if ``return_X_y`` is True\n Returns a tuple of two ndarrays or dataframe of shape\n `(20, 3)`. Each row represents one sample and each column represents the\n features in `X` and a target in `y` of a given sample.\n\n .. versionadded:: 0.18\n ",
"language": "en",
"n_whitespaces": 658,
"n_words": 284,
"vocab_size": 153
} | 85 | Python | 58 | f2c78fe8c5cf2576f8351238c55dace23fb1d691 | _base.py | 261,741 | 34 | 178 | load_linnerud | https://github.com/scikit-learn/scikit-learn.git | MAINT handle deprecations from `importlib.resources` (#25157)
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> | 304 | 0 | 76,971 | 12 |
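A quick usage example consistent with the docstring above (20 samples, 3 exercise features, 3 physiological targets):

    from sklearn.datasets import load_linnerud

    X, y = load_linnerud(return_X_y=True)
    X.shape, y.shape        # ((20, 3), (20, 3))

    bunch = load_linnerud(as_frame=True)
    bunch.frame.shape       # (20, 6): exercise columns + physiological columns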
|
19 | 61 | def train_epoch(self, iterator, info=None, num_steps=None, epoch_idx=0):
 | python/ray/util/sgd/torch/training_operator.py | 177 | """Runs one standard training pass over the training dataloader.
        By default, this method will iterate over the given iterator and
        call ``self.train_batch`` over each batch. If ``scheduler_step_freq``
        is set, this default method will also step the scheduler accordingly.
        You do not need to call ``train_batch`` in this method if you plan
        to implement a custom optimization/training routine here.
        You may find ``ray.util.sgd.utils.AverageMeterCollection`` useful
        when overriding this method. See example below:
        .. code-block:: python | ray | {
"docstring": "Runs one standard training pass over the training dataloader.\n\n By default, this method will iterate over the given iterator and\n call ``self.train_batch`` over each batch. If ``scheduler_step_freq``\n is set, this default method will also step the scheduler accordingly.\n\n You do not need to call ``train_batch`` in this method if you plan\n to implement a custom optimization/training routine here.\n\n You may find ``ray.util.sgd.utils.AverageMeterCollection`` useful\n when overriding this method. See example below:\n\n .. code-block:: python\n",
"language": "en",
"n_whitespaces": 128,
"n_words": 73,
"vocab_size": 59
} | 6 | Python | 6 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | training_operator.py | 133,361 | 46 | 318 | train_epoch | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 13 | 11 | 29,990 | 12 |
3 | 13 | def solve_linear_system_LU(matrix, syms):
if matrix.rows != matrix.cols - 1:
raise ValueError("Rows should be equal to columns - 1")
A = matrix[:matrix.rows, :matrix.rows]
b = matrix[:, matrix.cols - 1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions[syms[i]] = soln[i, 0]
return solutions
| sympy/solvers/solvers.py | 140 | sympy | {
"docstring": "\n Solves the augmented matrix system using ``LUsolve`` and returns a\n dictionary in which solutions are keyed to the symbols of *syms* as ordered.\n\n Explanation\n ===========\n\n The matrix must be invertible.\n\n Examples\n ========\n\n >>> from sympy import Matrix, solve_linear_system_LU\n >>> from sympy.abc import x, y, z\n\n >>> solve_linear_system_LU(Matrix([\n ... [1, 2, 0, 1],\n ... [3, 2, 2, 1],\n ... [2, 0, 0, 1]]), [x, y, z])\n {x: 1/2, y: 1/4, z: -1/2}\n\n See Also\n ========\n\n LUsolve\n\n ",
"language": "en",
"n_whitespaces": 130,
"n_words": 75,
"vocab_size": 60
} | 44 | Python | 36 | 59d22b6bb7287613d598611027f640d068ca5748 | solvers.py | 196,428 | 10 | 89 | solve_linear_system_LU | https://github.com/sympy/sympy.git | Moved imports to higher level | 82 | 0 | 47,928 | 10 |
|
1 | 2 | def args2(self):
return self["args2"]
| packages/python/plotly/plotly/graph_objs/layout/updatemenu/_button.py | 22 | plotly.py | {
"docstring": "\n Sets a 2nd set of `args`, these arguments values are passed to\n the Plotly method set in `method` when clicking this button\n while in the active state. Use this to create toggle buttons.\n\n The 'args2' property is an info array that may be specified as:\n\n * a list or tuple of up to 3 elements where:\n (0) The 'args2[0]' property accepts values of any type\n (1) The 'args2[1]' property accepts values of any type\n (2) The 'args2[2]' property accepts values of any type\n\n Returns\n -------\n list\n ",
"language": "en",
"n_whitespaces": 203,
"n_words": 86,
"vocab_size": 59
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _button.py | 232,763 | 2 | 11 | args2 | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 64,207 | 7 |
|
1 | 16 | def test_suppresses_second_cancellation(self):
deferred: "Deferred[str]" = Deferred()
wrapper_deferred = delay_cancellation(deferred)
# Cancel the new `Deferred`, twice.
wrapper_deferred.cancel()
wrapper_deferred.cancel()
self.assertNoResult(wrapper_deferred)
self.assertFalse(
deferred.called, "Original `Deferred` was unexpectedly cancelled"
)
# Now make the original `Deferred` fail.
# The `Failure` must be consumed, otherwise unwanted tracebacks will be printed
# in logs.
deferred.errback(ValueError("abc"))
self.assertIsNone(deferred.result, "`Failure` was not consumed")
# Now that the original `Deferred` has failed, we should get a `CancelledError`.
self.failureResultOf(wrapper_deferred, CancelledError)
| tests/util/test_async_helpers.py | 133 | synapse | {
"docstring": "Test that a second cancellation is suppressed.\n\n Identical to `test_cancellation` except the new `Deferred` is cancelled twice.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 17,
"vocab_size": 16
} | 69 | Python | 55 | 90b2327066d2343faa86c464a182b6f3c4422ecd | test_async_helpers.py | 247,579 | 12 | 72 | test_suppresses_second_cancellation | https://github.com/matrix-org/synapse.git | Add `delay_cancellation` utility function (#12180)
`delay_cancellation` behaves like `stop_cancellation`, except it
delays `CancelledError`s until the original `Deferred` resolves.
This is handy for unifying cleanup paths and ensuring that uncancelled
coroutines don't use finished logcontexts.
Signed-off-by: Sean Quah <seanq@element.io> | 192 | 0 | 71,755 | 10 |
|
3 | 18 | def do_extends(parser, token):
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
bits[1] = construct_relative_path(parser.origin.template_name, bits[1])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError(
"'%s' cannot appear more than once in the same template" % bits[0]
)
return ExtendsNode(nodelist, parent_name)
@register.tag("include") | django/template/loader_tags.py | 166 | @register.tag("include") | django | {
"docstring": "\n Signal that this template extends a parent template.\n\n This tag may be used in two ways: ``{% extends \"base\" %}`` (with quotes)\n uses the literal value \"base\" as the name of the parent template to extend,\n or ``{% extends variable %}`` uses the value of ``variable`` as either the\n name of the parent template to extend (if it evaluates to a string) or as\n the parent template itself (if it evaluates to a Template object).\n ",
"language": "en",
"n_whitespaces": 97,
"n_words": 75,
"vocab_size": 42
} | 48 | Python | 42 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | loader_tags.py | 206,284 | 12 | 94 | do_extends | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 103 | 1 | 51,466 | 11 |
2 | 20 | def get_temp_export_dir(timestamped_export_dir):
(dirname, basename) = os.path.split(timestamped_export_dir)
if isinstance(basename, bytes):
str_name = basename.decode("utf-8")
else:
str_name = str(basename)
temp_export_dir = tf.io.gfile.join(
tf.compat.as_bytes(dirname),
tf.compat.as_bytes("temp-{}".format(str_name)),
)
return temp_export_dir
| keras/saving/utils_v1/export_utils.py | 132 | keras | {
"docstring": "Builds a directory name based on the argument but starting with 'temp-'.\n\n This relies on the fact that TensorFlow Serving ignores subdirectories of\n the base directory that can't be parsed as integers.\n\n Args:\n timestamped_export_dir: the name of the eventual export directory, e.g.\n /foo/bar/<timestamp>\n\n Returns:\n A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>.\n ",
"language": "en",
"n_whitespaces": 84,
"n_words": 52,
"vocab_size": 40
} | 24 | Python | 19 | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | export_utils.py | 276,298 | 11 | 80 | get_temp_export_dir | https://github.com/keras-team/keras.git | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 73 | 0 | 81,620 | 13 |
|
1 | 20 | def test_callbacks(self) -> None:
cache: DeferredCache[str, int] = DeferredCache("test")
callbacks = set()
# start with an entry, with a callback
cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill"))
# now replace that entry with a pending result
origin_d: "defer.Deferred[int]" = defer.Deferred()
set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set"))
# ... and also make a get request
get_d = cache.get("k1", callback=lambda: callbacks.add("get"))
# we don't expect the invalidation callback for the original value to have
# been called yet, even though get() will now return a different result.
# I'm not sure if that is by design or not.
self.assertEqual(callbacks, set())
# now fire off all the deferreds
origin_d.callback(20)
self.assertEqual(self.successResultOf(set_d), 20)
self.assertEqual(self.successResultOf(get_d), 20)
# now the original invalidation callback should have been called, but none of
# the others
self.assertEqual(callbacks, {"prefill"})
callbacks.clear()
# another update should invalidate both the previous results
cache.prefill("k1", 30)
self.assertEqual(callbacks, {"set", "get"})
| tests/util/caches/test_deferred_cache.py | 300 | synapse | {
"docstring": "Invalidation callbacks are called at the right time",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | 140 | Python | 100 | 4ae967cf6308e80b03da749f0cbaed36988e235e | test_deferred_cache.py | 249,857 | 16 | 171 | test_callbacks | https://github.com/matrix-org/synapse.git | Add missing type hints to test.util.caches (#14529) | 315 | 0 | 73,173 | 13 |
|
3 | 9 | def components(self) -> Dict[str, BaseComponent]:
all_components = self._find_all_components()
return {component.name: component for component in all_components if component.name is not None}
| haystack/pipelines/base.py | 61 | haystack | {
"docstring": "\n Returns all components used by this pipeline.\n Note that this also includes such components that are being utilized by other components only and are not being used as a pipeline node directly.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 32,
"vocab_size": 24
} | 20 | Python | 18 | f6e3a639063887f9f5b27f574a04c7fe602b3185 | base.py | 257,346 | 7 | 39 | components | https://github.com/deepset-ai/haystack.git | Prevent losing names of utilized components when loaded from config (#2525)
* Prevent losing names of utilized components when loaded from config
* Update Documentation & Code Style
* update test
* fix failing tests
* Update Documentation & Code Style
* fix even more tests
* Update Documentation & Code Style
* incorporate review feedback
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> | 41 | 0 | 75,070 | 9 |
|
3 | 12 | def set_xcomargs_dependencies(self) -> None:
from airflow.models.xcom_arg import XComArg
for field in self.template_fields:
if hasattr(self, field):
arg = getattr(self, field)
XComArg.apply_upstream_relationship(self, arg)
| airflow/models/baseoperator.py | 73 | airflow | {
"docstring": "\n Resolves upstream dependencies of a task. In this way passing an ``XComArg``\n as value for a template field will result in creating upstream relation between\n two tasks.\n\n **Example**: ::\n\n with DAG(...):\n generate_content = GenerateContentOperator(task_id=\"generate_content\")\n send_email = EmailOperator(..., html_content=generate_content.output)\n\n # This is equivalent to\n with DAG(...):\n generate_content = GenerateContentOperator(task_id=\"generate_content\")\n send_email = EmailOperator(\n ..., html_content=\"{{ task_instance.xcom_pull('generate_content') }}\"\n )\n generate_content >> send_email\n\n ",
"language": "en",
"n_whitespaces": 237,
"n_words": 59,
"vocab_size": 47
} | 21 | Python | 21 | 10f5db863e387c0fd7369cf521d624b6df77a65d | baseoperator.py | 44,076 | 26 | 47 | set_xcomargs_dependencies | https://github.com/apache/airflow.git | Set dependencies in MappedOperator via XComArgs (#20931)
Co-authored-by: Kaxil Naik <kaxilnaik@gmail.com>
Co-authored-by: Ephraim Anierobi <splendidzigy24@gmail.com> | 83 | 0 | 8,139 | 12 |
|
1 | 13 | def _hyab(self, y_true, y_pred):
delta = y_true - y_pred
root = K.sqrt(K.clip(K.pow(delta[..., 0:1], 2), self._epsilon, None))
delta_norm = frobenius_norm(delta[..., 1:3])
return root + delta_norm
 | lib/model/loss/perceptual_loss_plaid.py | 97 | faceswap | {
    "docstring": " Compute the HyAB distance between true and predicted images.\n\n        Parameters\n        ----------\n        y_true: :class:`plaidml.tile.Value`\n            The ground truth batch of images in standard or Hunt-adjusted L*A*B* color space\n        y_pred: :class:`plaidml.tile.Value`\n            The predicted batch of images in standard or Hunt-adjusted L*A*B* color space\n\n        Returns\n        -------\n        :class:`plaidml.tile.Value`\n            image tensor containing the per-pixel HyAB distances between true and predicted images\n        ",
"language": "en",
"n_whitespaces": 146,
"n_words": 56,
"vocab_size": 34
} | 24 | Python | 20 | 582c2ce40c11ef235dd3f9100f70e1e2832f8dd3 | perceptual_loss_plaid.py | 101,059 | 5 | 65 | _hyab | https://github.com/deepfakes/faceswap.git | Add Flip Loss Function
- Add Flip for AMD and TF
- Split Perceptual Loss functions to own modules
- Fix allowed input shape for models
- Allow GUI tooltip to display at higher width | 59 | 0 | 20,496 | 14 |
|
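A note on the `_hyab` record above: the returned map is simply |ΔL*| plus the Euclidean norm of (Δa*, Δb*) per pixel. Below is a minimal NumPy sketch of that formula for single images, not the repository's batched PlaidML implementation; the function name and array shapes are illustrative assumptions.

```python
import numpy as np

def hyab_distance(lab_true: np.ndarray, lab_pred: np.ndarray) -> np.ndarray:
    """Per-pixel HyAB distance for (H, W, 3) L*a*b* arrays: |dL| + ||(da, db)||."""
    delta = lab_true.astype(np.float64) - lab_pred.astype(np.float64)
    lightness_term = np.abs(delta[..., 0])                  # |delta L*|
    chroma_term = np.linalg.norm(delta[..., 1:3], axis=-1)  # norm of (delta a*, delta b*)
    return lightness_term + chroma_term

rng = np.random.default_rng(0)
print(hyab_distance(rng.random((4, 4, 3)), rng.random((4, 4, 3))).shape)  # (4, 4)
```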
1 | 25 | def test_valid_full_refresh_read_no_slices(mocker):
stream_output = [{"k1": "v1"}, {"k2": "v2"}]
s1 = MockStream([({"sync_mode": SyncMode.full_refresh}, stream_output)], name="s1")
s2 = MockStream([({"sync_mode": SyncMode.full_refresh}, stream_output)], name="s2")
mocker.patch.object(MockStream, "get_json_schema", return_value={})
src = MockSource(streams=[s1, s2])
catalog = ConfiguredAirbyteCatalog(
streams=[_configured_stream(s1, SyncMode.full_refresh), _configured_stream(s2, SyncMode.full_refresh)]
)
expected = _as_records("s1", stream_output) + _as_records("s2", stream_output)
messages = _fix_emitted_at(list(src.read(logger, {}, catalog)))
assert expected == messages
| airbyte-cdk/python/unit_tests/sources/test_abstract_source.py | 256 | airbyte | {
"docstring": "Tests that running a full refresh sync on streams which don't specify slices produces the expected AirbyteMessages",
"language": "en",
"n_whitespaces": 16,
"n_words": 17,
"vocab_size": 17
} | 51 | Python | 39 | f83eca58eaf2129d21b5796a301732ab22675130 | test_abstract_source.py | 3,357 | 12 | 156 | test_valid_full_refresh_read_no_slices | https://github.com/airbytehq/airbyte.git | CDK: Fix typing errors (#9037)
* fix typing, drop AirbyteLogger
* format
* bump the version
* use logger instead of fixture logger
Co-authored-by: Eugene Kulak <kulak.eugene@gmail.com>
Co-authored-by: auganbay <auganenu@gmail.com> | 91 | 0 | 459 | 13 |
|
6 | 18 | def depth_first_search(self):
if self.isSolvable() == False:
return (None, None)
closed = list()
q = list()
q.append(Node(state=self.state, depth=0))
while q:
node = q.pop()
if node.isGoalState():
return (node.moves, len(closed))
if node.state not in closed:
closed.append(node.state)
for action in node.getAvailableActions():
q.append(node.getResultFromAction(action))
return (None, None)
| Eight_Puzzle_Solver/eight_puzzle.py | 190 | Python | {
"docstring": "\n Parameters: State\n Returns: List of Moves to solve the state, otherwise None if unsolvable\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 14,
"vocab_size": 14
} | 41 | Python | 31 | f0af0c43340763724f139fa68aa1e5a9ffe458b4 | eight_puzzle.py | 22,419 | 15 | 118 | depth_first_search | https://github.com/geekcomputers/Python.git | refactor: clean code
Signed-off-by: slowy07 <slowy.arfy@gmail.com> | 198 | 0 | 4,325 | 15 |
|
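For the `depth_first_search` record above: it is a plain stack-based DFS over puzzle states with a closed list. A minimal sketch of the same pattern on an adjacency-list graph follows; the toy graph, the names, and the use of a set for `closed` are illustrative choices, not part of the repository code.

```python
def dfs_path(graph: dict, start, goal):
    """Return the list of moves (nodes entered) from start to goal, or None."""
    stack = [(start, [])]           # each entry: (state, moves so far)
    closed = set()                  # a set is a faster membership test than a list
    while stack:
        state, moves = stack.pop()  # LIFO pop gives depth-first order
        if state == goal:
            return moves
        if state in closed:
            continue
        closed.add(state)
        for nxt in graph.get(state, []):
            stack.append((nxt, moves + [nxt]))
    return None

print(dfs_path({"A": ["B", "C"], "B": ["D"], "C": []}, "A", "D"))  # ['B', 'D']
```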
3 | 20 | def update(self) -> bool:
try:
# Add or remove DeploymentReplica instances in self._replicas.
# This should be the only place we adjust total number of replicas
# we manage.
running_replicas_changed = self._scale_deployment_replicas()
# Check the state of existing replicas and transition if necessary.
running_replicas_changed |= self._check_and_update_replicas()
if running_replicas_changed:
self._notify_running_replicas_changed()
deleted = self._check_curr_status()
except Exception:
self._curr_status_info = DeploymentStatusInfo(
name=self._name,
status=DeploymentStatus.UNHEALTHY,
message="Failed to update deployment:" f"\n{traceback.format_exc()}",
)
deleted = False
return deleted
| python/ray/serve/_private/deployment_state.py | 138 | ray | {
"docstring": "Attempts to reconcile this deployment to match its goal state.\n\n This is an asynchronous call; it's expected to be called repeatedly.\n\n Also updates the internal DeploymentStatusInfo based on the current\n state of the system.\n\n Returns true if this deployment was successfully deleted.\n ",
"language": "en",
"n_whitespaces": 77,
"n_words": 42,
"vocab_size": 36
} | 70 | Python | 56 | 65d0c0aa48be8f9f7faae857d3ab71444997755a | deployment_state.py | 128,240 | 24 | 72 | update | https://github.com/ray-project/ray.git | [Serve] add alpha gRPC support (#28175) | 279 | 0 | 28,641 | 17 |
|
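The `update` record above follows a reconcile-loop shape: scale the replica set, health-check it, notify listeners when the running set changed, and mark the deployment unhealthy if anything throws. The sketch below shows only that control flow with made-up class and method names; it is not the Ray Serve implementation.

```python
import traceback

class MiniDeployment:
    """Toy stand-in: reconcile toward a goal state on every call."""

    status = "HEALTHY"
    message = ""

    def _scale_replicas(self) -> bool:   # placeholder: add/remove replicas
        return False                     # True if the running set changed

    def _check_replicas(self) -> bool:   # placeholder: health checks/transitions
        return False

    def update(self) -> bool:
        try:
            changed = self._scale_replicas()
            changed |= self._check_replicas()
            if changed:
                pass                     # notify listeners about the new replica set
            return False                 # would return True once fully deleted
        except Exception:
            self.status = "UNHEALTHY"
            self.message = f"Failed to update deployment:\n{traceback.format_exc()}"
            return False

print(MiniDeployment().update())  # False
```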
7 | 50 | def _finished_processing(self) -> None:
assert self.logcontext is not None
assert self.finish_time is not None
usage = self.logcontext.get_resource_usage()
if self._processing_finished_time is None:
# we completed the request without anything calling processing()
self._processing_finished_time = time.time()
# the time between receiving the request and the request handler finishing
processing_time = self._processing_finished_time - self.start_time
# the time between the request handler finishing and the response being sent
# to the client (nb may be negative)
response_send_time = self.finish_time - self._processing_finished_time
user_agent = get_request_user_agent(self, "-")
# int(self.code) looks redundant, because self.code is already an int.
# But self.code might be an HTTPStatus (which inherits from int)---which has
# a different string representation. So ensure we really have an integer.
code = str(int(self.code))
if not self.finished:
# we didn't send the full response before we gave up (presumably because
# the connection dropped)
code += "!"
log_level = logging.INFO if self._should_log_request() else logging.DEBUG
# If this is a request where the target user doesn't match the user who
# authenticated (e.g. and admin is puppetting a user) then we log both.
requester, authenticated_entity = self.get_authenticated_entity()
if authenticated_entity:
requester = f"{authenticated_entity}|{requester}"
self.synapse_site.access_logger.log(
log_level,
"%s - %s - {%s}"
" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
' %sB %s "%s %s %s" "%s" [%d dbevts]',
self.getClientIP(),
self.synapse_site.site_tag,
requester,
processing_time,
response_send_time,
usage.ru_utime,
usage.ru_stime,
usage.db_sched_duration_sec,
usage.db_txn_duration_sec,
int(usage.db_txn_count),
self.sentLength,
code,
self.get_method(),
self.get_redacted_uri(),
self.clientproto.decode("ascii", errors="replace"),
user_agent,
usage.evt_db_fetch_count,
)
# complete the opentracing span, if any.
if self._opentracing_span:
self._opentracing_span.finish()
try:
self.request_metrics.stop(self.finish_time, self.code, self.sentLength)
except Exception as e:
logger.warning("Failed to stop metrics: %r", e)
| synapse/http/site.py | 432 | synapse | {
"docstring": "Log the completion of this request and update the metrics",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | 250 | Python | 168 | d8df8e6c1432d25ea1c0310a5f2dc48d1688345f | site.py | 246,117 | 46 | 262 | _finished_processing | https://github.com/matrix-org/synapse.git | Don't print HTTPStatus.* in "Processed..." logs (#11827)
* Don't print HTTPStatus.* in "Processed..." logs
Fixes #11812. See also #7118 and
https://github.com/matrix-org/synapse/pull/7188#r401719326 in
particular.
Co-authored-by: Brendan Abolivier <babolivier@matrix.org> | 769 | 0 | 71,021 | 11 |
|
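The `_finished_processing` record above splits the request lifetime into handler time and response-send time around `_processing_finished_time`. A timestamp-only sketch of that split, with sleeps standing in for real work and no Twisted request object involved:

```python
import time

start_time = time.time()                # request received
time.sleep(0.01)                        # ... request handler runs ...
processing_finished_time = time.time()  # handler finished producing the response
time.sleep(0.01)                        # ... response streamed to the client ...
finish_time = time.time()               # last byte sent (or connection dropped)

processing_time = processing_finished_time - start_time
response_send_time = finish_time - processing_finished_time  # the original notes this may be negative
print(f"Processed request: {processing_time:.3f}sec/{response_send_time:.3f}sec")
```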
1 | 7 | def test_submit_with_logs_instant_job(self, ray_start_stop):
cmd = "echo hello"
stdout, _ = _run_cmd(f"ray job submit -- bash -c '{cmd}'")
assert "hello" in stdout
| dashboard/modules/job/tests/test_cli_integration.py | 49 | ray | {
"docstring": "Should exit immediately and print logs even if job returns instantly.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | 21 | Python | 20 | 813e1a857d5dfc060b3b6cb846157fdca425e6b0 | test_cli_integration.py | 134,271 | 4 | 24 | test_submit_with_logs_instant_job | https://github.com/ray-project/ray.git | Revert "Revert "[Job Submission][refactor 5/N] Remove the head node dependency on the `Raylet` process"" (#29008)
Reverts #28931 and fixes the tests that were made flaky by that PR.
Fix address="auto" in cpp job test (fixed by @Catch-Bull )
Fix len_new_owner_port flakiness in test_sdk(fixed by @Catch-Bull )
Fix int conversion flakiness
Additionally, this PR updates the log tailing behavior from the previous PR to return logs instantly when the job exits, to match the current behavior on master, including the case where the runtime env fails to set up. (In the previous PR, there was a timeout for waiting for the supervisor actor to start, so if the runtime env failed to set up instantly,ray job submit would still wait for the entire 60s timeout before closing the log stream and returning.)
Finally, this PR updates the default scheduling behavior from the previous PR to make jobs run on the head node by default (configurable via the environment variable RAY_JOB_ALLOW_DRIVERS_ON_HEAD_NODE.). This is to avoid making a breaking behavior change unless absolutely necessary. We can update this default in the future after more discussion.
In this PR, the head node id is passed to the agent via internal KV. This is a workaround for the fact that there is no way to retrieve the head node id from within Ray (#29607) | 49 | 0 | 30,235 | 10 |
|
3 | 21 | def serialize_model_as_bytecode(model):
# Note: we don't use a RAM path for this because zipfile cannot write
# to such paths.
temp_dir = tempfile.mkdtemp()
try:
filepath = os.path.join(temp_dir, "model.keras")
saving_lib.save_model(model, filepath)
with open(filepath, "rb") as f:
data = f.read()
except Exception as e:
raise e
else:
return data
finally:
tf.io.gfile.rmtree(temp_dir)
| keras/saving/pickle_utils.py | 134 | keras | {
"docstring": "Convert a Keras Model into a bytecode representation for pickling.\n\n Args:\n model: Keras Model instance.\n\n Returns:\n Tuple that can be read by `deserialize_from_bytecode`.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 23,
"vocab_size": 20
} | 49 | Python | 44 | 2ed044d06d0ae552477672aa8b778f8edafb52f1 | pickle_utils.py | 279,795 | 13 | 75 | serialize_model_as_bytecode | https://github.com/keras-team/keras.git | Use new saving logic for pickling. This is somewhat cleaner since it restores the exact same model (no usage of traces). It may however be less convenient since it requires get_config() to be implemented and the use of a custom_object_scope.
PiperOrigin-RevId: 474146108 | 126 | 0 | 83,134 | 13 |
|
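The `serialize_model_as_bytecode` record above writes a `.keras` archive to a temporary directory and returns its bytes; its docstring refers to a `deserialize_from_bytecode` counterpart that is not shown here. The sketch below is only an assumption about what such an inverse could look like, using the public `keras.models.load_model` API of a recent Keras rather than the repository's own helper.

```python
import os
import tempfile

import keras

def deserialize_model_from_bytes(data: bytes):
    """Write the serialized .keras archive back to disk and load it (illustrative)."""
    temp_dir = tempfile.mkdtemp()
    filepath = os.path.join(temp_dir, "model.keras")
    with open(filepath, "wb") as f:
        f.write(data)
    # Public loader for the .keras format; the repo's internal loader may differ.
    return keras.models.load_model(filepath)
```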
1 | 6 | def get_value_data_from_instance(self, instance):
return {
"id": instance.pk,
"edit_url": AdminURLFinder().get_edit_url(instance),
}
| wagtail/admin/widgets/chooser.py | 49 | wagtail | {
"docstring": "\n Given a model instance, return a value that we can pass to both the server-side template\n and the client-side rendering code (via telepath) that contains all the information needed\n for display. Typically this is a dict of id, title etc; it must be JSON-serialisable.\n ",
"language": "en",
"n_whitespaces": 73,
"n_words": 44,
"vocab_size": 39
} | 10 | Python | 10 | 39f7886a6f8ee98db7e73ce33d94c06139f35bd8 | chooser.py | 77,547 | 5 | 28 | get_value_data_from_instance | https://github.com/wagtail/wagtail.git | Split out common logic from get_value_data | 53 | 0 | 16,673 | 11 |
|
2 | 17 | def copy_files(from_dir, to_dir):
if from_dir.exists():
shutil.copytree(from_dir, to_dir, dirs_exist_ok=True)
dirs_list = [
SETTINGS_DIRECTORY,
USER_DATA_DIRECTORY,
USER_DATA_DIRECTORY / "styles",
CUSTOM_IMPORTS_DIRECTORY,
CUSTOM_IMPORTS_DIRECTORY / "econometrics",
]
dirs_files = [USER_ENV_FILE, REPOSITORY_ENV_FILE]
create_paths(dirs_list)
create_files(dirs_files)
copy_files(REPOSITORY_DIRECTORY / "custom_imports", CUSTOM_IMPORTS_DIRECTORY)
| openbb_terminal/core/config/paths_helper.py | 109 | OpenBBTerminal | {
"docstring": "\n Copy default/example files from the repo\n to the user data folder",
"language": "en",
"n_whitespaces": 17,
"n_words": 11,
"vocab_size": 10
} | 31 | Python | 28 | c4658b63a936ad219625d30dcbd12a1aa798af09 | paths_helper.py | 285,729 | 3 | 27 | copy_files | https://github.com/OpenBB-finance/OpenBBTerminal.git | Add path for custom_imports outside the terminal (#2567)
* add log path
* add test to check if log file is in correct dir
* env path
* black
* mypy fix
* add styles folder and styles from repo
* add timezone as env variable
* fix changes with main
* fix test
* flake8
* fix linting
* fix linting
* changes
* custom changes
* add custom_imports outside terminal
* black
* black terminal
* fix test
* fix merge and remove styles/user
* some stylistic changes and remove move_files
* flake8
* merge main
* merge move and make into paths_helper
Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> | 53 | 0 | 85,399 | 10 |
|
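The key idiom in the `copy_files` record above is `shutil.copytree(..., dirs_exist_ok=True)` (Python 3.8+), which merges a directory of defaults into a target folder that may already exist. A standalone sketch with hypothetical paths, not the terminal's real directories:

```python
import shutil
from pathlib import Path

from_dir = Path("defaults") / "styles"        # hypothetical source of example files
to_dir = Path.home() / ".myapp" / "styles"    # hypothetical user data folder

to_dir.mkdir(parents=True, exist_ok=True)
if from_dir.exists():
    # dirs_exist_ok=True copies into the existing target, overwriting same-named
    # files and leaving other existing files in place.
    shutil.copytree(from_dir, to_dir, dirs_exist_ok=True)
```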
4 | 27 | def build_query_compiler(cls, path, columns, index_columns, **kwargs):
col_partitions, column_widths = cls.build_columns(columns)
partition_ids = cls.call_deploy(path, col_partitions, **kwargs)
index, sync_index = cls.build_index(path, partition_ids, index_columns)
remote_parts = cls.build_partition(partition_ids, column_widths)
if len(partition_ids) > 0:
row_lengths = [part.length() for part in remote_parts.T[0]]
else:
row_lengths = None
frame = cls.frame_cls(
remote_parts,
index,
columns,
row_lengths=row_lengths,
column_widths=column_widths,
dtypes=None,
)
if sync_index:
frame.synchronize_labels(axis=0)
return cls.query_compiler_cls(frame)
| modin/core/io/column_stores/parquet_dispatcher.py | 204 | modin | {
"docstring": "\n Build query compiler from deployed tasks outputs.\n\n Parameters\n ----------\n path : str, path object or file-like object\n Path to the file to read.\n columns : list\n List of columns that should be read from file.\n index_columns : list\n List of index columns specified by pandas metadata.\n **kwargs : dict\n Parameters of deploying read_* function.\n\n Returns\n -------\n new_query_compiler : BaseQueryCompiler\n Query compiler with imported data for further processing.\n ",
"language": "en",
"n_whitespaces": 200,
"n_words": 67,
"vocab_size": 51
} | 55 | Python | 44 | 8864bc197974da6d8cda2de2f35ca31d561be1cc | parquet_dispatcher.py | 154,122 | 20 | 136 | build_query_compiler | https://github.com/modin-project/modin.git | PERF-#4305: Parallelize `read_parquet` over row groups (#4700)
Co-authored-by: mvashishtha <mahesh@ponder.io> | 231 | 0 | 35,795 | 12 |
|
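The `build_query_compiler` record above splits the column list into groups and reads each group as its own partition in parallel before assembling a frame. The sketch below imitates that idea with plain pyarrow and a thread pool instead of Modin's remote partitions and query compiler; all names are illustrative.

```python
from concurrent.futures import ThreadPoolExecutor

import pandas as pd
import pyarrow.parquet as pq

def read_by_column_groups(path: str, columns: list, n_groups: int = 4) -> pd.DataFrame:
    step = max(1, len(columns) // n_groups)
    groups = [columns[i:i + step] for i in range(0, len(columns), step)]
    with ThreadPoolExecutor() as pool:
        parts = pool.map(lambda cols: pq.read_table(path, columns=cols).to_pandas(), groups)
    # Column partitions share the same row index, so concatenate along axis=1.
    return pd.concat(list(parts), axis=1)
```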
1 | 2 | def packing(self):
return self["packing"]
| packages/python/plotly/plotly/graph_objs/treemap/_tiling.py | 22 | plotly.py | {
"docstring": "\n Determines d3 treemap solver. For more info please refer to\n https://github.com/d3/d3-hierarchy#treemap-tiling\n\n The 'packing' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['squarify', 'binary', 'dice', 'slice', 'slice-dice',\n 'dice-slice']\n\n Returns\n -------\n Any\n ",
"language": "en",
"n_whitespaces": 127,
"n_words": 38,
"vocab_size": 37
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _tiling.py | 235,599 | 2 | 11 | packing | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 67,043 | 7 |
|
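The `packing` property above is read from a treemap's `tiling` settings. A small usage sketch with plotly's `graph_objects` API; the figure data is made up.

```python
import plotly.graph_objects as go

fig = go.Figure(
    go.Treemap(
        labels=["root", "A", "B"],
        parents=["", "root", "root"],
        values=[10, 6, 4],
        tiling=dict(packing="binary"),  # one of the solvers listed in the docstring
    )
)
print(fig.data[0].tiling.packing)  # binary
# fig.show()
```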
1 | 5 | def kg_to_pounds(n):
return float(n) * 2.204623
@register.filter("startswith") | netbox/utilities/templatetags/helpers.py | 38 | @register.filter("startswith") | netbox | {
"docstring": "\n Convert a weight from kilograms to pounds.\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | 7 | Python | 7 | 87fd09ca8b5a0d3ec692e241351e1bbc4ac298a7 | helpers.py | 266,144 | 2 | 15 | kg_to_pounds | https://github.com/netbox-community/netbox.git | Cleanup for #9654 | 12 | 1 | 78,308 | 8 |
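The `kg_to_pounds` record above is a single multiplication by roughly 2.204623 pounds per kilogram. For example:

```python
def kg_to_pounds(n):
    return float(n) * 2.204623

print(round(kg_to_pounds(10), 2))   # 22.05
print(round(kg_to_pounds(75), 1))   # 165.3  (a 75 kg weight in pounds)
```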
4 | 13 | def set_weights(self, weights):
if not getattr(self, "_built", False):
raise ValueError(
"You are calling `set_weights()` on an optimizer that has not "
"yet been built. Please call "
"`optimizer.build(trainable_variables)` to create the "
"optimizer weights before calling `set_weights()`."
)
for variable, weight in zip(self._variables, weights):
if variable.shape != weight.shape:
raise ValueError(
f"Optimizer variable {self._var_key(variable)} has shape "
f"{str(variable.shape)} not compatible with provided "
f"weight shape {str(weight.shape)}."
)
variable.assign(weight)
| keras/optimizers/optimizer_experimental/optimizer.py | 150 | keras | {
"docstring": "Set the weights of the optimizer.\n\n Args:\n weights: a list of `tf.Variable`s or numpy arrays, the target values\n of optimizer variables. It should have the same order as\n `self._variables`.\n ",
"language": "en",
"n_whitespaces": 84,
"n_words": 29,
"vocab_size": 24
} | 67 | Python | 53 | 571d8786df580d6daa5c57c77b5b15a125631c8f | optimizer.py | 279,802 | 16 | 66 | set_weights | https://github.com/keras-team/keras.git | Add method `set_weights` for optimizer backward compatibility.
Remove @doc_controls.do_not_generate_docs for `variables()` method because optimizer is no longer a `tf.Module`.
PiperOrigin-RevId: 474149115 | 279 | 0 | 83,138 | 17 |
|
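The `set_weights` record above refuses to run before `build()` has created the optimizer variables, and it checks shapes pairwise before assigning. The stand-in below reproduces that validation pattern with NumPy arrays instead of `tf.Variable`s; it is illustrative only, not the Keras implementation.

```python
import numpy as np

class TinyOptimizer:
    def __init__(self, variables):
        self._built = True
        self._variables = variables        # arrays standing in for tf.Variables

    def set_weights(self, weights):
        if not getattr(self, "_built", False):
            raise ValueError("Call build() before set_weights().")
        for variable, weight in zip(self._variables, weights):
            if variable.shape != weight.shape:
                raise ValueError(f"{variable.shape} not compatible with {weight.shape}")
            variable[...] = weight         # stands in for variable.assign(weight)

opt = TinyOptimizer([np.zeros((2, 2)), np.zeros(3)])
opt.set_weights([np.ones((2, 2)), np.ones(3)])
print(opt._variables[1])  # [1. 1. 1.]
```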
1 | 25 | def test_subdag_pools(self):
dag = DAG('parent', default_args=default_args)
subdag = DAG('parent.child', default_args=default_args)
session = airflow.settings.Session()
pool_1 = airflow.models.Pool(pool='test_pool_1', slots=1)
pool_10 = airflow.models.Pool(pool='test_pool_10', slots=10)
session.add(pool_1)
session.add(pool_10)
session.commit()
EmptyOperator(task_id='dummy', dag=subdag, pool='test_pool_1')
with pytest.raises(AirflowException):
SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_1')
# recreate dag because failed subdagoperator was already added
dag = DAG('parent', default_args=default_args)
SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_10')
session.delete(pool_1)
session.delete(pool_10)
session.commit()
| tests/operators/test_subdag_operator.py | 287 | airflow | {
"docstring": "\n Subdags and subdag tasks can't both have a pool with 1 slot\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 12
} | 53 | Python | 38 | 49e336ae0302b386a2f47269a6d13988382d975f | test_subdag_operator.py | 47,650 | 17 | 169 | test_subdag_pools | https://github.com/apache/airflow.git | Replace usage of `DummyOperator` with `EmptyOperator` (#22974)
* Replace usage of `DummyOperator` with `EmptyOperator` | 183 | 0 | 9,191 | 11 |
|
2 | 9 | def __call__(self, results):
assert 'mix_results' in results
num_images = len(results['mix_results'])
assert num_images == 1, \
f'CopyPaste only supports processing 2 images, got {num_images}'
if self.selected:
selected_results = self._select_object(results['mix_results'][0])
else:
selected_results = results['mix_results'][0]
return self._copy_paste(results, selected_results)
 | mmdet/datasets/pipelines/transforms.py | 116 | mmdetection | {
    "docstring": "Call function to apply the copy-paste transform to an image.\n\n        Args:\n            results (dict): Result dict.\n        Returns:\n            dict: Result dict with the copy-paste transform applied.\n        ",
"language": "en",
"n_whitespaces": 63,
"n_words": 20,
"vocab_size": 18
} | 35 | Python | 30 | 9a166a380229d2aaf5986fa1ff303a941865961a | transforms.py | 244,183 | 10 | 68 | __call__ | https://github.com/open-mmlab/mmdetection.git | [Feature] Support simple copy paste with some configs. (#7501)
* Testing pre-commit hooks
* Added base code in transforms
* Added Simple Copy Paste working version
* Added checks to simple copy paste
* refactor simplecopypaste and provide some configs
* remove lvis-api in .gitignore
* refactor simplecopypaste and use resize/flip/pad in load_pipeline
* pre-commit
* add README.md for simplecopypaste
* add some unit tests
* rename some variables
* add a blend_fn
* add some unit tests
* add some comments
* delete blend_fn
* simplify some commits
Co-authored-by: Sudarshan Kamath <sudarshan.kamath97@gmail.com> | 117 | 0 | 70,272 | 13 |
|
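The CopyPaste `__call__` record above delegates the actual paste to `_copy_paste`, whose core step amounts to compositing selected source pixels onto the destination image with a binary object mask. A NumPy-only sketch of that blend; the real mmdet transform also merges boxes, labels, and instance masks.

```python
import numpy as np

def paste(dst_img: np.ndarray, src_img: np.ndarray, src_mask: np.ndarray) -> np.ndarray:
    """dst_img/src_img: (H, W, 3) floats; src_mask: (H, W) of 0/1 for pasted objects."""
    mask = src_mask[..., None].astype(dst_img.dtype)   # broadcast over channels
    return dst_img * (1 - mask) + src_img * mask

dst = np.zeros((4, 4, 3))
src = np.ones((4, 4, 3))
mask = np.zeros((4, 4))
mask[1:3, 1:3] = 1
print(paste(dst, src, mask)[1, 1])  # [1. 1. 1.]  (pasted region comes from src)
```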
3 | 7 | def safe_quote_currency(self) -> str:
try:
return self.stake_currency or self.pair.split('/')[1].split(':')[0]
except IndexError:
return ''
| freqtrade/persistence/models.py | 70 | freqtrade | {
"docstring": "\n Compatibility layer for asset - which can be empty for old trades.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | 13 | Python | 12 | 8e98a2ff9f4fabf81bf5a4f4e1f772f5c4a091ec | models.py | 149,525 | 8 | 39 | safe_quote_currency | https://github.com/freqtrade/freqtrade.git | api - provide assset_currency via API | 56 | 0 | 34,441 | 15 |
|
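The fallback in `safe_quote_currency` above recovers the quote currency from the pair string by splitting on '/' and then ':'. Two illustrative inputs, with pair formats assumed from freqtrade's naming:

```python
for pair in ("ETH/USDT:USDT", "BTC/USDT"):   # futures-style and spot-style pair names
    print(pair.split('/')[1].split(':')[0])  # -> USDT in both cases

# A malformed pair such as "BTCUSDT" has no '/', so indexing [1] raises IndexError,
# which is why the method above falls back to returning ''.
```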
6 | 18 | def _make_twin_axes(self, *args, **kwargs):
if 'sharex' in kwargs and 'sharey' in kwargs:
# The following line is added in v2.2 to avoid breaking Seaborn,
# which currently uses this internal API.
if kwargs["sharex"] is not self and kwargs["sharey"] is not self:
raise ValueError("Twinned Axes may share only one axis")
ss = self.get_subplotspec()
if ss:
twin = self.figure.add_subplot(ss, *args, **kwargs)
else:
twin = self.figure.add_axes(
self.get_position(True), *args, **kwargs,
axes_locator=_TransformedBoundsLocator(
[0, 0, 1, 1], self.transAxes))
self.set_adjustable('datalim')
twin.set_adjustable('datalim')
self._twinned_axes.join(self, twin)
return twin
| lib/matplotlib/axes/_base.py | 222 | matplotlib | {
"docstring": "Make a twinx Axes of self. This is used for twinx and twiny.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 12
} | 78 | Python | 63 | c73f4c455514cf5422d27bf38c93250de8316b21 | _base.py | 109,447 | 16 | 135 | _make_twin_axes | https://github.com/matplotlib/matplotlib.git | Merge SubplotBase into AxesBase. | 260 | 0 | 23,592 | 15 |
|
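The `_make_twin_axes` helper above backs the public `Axes.twinx()` and `Axes.twiny()` methods. Typical usage of the public API looks like this:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], color="tab:blue")
ax.set_ylabel("left scale")

ax2 = ax.twinx()          # new Axes sharing x with ax, with its own y-axis
ax2.plot([0, 1, 2], [100, 50, 25], color="tab:red")
ax2.set_ylabel("right scale")

# plt.show()
```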
2 | 10 | def setup_awaitable_errors() -> Callable[[], None]:
warnings.simplefilter("error", RuntimeWarning)
# unraisablehook was added in Python 3.8.
if not hasattr(sys, "unraisablehook"):
return lambda: None
# State shared between unraisablehook and check_for_unraisable_exceptions.
unraisable_exceptions = []
orig_unraisablehook = sys.unraisablehook
 | tests/test_utils/__init__.py | 76 | synapse | {
    "docstring": "\n    Convert warnings from non-awaited coroutines into errors.\n    ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 8
} | 34 | Python | 31 | 646324437543c096e737777c81b4fe4b45c3e1a7 | __init__.py | 248,078 | 13 | 54 | setup_awaitable_errors | https://github.com/matrix-org/synapse.git | Remove unused `# type: ignore`s (#12531)
Over time we've begun to use newer versions of mypy, typeshed, stub
packages---and of course we've improved our own annotations. This makes
some type ignore comments no longer necessary. I have removed them.
There was one exception: a module that imports `select.epoll`. The
ignore is redundant on Linux, but I've kept it ignored for those of us
who work on the source tree using not-Linux. (#11771)
I'm more interested in the config line which enforces this. I want
unused ignores to be reported, because I think it's useful feedback when
annotating to know when you've fixed a problem you had to previously
ignore.
* Installing extras before typechecking
Lacking an easy way to install all extras generically, let's bite the bullet and
make install the hand-maintained `all` extra before typechecking.
Now that https://github.com/matrix-org/backend-meta/pull/6 is merged to
the release/v1 branch. | 62 | 0 | 72,089 | 9 |
|
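`setup_awaitable_errors` above relies on the fact that CPython reports a never-awaited coroutine with a RuntimeWarning when the coroutine object is garbage collected; once the warning is promoted to an error, it surfaces through `sys.unraisablehook` rather than a normal `try/except`. A standalone sketch of that mechanism, assuming CPython 3.8+:

```python
import gc
import sys
import warnings

warnings.simplefilter("error", RuntimeWarning)
caught = []
orig_hook = sys.unraisablehook
sys.unraisablehook = lambda unraisable: caught.append(unraisable)

async def do_something():
    return 1

do_something()   # coroutine object created but never awaited
gc.collect()     # make sure the coroutine is finalized

sys.unraisablehook = orig_hook
warnings.resetwarnings()
print(len(caught))  # expect 1: "coroutine 'do_something' was never awaited"
```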
1 | 4 | def required_columns(self) -> List[str]:
return []
| ludwig/data/split.py | 25 | ludwig | {
"docstring": "Returns the list of columns that are required for splitting.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 6 | Python | 6 | d85269cd60734790a65c11673bfdd98516b62b6c | split.py | 8,629 | 3 | 14 | required_columns | https://github.com/ludwig-ai/ludwig.git | Use clearer error messages in ludwig serving, and enable serving to work with configs that have stratified splitting on target columns. (#2740)
* Use clearer serving error messages, and enable serving to work with configs that have stratified splitting on target columns.
* Adjust warning message | 20 | 0 | 1,468 | 6 |
|
2 | 11 | def get_local_ip_address() -> str:
try:
ip_address = requests.get(
"https://checkip.amazonaws.com/", timeout=3
).text.strip()
except (requests.ConnectionError, requests.exceptions.ReadTimeout):
ip_address = "No internet connection"
return ip_address
| gradio/utils.py | 78 | gradio | {
"docstring": "Gets the public IP address or returns the string \"No internet connection\" if unable to obtain it.",
"language": "en",
"n_whitespaces": 16,
"n_words": 17,
"vocab_size": 16
} | 21 | Python | 18 | 51824608865b66ab04b018f55055124edbe603f3 | utils.py | 181,347 | 9 | 45 | get_local_ip_address | https://github.com/gradio-app/gradio.git | Patching `test_get_ip` attempt 2 (#2810)
* ip-patch-2
* formatting
* patch 2 | 65 | 0 | 43,310 | 14 |
|
1 | 4 | def path(self):
self._deprecate("path")
return self._path
| pandas/io/excel/_base.py | 31 | pandas | {
"docstring": "\n Path to Excel file.\n\n .. deprecated:: 1.5.0\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 7,
"vocab_size": 7
} | 5 | Python | 5 | 047137ce2619cfe2027e3999dfb92eb614d9a485 | _base.py | 164,688 | 3 | 16 | path | https://github.com/pandas-dev/pandas.git | DEP: Protect some ExcelWriter attributes (#45795)
* DEP: Deprecate ExcelWriter attributes
* DEP: Deprecate ExcelWriter attributes
* Fixup for test
* Move tests and restore check_extension
y
* Deprecate xlwt fm_date and fm_datetime; doc improvements | 26 | 0 | 39,592 | 8 |
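The `path` record above is the usual deprecate-on-access pattern: the property still works but routes through a `_deprecate` helper that warns. The sketch below shows the general shape with a hand-written `_deprecate`; it is not the pandas helper.

```python
import warnings

class Writer:
    def __init__(self, path):
        self._path = path

    def _deprecate(self, name: str) -> None:
        warnings.warn(
            f"{name} is deprecated and will be removed in a future version.",
            FutureWarning,
            stacklevel=3,
        )

    @property
    def path(self):
        self._deprecate("path")
        return self._path

w = Writer("book.xlsx")
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = w.path
print(caught[0].category.__name__)  # FutureWarning
```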