| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
| python | joke2k__faker | tests/providers/test_person.py | {"start": 15865, "end": 17585} |
class ____(unittest.TestCase):
"""Tests person in the en-IE locale"""
def setUp(self):
self.fake = Faker("en-ie")
self.provider = EnIEProvider
Faker.seed(0)
def test_first_name(self):
# General first name
name = self.fake.first_name()
assert name
self.assertIsInstance(name, str)
assert name in self.provider.first_names
# Females first name
name = self.fake.first_name_female()
assert name
self.assertIsInstance(name, str)
assert name in self.provider.first_names
assert name in self.provider.first_names_female
# Male first name
name = self.fake.first_name_male()
assert name
self.assertIsInstance(name, str)
assert name in self.provider.first_names
assert name in self.provider.first_names_male
def test_last_name(self):
assert not hasattr(self.provider, "last_names_male")
assert not hasattr(self.provider, "last_names_female")
# All last names apply for all genders.
assert hasattr(self.provider, "last_names")
# General last name.
name = self.fake.last_name()
assert name
self.assertIsInstance(name, str)
assert name in self.provider.last_names
# Females last name.
name = self.fake.last_name_female()
assert name
self.assertIsInstance(name, str)
assert name in self.provider.last_names
assert name in self.provider.last_names
# Male last name.
name = self.fake.last_name_male()
assert name
self.assertIsInstance(name, str)
assert name in self.provider.last_names
| TestEnIE |
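Each row pairs a `source` cell, in which the class name is masked as `____`, with a `target` cell holding the masked name. A minimal sketch of how such a row can be restored is shown below; the inline `row` dict is a hand-abbreviated stand-in for the columns above, not a loading API.

```python
# Minimal sketch: splice a row's target class name back into its masked source.
# The `row` dict is abbreviated by hand from the first row shown above.
row = {
    "language": "python",
    "repo": "joke2k__faker",
    "path": "tests/providers/test_person.py",
    "class_span": {"start": 15865, "end": 17585},
    "source": 'class ____(unittest.TestCase):\n    """Tests person in the en-IE locale"""\n',
    "target": "TestEnIE",
}

# Replace the single "____" placeholder with the target name.
restored = row["source"].replace("____", row["target"], 1)
assert restored.startswith("class TestEnIE(unittest.TestCase):")
```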
| python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {"start": 1570929, "end": 1571815} |
class ____(VegaLiteSchema):
"""
ValueDefnumberwidthheightExprRef schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : dict, float, :class:`ExprRef`, Literal['height', 'width']
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_schema = {"$ref": '#/definitions/ValueDef<(number|"width"|"height"|ExprRef)>'}
def __init__(
self,
value: Optional[
float | Parameter | SchemaBase | Literal["height", "width"] | Map
] = Undefined,
**kwds,
):
super().__init__(value=value, **kwds)
| ValueDefnumberwidthheightExprRef |
| python | pyca__cryptography | tests/x509/test_x509.py | {"start": 207238, "end": 216384} |
class ____:
@pytest.mark.supported(
only_if=lambda backend: backend.signature_hash_supported(
hashes.SHA1()
),
skip_message="Does not support SHA-1 signature.",
)
def test_load_dsa_cert(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "dsa_selfsigned_ca.pem"),
x509.load_pem_x509_certificate,
)
assert isinstance(cert.signature_hash_algorithm, hashes.SHA1)
public_key = cert.public_key()
assert isinstance(public_key, dsa.DSAPublicKey)
assert cert.signature_algorithm_parameters is None
num = public_key.public_numbers()
assert num.y == int(
"4c08bfe5f2d76649c80acf7d431f6ae2124b217abc8c9f6aca776ddfa94"
"53b6656f13e543684cd5f6431a314377d2abfa068b7080cb8ddc065afc2"
"dea559f0b584c97a2b235b9b69b46bc6de1aed422a6f341832618bcaae2"
"198aba388099dafb05ff0b5efecb3b0ae169a62e1c72022af50ae68af3b"
"033c18e6eec1f7df4692c456ccafb79cc7e08da0a5786e9816ceda651d6"
"1b4bb7b81c2783da97cea62df67af5e85991fdc13aff10fc60e06586386"
"b96bb78d65750f542f86951e05a6d81baadbcd35a2e5cad4119923ae6a2"
"002091a3d17017f93c52970113cdc119970b9074ca506eac91c3dd37632"
"5df4af6b3911ef267d26623a5a1c5df4a6d13f1c",
16,
)
assert num.parameter_numbers.g == int(
"4b7ced71dc353965ecc10d441a9a06fc24943a32d66429dd5ef44d43e67"
"d789d99770aec32c0415dc92970880872da45fef8dd1e115a3e4801387b"
"a6d755861f062fd3b6e9ea8e2641152339b828315b1528ee6c7b79458d2"
"1f3db973f6fc303f9397174c2799dd2351282aa2d8842c357a73495bbaa"
"c4932786414c55e60d73169f5761036fba29e9eebfb049f8a3b1b7cee6f"
"3fbfa136205f130bee2cf5b9c38dc1095d4006f2e73335c07352c64130a"
"1ab2b89f13b48f628d3cc3868beece9bb7beade9f830eacc6fa241425c0"
"b3fcc0df416a0c89f7bf35668d765ec95cdcfbe9caff49cfc156c668c76"
"fa6247676a6d3ac945844a083509c6a1b436baca",
16,
)
assert num.parameter_numbers.p == int(
"bfade6048e373cd4e48b677e878c8e5b08c02102ae04eb2cb5c46a523a3"
"af1c73d16b24f34a4964781ae7e50500e21777754a670bd19a7420d6330"
"84e5556e33ca2c0e7d547ea5f46a07a01bf8669ae3bdec042d9b2ae5e6e"
"cf49f00ba9dac99ab6eff140d2cedf722ee62c2f9736857971444c25d0a"
"33d2017dc36d682a1054fe2a9428dda355a851ce6e6d61e03e419fd4ca4"
"e703313743d86caa885930f62ed5bf342d8165627681e9cc3244ba72aa2"
"2148400a6bbe80154e855d042c9dc2a3405f1e517be9dea50562f56da93"
"f6085f844a7e705c1f043e65751c583b80d29103e590ccb26efdaa0893d"
"833e36468f3907cfca788a3cb790f0341c8a31bf",
16,
)
assert num.parameter_numbers.q == int(
"822ff5d234e073b901cf5941f58e1f538e71d40d", 16
)
def test_load_dsa_cert_null_alg_params(self, backend):
"""
This test verifies that we successfully load certificates with encoded
null parameters in the signature AlgorithmIdentifier. This is invalid,
but all versions of Java less than 21 generate certificates with this
encoding so we need to tolerate it at the moment.
"""
with pytest.warns(utils.DeprecatedIn41):
cert = _load_cert(
os.path.join("x509", "custom", "dsa_null_alg_params.pem"),
x509.load_pem_x509_certificate,
)
assert isinstance(cert.signature_hash_algorithm, hashes.SHA256)
assert isinstance(cert.public_key(), dsa.DSAPublicKey)
def test_signature(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "dsa_selfsigned_ca.pem"),
x509.load_pem_x509_certificate,
)
assert cert.signature == binascii.unhexlify(
b"302c021425c4a84a936ab311ee017d3cbd9a3c650bb3ae4a02145d30c64b4326"
b"86bdf925716b4ed059184396bcce"
)
r, s = decode_dss_signature(cert.signature)
assert r == 215618264820276283222494627481362273536404860490
assert s == 532023851299196869156027211159466197586787351758
def test_tbs_certificate_bytes(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "dsa_selfsigned_ca.pem"),
x509.load_pem_x509_certificate,
)
assert cert.tbs_certificate_bytes == binascii.unhexlify(
b"3082051aa003020102020900a37352e0b2142f86300906072a8648ce3804033"
b"067310b3009060355040613025553310e300c06035504081305546578617331"
b"0f300d0603550407130641757374696e3121301f060355040a1318496e74657"
b"26e6574205769646769747320507479204c7464311430120603550403130b50"
b"79434120445341204341301e170d3134313132373035313431375a170d31343"
b"13232373035313431375a3067310b3009060355040613025553310e300c0603"
b"55040813055465786173310f300d0603550407130641757374696e3121301f0"
b"60355040a1318496e7465726e6574205769646769747320507479204c746431"
b"1430120603550403130b50794341204453412043413082033a3082022d06072"
b"a8648ce380401308202200282010100bfade6048e373cd4e48b677e878c8e5b"
b"08c02102ae04eb2cb5c46a523a3af1c73d16b24f34a4964781ae7e50500e217"
b"77754a670bd19a7420d633084e5556e33ca2c0e7d547ea5f46a07a01bf8669a"
b"e3bdec042d9b2ae5e6ecf49f00ba9dac99ab6eff140d2cedf722ee62c2f9736"
b"857971444c25d0a33d2017dc36d682a1054fe2a9428dda355a851ce6e6d61e0"
b"3e419fd4ca4e703313743d86caa885930f62ed5bf342d8165627681e9cc3244"
b"ba72aa22148400a6bbe80154e855d042c9dc2a3405f1e517be9dea50562f56d"
b"a93f6085f844a7e705c1f043e65751c583b80d29103e590ccb26efdaa0893d8"
b"33e36468f3907cfca788a3cb790f0341c8a31bf021500822ff5d234e073b901"
b"cf5941f58e1f538e71d40d028201004b7ced71dc353965ecc10d441a9a06fc2"
b"4943a32d66429dd5ef44d43e67d789d99770aec32c0415dc92970880872da45"
b"fef8dd1e115a3e4801387ba6d755861f062fd3b6e9ea8e2641152339b828315"
b"b1528ee6c7b79458d21f3db973f6fc303f9397174c2799dd2351282aa2d8842"
b"c357a73495bbaac4932786414c55e60d73169f5761036fba29e9eebfb049f8a"
b"3b1b7cee6f3fbfa136205f130bee2cf5b9c38dc1095d4006f2e73335c07352c"
b"64130a1ab2b89f13b48f628d3cc3868beece9bb7beade9f830eacc6fa241425"
b"c0b3fcc0df416a0c89f7bf35668d765ec95cdcfbe9caff49cfc156c668c76fa"
b"6247676a6d3ac945844a083509c6a1b436baca0382010500028201004c08bfe"
b"5f2d76649c80acf7d431f6ae2124b217abc8c9f6aca776ddfa9453b6656f13e"
b"543684cd5f6431a314377d2abfa068b7080cb8ddc065afc2dea559f0b584c97"
b"a2b235b9b69b46bc6de1aed422a6f341832618bcaae2198aba388099dafb05f"
b"f0b5efecb3b0ae169a62e1c72022af50ae68af3b033c18e6eec1f7df4692c45"
b"6ccafb79cc7e08da0a5786e9816ceda651d61b4bb7b81c2783da97cea62df67"
b"af5e85991fdc13aff10fc60e06586386b96bb78d65750f542f86951e05a6d81"
b"baadbcd35a2e5cad4119923ae6a2002091a3d17017f93c52970113cdc119970"
b"b9074ca506eac91c3dd376325df4af6b3911ef267d26623a5a1c5df4a6d13f1"
b"ca381cc3081c9301d0603551d0e04160414a4fb887a13fcdeb303bbae9a1dec"
b"a72f125a541b3081990603551d2304819130818e8014a4fb887a13fcdeb303b"
b"bae9a1deca72f125a541ba16ba4693067310b3009060355040613025553310e"
b"300c060355040813055465786173310f300d0603550407130641757374696e3"
b"121301f060355040a1318496e7465726e657420576964676974732050747920"
b"4c7464311430120603550403130b5079434120445341204341820900a37352e"
b"0b2142f86300c0603551d13040530030101ff"
)
assert cert.signature_hash_algorithm is not None
public_key = cert.public_key()
assert isinstance(public_key, dsa.DSAPublicKey)
public_key.verify(
cert.signature,
cert.tbs_certificate_bytes,
cert.signature_hash_algorithm,
)
def test_verify_directly_issued_by_dsa(self, backend):
issuer_private_key = DSA_KEY_3072.private_key()
subject_private_key = DSA_KEY_2048.private_key()
ca, cert = _generate_ca_and_leaf(
issuer_private_key, subject_private_key
)
cert.verify_directly_issued_by(ca)
def test_verify_directly_issued_by_dsa_bad_sig(self, backend):
issuer_private_key = DSA_KEY_3072.private_key()
subject_private_key = DSA_KEY_2048.private_key()
ca, cert = _generate_ca_and_leaf(
issuer_private_key, subject_private_key
)
cert_bad_sig = _break_cert_sig(cert)
with pytest.raises(InvalidSignature):
cert_bad_sig.verify_directly_issued_by(ca)
@pytest.mark.supported(
only_if=lambda backend: backend.dsa_supported(),
skip_message="Does not support DSA.",
)
@pytest.mark.supported(
only_if=lambda backend: backend.signature_hash_supported(hashes.SHA1()),
skip_message="Does not support SHA-1 signature.",
)
| TestDSACertificate |
| python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enumGenNextValue1.py | {"start": 329, "end": 414} |
class ____(EnumC):
x = auto()
reveal_type(EnumD.x.value, expected_text="str")
| EnumD |
| python | kamyu104__LeetCode-Solutions | Python/course-schedule-ii.py | {"start": 968, "end": 1782} |
class ____(object):
def findOrder(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: List[int]
"""
adj = collections.defaultdict(list)
in_degree = collections.Counter()
for u, v in prerequisites:
in_degree[u] += 1
adj[v].append(u)
result = []
stk = [u for u in xrange(numCourses) if u not in in_degree]
while stk:
u = stk.pop()
result.append(u)
for v in adj[u]:
in_degree[v] -= 1
if in_degree[v] == 0:
stk.append(v)
return result if len(result) == numCourses else []
# Time: O(|V| + |E|)
# Space: O(|E|)
import collections
# dfs solution
| Solution2 |
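The `Solution2` snippet above is written for Python 2 (`xrange`). As a sanity check, here is a self-contained Python 3 sketch of the same BFS topological sort (Kahn's algorithm) applied to a tiny input; the function name is illustrative, not from the repository.

```python
import collections

def find_order(num_courses, prerequisites):
    """BFS topological sort (Kahn's algorithm), mirroring the snippet above."""
    adj = collections.defaultdict(list)
    in_degree = collections.Counter()
    for u, v in prerequisites:
        in_degree[u] += 1
        adj[v].append(u)
    # Start from courses with no prerequisites.
    stack = [u for u in range(num_courses) if u not in in_degree]
    order = []
    while stack:
        u = stack.pop()
        order.append(u)
        for v in adj[u]:
            in_degree[v] -= 1
            if in_degree[v] == 0:
                stack.append(v)
    # If a cycle exists, not every course gets scheduled.
    return order if len(order) == num_courses else []

# 1 depends on 0, 2 depends on 1: a valid order is [0, 1, 2].
assert find_order(3, [[1, 0], [2, 1]]) == [0, 1, 2]
```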
| python | kamyu104__LeetCode-Solutions | Python/double-a-number-represented-as-a-linked-list.py | {"start": 43, "end": 465} |
class ____(object):
def doubleIt(self, head):
"""
:type head: Optional[ListNode]
:rtype: Optional[ListNode]
"""
if head.val >= 5:
head = ListNode(0, head)
curr = head
while curr:
curr.val = (curr.val*2)%10
if curr.next and curr.next.val >= 5:
curr.val += 1
curr = curr.next
return head
| Solution |
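The snippet above doubles the number in a single pass by looking one node ahead: each digit is doubled modulo 10 and picks up a carry whenever the next digit is 5 or more. A small, self-contained sketch of the same rule on a plain list of digits (helper name is illustrative):

```python
def double_digits(digits):
    """Same look-ahead carry rule as the linked-list snippet above,
    applied to a plain list of decimal digits (most significant first)."""
    if digits[0] >= 5:                       # doubling adds a leading digit
        digits = [0] + digits
    out = []
    for i, d in enumerate(digits):
        doubled = (d * 2) % 10
        if i + 1 < len(digits) and digits[i + 1] >= 5:
            doubled += 1                     # carry arriving from the next digit
        out.append(doubled)
    return out

assert double_digits([1, 8, 9]) == [3, 7, 8]   # 189 * 2 = 378
assert double_digits([9, 9]) == [1, 9, 8]      # 99 * 2 = 198
```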
| python | spyder-ide__spyder | spyder/plugins/profiler/confpage.py | {"start": 315, "end": 1092} |
class ____(PluginConfigPage):
def setup_page(self):
switch_to_plugin_cb = self.create_checkbox(
_("Open profiler when profiling finishes"),
"switch_to_plugin",
tip=_(
"This option switches to the profiler plugin "
"when a profiling has ended."
),
)
slow_spin = self.create_spinbox(
_("Maximum number of items displayed with large local time"),
"",
'n_slow_children',
min_=1,
max_=1000,
step=1
)
vlayout = QVBoxLayout()
vlayout.addWidget(switch_to_plugin_cb)
vlayout.addWidget(slow_spin)
vlayout.addStretch(1)
self.setLayout(vlayout)
| ProfilerConfigPage |
| python | cookiecutter__cookiecutter | cookiecutter/main.py | {"start": 7636, "end": 8046} |
class ____: # noqa: N801
def __init__(self, repo_dir: Path | str) -> None:
self._repo_dir = f"{repo_dir}" if isinstance(repo_dir, Path) else repo_dir
def __enter__(self) -> None:
self._path = copy(sys.path)
sys.path.append(self._repo_dir)
def __exit__(self, _type, _value, _traceback): # type: ignore[no-untyped-def]
sys.path = self._path
| _patch_import_path_for_repo |
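The `_patch_import_path_for_repo` class above temporarily appends the repository directory to `sys.path` and restores the previous path on exit. A minimal generator-based sketch of the same idea, using an illustrative name rather than cookiecutter's API:

```python
import sys
from contextlib import contextmanager
from copy import copy

@contextmanager
def temporary_sys_path(repo_dir: str):
    """Temporarily make `repo_dir` importable, restoring sys.path afterwards."""
    saved = copy(sys.path)        # snapshot the current search path
    sys.path.append(repo_dir)
    try:
        yield
    finally:
        sys.path = saved          # restore even if the body raised

before = list(sys.path)
with temporary_sys_path("/tmp/some_template_repo"):
    assert "/tmp/some_template_repo" in sys.path
assert sys.path == before         # original search path is back
```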
| python | dagster-io__dagster | python_modules/dagster/dagster_tests/storage_tests/test_defs_state_storage.py | {"start": 333, "end": 663} |
class ____(TestDefsStateStorage):
"""Tests the default state storage implementation."""
__test__ = True
@pytest.fixture(name="storage", scope="function")
def state_storage(self):
with instance_for_test() as instance:
yield check.not_none(instance.defs_state_storage)
| TestDefaultDefsStateStorage |
| python | apache__airflow | providers/tableau/tests/unit/tableau/operators/test_tableau.py | {"start": 1070, "end": 7754} |
class ____:
"""
Test class for TableauOperator
"""
def setup_method(self):
self.mocked_workbooks = []
self.mock_datasources = []
for i in range(3):
mock_workbook = Mock()
mock_workbook.id = i
mock_workbook.name = f"wb_{i}"
self.mocked_workbooks.append(mock_workbook)
mock_datasource = Mock()
mock_datasource.id = i
mock_datasource.name = f"ds_{i}"
self.mock_datasources.append(mock_datasource)
self.kwargs = {
"site_id": "test_site",
"task_id": "task",
"dag": None,
"match_with": "name",
"method": "refresh",
}
@patch("airflow.providers.tableau.operators.tableau.TableauHook")
def test_execute_workbooks(self, mock_tableau_hook):
"""
Test Execute Workbooks
"""
mock_tableau_hook.get_all = Mock(return_value=self.mocked_workbooks)
mock_tableau_hook.return_value.__enter__ = Mock(return_value=mock_tableau_hook)
operator = TableauOperator(blocking_refresh=False, find="wb_2", resource="workbooks", **self.kwargs)
job_id = operator.execute(context={})
mock_tableau_hook.server.workbooks.refresh.assert_called_once_with(2)
assert mock_tableau_hook.server.workbooks.refresh.return_value.id == job_id
@patch("airflow.providers.tableau.operators.tableau.TableauHook")
def test_execute_workbooks_blocking(self, mock_tableau_hook):
"""
Test execute workbooks blocking
"""
mock_signed_in = [False]
def mock_hook_enter():
mock_signed_in[0] = True
return mock_tableau_hook
def mock_hook_exit(exc_type, exc_val, exc_tb):
mock_signed_in[0] = False
def mock_wait_for_state(job_id, target_state, check_interval):
if not mock_signed_in[0]:
raise Exception("Not signed in")
return True
mock_tableau_hook.return_value.__enter__ = Mock(side_effect=mock_hook_enter)
mock_tableau_hook.return_value.__exit__ = Mock(side_effect=mock_hook_exit)
mock_tableau_hook.wait_for_state = Mock(side_effect=mock_wait_for_state)
mock_tableau_hook.get_all = Mock(return_value=self.mocked_workbooks)
mock_tableau_hook.server.jobs.get_by_id = Mock(
return_value=Mock(finish_code=TableauJobFinishCode.SUCCESS.value)
)
operator = TableauOperator(find="wb_2", resource="workbooks", **self.kwargs)
job_id = operator.execute(context={})
mock_tableau_hook.server.workbooks.refresh.assert_called_once_with(2)
assert mock_tableau_hook.server.workbooks.refresh.return_value.id == job_id
mock_tableau_hook.wait_for_state.assert_called_once_with(
job_id=job_id, check_interval=20, target_state=TableauJobFinishCode.SUCCESS
)
@patch("airflow.providers.tableau.operators.tableau.TableauHook")
def test_execute_missing_workbook(self, mock_tableau_hook):
"""
Test execute missing workbook
"""
mock_tableau_hook.get_all = Mock(return_value=self.mocked_workbooks)
mock_tableau_hook.return_value.__enter__ = Mock(return_value=mock_tableau_hook)
operator = TableauOperator(find="test", resource="workbooks", **self.kwargs)
with pytest.raises(AirflowException):
operator.execute({})
@patch("airflow.providers.tableau.operators.tableau.TableauHook")
def test_execute_datasources(self, mock_tableau_hook):
"""
Test Execute datasources
"""
mock_tableau_hook.get_all = Mock(return_value=self.mock_datasources)
mock_tableau_hook.return_value.__enter__ = Mock(return_value=mock_tableau_hook)
operator = TableauOperator(blocking_refresh=False, find="ds_2", resource="datasources", **self.kwargs)
job_id = operator.execute(context={})
mock_tableau_hook.server.datasources.refresh.assert_called_once_with(2)
assert mock_tableau_hook.server.datasources.refresh.return_value.id == job_id
@patch("airflow.providers.tableau.operators.tableau.TableauHook")
def test_execute_datasources_blocking(self, mock_tableau_hook):
"""
Test execute datasources blocking
"""
mock_signed_in = [False]
def mock_hook_enter():
mock_signed_in[0] = True
return mock_tableau_hook
def mock_hook_exit(exc_type, exc_val, exc_tb):
mock_signed_in[0] = False
def mock_wait_for_state(job_id, target_state, check_interval):
if not mock_signed_in[0]:
raise Exception("Not signed in")
return True
mock_tableau_hook.return_value.__enter__ = Mock(side_effect=mock_hook_enter)
mock_tableau_hook.return_value.__exit__ = Mock(side_effect=mock_hook_exit)
mock_tableau_hook.wait_for_state = Mock(side_effect=mock_wait_for_state)
mock_tableau_hook.get_all = Mock(return_value=self.mock_datasources)
operator = TableauOperator(find="ds_2", resource="datasources", **self.kwargs)
job_id = operator.execute(context={})
mock_tableau_hook.server.datasources.refresh.assert_called_once_with(2)
assert mock_tableau_hook.server.datasources.refresh.return_value.id == job_id
mock_tableau_hook.wait_for_state.assert_called_once_with(
job_id=job_id, check_interval=20, target_state=TableauJobFinishCode.SUCCESS
)
@patch("airflow.providers.tableau.operators.tableau.TableauHook")
def test_execute_missing_datasource(self, mock_tableau_hook):
"""
Test execute missing datasource
"""
mock_tableau_hook.get_all = Mock(return_value=self.mock_datasources)
mock_tableau_hook.return_value.__enter__ = Mock(return_value=mock_tableau_hook)
operator = TableauOperator(find="test", resource="datasources", **self.kwargs)
with pytest.raises(AirflowException):
operator.execute({})
def test_execute_unavailable_resource(self):
"""
Test execute unavailable resource
"""
operator = TableauOperator(resource="test", find="test", **self.kwargs)
with pytest.raises(AirflowException):
operator.execute({})
def test_get_resource_id(self):
"""
Test get resource id
"""
resource_id = "res_id"
operator = TableauOperator(resource="task", find=resource_id, method="run", task_id="t", dag=None)
assert operator._get_resource_id(resource_id) == resource_id
| TestTableauOperator |
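The tests above repeatedly wire a patched hook so that it behaves as a context manager and yields the mock itself inside the `with` block, which lets the test assert on calls made through it. A stripped-down sketch of that `unittest.mock` pattern, with purely illustrative names:

```python
from unittest.mock import MagicMock

def run_with_hook(hook_cls):
    """Open the hook as a context manager and trigger a refresh,
    roughly as the operator under test does."""
    with hook_cls() as hook:
        return hook.server.workbooks.refresh("wb_2")

mock_hook = MagicMock()
# Make the instance's __enter__ hand back the mock itself.
mock_hook.return_value.__enter__.return_value = mock_hook

result = run_with_hook(mock_hook)
mock_hook.server.workbooks.refresh.assert_called_once_with("wb_2")
assert result is mock_hook.server.workbooks.refresh.return_value
```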
| python | kamyu104__LeetCode-Solutions | Python/transformed-array.py | {"start": 37, "end": 260} |
class ____(object):
def constructTransformedArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
return [nums[(i+nums[i])%len(nums)] for i in xrange(len(nums))]
| Solution |
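The one-line `Solution` above relies on Python's `%` returning a non-negative result even when `i + nums[i]` is negative, so backward moves wrap around the array without extra handling. A Python 3 sketch with a small worked example (function name is illustrative):

```python
def construct_transformed_array(nums):
    # Python's % always yields a value in [0, len(nums)), so negative
    # moves wrap around without any special casing.
    n = len(nums)
    return [nums[(i + nums[i]) % n] for i in range(n)]

assert construct_transformed_array([3, -2, 1, 1]) == [1, 1, 1, 3]
```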
| python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_extra_links.py | {"start": 2146, "end": 11488} |
class ____:
dag_id = "TEST_DAG_ID"
dag_run_id = "TEST_DAG_RUN_ID"
task_single_link = "TEST_SINGLE_LINK"
task_multiple_links = "TEST_MULTIPLE_LINKS"
task_mapped = "TEST_MAPPED_TASK"
default_time = timezone.datetime(2020, 1, 1)
plugin_name = "test_plugin"
@staticmethod
def _clear_db():
clear_db_dags()
clear_db_runs()
clear_db_xcom()
@pytest.fixture(autouse=True)
def setup(self, test_client, dag_maker, request, session) -> None:
"""
Setup extra links for testing.
:return: Dictionary with event extra link names with their corresponding link as the links.
"""
self._clear_db()
self.dag = self._create_dag(dag_maker)
dag_bag = DBDagBag()
test_client.app.dependency_overrides[dag_bag_from_app] = lambda: dag_bag
dag_maker.create_dagrun(
run_id=self.dag_run_id,
logical_date=self.default_time,
run_type=DagRunType.MANUAL,
state=DagRunState.SUCCESS,
data_interval=(timezone.datetime(2020, 1, 1), timezone.datetime(2020, 1, 2)),
run_after=timezone.datetime(2020, 1, 2),
triggered_by=DagRunTriggeredByType.TEST,
)
def teardown_method(self) -> None:
self._clear_db()
def _create_dag(self, dag_maker):
with dag_maker(
dag_id=self.dag_id, schedule=None, default_args={"start_date": self.default_time}, serialized=True
) as dag:
CustomOperator(task_id=self.task_single_link, bash_command="TEST_LINK_VALUE")
CustomOperator(
task_id=self.task_multiple_links, bash_command=["TEST_LINK_VALUE_1", "TEST_LINK_VALUE_2"]
)
_ = CustomOperator.partial(task_id=self.task_mapped).expand(
bash_command=["TEST_LINK_VALUE_1", "TEST_LINK_VALUE_2"]
)
return dag
@pytest.mark.parametrize(
("url", "expected_status_code", "expected_response"),
[
pytest.param(
"/dags/INVALID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_LINK/links",
404,
{"detail": "The Dag with ID: `INVALID` was not found"},
id="missing_dag",
),
pytest.param(
"/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/INVALID/links",
404,
{"detail": "Task with ID = INVALID not found"},
id="missing_task",
),
],
)
def test_should_respond_404(self, test_client, url, expected_status_code, expected_response):
response = test_client.get(url)
assert response.status_code == expected_status_code
assert response.json() == expected_response
def test_should_respond_200(self, test_client, session):
XCom.set(
key="search_query",
value="TEST_LINK_VALUE",
task_id=self.task_single_link,
dag_id=self.dag_id,
run_id=self.dag_run_id,
)
XCom.set(
key="_link_CustomOpLink",
value="http://google.com/custom_base_link?search=TEST_LINK_VALUE",
task_id=self.task_single_link,
dag_id=self.dag_id,
run_id=self.dag_run_id,
)
response = test_client.get(
f"/dags/{self.dag_id}/dagRuns/{self.dag_run_id}/taskInstances/{self.task_single_link}/links",
)
assert response.status_code == 200
assert (
response.json()
== ExtraLinkCollectionResponse(
extra_links={"Google Custom": "http://google.com/custom_base_link?search=TEST_LINK_VALUE"},
total_entries=1,
).model_dump()
)
def test_should_respond_200_missing_xcom(self, test_client):
response = test_client.get(
f"/dags/{self.dag_id}/dagRuns/{self.dag_run_id}/taskInstances/{self.task_single_link}/links",
)
assert response.status_code == 200
assert (
response.json()
== ExtraLinkCollectionResponse(extra_links={"Google Custom": None}, total_entries=1).model_dump()
)
def test_should_respond_200_multiple_links(self, test_client, session):
XCom.set(
key="search_query",
value=["TEST_LINK_VALUE_1", "TEST_LINK_VALUE_2"],
task_id=self.task_multiple_links,
dag_id=self.dag.dag_id,
run_id=self.dag_run_id,
session=session,
)
XCom.set(
key="bigquery_1",
value="https://console.cloud.google.com/bigquery?j=TEST_LINK_VALUE_1",
task_id=self.task_multiple_links,
dag_id=self.dag_id,
run_id=self.dag_run_id,
session=session,
)
XCom.set(
key="bigquery_2",
value="https://console.cloud.google.com/bigquery?j=TEST_LINK_VALUE_2",
task_id=self.task_multiple_links,
dag_id=self.dag_id,
run_id=self.dag_run_id,
session=session,
)
session.commit()
response = test_client.get(
f"/dags/{self.dag_id}/dagRuns/{self.dag_run_id}/taskInstances/{self.task_multiple_links}/links",
)
assert response.status_code == 200
assert (
response.json()
== ExtraLinkCollectionResponse(
extra_links={
"BigQuery Console #1": "https://console.cloud.google.com/bigquery?j=TEST_LINK_VALUE_1",
"BigQuery Console #2": "https://console.cloud.google.com/bigquery?j=TEST_LINK_VALUE_2",
},
total_entries=2,
).model_dump()
)
def test_should_respond_200_multiple_links_missing_xcom(self, test_client):
response = test_client.get(
f"/dags/{self.dag_id}/dagRuns/{self.dag_run_id}/taskInstances/{self.task_multiple_links}/links",
)
assert response.status_code == 200
assert (
response.json()
== ExtraLinkCollectionResponse(
extra_links={"BigQuery Console #1": None, "BigQuery Console #2": None},
total_entries=2,
).model_dump()
)
@pytest.mark.mock_plugin_manager(plugins=[AirflowPluginWithOperatorLinks])
def test_should_respond_200_support_plugins(self, test_client):
response = test_client.get(
f"/dags/{self.dag_id}/dagRuns/{self.dag_run_id}/taskInstances/{self.task_single_link}/links",
)
assert response, response.status_code == 200
assert (
response.json()
== ExtraLinkCollectionResponse(
extra_links={
"Google Custom": None,
"Google": "https://www.google.com",
"S3": ("https://s3.amazonaws.com/airflow-logs/TEST_DAG_ID/TEST_SINGLE_LINK/"),
},
total_entries=3,
).model_dump()
)
def test_should_respond_200_mapped_task_instance(self, test_client, session):
for map_index, value in enumerate(["TEST_LINK_VALUE_1", "TEST_LINK_VALUE_2"]):
XCom.set(
key="search_query",
value=value,
task_id=self.task_mapped,
dag_id=self.dag_id,
run_id=self.dag_run_id,
map_index=map_index,
)
XCom.set(
key="_link_CustomOpLink",
value=f"http://google.com/custom_base_link?search={value}",
task_id=self.task_mapped,
dag_id=self.dag_id,
run_id=self.dag_run_id,
map_index=map_index,
)
session.commit()
response = test_client.get(
f"/dags/{self.dag_id}/dagRuns/{self.dag_run_id}/taskInstances/{self.task_mapped}/links",
params={"map_index": map_index},
)
assert response.status_code == 200
assert (
response.json()
== ExtraLinkCollectionResponse(
extra_links={"Google Custom": f"http://google.com/custom_base_link?search={value}"},
total_entries=1,
).model_dump()
)
def test_should_respond_401_unauthenticated(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(
f"/dags/{self.dag_id}/dagRuns/{self.dag_run_id}/taskInstances/{self.task_single_link}/links",
)
assert response.status_code == 401
def test_should_respond_403_unauthorized(self, unauthorized_test_client):
response = unauthorized_test_client.get(
f"/dags/{self.dag_id}/dagRuns/{self.dag_run_id}/taskInstances/{self.task_single_link}/links",
)
assert response.status_code == 403
def test_should_respond_404_invalid_map_index(self, test_client):
response = test_client.get(
f"/dags/{self.dag_id}/dagRuns/{self.dag_run_id}/taskInstances/{self.task_mapped}/links",
params={"map_index": 4},
)
assert response.status_code == 404
assert response.json() == {"detail": "TaskInstance not found"}
| TestGetExtraLinks |
| python | pytorch__pytorch | test/test_cuda_multigpu.py | {"start": 967, "end": 50725} |
class ____(TestCase):
FIFTY_MIL_CYCLES = 50000000
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(
lambda: collections.defaultdict(int)
)
for segment in snapshot:
expandable = segment["is_expandable"]
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
if not expandable:
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment[
"allocated_size"
]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
expected["requested_bytes.all.current"] += segment["requested_size"]
expected["requested_bytes." + pool_str + ".current"] += segment[
"requested_size"
]
sum_requested = 0
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
sum_requested += block["requested_size"]
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split and not expandable:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block[
"size"
]
self.assertEqual(sum_requested, segment["requested_size"])
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize("cuda")
torch.cuda.synchronize("cuda:0")
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device("cuda:0"))
if TEST_MULTIGPU:
torch.cuda.synchronize("cuda:1")
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device("cuda:1"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
# they can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
# emptying cache may happen (due to allocation or empty_cache), so
# we can't assert new_c >= last_c
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
stat_key_n_sync = "num_sync_all_streams"
stat_key_n_alloc = "num_device_alloc"
stat_key_n_free = "num_device_free"
if empty_cache:
num_sync_1 = torch.cuda.memory_stats(device).get(stat_key_n_sync, -1)
self.assertGreaterEqual(num_sync_1, 0)
num_alloc_1 = torch.cuda.memory_stats(device).get(stat_key_n_alloc, -1)
# if current memory usage is greater than zero we must have
# allocated something
self.assertGreaterEqual(num_alloc_1, 0 if new_m == 0 else 1)
num_free_1 = torch.cuda.memory_stats(device).get(stat_key_n_free, -1)
self.assertGreaterEqual(num_free_1, 0)
# empty_cache will enforce the call of release_cached_blocks
torch.cuda.empty_cache()
num_sync_2 = torch.cuda.memory_stats(device).get(stat_key_n_sync, -1)
self.assertEqual(num_sync_1 + 1, num_sync_2)
num_alloc_2 = torch.cuda.memory_stats(device).get(stat_key_n_alloc, -1)
self.assertGreaterEqual(num_alloc_2, num_alloc_1)
num_free_2 = torch.cuda.memory_stats(device).get(stat_key_n_free, -1)
self.assertGreaterEqual(num_free_2, num_free_1)
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
@unittest.skipIf(TEST_CUDAMALLOCASYNC, "temporarily disabled")
@serialTest()
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
@unittest.skipIf(TEST_CUDAMALLOCASYNC, "temporarily disabled")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
        # advance a generator with an end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device="cuda:0", N=35)
gen1 = self._test_memory_stats_generator(
self, device=torch.device("cuda:1"), N=35
)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(
self, device=torch.device("cuda:1"), N=35
)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = torch.inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCudaMultiGPU.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCudaMultiGPU.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device("cuda:0")
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device("cuda:1")
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device="cuda")
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b"cuda:0", b"cuda:9"))
msg = r"Attempting to deserialize object on CUDA device 9"
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == "cuda:1":
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={"cuda:1": "cuda:0"})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device="cuda:1").storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ["byte", "char", "short", "int", "long", "half", "double"]:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device("cuda:0")
d1 = torch.device("cuda:1")
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device("cpu"))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device("cuda:0")
d1 = torch.device("cuda:1")
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device("cpu"))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device("cuda:0")
d1 = torch.device("cuda:1")
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device("cuda:0"))
self.assertEqual(e0.device, torch.device("cuda:0"))
self.assertEqual(s1.device, torch.device("cuda:1"))
self.assertEqual(e1.device, torch.device("cuda:1"))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device("cuda:0"))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device("cuda:1"))
with torch.cuda.device(1):
self.assertEqual(torch.cuda.current_stream().device, torch.device("cuda:1"))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device("cuda:0")
d1 = torch.device("cuda:1")
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCudaMultiGPU.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device("cuda:0")
d1 = torch.device("cuda:1")
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device("cuda:0"), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device("cuda:1"), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device("cuda:1"):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCudaMultiGPU.FIFTY_MIL_CYCLES))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [
TestCudaMultiGPU._stream_synchronize,
TestCudaMultiGPU._event_synchronize,
TestCudaMultiGPU._event_wait,
]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCudaMultiGPU._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p),
)
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device("cuda:0"):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCudaMultiGPU.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
# Therefore, this test uses relative comparisons, checking if the
# sum of parent and child threads execution time is greater than the
            # real execution time by at least 30%.
self.assertGreater(parent_time + child_time, total_time * 1.3)
# This test is flaky for ROCm, see issue #62602
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device("cuda:0")
d1 = torch.device("cuda:1")
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCudaMultiGPU.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device("cuda:0")
d1 = torch.device("cuda:1")
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCudaMultiGPU.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device("cuda:0")
d1 = torch.device("cuda:1")
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCudaMultiGPU.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCudaMultiGPU.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
ext_stream = torch.cuda.get_stream_from_external(stream_v, device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.ExternalStream(stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
ext_stream = torch.cuda.get_stream_from_external(stream_v, device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being reused
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(1000 * cycles_per_ms)) # delay the copy by 1s
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg="allocation reused too soon")
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_rng_state_offset(self):
before = torch.cuda.get_rng_state()
torch.cuda._set_rng_state_offset(100)
offset = torch.cuda._get_rng_state_offset()
torch.cuda.set_rng_state(before)
self.assertEqual(offset, 100)
# Verifies that mem_get_info works, including when called for a different device
def test_mem_get_info(self):
def _test(device: Union[str, int, torch.device]):
# Prevent PyTorch from reusing the allocated memory
torch.cuda.empty_cache()
torch.cuda.synchronize()
before_free_bytes, before_available_bytes = torch.cuda.mem_get_info(device)
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
t = torch.randn(1024 * 1024 * 8, device=device) # noqa: F841
if IS_JETSON:
# w/o syncing, mem_get_info will run before memory allocated has actually increased.
# This race condition causes consistent failure
torch.cuda.synchronize()
after_free_bytes, after_available_bytes = torch.cuda.mem_get_info(device)
self.assertLess(after_free_bytes, before_free_bytes)
self.assertEqual(before_available_bytes, after_available_bytes)
# Test calls with different device representations
_test(0)
_test(torch.device("cuda"))
_test(torch.device("cuda:0"))
_test("cuda")
_test("cuda:0")
if TEST_MULTIGPU:
_test(1)
_test(torch.device("cuda:1"))
_test("cuda:1")
# Test that wrap_with_cuda_memory_check successfully detects leak
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:0")))
no_leak()
regex = r"CUDA driver API confirmed .+ on device 0.+"
if IS_JETSON:
try:
leak_gpu0()
except RuntimeError as e:
import re
assert re.match(regex, str(e)), str(e) + "\n does not match: \n" + regex
else:
# assertRaisesRegex does not pass with Python for Jetson,
# even though the RuntimeError matches regex using re.match
with self.assertRaisesRegex(RuntimeError, regex):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:1")))
with self.assertRaisesRegex(
RuntimeError, r"CUDA driver API confirmed .+ on device 1.+"
):
leak_gpu1()
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.0
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.0
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call(
[
sys.executable,
"-c",
"""\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
""",
]
)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.amp.GradScaler(device="cuda", init_scale=2.0)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (
t1.clone(),
(t0.clone(), t1.clone()),
[t0.clone(), (t1.clone(), t0.clone())],
)
outputs = scaler.scale(outputs)
self.assertTrue(
outputs[0] == 8.0
and outputs[1][0] == 8.0
and outputs[1][1] == 8.0
and outputs[2][0] == 8.0
and outputs[2][1][0] == 8.0
and outputs[2][1][1] == 8.0
)
self.assertTrue(scaler._scale.device == t1.device)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
(
mod_control0,
mod_scaling0,
opt_control0,
opt_scaling0,
data,
loss_fn,
skip_iter,
) = _create_scaling_case()
(
mod_control1,
mod_scaling1,
opt_control1,
opt_scaling1,
) = _create_scaling_models_optimizers(device=dev1)
scaler = torch.amp.GradScaler(
device="cuda",
init_scale=128.0,
growth_factor=2.0,
enabled=enabled,
growth_interval=1,
)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(
0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1)
)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float("inf"))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(
len(scaler._found_inf_per_device(optimizer0)) == 1
)
self.assertTrue(
len(scaler._found_inf_per_device(optimizer1)) == 1
)
self.assertTrue(
scaler._found_inf_per_device(optimizer0)[dev0].item()
== 0.0
)
self.assertTrue(
scaler._found_inf_per_device(optimizer1)[dev1].item()
== float(i == skip_iter)
)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(
scaler.get_scale()
== (
128.0
* scaler.get_growth_factor() ** 3
* scaler.get_backoff_factor() ** 1
)
if enabled
else 1.0
)
            # Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(
chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters()),
):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
_x = torch.ones(10, device="cuda:0")
self.assertGreater(memory_allocated(0), current_alloc[0])
self.assertTrue(
all(
memory_allocated(torch.cuda.device(idx)) == current_alloc[idx]
for idx in range(1, device_count)
)
)
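# A minimal standalone sketch (not part of the test suite above) of the
# dict-key behaviour that test_grad_scaling_device_as_key depends on:
# distinct torch.device objects naming the same device hash and compare
# equal, so they collapse to a single dictionary key. Constructing
# torch.device objects does not require a CUDA runtime.
import torch
_d = {}
_d[torch.device("cuda:0")] = "first"
_d[torch.device("cuda:0")] = "second"
assert len(_d) == 1 and _d[torch.device("cuda:0")] == "second"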
|
TestCudaMultiGPU
|
python
|
PyCQA__pylint
|
examples/deprecation_checker.py
|
{
"start": 1936,
"end": 4245
}
|
class ____(DeprecatedMixin, BaseChecker):
"""Class implementing deprecation checker."""
    # DeprecatedMixin is a mixin class implementing the logic for finding deprecated methods and functions.
    # The list of deprecated methods/functions is defined by the implementing class via the
    # deprecated_methods callback. DeprecatedMixin overrides attributes of BaseChecker and hence must
    # be specified *before* BaseChecker in the list of base classes.
# The name defines a custom section of the config for this checker.
name = "deprecated"
# Register messages emitted by the checker.
msgs = {
**DeprecatedMixin.DEPRECATED_METHOD_MESSAGE,
**DeprecatedMixin.DEPRECATED_ARGUMENT_MESSAGE,
}
def deprecated_methods(self) -> set[str]:
"""Callback method called by DeprecatedMixin for every method/function found in the code.
Returns:
collections.abc.Container of deprecated function/method names.
"""
return {"mymodule.deprecated_function", "mymodule.MyClass.deprecated_method"}
def deprecated_arguments(self, method: str) -> tuple[tuple[int | None, str], ...]:
"""Callback returning the deprecated arguments of method/function.
Returns:
collections.abc.Iterable in form:
            ((POSITION1, PARAM1), (POSITION2, PARAM2), ...)
where
* POSITIONX - position of deprecated argument PARAMX in function definition.
If argument is keyword-only, POSITIONX should be None.
* PARAMX - name of the deprecated argument.
"""
if method == "mymodule.myfunction":
# myfunction() has two deprecated arguments:
# * deprecated_arg1 defined at 2nd position and
# * deprecated_arg2 defined at 5th position.
return ((2, "deprecated_arg1"), (5, "deprecated_arg2"))
if method == "mymodule.MyClass.mymethod":
# mymethod() has two deprecated arguments:
# * deprecated1 defined at 2nd position and
# * deprecated2 defined at 4th position.
return ((2, "deprecated1"), (4, "deprecated2"))
return ()
def register(linter: PyLinter) -> None:
linter.register_checker(DeprecationChecker(linter))
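# A hedged sketch of hypothetical client code this example checker would flag.
# It cannot run as-is: "mymodule" and the call sites are assumptions derived
# from the callbacks above, not something shipped with pylint; the reports are
# expected to be DeprecatedMixin's deprecated-method / deprecated-argument messages.
from mymodule import MyClass, deprecated_function, myfunction
deprecated_function()                 # flagged: deprecated function
MyClass().deprecated_method()         # flagged: deprecated method
myfunction(1, 2, deprecated_arg1=3)   # flagged: deprecated argument passed by keyword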
|
DeprecationChecker
|
python
|
chroma-core__chroma
|
chromadb/api/types.py
|
{
"start": 59033,
"end": 59127
}
|
class ____:
enabled: bool
config: IntInvertedIndexConfig
@dataclass
|
IntInvertedIndexType
|
python
|
rapidsai__cudf
|
python/cudf/cudf/core/join/_join_helpers.py
|
{
"start": 1140,
"end": 1363
}
|
class ____(_Indexer):
def get(self, obj: DataFrame) -> ColumnBase:
return obj._data[self.name]
def set(self, obj: DataFrame, value: ColumnBase):
obj._data.set_by_label(self.name, value)
|
_ColumnIndexer
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 838725,
"end": 839464
}
|
class ____(ValueChannelMixin, core.PositionValueDef):
"""
ThetaValue schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : dict, float, :class:`ExprRef`, Literal['height', 'width']
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "theta"
def __init__(self, value, **kwds):
super().__init__(value=value, **kwds)
@with_property_setters
|
ThetaValue
|
python
|
PyCQA__pyflakes
|
pyflakes/test/test_imports.py
|
{
"start": 265,
"end": 3814
}
|
class ____(TestCase):
def test_import_basic(self):
binding = Importation('a', None, 'a')
assert binding.source_statement == 'import a'
assert str(binding) == 'a'
def test_import_as(self):
binding = Importation('c', None, 'a')
assert binding.source_statement == 'import a as c'
assert str(binding) == 'a as c'
def test_import_submodule(self):
binding = SubmoduleImportation('a.b', None)
assert binding.source_statement == 'import a.b'
assert str(binding) == 'a.b'
def test_import_submodule_as(self):
# A submodule import with an as clause is not a SubmoduleImportation
binding = Importation('c', None, 'a.b')
assert binding.source_statement == 'import a.b as c'
assert str(binding) == 'a.b as c'
def test_import_submodule_as_source_name(self):
binding = Importation('a', None, 'a.b')
assert binding.source_statement == 'import a.b as a'
assert str(binding) == 'a.b as a'
def test_importfrom_relative(self):
binding = ImportationFrom('a', None, '.', 'a')
assert binding.source_statement == 'from . import a'
assert str(binding) == '.a'
def test_importfrom_relative_parent(self):
binding = ImportationFrom('a', None, '..', 'a')
assert binding.source_statement == 'from .. import a'
assert str(binding) == '..a'
def test_importfrom_relative_with_module(self):
binding = ImportationFrom('b', None, '..a', 'b')
assert binding.source_statement == 'from ..a import b'
assert str(binding) == '..a.b'
def test_importfrom_relative_with_module_as(self):
binding = ImportationFrom('c', None, '..a', 'b')
assert binding.source_statement == 'from ..a import b as c'
assert str(binding) == '..a.b as c'
def test_importfrom_member(self):
binding = ImportationFrom('b', None, 'a', 'b')
assert binding.source_statement == 'from a import b'
assert str(binding) == 'a.b'
def test_importfrom_submodule_member(self):
binding = ImportationFrom('c', None, 'a.b', 'c')
assert binding.source_statement == 'from a.b import c'
assert str(binding) == 'a.b.c'
def test_importfrom_member_as(self):
binding = ImportationFrom('c', None, 'a', 'b')
assert binding.source_statement == 'from a import b as c'
assert str(binding) == 'a.b as c'
def test_importfrom_submodule_member_as(self):
binding = ImportationFrom('d', None, 'a.b', 'c')
assert binding.source_statement == 'from a.b import c as d'
assert str(binding) == 'a.b.c as d'
def test_importfrom_star(self):
binding = StarImportation('a.b', None)
assert binding.source_statement == 'from a.b import *'
assert str(binding) == 'a.b.*'
def test_importfrom_star_relative(self):
binding = StarImportation('.b', None)
assert binding.source_statement == 'from .b import *'
assert str(binding) == '.b.*'
def test_importfrom_future(self):
binding = FutureImportation('print_function', None, None)
assert binding.source_statement == 'from __future__ import print_function'
assert str(binding) == '__future__.print_function'
def test_unusedImport_underscore(self):
"""
The magic underscore var should be reported as unused when used as an
import alias.
"""
self.flakes('import fu as _', m.UnusedImport)
|
TestImportationObject
|
python
|
tornadoweb__tornado
|
tornado/test/httpserver_test.py
|
{
"start": 9644,
"end": 11066
}
|
class ____(RequestHandler):
def prepare(self):
self.errors = {} # type: Dict[str, str]
fields = [
("method", str),
("uri", str),
("version", str),
("remote_ip", str),
("protocol", str),
("host", str),
("path", str),
("query", str),
]
for field, expected_type in fields:
self.check_type(field, getattr(self.request, field), expected_type)
self.check_type("header_key", list(self.request.headers.keys())[0], str)
self.check_type("header_value", list(self.request.headers.values())[0], str)
self.check_type("cookie_key", list(self.request.cookies.keys())[0], str)
self.check_type(
"cookie_value", list(self.request.cookies.values())[0].value, str
)
# secure cookies
self.check_type("arg_key", list(self.request.arguments.keys())[0], str)
self.check_type("arg_value", list(self.request.arguments.values())[0][0], bytes)
def post(self):
self.check_type("body", self.request.body, bytes)
self.write(self.errors)
def get(self):
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = f"expected {expected_type}, got {actual_type}"
|
TypeCheckHandler
|
python
|
doocs__leetcode
|
solution/3300-3399/3319.K-th Largest Perfect Subtree Size in Binary Tree/Solution.py
|
{
"start": 192,
"end": 726
}
|
class ____:
def kthLargestPerfectSubtree(self, root: Optional[TreeNode], k: int) -> int:
def dfs(root: Optional[TreeNode]) -> int:
if root is None:
return 0
l, r = dfs(root.left), dfs(root.right)
if l < 0 or l != r:
return -1
cnt = l + r + 1
nums.append(cnt)
return cnt
nums = []
dfs(root)
if len(nums) < k:
return -1
nums.sort(reverse=True)
return nums[k - 1]
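# Quick usage sketch; it assumes the standard LeetCode TreeNode(val, left, right)
# definition that accompanies this solution file.
_root = TreeNode(1, TreeNode(2), TreeNode(3))           # a perfect tree of 3 nodes
print(Solution().kthLargestPerfectSubtree(_root, 1))    # 3  (the whole tree)
print(Solution().kthLargestPerfectSubtree(_root, 3))    # 1  (a single leaf)
print(Solution().kthLargestPerfectSubtree(_root, 4))    # -1 (only 3 perfect subtrees)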
|
Solution
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/req/req_file.py
|
{
"start": 14931,
"end": 20324
}
|
class ____(Exception):
def __init__(self, msg: str) -> None:
self.msg = msg
def build_parser() -> optparse.OptionParser:
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self: Any, msg: str) -> "NoReturn":
raise OptionParsingError(msg)
# NOTE: mypy disallows assigning to a method
# https://github.com/python/mypy/issues/2427
parser.exit = parser_exit # type: ignore
return parser
def join_lines(lines_enum: ReqFileLines) -> ReqFileLines:
"""Joins a line ending in '\' with the previous line (except when following
comments). The joined line takes on the index of the first line.
"""
primary_line_number = None
new_line: List[str] = []
for line_number, line in lines_enum:
if not line.endswith("\\") or COMMENT_RE.match(line):
if COMMENT_RE.match(line):
# this ensures comments are always matched later
line = " " + line
if new_line:
new_line.append(line)
assert primary_line_number is not None
yield primary_line_number, "".join(new_line)
new_line = []
else:
yield line_number, line
else:
if not new_line:
primary_line_number = line_number
new_line.append(line.strip("\\"))
# last line contains \
if new_line:
assert primary_line_number is not None
yield primary_line_number, "".join(new_line)
# TODO: handle space after '\'.
def ignore_comments(lines_enum: ReqFileLines) -> ReqFileLines:
"""
Strips comments and filter empty lines.
"""
for line_number, line in lines_enum:
line = COMMENT_RE.sub("", line)
line = line.strip()
if line:
yield line_number, line
def expand_env_variables(lines_enum: ReqFileLines) -> ReqFileLines:
"""Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discussion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore).
"""
for line_number, line in lines_enum:
for env_var, var_name in ENV_VAR_RE.findall(line):
value = os.getenv(var_name)
if not value:
continue
line = line.replace(env_var, value)
yield line_number, line
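def _demo_expand_env_variables() -> None:
    # Illustration only (not part of pip): the variable name and value are
    # made up to show the ${VAR} expansion rule documented above.
    os.environ["MY_INDEX_URL"] = "https://example.org/simple"
    expanded = list(expand_env_variables([(1, "--index-url ${MY_INDEX_URL}")]))
    assert expanded == [(1, "--index-url https://example.org/simple")]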
def get_file_content(url: str, session: "PipSession") -> Tuple[str, str]:
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode.
Respects # -*- coding: declarations on the retrieved files.
:param url: File path or url.
:param session: PipSession instance.
"""
scheme = urllib.parse.urlsplit(url).scheme
# Pip has special support for file:// URLs (LocalFSAdapter).
if scheme in ["http", "https", "file"]:
# Delay importing heavy network modules until absolutely necessary.
from pipenv.patched.pip._internal.network.utils import raise_for_status
resp = session.get(url)
raise_for_status(resp)
return resp.url, resp.text
# Assume this is a bare path.
try:
with open(url, "rb") as f:
raw_content = f.read()
except OSError as exc:
raise InstallationError(f"Could not open requirements file: {exc}")
content = _decode_req_file(raw_content, url)
return url, content
def _decode_req_file(data: bytes, url: str) -> str:
for bom, encoding in BOMS:
if data.startswith(bom):
return data[len(bom) :].decode(encoding)
for line in data.split(b"\n")[:2]:
if line[0:1] == b"#":
result = PEP263_ENCODING_RE.search(line)
if result is not None:
encoding = result.groups()[0].decode("ascii")
return data.decode(encoding)
try:
return data.decode(DEFAULT_ENCODING)
except UnicodeDecodeError:
locale_encoding = locale.getpreferredencoding(False) or sys.getdefaultencoding()
logging.warning(
"unable to decode data from %s with default encoding %s, "
"falling back to encoding from locale: %s. "
"If this is intentional you should specify the encoding with a "
"PEP-263 style comment, e.g. '# -*- coding: %s -*-'",
url,
DEFAULT_ENCODING,
locale_encoding,
locale_encoding,
)
return data.decode(locale_encoding)
|
OptionParsingError
|
python
|
kamyu104__LeetCode-Solutions
|
Python/lexicographically-smallest-negated-permutation-that-sums-to-target.py
|
{
"start": 52,
"end": 734
}
|
class ____(object):
def lexSmallestNegatedPerm(self, n, target):
"""
:type n: int
:type target: int
:rtype: List[int]
"""
def count(x):
return (x+1)*x//2
total = count(n)
if abs(target) > total or (target-total)%2:
return []
result = [0]*n
left, right = 0, n-1
for i in reversed(xrange(1, n+1)):
if target-(-i) <= count(i-1):
target -= -i
result[left] = -i
left += 1
else:
target -= i
result[right] = i
right -= 1
return result
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/idefics/modeling_idefics.py
|
{
"start": 37644,
"end": 48729
}
|
class ____(IdeficsPreTrainedModel):
"""
Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is a [`IdeficsDecoderLayer`]
Args:
config: IdeficsConfig
"""
def __init__(self, config: IdeficsConfig):
super().__init__(config)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = IdeficsDecoupledEmbedding(
num_embeddings=config.vocab_size,
num_additional_embeddings=config.additional_vocab_size,
embedding_dim=config.hidden_size,
partially_freeze=config.freeze_text_layers,
padding_idx=self.padding_idx,
)
self.image_size = config.vision_config.image_size
self.vision_config = config.vision_config
# The module using it is not a PreTrainedModel subclass so we need this
self.vision_config._attn_implementation = config._attn_implementation
self.vision_model = IdeficsVisionTransformer(config.vision_config)
# Perceiver Resampler
if config.use_resampler:
perceiver_config = config.perceiver_config
self.perceiver_resampler = IdeficsPerceiverResampler(
config,
config.vision_config.embed_dim,
perceiver_config.resampler_depth,
perceiver_config.resampler_n_heads,
perceiver_config.resampler_head_dim,
perceiver_config.resampler_n_latents,
)
self.layers = nn.ModuleList(
[IdeficsDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]
)
self.cross_layer_interval = config.cross_layer_interval
num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
self.gated_cross_attn_layers = nn.ModuleList(
[IdeficsGatedCrossAttentionLayer(config, layer_idx=i) for i in range(num_cross_layers)]
)
self.gradient_checkpointing = False
self.norm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
# Initialize weights and apply final processing
self.post_init()
self.freeze_relevant_params(config)
def freeze_relevant_params(self, config=None):
if config is None:
config = self.config
if config.freeze_text_layers:
self.freeze_text_layers(config.freeze_text_module_exceptions)
if config.freeze_vision_layers:
freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)
def freeze_text_layers(self, module_exceptions=[]):
for module in [self.layers, self.norm]:
freeze_model(module, module_exceptions=module_exceptions)
def freeze_vision_layers(self, module_exceptions=[]):
freeze_model(self.vision_model, module_exceptions=module_exceptions)
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_encoder_embeddings: Optional[torch.FloatTensor] = None,
perceiver_embeddings: Optional[torch.FloatTensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
interpolate_pos_encoding: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, IdeficsBaseModelOutputWithPast]:
r"""
image_encoder_embeddings (`torch.FloatTensor`, *optional*):
The output of the image encoder.
perceiver_embeddings (`torch.FloatTensor`, *optional*):
The output of the perceiver resampler.
image_attention_mask (`torch.LongTensor`, *optional*):
The attention mask for the image encoder.
"""
device = input_ids.device if input_ids is not None else inputs_embeds.device
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
batch_size, seq_length, _ = inputs_embeds.shape
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
seq_length_with_past = seq_length + past_key_values_length
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + inputs_embeds.shape[1], device=inputs_embeds.device
)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids[:, -seq_length:]
elif position_ids is None:
position_ids = cache_position.unsqueeze(0)
if sum(x is None for x in [pixel_values, image_encoder_embeddings, perceiver_embeddings]) != 2:
raise ValueError(
"Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None."
)
elif pixel_values is not None:
pixel_values = pixel_values.to(dtype=self.dtype, device=device) # fp16 compatibility
batch_size, num_images = pixel_values.shape[:2]
pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:])
# Get sequence from the vision encoder
image_hidden_states = self.vision_model(
pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
).last_hidden_state
elif image_encoder_embeddings is not None:
batch_size, num_images, image_seq_len, image_hidden_size = image_encoder_embeddings.size()
image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=device)
image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size)
if self.config.use_resampler:
if perceiver_embeddings is None:
perceiver_embeddings = self.perceiver_resampler(image_hidden_states)
image_seq_len, image_hidden_size = perceiver_embeddings.size(1), perceiver_embeddings.size(2)
else:
batch_size, num_images, image_seq_len, image_hidden_size = perceiver_embeddings.size()
image_hidden_states = perceiver_embeddings
elif perceiver_embeddings is None:
image_seq_len, image_hidden_size = image_hidden_states.size(1), image_hidden_states.size(2)
else:
raise ValueError("If `perceiver_embeddings` are passed, use_resampler should be True")
image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size)
# # Hack to use the model in full language modeling mode
# image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device)
# Make image_attention_mask compatible with hidden states
text_seq_len = image_attention_mask.size(1)
image_attention_mask = image_attention_mask.unsqueeze(-1)
image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len)
if image_hidden_states is not None:
image_batch_size, image_sequence_length, _ = image_hidden_states.size()
image_hidden_shape = (image_batch_size, image_sequence_length)
if image_attention_mask is None:
image_attention_mask = torch.ones(image_hidden_shape, device=device)
image_attention_mask = self.invert_attention_mask(image_attention_mask)
else:
image_attention_mask = None
# cross_attention_gate:
# For any tokens attending to no images, the hidden_states coming out of the cross-attention should be zeroed-out.
# `image_attention_mask` has shape [bsz, 1, num_images, hidden_size] with elements equal to either 0.0 or a very negative number.
# If any of the elements are 0.0, then the token is attending to at least one image and the gate value is 1. Otherwise the gate value is 0.
# `cross_attention_gate` has shape [bsz, seq_len] with elements equal to either 0.0 or 1.0.
cross_attention_gate = ((((image_attention_mask == 0.0).any(dim=-1)).to(dtype=self.dtype)).squeeze(dim=1)).to(
device
)
# embed positions
if attention_mask is None:
attention_mask = torch.ones(
(batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
# TODO(ls): Add cross attention values to respective lists
if idx % self.cross_layer_interval == 0:
cross_attn_block = self.gated_cross_attn_layers[idx // self.cross_layer_interval]
hidden_states = cross_attn_block(
hidden_states,
causal_mask,
image_hidden_states,
image_attention_mask=image_attention_mask,
cross_attention_gate=cross_attention_gate,
past_key_values=None, # not implemented
**kwargs,
)
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size)
return IdeficsBaseModelOutputWithPast(
last_hidden_state=hidden_states,
image_hidden_states=image_hidden_states,
past_key_values=past_key_values,
)
|
IdeficsModel
|
python
|
nedbat__coveragepy
|
tests/test_report_common.py
|
{
"start": 6255,
"end": 10598
}
|
class ____(CoverageTest):
"""Tests of Jinja-like behavior.
Jinja2 compiles a template into Python code, and then runs the Python code
to render the template. But during rendering, it uses the template name
(for example, "template.j2") as the file name, not the Python code file
name. Then during reporting, we will try to parse template.j2 as Python
code.
If the file can be parsed, it's included in the report (as a Python file!).
If it can't be parsed, then it's not included in the report.
These tests confirm that code doesn't raise an exception (as reported in
#1553), and that the current (incorrect) behavior remains stable. Ideally,
good.j2 wouldn't be listed at all, since we can't report on it accurately.
See https://github.com/coveragepy/coveragepy/issues/1553 for more detail, and
https://github.com/coveragepy/coveragepy/issues/1623 for an issue about this
behavior.
"""
def make_files(self) -> None:
"""Create test files: two Jinja templates, and data from rendering them."""
        # A Jinja2 file that is syntactically acceptable Python (though it won't run).
self.make_file(
"good.j2",
"""\
{{ data }}
line2
line3
""",
)
# A Jinja2 file that is a Python syntax error.
self.make_file(
"bad.j2",
"""\
This is data: {{ data }}.
line 2
line 3
""",
)
self.make_data_file(
lines={
abs_file("good.j2"): [1, 3, 5, 7, 9],
abs_file("bad.j2"): [1, 3, 5, 7, 9],
},
)
def test_report(self) -> None:
self.make_files()
cov = coverage.Coverage()
cov.load()
cov.report(show_missing=True)
expected = textwrap.dedent("""\
Name Stmts Miss Cover Missing
---------------------------------------
good.j2 3 1 67% 2
---------------------------------------
TOTAL 3 1 67%
""")
assert expected == self.stdout()
def test_html(self) -> None:
self.make_files()
cov = coverage.Coverage()
cov.load()
cov.html_report()
contains(
"htmlcov/index.html",
"""\
<tbody>
<tr class="region">
<td class="name"><a href="good_j2.html">good.j2</a></td>
<td class="spacer"> </td>
<td>3</td>
<td>1</td>
<td>0</td>
<td class="spacer"> </td>
<td data-ratio="2 3">67%</td>
</tr>
</tbody>""",
)
doesnt_contain("htmlcov/index.html", "bad.j2")
def test_xml(self) -> None:
self.make_files()
cov = coverage.Coverage()
cov.load()
cov.xml_report()
contains("coverage.xml", 'filename="good.j2"')
contains(
"coverage.xml",
'<line number="1" hits="1"/>',
'<line number="2" hits="0"/>',
'<line number="3" hits="1"/>',
)
doesnt_contain("coverage.xml", 'filename="bad.j2"')
doesnt_contain("coverage.xml", '<line number="4"')
def test_json(self) -> None:
self.make_files()
cov = coverage.Coverage()
cov.load()
cov.json_report()
contains(
"coverage.json",
# Notice the .json report claims lines in good.j2 executed that
# don't even exist in good.j2...
'"files": {"good.j2": {"executed_lines": [1, 3, 5, 7, 9], '
+ '"summary": {"covered_lines": 2, "num_statements": 3',
)
doesnt_contain("coverage.json", "bad.j2")
def test_lcov(self) -> None:
self.make_files()
cov = coverage.Coverage()
cov.load()
cov.lcov_report()
with open("coverage.lcov", encoding="utf-8") as lcov:
actual = lcov.read()
expected = textwrap.dedent("""\
SF:good.j2
DA:1,1
DA:2,0
DA:3,1
LF:3
LH:2
end_of_record
""")
assert expected == actual
|
ReportWithJinjaTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 539774,
"end": 540212
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateProject"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project = sgqlc.types.Field("Project", graphql_name="project")
"""The new project."""
|
CreateProjectPayload
|
python
|
pypa__pipenv
|
pipenv/vendor/packaging/_elffile.py
|
{
"start": 673,
"end": 3282
}
|
class ____:
"""
Representation of an ELF executable.
"""
def __init__(self, f: IO[bytes]) -> None:
self._f = f
try:
ident = self._read("16B")
except struct.error:
raise ELFInvalid("unable to parse identification")
magic = bytes(ident[:4])
if magic != b"\x7fELF":
raise ELFInvalid(f"invalid magic: {magic!r}")
self.capacity = ident[4] # Format for program header (bitness).
self.encoding = ident[5] # Data structure encoding (endianness).
try:
# e_fmt: Format for program header.
# p_fmt: Format for section header.
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, self._p_fmt, self._p_idx = {
(1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.
(1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.
(2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.
(2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.
}[(self.capacity, self.encoding)]
except KeyError:
raise ELFInvalid(
f"unrecognized capacity ({self.capacity}) or "
f"encoding ({self.encoding})"
)
try:
(
_,
self.machine, # Architecture type.
_,
_,
self._e_phoff, # Offset of program header.
_,
self.flags, # Processor-specific flags.
_,
self._e_phentsize, # Size of section.
self._e_phnum, # Number of sections.
) = self._read(e_fmt)
except struct.error as e:
raise ELFInvalid("unable to parse machine and section information") from e
def _read(self, fmt: str) -> tuple[int, ...]:
return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
@property
def interpreter(self) -> str | None:
"""
The path recorded in the ``PT_INTERP`` section header.
"""
for index in range(self._e_phnum):
self._f.seek(self._e_phoff + self._e_phentsize * index)
try:
data = self._read(self._p_fmt)
except struct.error:
continue
if data[self._p_idx[0]] != 3: # Not PT_INTERP.
continue
self._f.seek(data[self._p_idx[1]])
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
return None
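# A minimal usage sketch: read the PT_INTERP path of the running interpreter.
# It assumes a Linux ELF build of Python; elsewhere the magic check above
# raises ELFInvalid, which is handled below.
import sys
with open(sys.executable, "rb") as _fp:
    try:
        print(ELFFile(_fp).interpreter)   # e.g. /lib64/ld-linux-x86-64.so.2
    except ELFInvalid as _exc:
        print(f"not a readable ELF file: {_exc}")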
|
ELFFile
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/decorator1.py
|
{
"start": 368,
"end": 466
}
|
class ____:
@Wrapper
def __init__(self, **kwargs):
print(f"{kwargs}")
Foo(bar=3)
|
Foo
|
python
|
kamyu104__LeetCode-Solutions
|
Python/rotate-image.py
|
{
"start": 31,
"end": 613
}
|
class ____(object):
# @param matrix, a list of lists of integers
# @return a list of lists of integers
def rotate(self, matrix):
n = len(matrix)
# anti-diagonal mirror
for i in xrange(n):
for j in xrange(n - i):
matrix[i][j], matrix[n-1-j][n-1-i] = matrix[n-1-j][n-1-i], matrix[i][j]
# horizontal mirror
for i in xrange(n / 2):
for j in xrange(n):
matrix[i][j], matrix[n-1-i][j] = matrix[n-1-i][j], matrix[i][j]
return matrix
# Time: O(n^2)
# Space: O(n^2)
|
Solution
|
python
|
django__django
|
django/contrib/postgres/operations.py
|
{
"start": 2805,
"end": 2936
}
|
class ____(CreateExtension):
def __init__(self, hints=None):
super().__init__("btree_gin", hints=hints)
|
BtreeGinExtension
|
python
|
huggingface__transformers
|
src/transformers/models/video_llava/modeling_video_llava.py
|
{
"start": 5855,
"end": 6857
}
|
class ____(PreTrainedModel):
config: VideoLlavaConfig
base_model_prefix = "model"
input_modalities = ("image", "video", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["VideoLlavaVisionAttention"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
std = (
self.config.initializer_range
if hasattr(self.config, "initializer_range")
else self.config.text_config.initializer_range
)
if hasattr(module, "class_embedding"):
init.normal_(module.class_embedding, mean=0.0, std=std)
@auto_docstring(
custom_intro="""
The VideoLlava model which consists of a vision backbone and a language model without language modeling head.
""",
)
|
VideoLlavaPreTrainedModel
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/interval/test_indexing.py
|
{
"start": 1747,
"end": 2250
}
|
class ____:
def test_where(self, listlike_box):
klass = listlike_box
idx = IntervalIndex.from_breaks(range(11), closed="right")
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
|
TestWhere
|
python
|
django-crispy-forms__django-crispy-forms
|
crispy_forms/bootstrap.py
|
{
"start": 34054,
"end": 35471
}
|
class ____(Field):
"""
Layout object for rendering fields as Inline in bootstrap.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
attrs : dict
Attributes to be applied to the field. These are converted into html
attributes. e.g. ``data_id: 'test'`` in the attrs dict will become
``data-id='test'`` on the field's ``<input>``.
Parameters
----------
*fields : str
Usually a single field, but can be any number of fields, to be rendered
with the same attributes applied.
css_class : str, optional
CSS classes to be applied to the field. These are added to any classes
included in the ``attrs`` dict. By default ``None``.
wrapper_class: str, optional
CSS classes to be used when rendering the Field. This class is usually
applied to the ``<div>`` which wraps the Field's ``<label>`` and
``<input>`` tags. By default ``None``.
template : str, optional
Overrides the default template, if provided. By default ``None``.
**kwargs : dict, optional
Additional attributes are converted into key="value", pairs. These
attributes are added to the ``<div>``.
Examples
--------
Example::
InlineField('field_name')
"""
template = "%s/layout/inline_field.html"
|
InlineField
|
python
|
redis__redis-py
|
redis/commands/search/suggestion.py
|
{
"start": 466,
"end": 1612
}
|
class ____:
"""
Internal class used to parse results from the `SUGGET` command.
This needs to consume either 1, 2, or 3 values at a time from
the return value depending on what objects were requested
"""
def __init__(self, with_scores: bool, with_payloads: bool, ret) -> None:
self.with_scores = with_scores
self.with_payloads = with_payloads
if with_scores and with_payloads:
self.sugsize = 3
self._scoreidx = 1
self._payloadidx = 2
elif with_scores:
self.sugsize = 2
self._scoreidx = 1
elif with_payloads:
self.sugsize = 2
self._payloadidx = 1
else:
self.sugsize = 1
self._scoreidx = -1
self._sugs = ret
def __iter__(self):
for i in range(0, len(self._sugs), self.sugsize):
ss = self._sugs[i]
score = float(self._sugs[i + self._scoreidx]) if self.with_scores else 1.0
payload = self._sugs[i + self._payloadidx] if self.with_payloads else None
yield Suggestion(ss, score, payload)
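# Illustrative sketch with made-up reply data (not a real SUGGET response):
# with with_scores=True and with_payloads=False the parser consumes
# (string, score) pairs and yields Suggestion objects, which expose
# .string and .score in this module.
_demo_reply = ["hello", "0.8", "help", "0.4"]
for _sug in SuggestionParser(True, False, _demo_reply):
    print(_sug.string, _sug.score)   # hello 0.8, then help 0.4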
|
SuggestionParser
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/definitions/_internal/templater.py
|
{
"start": 1879,
"end": 8628
}
|
class ____:
"""
This renders the template fields of object.
:meta private:
"""
# For derived classes to define which fields will get jinjaified.
template_fields: Collection[str]
# Defines which files extensions to look for in the templated fields.
template_ext: Sequence[str]
def get_template_env(self, dag: DAG | None = None) -> jinja2.Environment:
"""Fetch a Jinja template environment from the Dag or instantiate empty environment if no Dag."""
# This is imported locally since Jinja2 is heavy and we don't need it
# for most of the functionalities. It is imported by get_template_env()
# though, so we don't need to put this after the 'if dag' check.
if dag:
return dag.get_template_env(force_sandboxed=False)
return SandboxedEnvironment(cache_size=0)
def prepare_template(self) -> None:
"""
Execute after the templated fields get replaced by their content.
If you need your object to alter the content of the file before the
template is rendered, it should override this method to do so.
"""
def resolve_template_files(self) -> None:
"""Get the content of files for template_field / template_ext."""
if self.template_ext:
for field in self.template_fields:
content = getattr(self, field, None)
if isinstance(content, str) and content.endswith(tuple(self.template_ext)):
env = self.get_template_env()
try:
setattr(self, field, env.loader.get_source(env, content)[0]) # type: ignore
except Exception:
log.exception("Failed to resolve template field %r", field)
elif isinstance(content, list):
env = self.get_template_env()
for i, item in enumerate(content):
if isinstance(item, str) and item.endswith(tuple(self.template_ext)):
try:
content[i] = env.loader.get_source(env, item)[0] # type: ignore
except Exception:
log.exception("Failed to get source %s", item)
self.prepare_template()
def _do_render_template_fields(
self,
parent: Any,
template_fields: Iterable[str],
context: Context,
jinja_env: jinja2.Environment,
seen_oids: set[int],
) -> None:
for attr_name in template_fields:
value = getattr(parent, attr_name)
rendered_content = self.render_template(
value,
context,
jinja_env,
seen_oids,
)
if rendered_content:
setattr(parent, attr_name, rendered_content)
def _render(self, template, context, dag=None) -> Any:
if dag and dag.render_template_as_native_obj:
return render_template_as_native(template, context)
return render_template_to_string(template, context)
def render_template(
self,
content: Any,
context: Context,
jinja_env: jinja2.Environment | None = None,
seen_oids: set[int] | None = None,
) -> Any:
"""
Render a templated string.
If *content* is a collection holding multiple templated strings, strings
in the collection will be templated recursively.
:param content: Content to template. Only strings can be templated (may
be inside a collection).
:param context: Dict with values to apply on templated content
:param jinja_env: Jinja environment. Can be provided to avoid
re-creating Jinja environments during recursion.
:param seen_oids: template fields already rendered (to avoid
*RecursionError* on circular dependencies)
:return: Templated content
"""
# "content" is a bad name, but we're stuck to it being public API.
value = content
del content
if seen_oids is not None:
oids = seen_oids
else:
oids = set()
if id(value) in oids:
return value
if not jinja_env:
jinja_env = self.get_template_env()
if isinstance(value, str):
if value.endswith(tuple(self.template_ext)): # A filepath.
template = jinja_env.get_template(value)
else:
template = jinja_env.from_string(value)
return self._render(template, context)
if isinstance(value, ObjectStoragePath):
return self._render_object_storage_path(value, context, jinja_env)
if resolve := getattr(value, "resolve", None):
return resolve(context)
# Fast path for common built-in collections.
if value.__class__ is tuple:
return tuple(self.render_template(element, context, jinja_env, oids) for element in value)
if isinstance(value, tuple): # Special case for named tuples.
return value.__class__(*(self.render_template(el, context, jinja_env, oids) for el in value))
if isinstance(value, list):
return [self.render_template(element, context, jinja_env, oids) for element in value]
if isinstance(value, dict):
return {k: self.render_template(v, context, jinja_env, oids) for k, v in value.items()}
if isinstance(value, set):
return {self.render_template(element, context, jinja_env, oids) for element in value}
# More complex collections.
self._render_nested_template_fields(value, context, jinja_env, oids)
return value
def _render_object_storage_path(
self, value: ObjectStoragePath, context: Context, jinja_env: jinja2.Environment
) -> ObjectStoragePath:
serialized_path = value.serialize()
path_version = value.__version__
serialized_path["path"] = self._render(jinja_env.from_string(serialized_path["path"]), context)
return value.deserialize(data=serialized_path, version=path_version)
def _render_nested_template_fields(
self,
value: Any,
context: Context,
jinja_env: jinja2.Environment,
seen_oids: set[int],
) -> None:
if id(value) in seen_oids:
return
seen_oids.add(id(value))
try:
nested_template_fields = value.template_fields
except AttributeError:
# content has no inner template fields
return
self._do_render_template_fields(value, nested_template_fields, context, jinja_env, seen_oids)
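# A minimal sketch of how a concrete subclass renders a plain string template.
# It assumes a plain dict is an acceptable stand-in for the Context mapping
# and that no Dag is involved (so the sandboxed Jinja environment is used).
class _DemoTemplater(Templater):
    template_fields = ("greeting",)
    template_ext = ()
print(_DemoTemplater().render_template("Hello {{ name }}!", {"name": "Airflow"}))
# -> Hello Airflow!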
|
Templater
|
python
|
geekcomputers__Python
|
diceV2_dynamic.py
|
{
"start": 120,
"end": 2689
}
|
class ____:
def __init__(self):
self.sideCount = 6
def setSides(self, sides):
if sides > 3:
self.sides = sides
else:
print(
"This absolutely shouldn't ever happen. The programmer sucks or someone "
"has tweaked with code they weren't supposed to touch!"
)
def roll(self):
return random.randint(1, self.sides)
# =====================================================================
# Checks to make sure that the input is actually an integer.
# This implementation can be improved greatly of course.
def checkInput(sides):
try:
if int(sides) != 0:
if (
float(sides) % int(sides) == 0
): # excludes the possibility of inputted floats being rounded.
return int(sides)
else:
return int(sides)
except ValueError:
print("Invalid input!")
return None
# Picks a number that is at least a given lower limit.
# In this program that means the dice could actually exist in 3-dimensional space (at least 4 sides).
def pickNumber(item, question_string, lower_limit):
while True:
item = input(question_string)
item = checkInput(item)
if type(item) == int:
if item <= lower_limit:
print("Input too low!")
continue
else:
return item
# Main-function of the program that sets up the dices for the user as they want them.
def getDices():
dices = []
sides = None
diceAmount = None
sideLowerLimit = 3 # Do Not Touch!
diceLowerLimit = 1 # Do Not Touch!
sides = pickNumber(sides, "How many sides will the dices have?: ", sideLowerLimit)
diceAmount = pickNumber(
diceAmount, "How many dices will do you want?: ", diceLowerLimit
)
for i in range(0, diceAmount):
d = Dice()
d.setSides(sides)
dices.append(d)
return dices
# =================================================================
# Output section.
def output():
dices = getDices()
input("Do you wanna roll? press enter")
cont = True
while cont:
rollOutput = ""
for dice in dices:
rollOutput = rollOutput + str(dice.roll()) + ", "
rollOutput = rollOutput[:-2]
print(rollOutput)
print("do you want to roll again?")
ans = input("press enter to continue, and [exit] to exit")
if ans == "exit":
cont = False
if __name__ == "__main__":
output()
|
Dice
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/system/caps.py
|
{
"start": 840,
"end": 2409
}
|
class ____(BaseFactCollector):
name = 'caps'
_fact_ids = set(['system_capabilities',
'system_capabilities_enforced']) # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
rc = -1
facts_dict = {'system_capabilities_enforced': 'N/A',
'system_capabilities': 'N/A'}
if module:
capsh_path = module.get_bin_path('capsh')
if capsh_path:
# NOTE: -> get_caps_data()/parse_caps_data() for easier mocking -akl
try:
rc, out, err = module.run_command([capsh_path, "--print"], errors='surrogate_then_replace', handle_exceptions=False)
except OSError as ex:
module.error_as_warning('Could not query system capabilities.', exception=ex)
if rc == 0:
enforced_caps = []
enforced = 'NA'
for line in out.splitlines():
if len(line) < 1:
continue
if line.startswith('Current:'):
if line.split(':')[1].strip() == '=ep':
enforced = 'False'
else:
enforced = 'True'
enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]
facts_dict['system_capabilities_enforced'] = enforced
facts_dict['system_capabilities'] = enforced_caps
return facts_dict
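def _demo_parse_capsh_current_line():
    # Worked example of the 'Current:' parsing above, using a made-up capsh
    # output line (illustration only, not part of Ansible).
    line = "Current: = cap_chown,cap_net_raw+ep"
    enforced = 'False' if line.split(':')[1].strip() == '=ep' else 'True'
    enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]
    return enforced, enforced_caps   # ('True', ['cap_chown', 'cap_net_raw+ep'])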
|
SystemCapabilitiesFactCollector
|
python
|
encode__django-rest-framework
|
tests/test_generics.py
|
{
"start": 14643,
"end": 14768
}
|
class ____(generics.ListCreateAPIView):
serializer_class = ClassASerializer
queryset = ClassA.objects.all()
|
ExampleView
|
python
|
doocs__leetcode
|
solution/2600-2699/2662.Minimum Cost of a Path With Special Roads/Solution.py
|
{
"start": 0,
"end": 644
}
|
class ____:
def minimumCost(
self, start: List[int], target: List[int], specialRoads: List[List[int]]
) -> int:
def dist(x1: int, y1: int, x2: int, y2: int) -> int:
return abs(x1 - x2) + abs(y1 - y2)
q = [(0, start[0], start[1])]
vis = set()
ans = inf
while q:
d, x, y = heappop(q)
if (x, y) in vis:
continue
vis.add((x, y))
ans = min(ans, d + dist(x, y, *target))
for x1, y1, x2, y2, cost in specialRoads:
heappush(q, (d + dist(x, y, x1, y1) + cost, x2, y2))
return ans
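# Usage sketch with the classic example: start (1, 1), target (4, 5), two
# special roads; the minimum cost is 5 (walk to (1, 2), take the first road to
# (3, 3), walk to (3, 4), take the second road to (4, 5)). Assumes the usual
# heappush/heappop, inf and List imports of the solution file are in scope.
print(Solution().minimumCost([1, 1], [4, 5], [[1, 2, 3, 3, 2], [3, 4, 4, 5, 1]]))  # 5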
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/streams/base_streams.py
|
{
"start": 7389,
"end": 13106
}
|
class ____(ShopifyStream, ABC):
# Setting the check point interval to the limit of the records output
state_checkpoint_interval = 250
def __init__(self, config: Dict):
super().__init__(config)
# _filter_checkpointed_cursor used to checkpoint streams with cursor field - ID in job.get_adjusted_job_end
self._filter_checkpointed_cursor = None
@property
def filter_by_state_checkpoint(self) -> bool:
"""
        This filtering flag guarantees that nested substreams emit their STATE correctly
        when there is an abnormally large STATE gap between the parent stream and the substream.
"""
return False
# Setting the default cursor field for all streams
cursor_field = "updated_at"
deleted_cursor_field = "deleted_at"
_checkpoint_cursor = None
@property
def default_state_comparison_value(self) -> Union[int, str]:
# certain streams are using `id` field as `cursor_field`, which requires to use `int` type,
# but many other use `str` values for this, we determine what to use based on `cursor_field` value
return 0 if self.cursor_field == "id" else ""
def get_updated_state(
self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]
) -> MutableMapping[str, Any]:
last_record_value = latest_record.get(self.cursor_field) or self.default_state_comparison_value
current_state_value = current_stream_state.get(self.cursor_field) or self.default_state_comparison_value
return {self.cursor_field: max(last_record_value, current_state_value)}
@stream_state_cache.cache_stream_state
def request_params(
self, stream_state: Optional[Mapping[str, Any]] = None, next_page_token: Optional[Mapping[str, Any]] = None, **kwargs
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state, next_page_token=next_page_token, **kwargs)
# If there is a next page token then we should only send pagination-related parameters.
if not next_page_token:
params["order"] = f"{self.order_field} asc"
if stream_state:
params[self.filter_field] = stream_state.get(self.cursor_field)
return params
def track_checkpoint_cursor(self, record_value: Union[str, int], filter_record_value: Optional[str] = None) -> None:
"""
        Tracks the _checkpoint_cursor value (taken from the cursor field) and the _filter_checkpointed_cursor value (taken from the filter field).
        The _filter_checkpointed_cursor value is only used when the cursor field is the ID, for streams like Customer Address etc.,
        and comes into play when, after a canceled/failed job, the source tries to adjust the stream slice (see ShopifyBulkManager._adjust_slice_end()).
"""
if self.filter_by_state_checkpoint:
# set checkpoint cursor
if not self._checkpoint_cursor:
self._checkpoint_cursor = self.default_state_comparison_value
# track checkpoint cursor
if str(record_value) >= str(self._checkpoint_cursor):
self._checkpoint_cursor = record_value
if filter_record_value:
if not self._filter_checkpointed_cursor or str(filter_record_value) >= str(self._filter_checkpointed_cursor):
self._filter_checkpointed_cursor = filter_record_value
def should_checkpoint(self, index: int) -> bool:
return self.filter_by_state_checkpoint and index >= self.state_checkpoint_interval
# Parse the `records` with respect to the `stream_state` for the `Incremental refresh`
# cases where we slice the stream, the endpoints for those classes don't accept any other filtering,
# but they provide us with the updated_at field in most cases, so we used that as incremental filtering during the order slicing.
def filter_records_newer_than_state(
self,
stream_state: Optional[Mapping[str, Any]] = None,
records_slice: Optional[Iterable[Mapping]] = None,
) -> Iterable:
# Getting records >= state
if stream_state:
state_value = stream_state.get(self.cursor_field, self.default_state_comparison_value)
for index, record in enumerate(records_slice, 1):
if self.cursor_field in record:
record_value = record.get(self.cursor_field, self.default_state_comparison_value)
filter_record_value = record.get(self.filter_field) if self.filter_field else None
self.track_checkpoint_cursor(record_value, filter_record_value)
if record_value:
if record_value >= state_value:
yield record
else:
if self.should_checkpoint(index):
yield record
else:
# old entities could have cursor field in place, but set to null
self.logger.warning(
f"Stream `{self.name}`, Record ID: `{record.get(self.primary_key)}` cursor value is: {record_value}, record is emitted without state comparison"
)
yield record
else:
# old entities could miss the cursor field
self.logger.warning(
f"Stream `{self.name}`, Record ID: `{record.get(self.primary_key)}` missing cursor field: {self.cursor_field}, record is emitted without state comparison"
)
yield record
else:
yield from records_slice
|
IncrementalShopifyStream
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axisartist/floating_axes.py
|
{
"start": 3829,
"end": 4371
}
|
class ____(ExtremeFinderSimple):
# docstring inherited
def __init__(self, extremes):
"""
This subclass always returns the same bounding box.
Parameters
----------
extremes : (float, float, float, float)
The bounding box that this helper always returns.
"""
x0, x1, y0, y1 = extremes
self._tbbox = Bbox.from_extents(x0, y0, x1, y1)
def _find_transformed_bbox(self, trans, bbox):
# docstring inherited
return self._tbbox
|
ExtremeFinderFixed
|
python
|
doocs__leetcode
|
solution/0900-0999/0969.Pancake Sorting/Solution.py
|
{
"start": 0,
"end": 598
}
|
class ____:
def pancakeSort(self, arr: List[int]) -> List[int]:
def reverse(arr, j):
i = 0
while i < j:
arr[i], arr[j] = arr[j], arr[i]
i, j = i + 1, j - 1
n = len(arr)
ans = []
for i in range(n - 1, 0, -1):
j = i
while j > 0 and arr[j] != i + 1:
j -= 1
if j < i:
if j > 0:
ans.append(j + 1)
reverse(arr, j)
ans.append(i + 1)
reverse(arr, i)
return ans
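# Usage sketch: each value k in the returned list means "reverse the first k
# elements"; the input list ends up sorted in place as a side effect.
_arr = [3, 2, 4, 1]
_flips = Solution().pancakeSort(_arr)
print(_flips)   # a valid flip sequence for this input
print(_arr)     # [1, 2, 3, 4]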
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-aws/dagster_aws_tests/athena_tests/test_resources.py
|
{
"start": 218,
"end": 3153
}
|
class ____(ResourceWithAthenaConfig):
def get_client(self) -> FakeAthenaClient:
return FakeAthenaClient(
client=boto3.client("athena", region_name="us-east-1"),
workgroup=self.workgroup,
polling_interval=self.polling_interval,
max_polls=self.max_polls,
)
@pytest.fixture
def mock_athena_client(mock_s3_resource):
with mock_athena():
yield boto3.client("athena", region_name="us-east-1")
def test_execute_query(mock_athena_client):
athena = FakeAthenaClient(client=mock_athena_client)
assert athena.execute_query("SELECT 1", fetch_results=True) == [("1",)]
assert athena.execute_query(
"SELECT * FROM foo", fetch_results=True, expected_results=[(1, None), (2, 3)]
) == [("1",), ("2", "3")]
@pytest.mark.parametrize(
"expected_states",
[
["SUCCEEDED"],
["QUEUED", "SUCCEEDED"],
["QUEUED", "RUNNING", "SUCCEEDED"],
["QUEUED", "QUEUED", "SUCCEEDED"],
["QUEUED", "RUNNING", "RUNNING", "SUCCEEDED"],
],
)
def test_execute_query_state_transitions(mock_athena_client, expected_states):
athena = FakeAthenaClient(client=mock_athena_client)
athena.execute_query("SELECT 1", expected_states=expected_states)
@pytest.mark.parametrize(
"expected_states",
[
["FAILED"],
["CANCELLED"],
["QUEUED", "FAILED"],
["QUEUED", "RUNNING", "FAILED"],
["QUEUED", "CANCELLED"],
["QUEUED", "RUNNING", "CANCELLED"],
],
)
def test_execute_query_raises(mock_athena_client, expected_states):
athena = FakeAthenaClient(client=mock_athena_client)
with pytest.raises(AthenaError, match="state change reason"):
athena.execute_query("SELECT 1", expected_states=expected_states)
def test_execute_query_timeout(mock_athena_client):
athena = FakeAthenaClient(client=mock_athena_client, max_polls=1)
with pytest.raises(AthenaTimeout):
athena.execute_query("SELECT 1")
def test_execute_query_succeeds_on_last_poll(mock_athena_client):
athena = FakeAthenaClient(client=mock_athena_client, max_polls=1)
athena.execute_query("SELECT 1", expected_states=["SUCCEEDED"])
def test_op(mock_athena_client) -> None:
from dagster import build_op_context, op
@op(required_resource_keys={"athena"})
def example_athena_op(context):
return context.resources.athena.execute_query("SELECT 1", fetch_results=True)
context = build_op_context(resources={"athena": fake_athena_resource})
assert example_athena_op(context) == [("1",)]
def test_op_pythonic_resource(mock_athena_client) -> None:
from dagster import op
@op
def example_athena_op(athena: TestAthenaClientResource):
return athena.get_client().execute_query("SELECT 1", fetch_results=True)
assert example_athena_op(athena=TestAthenaClientResource.configure_at_launch()) == [("1",)]
|
TestAthenaClientResource
|
python
|
huggingface__transformers
|
src/transformers/models/d_fine/modular_d_fine.py
|
{
"start": 54695,
"end": 54741
}
|
class ____(RTDetrEncoder):
pass
|
DFineEncoder
|
python
|
pandas-dev__pandas
|
pandas/tests/resample/test_period_index.py
|
{
"start": 1369,
"end": 39512
}
|
class ____:
@pytest.mark.parametrize("freq", ["2D", "1h", "2h"])
def test_asfreq(self, frame_or_series, freq):
# GH 12884, 15944
obj = frame_or_series(range(5), index=period_range("2020-01-01", periods=5))
expected = obj.to_timestamp().resample(freq).asfreq()
result = obj.to_timestamp().resample(freq).asfreq()
tm.assert_almost_equal(result, expected)
start = obj.index[0].to_timestamp(how="start")
end = (obj.index[-1] + obj.index.freq).to_timestamp(how="start")
new_index = date_range(start=start, end=end, freq=freq, inclusive="left")
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq).asfreq()
tm.assert_almost_equal(result, expected)
result = obj.resample(freq).asfreq().to_timestamp().to_period()
tm.assert_almost_equal(result, expected)
def test_asfreq_fill_value(self):
# test for fill value during resampling, issue 3715
index = period_range(datetime(2005, 1, 1), datetime(2005, 1, 10), freq="D")
s = Series(range(len(index)), index=index)
new_index = date_range(
s.index[0].to_timestamp(how="start"),
(s.index[-1]).to_timestamp(how="start"),
freq="1h",
)
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.to_timestamp().resample("1h").asfreq(fill_value=4.0)
tm.assert_series_equal(result, expected)
frame = s.to_frame("value")
new_index = date_range(
frame.index[0].to_timestamp(how="start"),
(frame.index[-1]).to_timestamp(how="start"),
freq="1h",
)
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.to_timestamp().resample("1h").asfreq(fill_value=3.0)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("freq", ["h", "12h", "2D", "W"])
@pytest.mark.parametrize("kwargs", [{"on": "date"}, {"level": "d"}])
def test_selection(self, freq, kwargs):
# This is a bug, these should be implemented
# GH 14008
index = period_range(datetime(2005, 1, 1), datetime(2005, 1, 10), freq="D")
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame(
{"date": index, "a": rng},
index=pd.MultiIndex.from_arrays([rng, index], names=["v", "d"]),
)
msg = (
"Resampling from level= or on= selection with a PeriodIndex is "
r"not currently supported, use \.set_index\(\.\.\.\) to "
"explicitly set index"
)
with pytest.raises(NotImplementedError, match=msg):
df.resample(freq, **kwargs)
@pytest.mark.parametrize("month", MONTHS)
@pytest.mark.parametrize("meth", ["ffill", "bfill"])
@pytest.mark.parametrize("conv", ["start", "end"])
@pytest.mark.parametrize(
("offset", "period"), [("D", "D"), ("B", "B"), ("ME", "M"), ("QE", "Q")]
)
def test_annual_upsample_cases(
self, offset, period, conv, meth, month, simple_period_range_series
):
ts = simple_period_range_series("1/1/1990", "12/31/1990", freq=f"Y-{month}")
warn = FutureWarning if period == "B" else None
msg = r"PeriodDtype\[B\] is deprecated"
with tm.assert_produces_warning(warn, match=msg):
result = getattr(ts.resample(period, convention=conv), meth)()
expected = result.to_timestamp(period, how=conv)
expected = expected.asfreq(offset, meth).to_period()
tm.assert_series_equal(result, expected)
def test_basic_downsample(self, simple_period_range_series):
ts = simple_period_range_series("1/1/1990", "6/30/1995", freq="M")
result = ts.resample("Y-DEC").mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range("1/1/1990", "6/30/1995", freq="Y-DEC")
tm.assert_series_equal(result, expected)
# this is ok
tm.assert_series_equal(ts.resample("Y-DEC").mean(), result)
tm.assert_series_equal(ts.resample("Y").mean(), result)
@pytest.mark.parametrize(
"rule,expected_error_msg",
[
("Y-DEC", "<YearEnd: month=12>"),
("Q-MAR", "<QuarterEnd: startingMonth=3>"),
("M", "<MonthEnd>"),
("W-THU", "<Week: weekday=3>"),
],
)
def test_not_subperiod(self, simple_period_range_series, rule, expected_error_msg):
# These are incompatible period rules for resampling
ts = simple_period_range_series("1/1/1990", "6/30/1995", freq="W-WED")
msg = (
"Frequency <Week: weekday=2> cannot be resampled to "
f"{expected_error_msg}, as they are not sub or super periods"
)
with pytest.raises(IncompatibleFrequency, match=msg):
ts.resample(rule).mean()
@pytest.mark.parametrize("freq", ["D", "2D"])
def test_basic_upsample(self, freq, simple_period_range_series):
ts = simple_period_range_series("1/1/1990", "6/30/1995", freq="M")
result = ts.resample("Y-DEC").mean()
resampled = result.resample(freq, convention="end").ffill()
expected = result.to_timestamp(freq, how="end")
expected = expected.asfreq(freq, "ffill").to_period(freq)
tm.assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range("1/1/2000", periods=5, freq="Y")
ts = Series(np.random.default_rng(2).standard_normal(len(rng)), rng)
result = ts.resample("M", convention="end").ffill(limit=2)
expected = ts.asfreq("M").reindex(result.index, method="ffill", limit=2)
tm.assert_series_equal(result, expected)
def test_annual_upsample(self, simple_period_range_series):
ts = simple_period_range_series("1/1/1990", "12/31/1995", freq="Y-DEC")
df = DataFrame({"a": ts})
rdf = df.resample("D").ffill()
exp = df["a"].resample("D").ffill()
tm.assert_series_equal(rdf["a"], exp)
def test_annual_upsample2(self):
rng = period_range("2000", "2003", freq="Y-DEC")
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample("M").ffill()
ex_index = period_range("2000-01", "2003-12", freq="M")
expected = ts.asfreq("M", how="start").reindex(ex_index, method="ffill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("month", MONTHS)
@pytest.mark.parametrize("convention", ["start", "end"])
@pytest.mark.parametrize(
("offset", "period"), [("D", "D"), ("B", "B"), ("ME", "M")]
)
def test_quarterly_upsample(
self, month, offset, period, convention, simple_period_range_series
):
freq = f"Q-{month}"
ts = simple_period_range_series("1/1/1990", "12/31/1991", freq=freq)
warn = FutureWarning if period == "B" else None
msg = r"PeriodDtype\[B\] is deprecated"
with tm.assert_produces_warning(warn, match=msg):
result = ts.resample(period, convention=convention).ffill()
expected = result.to_timestamp(period, how=convention)
expected = expected.asfreq(offset, "ffill").to_period()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("target", ["D", "B"])
@pytest.mark.parametrize("convention", ["start", "end"])
def test_monthly_upsample(self, target, convention, simple_period_range_series):
ts = simple_period_range_series("1/1/1990", "12/31/1995", freq="M")
warn = None if target == "D" else FutureWarning
msg = r"PeriodDtype\[B\] is deprecated"
with tm.assert_produces_warning(warn, match=msg):
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, "ffill").to_period()
tm.assert_series_equal(result, expected)
def test_resample_basic(self):
# GH3609
s = Series(
range(100),
index=date_range("20130101", freq="s", periods=100, name="idx"),
dtype="float",
)
s[10:30] = np.nan
index = PeriodIndex(
[Period("2013-01-01 00:00", "min"), Period("2013-01-01 00:01", "min")],
name="idx",
)
expected = Series([34.5, 79.5], index=index)
result = s.to_period().resample("min").mean()
tm.assert_series_equal(result, expected)
result2 = s.resample("min").mean().to_period()
tm.assert_series_equal(result2, expected)
@pytest.mark.parametrize(
"freq,expected_vals", [("M", [31, 29, 31, 9]), ("2M", [31 + 29, 31 + 9])]
)
def test_resample_count(self, freq, expected_vals):
# GH12774
series = Series(1, index=period_range(start="2000", periods=100))
result = series.resample(freq).count()
expected_index = period_range(
start="2000", freq=freq, periods=len(expected_vals)
)
expected = Series(expected_vals, index=expected_index)
tm.assert_series_equal(result, expected)
def test_resample_same_freq(self, resample_method):
# GH12770
series = Series(range(3), index=period_range(start="2000", periods=3, freq="M"))
expected = series
result = getattr(series.resample("M"), resample_method)()
tm.assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
msg = (
"Frequency <MonthEnd> cannot be resampled to <Week: weekday=6>, "
"as they are not sub or super periods"
)
pi = period_range(start="2000", periods=3, freq="M")
ser = Series(range(3), index=pi)
rs = ser.resample("W")
with pytest.raises(IncompatibleFrequency, match=msg):
# TODO: should this raise at the resample call instead of at the mean call?
rs.mean()
@pytest.mark.parametrize(
"tz",
[
zoneinfo.ZoneInfo("America/Los_Angeles"),
dateutil.tz.gettz("America/Los_Angeles"),
],
)
def test_with_local_timezone(self, tz):
# see gh-5430
local_timezone = tz
start = datetime(
year=2013, month=11, day=1, hour=0, minute=0, tzinfo=timezone.utc
)
# 1 day later
end = datetime(
year=2013, month=11, day=2, hour=0, minute=0, tzinfo=timezone.utc
)
index = date_range(start, end, freq="h", name="idx")
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
msg = "Converting to PeriodArray/Index representation will drop timezone"
with tm.assert_produces_warning(UserWarning, match=msg):
result = series.resample("D").mean().to_period()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (
period_range(start=start, end=end, freq="D", name="idx") - offsets.Day()
)
expected = Series(1.0, index=expected_index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"tz",
[
zoneinfo.ZoneInfo("America/Los_Angeles"),
dateutil.tz.gettz("America/Los_Angeles"),
],
)
def test_resample_with_tz(self, tz, unit):
# GH 13238
dti = date_range("2017-01-01", periods=48, freq="h", tz=tz, unit=unit)
ser = Series(2, index=dti)
result = ser.resample("D").mean()
exp_dti = pd.DatetimeIndex(
["2017-01-01", "2017-01-02"], tz=tz, freq="D"
).as_unit(unit)
expected = Series(
2.0,
index=exp_dti,
)
tm.assert_series_equal(result, expected)
def test_resample_nonexistent_time_bin_edge(self):
# GH 19375
index = date_range("2017-03-12", "2017-03-12 1:45:00", freq="15min")
s = Series(np.zeros(len(index)), index=index)
expected = s.tz_localize("US/Pacific")
expected.index = pd.DatetimeIndex(expected.index, freq="900s")
result = expected.resample("900s").mean()
tm.assert_series_equal(result, expected)
def test_resample_nonexistent_time_bin_edge2(self):
# GH 23742
index = date_range(start="2017-10-10", end="2017-10-20", freq="1h")
index = index.tz_localize("UTC").tz_convert("America/Sao_Paulo")
df = DataFrame(data=list(range(len(index))), index=index)
result = df.groupby(pd.Grouper(freq="1D")).count()
expected = date_range(
start="2017-10-09",
end="2017-10-20",
freq="D",
tz="America/Sao_Paulo",
nonexistent="shift_forward",
inclusive="left",
)
tm.assert_index_equal(result.index, expected)
def test_resample_ambiguous_time_bin_edge(self):
# GH 10117
idx = date_range(
"2014-10-25 22:00:00",
"2014-10-26 00:30:00",
freq="30min",
tz="Europe/London",
)
expected = Series(np.zeros(len(idx)), index=idx)
result = expected.resample("30min").mean()
tm.assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
# GH2073
s = Series(
np.arange(9, dtype="int64"),
index=date_range("2010-01-01", periods=9, freq="QE"),
)
last = s.resample("ME").ffill()
both = s.resample("ME").ffill().resample("ME").last().astype("int64")
tm.assert_series_equal(last, both)
@pytest.mark.parametrize("day", DAYS)
@pytest.mark.parametrize("target", ["D", "B"])
@pytest.mark.parametrize("convention", ["start", "end"])
def test_weekly_upsample(self, day, target, convention, simple_period_range_series):
freq = f"W-{day}"
ts = simple_period_range_series("1/1/1990", "07/31/1990", freq=freq)
warn = None if target == "D" else FutureWarning
msg = r"PeriodDtype\[B\] is deprecated"
with tm.assert_produces_warning(warn, match=msg):
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, "ffill").to_period()
tm.assert_series_equal(result, expected)
def test_resample_to_timestamps(self, simple_period_range_series):
ts = simple_period_range_series("1/1/1990", "12/31/1995", freq="M")
result = ts.resample("Y-DEC").mean().to_timestamp()
expected = ts.resample("Y-DEC").mean().to_timestamp(how="start")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("month", MONTHS)
def test_resample_to_quarterly(self, simple_period_range_series, month):
ts = simple_period_range_series("1990", "1992", freq=f"Y-{month}")
quar_ts = ts.resample(f"Q-{month}").ffill()
stamps = ts.to_timestamp("D", how="start")
qdates = period_range(
ts.index[0].asfreq("D", "start"),
ts.index[-1].asfreq("D", "end"),
freq=f"Q-{month}",
)
expected = stamps.reindex(qdates.to_timestamp("D", "s"), method="ffill")
expected.index = qdates
tm.assert_series_equal(quar_ts, expected)
@pytest.mark.parametrize("how", ["start", "end"])
def test_resample_to_quarterly_start_end(self, simple_period_range_series, how):
# conforms, but different month
ts = simple_period_range_series("1990", "1992", freq="Y-JUN")
result = ts.resample("Q-MAR", convention=how).ffill()
expected = ts.asfreq("Q-MAR", how=how)
expected = expected.reindex(result.index, method="ffill")
# FIXME: don't leave commented-out
# .to_timestamp('D')
# expected = expected.resample('Q-MAR').ffill()
tm.assert_series_equal(result, expected)
def test_resample_fill_missing(self):
rng = PeriodIndex([2000, 2005, 2007, 2009], freq="Y")
s = Series(np.random.default_rng(2).standard_normal(4), index=rng)
stamps = s.to_timestamp()
filled = s.resample("Y").ffill()
expected = stamps.resample("YE").ffill().to_period("Y")
tm.assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq="Y")
s = Series(np.random.default_rng(2).standard_normal(5), index=rng)
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
s.resample("Y").ffill()
def test_resample_5minute(self):
rng = period_range("1/1/2000", "1/5/2000", freq="min")
ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
expected = ts.to_timestamp().resample("5min").mean()
result = ts.resample("5min").mean().to_timestamp()
tm.assert_series_equal(result, expected)
expected = expected.to_period("5min")
result = ts.resample("5min").mean()
tm.assert_series_equal(result, expected)
result = ts.resample("5min").mean().to_timestamp().to_period()
tm.assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self, simple_period_range_series):
ts = simple_period_range_series("1/1/2000", "2/1/2000", freq="B")
result = ts.resample("D").asfreq()
expected = ts.asfreq("D").reindex(period_range("1/3/2000", "2/1/2000"))
tm.assert_series_equal(result, expected)
ts = simple_period_range_series("1/1/2000", "2/1/2000")
result = ts.resample("h", convention="s").asfreq()
exp_rng = period_range("1/1/2000", "2/1/2000 23:00", freq="h")
expected = ts.asfreq("h", how="s").reindex(exp_rng)
tm.assert_series_equal(result, expected)
def test_resample_irregular_sparse(self):
dr = date_range(start="1/1/2012", freq="5min", periods=1000)
s = Series(np.array(100), index=dr)
# subset the data.
subset = s[:"2012-01-04 06:55"]
result = subset.resample("10min").apply(len)
expected = s.resample("10min").apply(len).loc[result.index]
tm.assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
rng = date_range("1/1/2000", periods=10, freq="W-WED")
ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
result = ts.resample("W-THU").asfreq()
assert result.isna().all()
result = ts.resample("W-THU").asfreq().ffill()[:-1]
expected = ts.asfreq("W-THU").ffill()
tm.assert_series_equal(result, expected)
def test_resample_tz_localized(self, unit):
dr = date_range(start="2012-4-13", end="2012-5-1", unit=unit)
ts = Series(range(len(dr)), index=dr)
ts_utc = ts.tz_localize("UTC")
ts_local = ts_utc.tz_convert("America/Los_Angeles")
result = ts_local.resample("W").mean()
ts_local_naive = ts_local.copy()
ts_local_naive.index = ts_local_naive.index.tz_localize(None)
exp = ts_local_naive.resample("W").mean().tz_localize("America/Los_Angeles")
exp.index = pd.DatetimeIndex(exp.index, freq="W")
tm.assert_series_equal(result, exp)
# it works
result = ts_local.resample("D").mean()
def test_resample_tz_localized2(self):
# #2245
idx = date_range(
"2001-09-20 15:59", "2001-09-20 16:00", freq="min", tz="Australia/Sydney"
)
s = Series([1, 2], index=idx)
# GH#61985 changed this to behave like "B" rather than "24h"
result = s.resample("D", closed="right", label="right").mean()
ex_index = date_range("2001-09-20", periods=2, freq="D", tz="Australia/Sydney")
expected = Series([np.nan, 1.5], index=ex_index)
tm.assert_series_equal(result, expected)
# for good measure
msg = "Converting to PeriodArray/Index representation will drop timezone "
with tm.assert_produces_warning(UserWarning, match=msg):
result = s.resample("D").mean().to_period()
ex_index = period_range("2001-09-20", periods=1, freq="D")
expected = Series([1.5], index=ex_index)
tm.assert_series_equal(result, expected)
def test_resample_tz_localized3(self):
# GH 6397
# comparing an offset that doesn't propagate tz's
rng = date_range("1/1/2011", periods=20000, freq="h")
rng = rng.tz_localize("EST")
ts = DataFrame(index=rng)
ts["first"] = np.random.default_rng(2).standard_normal(len(rng))
ts["second"] = np.cumsum(np.random.default_rng(2).standard_normal(len(rng)))
expected = DataFrame(
{
"first": ts.resample("YE").sum()["first"],
"second": ts.resample("YE").mean()["second"],
},
columns=["first", "second"],
)
result = (
ts.resample("YE")
.agg({"first": "sum", "second": "mean"})
.reindex(columns=["first", "second"])
)
tm.assert_frame_equal(result, expected)
def test_closed_left_corner(self):
# #1465
s = Series(
np.random.default_rng(2).standard_normal(21),
index=date_range(start="1/1/2012 9:30", freq="1min", periods=21),
)
s.iloc[0] = np.nan
result = s.resample("10min", closed="left", label="right").mean()
exp = s[1:].resample("10min", closed="left", label="right").mean()
tm.assert_series_equal(result, exp)
result = s.resample("10min", closed="left", label="left").mean()
exp = s[1:].resample("10min", closed="left", label="left").mean()
ex_index = date_range(start="1/1/2012 9:30", freq="10min", periods=3)
tm.assert_index_equal(result.index, ex_index)
tm.assert_series_equal(result, exp)
def test_quarterly_resampling(self):
rng = period_range("2000Q1", periods=10, freq="Q-DEC")
ts = Series(np.arange(10), index=rng)
result = ts.resample("Y").mean()
exp = ts.to_timestamp().resample("YE").mean().to_period()
tm.assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
# 8/6/12 is a Monday
ind = date_range(start="8/6/2012", end="8/26/2012", freq="D")
n = len(ind)
data = [[x] * 5 for x in range(n)]
df = DataFrame(data, columns=["open", "high", "low", "close", "vol"], index=ind)
# it works!
df.resample("W-MON", closed="left", label="left").first()
def test_resample_with_dst_time_change(self):
# GH 15549
index = (
pd.DatetimeIndex([1457537600000000000, 1458059600000000000])
.tz_localize("UTC")
.tz_convert("America/Chicago")
)
df = DataFrame([1, 2], index=index)
result = df.resample("12h", closed="right", label="right").last().ffill()
expected_index_values = [
"2016-03-09 12:00:00-06:00",
"2016-03-10 00:00:00-06:00",
"2016-03-10 12:00:00-06:00",
"2016-03-11 00:00:00-06:00",
"2016-03-11 12:00:00-06:00",
"2016-03-12 00:00:00-06:00",
"2016-03-12 12:00:00-06:00",
"2016-03-13 00:00:00-06:00",
"2016-03-13 13:00:00-05:00",
"2016-03-14 01:00:00-05:00",
"2016-03-14 13:00:00-05:00",
"2016-03-15 01:00:00-05:00",
"2016-03-15 13:00:00-05:00",
]
index = (
pd.to_datetime(expected_index_values, utc=True)
.tz_convert("America/Chicago")
.as_unit(index.unit)
)
index = pd.DatetimeIndex(index, freq="12h")
expected = DataFrame(
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0],
index=index,
)
tm.assert_frame_equal(result, expected)
def test_resample_bms_2752(self):
# GH2753
timeseries = Series(
index=pd.bdate_range("20000101", "20000201"), dtype=np.float64
)
res1 = timeseries.resample("BMS").mean()
res2 = timeseries.resample("BMS").mean().resample("B").mean()
assert res1.index[0] == Timestamp("20000103")
assert res1.index[0] == res2.index[0]
@pytest.mark.xfail(reason="Commented out for more than 3 years. Should this work?")
def test_monthly_convention_span(self):
rng = period_range("2000-01", periods=3, freq="ME")
ts = Series(np.arange(3), index=rng)
# hacky way to get same thing
exp_index = period_range("2000-01-01", "2000-03-31", freq="D")
expected = ts.asfreq("D", how="end").reindex(exp_index)
expected = expected.fillna(method="bfill")
result = ts.resample("D").mean()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"from_freq, to_freq", [("D", "ME"), ("QE", "YE"), ("ME", "QE"), ("D", "W")]
)
def test_default_right_closed_label(self, from_freq, to_freq):
idx = date_range(start="8/15/2012", periods=100, freq=from_freq)
df = DataFrame(np.random.default_rng(2).standard_normal((len(idx), 2)), idx)
resampled = df.resample(to_freq).mean()
tm.assert_frame_equal(
resampled, df.resample(to_freq, closed="right", label="right").mean()
)
@pytest.mark.parametrize(
"from_freq, to_freq",
[("D", "MS"), ("QE", "YS"), ("ME", "QS"), ("h", "D"), ("min", "h")],
)
def test_default_left_closed_label(self, from_freq, to_freq):
idx = date_range(start="8/15/2012", periods=100, freq=from_freq)
df = DataFrame(np.random.default_rng(2).standard_normal((len(idx), 2)), idx)
resampled = df.resample(to_freq).mean()
tm.assert_frame_equal(
resampled, df.resample(to_freq, closed="left", label="left").mean()
)
def test_all_values_single_bin(self):
# GH#2070
index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
ser = Series(np.random.default_rng(2).standard_normal(len(index)), index=index)
result = ser.resample("Y").mean()
tm.assert_almost_equal(result.iloc[0], ser.mean())
def test_evenly_divisible_with_no_extra_bins(self):
# GH#4076
        # when the frequency is evenly divisible, extra bins were sometimes produced
df = DataFrame(
np.random.default_rng(2).standard_normal((9, 3)),
index=date_range("2000-1-1", periods=9, unit="ns"),
)
result = df.resample("5D").mean()
expected = pd.concat([df.iloc[0:5].mean(), df.iloc[5:].mean()], axis=1).T
expected.index = pd.DatetimeIndex(
[Timestamp("2000-1-1"), Timestamp("2000-1-6")], dtype="M8[ns]", freq="5D"
)
tm.assert_frame_equal(result, expected)
def test_evenly_divisible_with_no_extra_bins2(self):
index = date_range(start="2001-5-4", periods=28)
df = DataFrame(
[
{
"REST_KEY": 1,
"DLY_TRN_QT": 80,
"DLY_SLS_AMT": 90,
"COOP_DLY_TRN_QT": 30,
"COOP_DLY_SLS_AMT": 20,
}
]
* 28
+ [
{
"REST_KEY": 2,
"DLY_TRN_QT": 70,
"DLY_SLS_AMT": 10,
"COOP_DLY_TRN_QT": 50,
"COOP_DLY_SLS_AMT": 20,
}
]
* 28,
index=index.append(index),
).sort_index()
index = date_range("2001-5-4", periods=4, freq="7D")
expected = DataFrame(
[
{
"REST_KEY": 14,
"DLY_TRN_QT": 14,
"DLY_SLS_AMT": 14,
"COOP_DLY_TRN_QT": 14,
"COOP_DLY_SLS_AMT": 14,
}
]
* 4,
index=index,
)
result = df.resample("7D").count()
tm.assert_frame_equal(result, expected)
expected = DataFrame(
[
{
"REST_KEY": 21,
"DLY_TRN_QT": 1050,
"DLY_SLS_AMT": 700,
"COOP_DLY_TRN_QT": 560,
"COOP_DLY_SLS_AMT": 280,
}
]
* 4,
index=index,
)
result = df.resample("7D").sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("freq, period_mult", [("h", 24), ("12h", 2)])
def test_upsampling_ohlc(self, freq, period_mult):
# GH 13083
pi = period_range(start="2000", freq="D", periods=10)
s = Series(range(len(pi)), index=pi)
expected = s.to_timestamp().resample(freq).ohlc().to_period(freq)
# timestamp-based resampling doesn't include all sub-periods
# of the last original period, so extend accordingly:
new_index = period_range(start="2000", freq=freq, periods=period_mult * len(pi))
expected = expected.reindex(new_index)
result = s.resample(freq).ohlc()
tm.assert_frame_equal(result, expected)
result = s.resample(freq).ohlc().to_timestamp().to_period()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"periods, values",
[
(
[
pd.NaT,
"1970-01-01 00:00:00",
pd.NaT,
"1970-01-01 00:00:02",
"1970-01-01 00:00:03",
],
[2, 3, 5, 7, 11],
),
(
[
pd.NaT,
pd.NaT,
"1970-01-01 00:00:00",
pd.NaT,
pd.NaT,
pd.NaT,
"1970-01-01 00:00:02",
"1970-01-01 00:00:03",
pd.NaT,
pd.NaT,
],
[1, 2, 3, 5, 6, 8, 7, 11, 12, 13],
),
],
)
@pytest.mark.parametrize(
"freq, expected_values",
[
("1s", [3, np.nan, 7, 11]),
("2s", [3, (7 + 11) / 2]),
("3s", [(3 + 7) / 2, 11]),
],
)
def test_resample_with_nat(self, periods, values, freq, expected_values):
# GH 13224
index = PeriodIndex(periods, freq="s")
frame = DataFrame(values, index=index)
expected_index = period_range(
"1970-01-01 00:00:00", periods=len(expected_values), freq=freq
)
expected = DataFrame(expected_values, index=expected_index)
result = frame.resample(freq).mean()
tm.assert_frame_equal(result, expected)
def test_resample_with_only_nat(self):
# GH 13224
pi = PeriodIndex([pd.NaT] * 3, freq="s")
frame = DataFrame([2, 3, 5], index=pi, columns=["a"])
expected_index = PeriodIndex(data=[], freq=pi.freq)
expected = DataFrame(index=expected_index, columns=["a"], dtype="float64")
result = frame.resample("1s").mean()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"start,end,start_freq,end_freq,offset",
[
("19910905", "19910909 03:00", "h", "24h", "10h"),
("19910905", "19910909 12:00", "h", "24h", "10h"),
("19910905", "19910909 23:00", "h", "24h", "10h"),
("19910905 10:00", "19910909", "h", "24h", "10h"),
("19910905 10:00", "19910909 10:00", "h", "24h", "10h"),
("19910905", "19910909 10:00", "h", "24h", "10h"),
("19910905 12:00", "19910909", "h", "24h", "10h"),
("19910905 12:00", "19910909 03:00", "h", "24h", "10h"),
("19910905 12:00", "19910909 12:00", "h", "24h", "10h"),
("19910905 12:00", "19910909 12:00", "h", "24h", "34h"),
("19910905 12:00", "19910909 12:00", "h", "17h", "10h"),
("19910905 12:00", "19910909 12:00", "h", "17h", "3h"),
("19910905", "19910913 06:00", "2h", "24h", "10h"),
("19910905", "19910905 01:39", "Min", "5Min", "3Min"),
("19910905", "19910905 03:18", "2Min", "5Min", "3Min"),
],
)
def test_resample_with_offset(self, start, end, start_freq, end_freq, offset):
# GH 23882 & 31809
pi = period_range(start, end, freq=start_freq)
ser = Series(np.arange(len(pi)), index=pi)
result = ser.resample(end_freq, offset=offset).mean()
result = result.to_timestamp(end_freq)
expected = ser.to_timestamp().resample(end_freq, offset=offset).mean()
tm.assert_series_equal(result, expected)
def test_resample_with_offset_month(self):
# GH 23882 & 31809
pi = period_range("19910905 12:00", "19910909 1:00", freq="h")
ser = Series(np.arange(len(pi)), index=pi)
result = ser.resample("M").mean()
result = result.to_timestamp("M")
expected = ser.to_timestamp().resample("ME").mean()
# TODO: is non-tick the relevant characteristic? (GH 33815)
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"first,last,freq,freq_to_offset,exp_first,exp_last",
[
("19910905", "19920406", "D", "D", "19910905", "19920406"),
("19910905 00:00", "19920406 06:00", "D", "D", "19910905", "19920406"),
(
"19910905 06:00",
"19920406 06:00",
"h",
"h",
"19910905 06:00",
"19920406 06:00",
),
("19910906", "19920406", "M", "ME", "1991-09", "1992-04"),
("19910831", "19920430", "M", "ME", "1991-08", "1992-04"),
("1991-08", "1992-04", "M", "ME", "1991-08", "1992-04"),
],
)
def test_get_period_range_edges(
self, first, last, freq, freq_to_offset, exp_first, exp_last
):
first = Period(first)
last = Period(last)
exp_first = Period(exp_first, freq=freq)
exp_last = Period(exp_last, freq=freq)
freq = pd.tseries.frequencies.to_offset(freq_to_offset)
result = _get_period_range_edges(first, last, freq)
expected = (exp_first, exp_last)
assert result == expected
def test_sum_min_count(self):
# GH 19974
index = date_range(start="2018", freq="ME", periods=6)
data = np.ones(6)
data[3:6] = np.nan
s = Series(data, index).to_period()
result = s.resample("Q").sum(min_count=1)
expected = Series(
[3.0, np.nan], index=PeriodIndex(["2018Q1", "2018Q2"], freq="Q-DEC")
)
tm.assert_series_equal(result, expected)
def test_resample_t_l_deprecated(self):
# GH#52536
msg_t = "Invalid frequency: T"
msg_l = "Invalid frequency: L"
with pytest.raises(ValueError, match=msg_l):
period_range(
"2020-01-01 00:00:00 00:00", "2020-01-01 00:00:00 00:01", freq="L"
)
rng_l = period_range(
"2020-01-01 00:00:00 00:00", "2020-01-01 00:00:00 00:01", freq="ms"
)
ser = Series(np.arange(len(rng_l)), index=rng_l)
with pytest.raises(ValueError, match=msg_t):
ser.resample("T").mean()
@pytest.mark.parametrize(
"freq, freq_depr, freq_depr_res",
[
("2Q", "2q", "2y"),
("2M", "2m", "2q"),
],
)
def test_resample_lowercase_frequency_raises(self, freq, freq_depr, freq_depr_res):
msg = f"Invalid frequency: {freq_depr}"
with pytest.raises(ValueError, match=msg):
period_range("2020-01-01", "2020-08-01", freq=freq_depr)
msg = f"Invalid frequency: {freq_depr_res}"
rng = period_range("2020-01-01", "2020-08-01", freq=freq)
ser = Series(np.arange(len(rng)), index=rng)
with pytest.raises(ValueError, match=msg):
ser.resample(freq_depr_res).mean()
@pytest.mark.parametrize(
"offset",
[
offsets.MonthBegin(),
offsets.BYearBegin(2),
offsets.BusinessHour(2),
],
)
def test_asfreq_invalid_period_offset(self, offset, frame_or_series):
# GH#55785
msg = re.escape(f"{offset} is not supported as period frequency")
obj = frame_or_series(range(5), index=period_range("2020-01-01", periods=5))
with pytest.raises(ValueError, match=msg):
obj.asfreq(freq=offset)
@pytest.mark.parametrize(
"freq",
[
("2ME"),
("2QE"),
("2QE-FEB"),
("2YE"),
("2YE-MAR"),
("2me"),
("2qe"),
("2ye-mar"),
],
)
def test_resample_frequency_ME_QE_YE_raises(frame_or_series, freq):
# GH#9586
msg = f"{freq[1:]} is not supported as period frequency"
obj = frame_or_series(range(5), index=period_range("2020-01-01", periods=5))
msg = f"Invalid frequency: {freq}"
with pytest.raises(ValueError, match=msg):
obj.resample(freq)
def test_corner_cases_period(simple_period_range_series):
# miscellaneous test coverage
len0pts = simple_period_range_series("2007-01", "2010-05", freq="M")[:0]
# it works
result = len0pts.resample("Y-DEC").mean()
assert len(result) == 0
@pytest.mark.parametrize("freq", ["2BME", "2CBME", "2SME", "2BQE-FEB", "2BYE-MAR"])
def test_resample_frequency_invalid_freq(frame_or_series, freq):
# GH#9586
msg = f"Invalid frequency: {freq}"
obj = frame_or_series(range(5), index=period_range("2020-01-01", periods=5))
with pytest.raises(ValueError, match=msg):
obj.resample(freq)
|
TestPeriodIndex
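For orientation, a minimal period-resampling round trip of the kind these tests exercise (a toy example, not taken from the test file; the frequency aliases follow the period aliases used above):

import numpy as np
import pandas as pd

ser = pd.Series(np.arange(12), index=pd.period_range("2020-01", periods=12, freq="M"))
quarterly = ser.resample("Q").mean()       # downsample: months -> quarters
monthly = quarterly.resample("M").ffill()  # upsample back, forward-filling within each quarter
print(quarterly.index.freqstr, monthly.index.freqstr)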
|
python
|
huggingface__transformers
|
src/transformers/models/megatron_bert/modeling_megatron_bert.py
|
{
"start": 52019,
"end": 56771
}
|
class ____(MegatronBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = MegatronBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
|
MegatronBertForMultipleChoice
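The multiple-choice head above folds the choice dimension into the batch before the encoder and restores it for the loss; a shape-only sketch with plain tensors (not the actual model) illustrates the view calls:

import torch

batch_size, num_choices, seq_len, hidden = 2, 4, 8, 16
input_ids = torch.zeros(batch_size, num_choices, seq_len, dtype=torch.long)

flat_ids = input_ids.view(-1, input_ids.size(-1))      # (8, 8): choices folded into the batch
pooled = torch.randn(flat_ids.size(0), hidden)         # stand-in for the encoder's pooled output
logits = torch.nn.Linear(hidden, 1)(pooled)            # (8, 1): one score per (example, choice) pair
reshaped_logits = logits.view(-1, num_choices)         # (2, 4): scores grouped back per example

labels = torch.tensor([1, 3])                          # index of the correct choice for each example
loss = torch.nn.CrossEntropyLoss()(reshaped_logits, labels)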
|
python
|
scipy__scipy
|
scipy/signal/tests/test_signaltools.py
|
{
"start": 113436,
"end": 114619
}
|
class ____:
@skip_xp_backends(np_only=True, reason='list inputs are numpy specific')
def test_array_like(self, xp):
zi_expected = xp.asarray([5.0, -1.0])
zi = lfilter_zi([1.0, 0.0, 2.0], [1.0, -1.0, 0.5])
assert_array_almost_equal(zi, zi_expected)
def test_basic(self, xp):
a = xp.asarray([1.0, -1.0, 0.5])
b = xp.asarray([1.0, 0.0, 2.0])
zi_expected = xp.asarray([5.0, -1.0])
zi = lfilter_zi(b, a)
assert_array_almost_equal(zi, zi_expected)
def test_scale_invariance(self, xp):
# Regression test. There was a bug in which b was not correctly
# rescaled when a[0] was nonzero.
b = xp.asarray([2.0, 8, 5])
a = xp.asarray([1.0, 1, 8])
zi1 = lfilter_zi(b, a)
zi2 = lfilter_zi(2*b, 2*a)
xp_assert_close(zi2, zi1, rtol=1e-12)
@pytest.mark.parametrize('dtype', ['float32', 'float64'])
def test_types(self, dtype, xp):
dtype = getattr(xp, dtype)
b = xp.zeros((8), dtype=dtype)
a = xp.asarray([1], dtype=dtype)
assert signal.lfilter_zi(b, a).dtype == dtype
@make_xp_test_case(filtfilt, sosfiltfilt)
|
TestLFilterZI
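lfilter_zi, exercised above, returns the steady-state initial conditions of the filter; the usual pattern (shown in the SciPy docs) is to scale them by the first input sample so a step input starts with no transient:

import numpy as np
from scipy.signal import butter, lfilter, lfilter_zi

b, a = butter(3, 0.2)                    # a simple low-pass filter
x = np.ones(50)                          # step input
zi = lfilter_zi(b, a)
y, _ = lfilter(b, a, x, zi=zi * x[0])
print(np.allclose(y, 1.0))               # True: the output settles immediately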
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/templates.py
|
{
"start": 33402,
"end": 34169
}
|
class ____(DelegatingLexer):
"""
A lexer that highlights javascript code in genshi text templates.
"""
name = 'JavaScript+Genshi Text'
aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
'javascript+genshi']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+genshi',
'text/x-javascript+genshi',
'text/javascript+genshi']
def __init__(self, **options):
super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
GenshiTextLexer,
**options)
def analyse_text(text):
return GenshiLexer.analyse_text(text) - 0.05
|
JavascriptGenshiLexer
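The aliases above are what pygments' registry matches on; a short lookup-and-highlight sketch against the upstream pygments API (not the vendored copy bundled here):

from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import NullFormatter

lexer = get_lexer_by_name("js+genshitext")
print(type(lexer).__name__)                            # JavascriptGenshiLexer
print(highlight("var x = ${value};", lexer, NullFormatter()))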
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/typinganndata/mod_generics_cache.py
|
{
"start": 262,
"end": 508
}
|
class ____(Generic[T]):
class A(Generic[T]):
pass
my_inner_a1: 'B.A'
my_inner_a2: A
my_outer_a: 'A' # unless somebody calls get_type_hints with localns=B.__dict__
type Alias = int
OldStyle = TypeAliasType("OldStyle", int)
|
B
|
python
|
sympy__sympy
|
sympy/matrices/kind.py
|
{
"start": 114,
"end": 2843
}
|
class ____(Kind):
"""
Kind for all matrices in SymPy.
Basic class for this kind is ``MatrixBase`` and ``MatrixExpr``,
but any expression representing the matrix can have this.
Parameters
==========
element_kind : Kind
Kind of the element. Default is
:class:`sympy.core.kind.NumberKind`,
which means that the matrix contains only numbers.
Examples
========
Any instance of matrix class has kind ``MatrixKind``:
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol('A', 2, 2)
>>> A.kind
MatrixKind(NumberKind)
An expression representing a matrix may not be an instance of
the Matrix class, but it will have kind ``MatrixKind``:
>>> from sympy import MatrixExpr, Integral
>>> from sympy.abc import x
>>> intM = Integral(A, x)
>>> isinstance(intM, MatrixExpr)
False
>>> intM.kind
MatrixKind(NumberKind)
Use ``isinstance()`` to check for ``MatrixKind`` without specifying the
element kind. Use ``is`` to check the kind including the element kind:
>>> from sympy import Matrix
>>> from sympy.core import NumberKind
>>> from sympy.matrices import MatrixKind
>>> M = Matrix([1, 2])
>>> isinstance(M.kind, MatrixKind)
True
>>> M.kind is MatrixKind(NumberKind)
True
See Also
========
sympy.core.kind.NumberKind
sympy.core.kind.UndefinedKind
sympy.core.containers.TupleKind
sympy.sets.sets.SetKind
"""
def __new__(cls, element_kind=NumberKind):
obj = super().__new__(cls, element_kind)
obj.element_kind = element_kind
return obj
def __repr__(self):
return "MatrixKind(%s)" % self.element_kind
@Mul._kind_dispatcher.register(_NumberKind, MatrixKind)
def num_mat_mul(k1, k2):
"""
Return MatrixKind. The element kind is selected by recursive dispatching.
Do not need to dispatch in reversed order because KindDispatcher
searches for this automatically.
"""
# Deal with Mul._kind_dispatcher's commutativity
# XXX: this function is called with either k1 or k2 as MatrixKind because
# the Mul kind dispatcher is commutative. Maybe it shouldn't be. Need to
# swap the args here because NumberKind does not have an element_kind
# attribute.
if not isinstance(k2, MatrixKind):
k1, k2 = k2, k1
elemk = Mul._kind_dispatcher(k1, k2.element_kind)
return MatrixKind(elemk)
@Mul._kind_dispatcher.register(MatrixKind, MatrixKind)
def mat_mat_mul(k1, k2):
"""
Return MatrixKind. The element kind is selected by recursive dispatching.
"""
elemk = Mul._kind_dispatcher(k1.element_kind, k2.element_kind)
return MatrixKind(elemk)
|
MatrixKind
|
python
|
celery__celery
|
celery/canvas.py
|
{
"start": 55779,
"end": 56072
}
|
class ____(_basemap):
"""Map operation for tasks, using star arguments."""
_task_name = 'celery.starmap'
def __repr__(self):
task, it = self._unpack_args(self.kwargs)
return f'[{task.task}(*x) for x in {truncate(repr(it), 100)}]'
@Signature.register_type()
|
xstarmap
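xstarmap backs the task.starmap() shortcut: a single map task that unpacks each tuple into the wrapped task's arguments. A hedged sketch (the in-memory broker is only for illustration; executing the signature still needs a worker):

from celery import Celery

app = Celery("demo", broker="memory://", backend="cache+memory://")

@app.task
def add(x, y):
    return x + y

sig = add.starmap([(1, 2), (3, 4), (5, 6)])  # one task that runs add(*args) for each tuple
# With a worker running, sig.delay().get() would return [3, 7, 11].
print(repr(sig))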
|
python
|
huggingface__transformers
|
utils/check_docstrings.py
|
{
"start": 1672,
"end": 58263
}
|
class ____:
"""Information about a single @auto_docstring decorated function or class."""
decorator_line: int # 1-based line number of the decorator
def_line: int # 1-based line number of the def/class statement
kind: str # 'function' or 'class'
body_start_line: (
int # 1-based line number where body starts (for functions) or __init__ body start (for classes with __init__)
)
args: list[str] # List of argument names (excluding self, *args, **kwargs) - for classes, these are __init__ args
custom_args_text: str | None = None # custom_args string if provided in decorator
# Class-specific fields (only populated when kind == 'class')
has_init: bool = False # Whether the class has an __init__ method
init_def_line: int | None = None # 1-based line number of __init__ def (if has_init)
is_model_output: bool = False # Whether the class inherits from ModelOutput
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
PATH_TO_TRANSFORMERS = Path("src").resolve() / "transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
OPTIONAL_KEYWORD = "*optional*"
# Re pattern that catches args blocks in docstrings (with all variations of the name supported).
_re_args = re.compile(r"^\s*(Args?|Arguments?|Attributes?|Params?|Parameters?):\s*$")
# Re pattern that parses the start of an arg block: catches <name> (<description>) in those lines.
_re_parse_arg = re.compile(r"^(\s*)(\S+)\s+\((.+)\)(?:\:|$)")
# Re pattern that parses the end of a description of an arg (catches the default in *optional*, defaults to xxx).
_re_parse_description = re.compile(r"\*optional\*, defaults to (.*)$")
# Args that are always overridden in the docstring; for clarity we don't want to remove them from the docstring
ALWAYS_OVERRIDE = ["labels"]
# This is a temporary set of objects to ignore while we progressively fix them. Do not add anything here, fix the
# docstrings instead. If formatting should be ignored for the docstring, you can put a comment # no-format on the
# line before the docstring.
OBJECTS_TO_IGNORE = {
"ApertusConfig",
"Mxfp4Config",
"Qwen3OmniMoeConfig",
"Exaone4Config",
"SmolLM3Config",
"Gemma3nVisionConfig",
"Llama4Processor",
# Deprecated
"InputExample",
"InputFeatures",
# Missing arguments in the docstring
"ASTFeatureExtractor",
"AlbertModel",
"AlbertTokenizerFast",
"AlignTextModel",
"AlignVisionConfig",
"AudioClassificationPipeline",
"AutoformerConfig",
"AutomaticSpeechRecognitionPipeline",
"BarkCoarseConfig",
"BarkConfig",
"BarkFineConfig",
"BarkSemanticConfig",
"BartConfig",
"BartTokenizerFast",
"BarthezTokenizerFast",
"BeitModel",
"BertConfig",
"BertJapaneseTokenizer",
"CohereTokenizer",
"DebertaTokenizer",
"FNetTokenizer",
"FunnelTokenizer",
"GPT2Tokenizer",
"GPTNeoXTokenizer",
"GemmaTokenizer",
"HerbertTokenizer",
"LayoutLMv2Tokenizer",
"LayoutLMv3Tokenizer",
"LayoutXLMTokenizer",
"LlamaTokenizer",
"LlamaTokenizerFast",
"MBart50Tokenizer",
"NougatTokenizer",
"OpenAIGPTTokenizer",
"PythonBackend",
"ReformerTokenizer",
"SeamlessM4TTokenizer",
"SentencePieceBackend",
"SplinterTokenizer",
"TokenizersBackend",
"UdopTokenizer",
"WhisperTokenizer",
"XGLMTokenizer",
"XLMRobertaTokenizer",
"AlbertTokenizer",
"BarthezTokenizer",
"BigBirdTokenizer",
"BlenderbotTokenizer",
"CamembertTokenizer",
"CodeLlamaTokenizer",
"CodeLlamaTokenizerFast",
"BertModel",
"BertTokenizerFast",
"BigBirdConfig",
"BigBirdForQuestionAnswering",
"BigBirdModel",
"BigBirdPegasusConfig",
"BigBirdTokenizerFast",
"BitImageProcessor",
"BlenderbotConfig",
"BlenderbotSmallConfig",
"BlenderbotSmallTokenizerFast",
"BlenderbotTokenizerFast",
"Blip2VisionConfig",
"BlipTextConfig",
"BlipVisionConfig",
"BloomConfig",
"BLTConfig",
"BLTPatcherConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
"BrosModel",
"CamembertConfig",
"CamembertModel",
"CamembertTokenizerFast",
"CanineModel",
"CanineTokenizer",
"ChineseCLIPTextModel",
"ClapTextConfig",
"ConditionalDetrConfig",
"ConditionalDetrImageProcessor",
"ConvBertConfig",
"ConvBertTokenizerFast",
"ConvNextConfig",
"ConvNextV2Config",
"CpmAntTokenizer",
"CvtConfig",
"CvtModel",
"DeiTImageProcessor",
"DPRReaderTokenizer",
"DPRReaderTokenizerFast",
"DPTModel",
"Data2VecAudioConfig",
"Data2VecTextConfig",
"Data2VecTextModel",
"Data2VecVisionModel",
"DataCollatorForLanguageModeling",
"DebertaConfig",
"DebertaV2Config",
"DebertaV2Tokenizer",
"DebertaV2TokenizerFast",
"DecisionTransformerConfig",
"DeformableDetrConfig",
"DeformableDetrImageProcessor",
"DeiTModel",
"DepthEstimationPipeline",
"DetaConfig",
"DetaImageProcessor",
"DetrConfig",
"DetrImageProcessor",
"DinatModel",
"DINOv3ConvNextConfig",
"DINOv3ViTConfig",
"DistilBertConfig",
"DistilBertTokenizerFast",
"DocumentQuestionAnsweringPipeline",
"DonutSwinModel",
"EarlyStoppingCallback",
"EfficientFormerConfig",
"EfficientFormerImageProcessor",
"EfficientNetConfig",
"ElectraConfig",
"ElectraTokenizerFast",
"EncoderDecoderModel",
"ErnieMModel",
"ErnieModel",
"ErnieMTokenizer",
"EsmConfig",
"EsmModel",
"FNetConfig",
"FNetModel",
"FNetTokenizerFast",
"FSMTConfig",
"FeatureExtractionPipeline",
"FillMaskPipeline",
"FlaubertConfig",
"FlavaConfig",
"FlavaForPreTraining",
"FlavaImageModel",
"FlavaImageProcessor",
"FlavaMultimodalModel",
"FlavaTextConfig",
"FlavaTextModel",
"FocalNetModel",
"FunnelTokenizerFast",
"GPTBigCodeConfig",
"GPTJConfig",
"GPTNeoXConfig",
"GPTNeoXJapaneseConfig",
"GPTNeoXTokenizerFast",
"GPTSanJapaneseConfig",
"GitConfig",
"GitVisionConfig",
"Glm4vVisionConfig",
"Glm4vMoeVisionConfig",
"GraphormerConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
"HerbertTokenizerFast",
"HubertConfig",
"HubertForCTC",
"IBertConfig",
"IBertModel",
"IdeficsConfig",
"IdeficsProcessor",
"IJepaModel",
"ImageClassificationPipeline",
"ImageFeatureExtractionPipeline",
"ImageGPTConfig",
"ImageSegmentationPipeline",
"ImageTextToTextPipeline",
"AnyToAnyPipeline",
"ImageToImagePipeline",
"ImageToTextPipeline",
"InformerConfig",
"JukeboxPriorConfig",
"JukeboxTokenizer",
"LEDConfig",
"LEDTokenizerFast",
"LayoutLMForQuestionAnswering",
"LayoutLMTokenizerFast",
"LayoutLMv2Config",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2TokenizerFast",
"LayoutLMv3Config",
"LayoutLMv3ImageProcessor",
"LayoutLMv3TokenizerFast",
"LayoutXLMTokenizerFast",
"LevitConfig",
"LiltConfig",
"LiltModel",
"LongT5Config",
"LongformerConfig",
"LongformerModel",
"LongformerTokenizerFast",
"LukeModel",
"LukeTokenizer",
"LxmertTokenizerFast",
"M2M100Config",
"M2M100Tokenizer",
"MarkupLMProcessor",
"MaskGenerationPipeline",
"MBart50TokenizerFast",
"MBartConfig",
"MCTCTFeatureExtractor",
"MPNetConfig",
"MPNetModel",
"MPNetTokenizerFast",
"MT5Config",
"MT5TokenizerFast",
"MarianConfig",
"MarianTokenizer",
"MarkupLMConfig",
"MarkupLMModel",
"MarkupLMTokenizer",
"MarkupLMTokenizerFast",
"Mask2FormerConfig",
"MaskFormerConfig",
"MaxTimeCriteria",
"MegaConfig",
"MegaModel",
"MegatronBertConfig",
"MegatronBertForPreTraining",
"MegatronBertModel",
"MLCDVisionConfig",
"MobileBertConfig",
"MobileBertModel",
"MobileBertTokenizerFast",
"MobileNetV1ImageProcessor",
"MobileNetV1Model",
"MobileNetV2ImageProcessor",
"MobileNetV2Model",
"MobileViTModel",
"MobileViTV2Model",
"MLukeTokenizer",
"MraConfig",
"MusicgenDecoderConfig",
"MusicgenForConditionalGeneration",
"MusicgenMelodyForConditionalGeneration",
"MvpConfig",
"MvpTokenizerFast",
"MT5Tokenizer",
"NatModel",
"NerPipeline",
"NezhaConfig",
"NezhaModel",
"NllbMoeConfig",
"NllbTokenizer",
"NllbTokenizerFast",
"NystromformerConfig",
"OPTConfig",
"ObjectDetectionPipeline",
"OneFormerProcessor",
"OpenAIGPTTokenizerFast",
"OpenLlamaConfig",
"PLBartConfig",
"ParakeetCTCConfig",
"PegasusConfig",
"PegasusTokenizer",
"PegasusTokenizerFast",
"PegasusXConfig",
"PerceiverImageProcessor",
"PerceiverModel",
"PerceiverTokenizer",
"PersimmonConfig",
"Pipeline",
"Pix2StructConfig",
"Pix2StructTextConfig",
"PLBartTokenizer",
"Pop2PianoConfig",
"PreTrainedTokenizer",
"PreTrainedTokenizerBase",
"PreTrainedTokenizerFast",
"PrefixConstrainedLogitsProcessor",
"ProphetNetConfig",
"QDQBertConfig",
"QDQBertModel",
"QuestionAnsweringPipeline",
"RagConfig",
"RagModel",
"RagRetriever",
"RagSequenceForGeneration",
"RagTokenForGeneration",
"ReformerConfig",
"ReformerTokenizerFast",
"RegNetConfig",
"RemBertConfig",
"RemBertModel",
"RemBertTokenizer",
"RemBertTokenizerFast",
"RetriBertConfig",
"RetriBertTokenizerFast",
"RoCBertConfig",
"RoCBertModel",
"RoCBertTokenizer",
"RoFormerConfig",
"RobertaConfig",
"RobertaModel",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormModel",
"RobertaTokenizerFast",
"SEWConfig",
"SEWDConfig",
"SEWDForCTC",
"SEWForCTC",
"SamConfig",
"SamPromptEncoderConfig",
"SamHQConfig",
"SamHQPromptEncoderConfig",
"SeamlessM4TConfig", # use of unconventional markdown
"SeamlessM4Tv2Config", # use of unconventional markdown
"Seq2SeqTrainingArguments",
"Speech2Text2Config",
"Speech2Text2Tokenizer",
"Speech2TextTokenizer",
"SpeechEncoderDecoderModel",
"SpeechT5Config",
"SpeechT5Model",
"SplinterConfig",
"SplinterTokenizerFast",
"SqueezeBertTokenizerFast",
"SummarizationPipeline",
"Swin2SRImageProcessor",
"Swinv2Model",
"SwitchTransformersConfig",
"T5Config",
"T5Tokenizer",
"T5TokenizerFast",
"TableQuestionAnsweringPipeline",
"TableTransformerConfig",
"TapasConfig",
"TapasModel",
"TapasTokenizer",
"Text2TextGenerationPipeline",
"TextClassificationPipeline",
"TextGenerationPipeline",
"TimeSeriesTransformerConfig",
"TokenClassificationPipeline",
"TrOCRConfig",
"Phi4MultimodalProcessor",
"TrainerState",
"TrainingArguments",
"TrajectoryTransformerConfig",
"TranslationPipeline",
"TvltImageProcessor",
"UMT5Config",
"UperNetConfig",
"UperNetForSemanticSegmentation",
"ViTHybridImageProcessor",
"ViTHybridModel",
"ViTMSNModel",
"ViTModel",
"VideoClassificationPipeline",
"ViltConfig",
"ViltForImagesAndTextClassification",
"ViltModel",
"VisionEncoderDecoderModel",
"VisionTextDualEncoderModel",
"VisualBertConfig",
"VisualBertModel",
"VisualQuestionAnsweringPipeline",
"VitMatteForImageMatting",
"VitsTokenizer",
"VivitModel",
"Wav2Vec2BertForCTC",
"Wav2Vec2CTCTokenizer",
"Wav2Vec2Config",
"Wav2Vec2ConformerConfig",
"Wav2Vec2ConformerForCTC",
"Wav2Vec2FeatureExtractor",
"Wav2Vec2PhonemeCTCTokenizer",
"WavLMConfig",
"WavLMForCTC",
"WhisperConfig",
"WhisperFeatureExtractor",
"WhisperForAudioClassification",
"XCLIPTextConfig",
"XCLIPVisionConfig",
"XGLMConfig",
"XGLMModel",
"XGLMTokenizerFast",
"XLMConfig",
"XLMProphetNetConfig",
"XLMRobertaConfig",
"XLMRobertaModel",
"XLMRobertaTokenizerFast",
"XLMRobertaXLConfig",
"XLMRobertaXLModel",
"XLNetConfig",
"XLNetTokenizerFast",
"XmodConfig",
"XmodModel",
"YolosImageProcessor",
"YolosModel",
"YosoConfig",
"ZeroShotAudioClassificationPipeline",
"ZeroShotClassificationPipeline",
"ZeroShotImageClassificationPipeline",
"ZeroShotObjectDetectionPipeline",
"Llama4TextConfig",
"BltConfig",
"BltPatcherConfig",
}
# In addition to the objects above, we also ignore objects with certain prefixes. If you add an item to the list
# below, make sure to add a comment explaining why.
OBJECT_TO_IGNORE_PREFIXES = [
"_", # Private objects are not documented
]
# Supported math operations when interpreting the value of defaults.
MATH_OPERATORS = {
ast.Add: op.add,
ast.Sub: op.sub,
ast.Mult: op.mul,
ast.Div: op.truediv,
ast.Pow: op.pow,
ast.BitXor: op.xor,
ast.USub: op.neg,
}
def find_indent(line: str) -> int:
"""
Returns the number of spaces that start a line indent.
"""
search = re.search(r"^(\s*)(?:\S|$)", line)
if search is None:
return 0
return len(search.groups()[0])
def stringify_default(default: Any) -> str:
"""
    Returns the string representation of a default value, as used in docstrings: numbers are left as-is, all other
    objects are wrapped in backticks.
Args:
default (`Any`): The default value to process
Returns:
`str`: The string representation of that default.
"""
if isinstance(default, bool):
# We need to test for bool first as a bool passes isinstance(xxx, (int, float))
return f"`{default}`"
elif isinstance(default, enum.Enum):
# We need to test for enum first as an enum with int values will pass isinstance(xxx, (int, float))
return f"`{str(default)}`"
elif isinstance(default, int):
return str(default)
elif isinstance(default, float):
result = str(default)
return str(round(default, 2)) if len(result) > 6 else result
elif isinstance(default, str):
return str(default) if default.isnumeric() else f'`"{default}"`'
elif isinstance(default, type):
return f"`{default.__name__}`"
else:
return f"`{default}`"
def eval_math_expression(expression: str) -> float | int | None:
# Mainly taken from the excellent https://stackoverflow.com/a/9558001
"""
    Evaluate (safely) a mathematical expression and return its value.
Args:
expression (`str`): The expression to evaluate.
Returns:
`Optional[Union[float, int]]`: Returns `None` if the evaluation fails in any way and the value computed
otherwise.
Example:
```py
    >>> eval_math_expression('2^6')
    4
    >>> eval_math_expression('2**6')
    64
    >>> eval_math_expression('1 + 2*3**(4^5) / (6 + -7)')
-5.0
```
"""
try:
return eval_node(ast.parse(expression, mode="eval").body)
except TypeError:
return
def eval_node(node):
if isinstance(node, ast.Constant) and type(node.value) in (int, float, complex):
return node.value
elif isinstance(node, ast.BinOp): # <left> <operator> <right>
return MATH_OPERATORS[type(node.op)](eval_node(node.left), eval_node(node.right))
elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1
return MATH_OPERATORS[type(node.op)](eval_node(node.operand))
else:
raise TypeError(node)
def replace_default_in_arg_description(description: str, default: Any) -> str:
"""
Catches the default value in the description of an argument inside a docstring and replaces it by the value passed.
Args:
description (`str`): The description of an argument in a docstring to process.
default (`Any`): The default value that would be in the docstring of that argument.
Returns:
`str`: The description updated with the new default value.
"""
    # Lots of docstrings have `optional` or **optional** instead of *optional* so we do this fix here.
description = description.replace("`optional`", OPTIONAL_KEYWORD)
description = description.replace("**optional**", OPTIONAL_KEYWORD)
if default is inspect._empty:
# No default, make sure the description doesn't have any either
idx = description.find(OPTIONAL_KEYWORD)
if idx != -1:
description = description[:idx].rstrip()
if description.endswith(","):
description = description[:-1].rstrip()
elif default is None:
        # Defaults of None are not written; we just set `*optional*`. If a default other than None is specified in
        # the description, we do not erase it (sometimes the default is set to `None` because the real default is a
        # mutable object).
idx = description.find(OPTIONAL_KEYWORD)
if idx == -1:
description = f"{description}, {OPTIONAL_KEYWORD}"
elif re.search(r"defaults to `?None`?", description) is not None:
len_optional = len(OPTIONAL_KEYWORD)
description = description[: idx + len_optional]
else:
        str_default = None
        # For numbers we may have a default that is given by a math operation (1/255 is really popular). We don't
        # want to replace those by their actual values.
        default_match = re.search("defaults to `?(.*?)(?:`|$)", description)
        current_default = default_match.groups()[0] if default_match is not None else None
        if isinstance(default, (int, float)) and current_default is not None:
            # Grab the documented default and evaluate it.
            if default == eval_math_expression(current_default):
                try:
                    # If it can be directly converted to the type of the default, it's a simple value
                    str_default = str(type(default)(current_default))
                except Exception:
                    # Otherwise there is a math operator so we add a code block.
                    str_default = f"`{current_default}`"
        elif (
            isinstance(default, enum.Enum)
            and current_default is not None
            and default.name == current_default.split(".")[-1]
        ):
            # When the default is an Enum (this is often the case for PIL.Image.Resampling), and the docstring
            # matches the enum name, keep the existing docstring rather than clobbering it with the enum value.
            str_default = f"`{current_default}`"
if str_default is None:
str_default = stringify_default(default)
# Make sure default match
if OPTIONAL_KEYWORD not in description:
description = f"{description}, {OPTIONAL_KEYWORD}, defaults to {str_default}"
elif _re_parse_description.search(description) is None:
idx = description.find(OPTIONAL_KEYWORD)
len_optional = len(OPTIONAL_KEYWORD)
description = f"{description[: idx + len_optional]}, defaults to {str_default}"
else:
description = _re_parse_description.sub(rf"*optional*, defaults to {str_default}", description)
return description
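# Illustrative sketch (not in the original script): how `replace_default_in_arg_description` rewrites an
# argument description. The descriptions below are made-up examples, and the expected outputs assume
# OPTIONAL_KEYWORD (defined earlier in this file) is the literal *optional* marker.
def _demo_replace_default():
    # A non-None default is appended where none was documented yet.
    with_default = replace_default_in_arg_description("`int`, *optional*", 7)
    # A None default only keeps the *optional* marker.
    optional_only = replace_default_in_arg_description("`str`", None)
    return with_default == "`int`, *optional*, defaults to 7" and optional_only == "`str`, *optional*"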
def get_default_description(arg: inspect.Parameter) -> str:
"""
Builds a default description for a parameter that was not documented.
Args:
arg (`inspect.Parameter`): The argument in the signature to generate a description for.
Returns:
`str`: The description.
"""
if arg.annotation is inspect._empty:
arg_type = "<fill_type>"
elif hasattr(arg.annotation, "__name__"):
arg_type = arg.annotation.__name__
else:
arg_type = str(arg.annotation)
if arg.default is inspect._empty:
return f"`{arg_type}`"
elif arg.default is None:
return f"`{arg_type}`, {OPTIONAL_KEYWORD}"
else:
str_default = stringify_default(arg.default)
return f"`{arg_type}`, {OPTIONAL_KEYWORD}, defaults to {str_default}"
def find_source_file(obj: Any) -> Path:
"""
Finds the source file of an object.
Args:
obj (`Any`): The object whose source file we are looking for.
Returns:
`Path`: The source file.
"""
module = obj.__module__
obj_file = PATH_TO_TRANSFORMERS
for part in module.split(".")[1:]:
obj_file = obj_file / part
return obj_file.with_suffix(".py")
def match_docstring_with_signature(obj: Any) -> tuple[str, str] | None:
"""
Matches the docstring of an object with its signature.
Args:
obj (`Any`): The object to process.
Returns:
`Optional[Tuple[str, str]]`: Returns `None` if there is no docstring or no parameters documented in the
docstring, otherwise returns a tuple of two strings: the current documentation of the arguments in the
docstring and the one matched with the signature.
"""
if len(getattr(obj, "__doc__", "")) == 0:
# Nothing to do, there is no docstring.
return
# Read the docstring in the source code to see if there is a special command to ignore this object.
try:
source, _ = inspect.getsourcelines(obj)
except OSError:
source = []
# Find the line where the docstring starts
idx = 0
while idx < len(source) and '"""' not in source[idx]:
idx += 1
ignore_order = False
if idx < len(source):
line_before_docstring = source[idx - 1]
# Match '# no-format' (allowing surrounding whitespaces)
if re.search(r"^\s*#\s*no-format\s*$", line_before_docstring):
# This object is ignored by the auto-docstring tool
return
# Match '# ignore-order' (allowing surrounding whitespaces)
elif re.search(r"^\s*#\s*ignore-order\s*$", line_before_docstring):
ignore_order = True
# Read the signature. Skip on `TypedDict` objects for now. Inspect cannot
# parse their signature ("no signature found for builtin type <class 'dict'>")
if issubclass(obj, dict) and hasattr(obj, "__annotations__"):
return
signature = inspect.signature(obj).parameters
obj_doc_lines = obj.__doc__.split("\n")
# Get to the line where we start documenting arguments
idx = 0
while idx < len(obj_doc_lines) and _re_args.search(obj_doc_lines[idx]) is None:
idx += 1
if idx == len(obj_doc_lines):
# Nothing to do, no parameters are documented.
return
if "kwargs" in signature and signature["kwargs"].annotation != inspect._empty:
# Inspecting signature with typed kwargs is not supported yet.
return
indent = find_indent(obj_doc_lines[idx])
arguments = {}
current_arg = None
idx += 1
start_idx = idx
# Keep going until the arg section is finished (nonempty line at the same indent level) or the end of the docstring.
while idx < len(obj_doc_lines) and (
len(obj_doc_lines[idx].strip()) == 0 or find_indent(obj_doc_lines[idx]) > indent
):
if find_indent(obj_doc_lines[idx]) == indent + 4:
# New argument -> let's generate the proper doc for it
re_search_arg = _re_parse_arg.search(obj_doc_lines[idx])
if re_search_arg is not None:
_, name, description = re_search_arg.groups()
current_arg = name
if name in signature:
default = signature[name].default
if signature[name].kind is inspect._ParameterKind.VAR_KEYWORD:
default = None
new_description = replace_default_in_arg_description(description, default)
else:
new_description = description
init_doc = _re_parse_arg.sub(rf"\1\2 ({new_description}):", obj_doc_lines[idx])
arguments[current_arg] = [init_doc]
elif current_arg is not None:
arguments[current_arg].append(obj_doc_lines[idx])
idx += 1
# We went too far by one (perhaps more if there are a lot of new lines)
idx -= 1
if current_arg:
while len(obj_doc_lines[idx].strip()) == 0:
arguments[current_arg] = arguments[current_arg][:-1]
idx -= 1
# And we went too far by one again.
idx += 1
old_doc_arg = "\n".join(obj_doc_lines[start_idx:idx])
old_arguments = list(arguments.keys())
arguments = {name: "\n".join(doc) for name, doc in arguments.items()}
# Add missing arguments with a template
for name in set(signature.keys()) - set(arguments.keys()):
arg = signature[name]
# We ignore private arguments or *args/**kwargs (unless they are documented by the user)
if name.startswith("_") or arg.kind in [
inspect._ParameterKind.VAR_KEYWORD,
inspect._ParameterKind.VAR_POSITIONAL,
]:
arguments[name] = ""
else:
arg_desc = get_default_description(arg)
arguments[name] = " " * (indent + 4) + f"{name} ({arg_desc}): <fill_docstring>"
# Arguments are sorted by the order in the signature unless a special comment is put.
if ignore_order:
new_param_docs = [arguments[name] for name in old_arguments if name in signature]
missing = set(signature.keys()) - set(old_arguments)
new_param_docs.extend([arguments[name] for name in missing if len(arguments[name]) > 0])
else:
new_param_docs = [arguments[name] for name in signature if len(arguments[name]) > 0]
new_doc_arg = "\n".join(new_param_docs)
return old_doc_arg, new_doc_arg
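# Illustrative usage (not part of the original checker): `match_docstring_with_signature` returns the
# current and regenerated argument sections, or None when there is nothing to compare.
# `transformers.BertConfig` is only an example target here, not a class this script depends on.
def _demo_match_docstring(obj=None):
    obj = obj if obj is not None else transformers.BertConfig
    result = match_docstring_with_signature(obj)
    if result is None:
        return None
    old_doc, new_doc = result
    # The two strings are equal when the docstring already matches the signature.
    return old_doc == new_doc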
def fix_docstring(obj: Any, old_doc_args: str, new_doc_args: str):
"""
Fixes the docstring of an object by replacing its arguments documentation by the one matched with the signature.
Args:
obj (`Any`):
            The object whose docstring we are fixing.
old_doc_args (`str`):
The current documentation of the parameters of `obj` in the docstring (as returned by
`match_docstring_with_signature`).
new_doc_args (`str`):
The documentation of the parameters of `obj` matched with its signature (as returned by
`match_docstring_with_signature`).
"""
# Read the docstring in the source code and make sure we have the right part of the docstring
source, line_number = inspect.getsourcelines(obj)
# Get to the line where we start documenting arguments
idx = 0
while idx < len(source) and _re_args.search(source[idx]) is None:
idx += 1
if idx == len(source):
# Args are not defined in the docstring of this object. This can happen when the docstring is inherited.
# In this case, we are not trying to fix it on the child object.
return
# Get to the line where we stop documenting arguments
indent = find_indent(source[idx])
idx += 1
start_idx = idx
while idx < len(source) and (len(source[idx].strip()) == 0 or find_indent(source[idx]) > indent):
idx += 1
idx -= 1
while len(source[idx].strip()) == 0:
idx -= 1
idx += 1
# `old_doc_args` is built from `obj.__doc__`, which may have
# different indentation than the raw source from `inspect.getsourcelines`.
# We use `inspect.cleandoc` to remove indentation uniformly from both
# strings before comparing them.
source_args_as_str = "".join(source[start_idx:idx])
if inspect.cleandoc(source_args_as_str) != inspect.cleandoc(old_doc_args):
# Args are not fully defined in the docstring of this object
obj_file = find_source_file(obj)
actual_args_section = source_args_as_str.rstrip()
raise ValueError(
f"Cannot fix docstring of {obj.__name__} in {obj_file} because the argument section in the source code "
f"does not match the expected format. This usually happens when:\n"
f"1. The argument section is not properly indented\n"
f"2. The argument section contains unexpected formatting\n"
f"3. The docstring parsing failed to correctly identify the argument boundaries\n\n"
f"Expected argument section:\n{repr(old_doc_args)}\n\n"
f"Actual argument section found:\n{repr(actual_args_section)}\n\n"
)
obj_file = find_source_file(obj)
with open(obj_file, "r", encoding="utf-8") as f:
content = f.read()
# Replace content
lines = content.split("\n")
prev_line_indentation = find_indent(lines[line_number + start_idx - 2])
# Now increase the indentation of every line in new_doc_args by prev_line_indentation
new_doc_args = "\n".join([f"{' ' * prev_line_indentation}{line}" for line in new_doc_args.split("\n")])
lines = lines[: line_number + start_idx - 1] + [new_doc_args] + lines[line_number + idx - 1 :]
print(f"Fixing the docstring of {obj.__name__} in {obj_file}.")
with open(obj_file, "w", encoding="utf-8") as f:
f.write("\n".join(lines))
def _find_docstring_end_line(lines, docstring_start_line):
"""Find the line number where a docstring ends. Only handles triple double quotes."""
if docstring_start_line is None or docstring_start_line < 0 or docstring_start_line >= len(lines):
return None
start_line = lines[docstring_start_line]
if '"""' not in start_line:
return None
# Check if docstring starts and ends on the same line
if start_line.count('"""') >= 2:
return docstring_start_line
# Find the closing triple quotes on subsequent lines
for idx in range(docstring_start_line + 1, len(lines)):
if '"""' in lines[idx]:
return idx
return len(lines) - 1
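# Illustrative sketch (not in the original script): `_find_docstring_end_line` works on a plain list of
# source lines and returns the index of the line holding the closing triple quotes.
def _demo_find_docstring_end():
    sample = ['def f():', '    """Start of the doc.', '    More text.', '    """', '    return 1']
    return _find_docstring_end_line(sample, 1)  # -> 3, the line with the closing quotes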
def _is_auto_docstring_decorator(dec):
"""Return True if the decorator expression corresponds to `@auto_docstring`."""
# Handle @auto_docstring(...) - unwrap the Call to get the function
target = dec.func if isinstance(dec, ast.Call) else dec
# Check if it's named "auto_docstring"
return isinstance(target, ast.Name) and target.id == "auto_docstring"
def _extract_function_args(func_node: ast.FunctionDef | ast.AsyncFunctionDef) -> list[str]:
"""Extract argument names from a function node, excluding 'self', *args, **kwargs."""
all_args = (func_node.args.posonlyargs or []) + func_node.args.args + func_node.args.kwonlyargs
return [a.arg for a in all_args if a.arg != "self"]
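# Illustrative sketch (not part of the original checker): the two AST helpers above are meant to be used
# together on a parsed definition. `ast` is already imported at module level, as elsewhere in this file,
# and the source string below is a made-up example.
_DEMO_DECORATED_SOURCE = (
    "@auto_docstring\n"
    "def forward(self, input_ids, attention_mask=None, **kwargs):\n"
    "    pass\n"
)
def _demo_ast_helpers():
    func = ast.parse(_DEMO_DECORATED_SOURCE).body[0]
    is_decorated = any(_is_auto_docstring_decorator(dec) for dec in func.decorator_list)
    # is_decorated is True; the args are ['input_ids', 'attention_mask'] (self and **kwargs excluded)
    return is_decorated, _extract_function_args(func)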
def find_matching_model_files(check_all: bool = False):
"""
    Find all model files in the transformers repo that should be checked for @auto_docstring
    (modeling files and fast image processing files). Unless `check_all` is True, only the files
    present in the current diff are kept.
    Returns:
        List of file paths, or `None` when only the diff is checked and it contains no module files.
"""
module_diff_files = None
if not check_all:
module_diff_files = set()
repo = Repo(PATH_TO_REPO)
# Diff from index to unstaged files
for modified_file_diff in repo.index.diff(None):
if modified_file_diff.a_path.startswith("src/transformers"):
module_diff_files.add(os.path.join(PATH_TO_REPO, modified_file_diff.a_path))
# Diff from index to `main`
for modified_file_diff in repo.index.diff(repo.refs.main.commit):
if modified_file_diff.a_path.startswith("src/transformers"):
module_diff_files.add(os.path.join(PATH_TO_REPO, modified_file_diff.a_path))
# quick escape route: if there are no module files in the diff, skip this check
if len(module_diff_files) == 0:
return None
modeling_glob_pattern = os.path.join(PATH_TO_TRANSFORMERS, "models/**/modeling_**")
potential_files = glob.glob(modeling_glob_pattern)
image_processing_glob_pattern = os.path.join(PATH_TO_TRANSFORMERS, "models/**/image_processing_*_fast.py")
potential_files += glob.glob(image_processing_glob_pattern)
matching_files = []
for file_path in potential_files:
if os.path.isfile(file_path):
matching_files.append(file_path)
if not check_all:
# intersect with module_diff_files
matching_files = sorted([file for file in matching_files if file in module_diff_files])
print(" Checking auto_docstrings in the following files:" + "\n - " + "\n - ".join(matching_files))
return matching_files
def find_files_with_auto_docstring(matching_files, decorator="@auto_docstring"):
"""
From a list of files, return those that contain the @auto_docstring decorator.
Fast path: simple substring presence check.
"""
auto_docstrings_files = []
for file_path in matching_files:
try:
with open(file_path, "r", encoding="utf-8") as f:
source = f.read()
except OSError:
continue
if decorator in source:
auto_docstrings_files.append(file_path)
return auto_docstrings_files
def get_args_in_dataclass(lines, dataclass_content):
dataclass_content = [line.split("#")[0] for line in dataclass_content]
dataclass_content = "\n".join(dataclass_content)
args_in_dataclass = re.findall(r"^ (\w+)(?:\s*:|\s*=|\s*$)", dataclass_content, re.MULTILINE)
if "self" in args_in_dataclass:
args_in_dataclass.remove("self")
return args_in_dataclass
def generate_new_docstring_for_signature(
lines,
args_in_signature,
sig_end_line,
docstring_start_line,
arg_indent=" ",
output_docstring_indent=8,
custom_args_dict={},
source_args_doc=[ModelArgs, ImageProcessorArgs],
):
"""
    Generalized docstring generator for a function or class signature.
    Args:
        lines: List of lines from the file.
        args_in_signature: Argument names extracted from the signature (or dataclass fields).
        sig_end_line: Line index where the signature ends.
        docstring_start_line: Line index where the docstring starts (or None if not present).
        arg_indent: Indentation for missing argument doc entries.
        output_docstring_indent: Number of spaces the generated docstring is indented by.
        custom_args_dict: Parsed custom argument docs passed via `@auto_docstring(custom_args=...)`.
        source_args_doc: Source-of-truth argument doc classes used to drop redundant entries.
    Returns:
        A tuple of (new_docstring, sig_end_line, docstring_end_line, missing_docstring_args,
        fill_docstring_args, docstring_args_ro_remove).
"""
# Extract and clean signature
missing_docstring_args = []
docstring_args_ro_remove = []
fill_docstring_args = []
# Parse docstring if present
args_docstring_dict = {}
remaining_docstring = ""
if docstring_start_line is not None:
docstring_end_line = _find_docstring_end_line(lines, docstring_start_line)
docstring_content = lines[docstring_start_line : docstring_end_line + 1]
parsed_docstring, remaining_docstring = parse_docstring("\n".join(docstring_content))
args_docstring_dict.update(parsed_docstring)
else:
docstring_end_line = None
# Remove pre-existing entries for *args and untyped **kwargs from the docstring
# (No longer needed since *args are excluded from args_in_signature)
# Remove args that are the same as the ones in the source args doc
for arg in args_docstring_dict:
if arg in get_args_doc_from_source(source_args_doc) and arg not in ALWAYS_OVERRIDE:
source_arg_doc = get_args_doc_from_source(source_args_doc)[arg]
if source_arg_doc["description"].strip("\n ") == args_docstring_dict[arg]["description"].strip("\n "):
if source_arg_doc.get("shape") is not None and args_docstring_dict[arg].get("shape") is not None:
if source_arg_doc.get("shape").strip("\n ") == args_docstring_dict[arg].get("shape").strip("\n "):
docstring_args_ro_remove.append(arg)
elif (
source_arg_doc.get("additional_info") is not None
and args_docstring_dict[arg].get("additional_info") is not None
):
if source_arg_doc.get("additional_info").strip("\n ") == args_docstring_dict[arg].get(
"additional_info"
).strip("\n "):
docstring_args_ro_remove.append(arg)
else:
docstring_args_ro_remove.append(arg)
args_docstring_dict = {
arg: args_docstring_dict[arg] for arg in args_docstring_dict if arg not in docstring_args_ro_remove
}
# Fill missing args
for arg in args_in_signature:
if (
arg not in args_docstring_dict
and arg not in get_args_doc_from_source(source_args_doc)
and arg not in custom_args_dict
):
missing_docstring_args.append(arg)
args_docstring_dict[arg] = {
"type": "<fill_type>",
"optional": False,
"shape": None,
"description": "\n <fill_docstring>",
"default": None,
"additional_info": None,
}
# Handle docstring of inherited args (for dataclasses)
ordered_args_docstring_dict = OrderedDict(
(arg, args_docstring_dict[arg]) for arg in args_docstring_dict if arg not in args_in_signature
)
# Add args in the order of the signature
ordered_args_docstring_dict.update(
(arg, args_docstring_dict[arg]) for arg in args_in_signature if arg in args_docstring_dict
)
# Build new docstring
new_docstring = ""
if len(ordered_args_docstring_dict) > 0 or remaining_docstring:
new_docstring += 'r"""\n'
for arg in ordered_args_docstring_dict:
additional_info = ordered_args_docstring_dict[arg]["additional_info"] or ""
custom_arg_description = ordered_args_docstring_dict[arg]["description"]
if "<fill_docstring>" in custom_arg_description and arg not in missing_docstring_args:
fill_docstring_args.append(arg)
if custom_arg_description.endswith('"""'):
custom_arg_description = "\n".join(custom_arg_description.split("\n")[:-1])
new_docstring += (
f"{arg} ({ordered_args_docstring_dict[arg]['type']}{additional_info}):{custom_arg_description}\n"
)
close_docstring = True
if remaining_docstring:
if remaining_docstring.endswith('"""'):
close_docstring = False
end_docstring = "\n" if close_docstring else ""
new_docstring += f"{set_min_indent(remaining_docstring, 0)}{end_docstring}"
if close_docstring:
new_docstring += '"""'
new_docstring = set_min_indent(new_docstring, output_docstring_indent)
return (
new_docstring,
sig_end_line,
docstring_end_line if docstring_end_line is not None else sig_end_line - 1,
missing_docstring_args,
fill_docstring_args,
docstring_args_ro_remove,
)
def generate_new_docstring_for_function(
lines,
item: DecoratedItem,
custom_args_dict,
):
"""
Wrapper for function docstring generation using the generalized helper.
"""
sig_end_line = item.body_start_line - 1 # Convert to 0-based
args_in_signature = item.args
docstring_start_line = sig_end_line if '"""' in lines[sig_end_line] else None
return generate_new_docstring_for_signature(
lines,
args_in_signature,
sig_end_line,
docstring_start_line,
arg_indent=" ",
custom_args_dict=custom_args_dict,
)
def generate_new_docstring_for_class(
lines,
item: DecoratedItem,
custom_args_dict,
source: str,
):
"""
Wrapper for class docstring generation (via __init__) using the generalized helper.
Returns the new docstring and relevant signature/docstring indices.
"""
# Use pre-extracted information from DecoratedItem (no need to search or re-parse!)
if item.has_init:
# Class has an __init__ method - use its args and body start
sig_end_line = item.body_start_line - 1 # Convert from body start to sig end (0-based)
args_in_signature = item.args
output_docstring_indent = 8
source_args_doc = [ModelArgs, ImageProcessorArgs]
elif item.is_model_output:
# ModelOutput class - extract args from dataclass attributes
current_line_end = item.def_line - 1 # Convert to 0-based
sig_end_line = current_line_end + 1
docstring_end = _find_docstring_end_line(lines, sig_end_line)
model_output_class_start = docstring_end + 1 if docstring_end is not None else sig_end_line - 1
model_output_class_end = model_output_class_start
while model_output_class_end < len(lines) and (
lines[model_output_class_end].startswith(" ") or lines[model_output_class_end] == ""
):
model_output_class_end += 1
dataclass_content = lines[model_output_class_start : model_output_class_end - 1]
args_in_signature = get_args_in_dataclass(lines, dataclass_content)
output_docstring_indent = 4
source_args_doc = [ModelOutputArgs]
else:
# Class has no __init__ and is not a ModelOutput - nothing to document
return "", None, None, [], [], []
docstring_start_line = sig_end_line if '"""' in lines[sig_end_line] else None
return generate_new_docstring_for_signature(
lines,
args_in_signature,
sig_end_line,
docstring_start_line,
arg_indent="",
custom_args_dict=custom_args_dict,
output_docstring_indent=output_docstring_indent,
source_args_doc=source_args_doc,
)
def _build_ast_indexes(source: str) -> list[DecoratedItem]:
"""Parse source once and return list of all @auto_docstring decorated items.
Returns:
List of DecoratedItem objects, one for each @auto_docstring decorated function or class.
"""
tree = ast.parse(source)
# First pass: collect top-level string variables (for resolving custom_args variable references)
var_to_string: dict[str, str] = {}
for node in tree.body:
# Handle: ARGS = "some string"
if isinstance(node, ast.Assign) and isinstance(node.value, ast.Constant):
if isinstance(node.value.value, str):
for target in node.targets:
if isinstance(target, ast.Name):
var_to_string[target.id] = node.value.value
# Handle: ARGS: str = "some string"
elif isinstance(node, ast.AnnAssign) and isinstance(node.value, ast.Constant):
if isinstance(node.value.value, str) and isinstance(node.target, ast.Name):
var_to_string[node.target.id] = node.value.value
# Second pass: find all @auto_docstring decorated functions/classes
decorated_items: list[DecoratedItem] = []
for node in ast.walk(tree):
if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
continue
# Find @auto_docstring decorator and extract custom_args if present
decorator_line = None
custom_args_text = None
for dec in node.decorator_list:
if not _is_auto_docstring_decorator(dec):
continue
decorator_line = dec.lineno
# Extract custom_args from @auto_docstring(custom_args=...)
if isinstance(dec, ast.Call):
for kw in dec.keywords:
if kw.arg == "custom_args":
if isinstance(kw.value, ast.Constant) and isinstance(kw.value.value, str):
custom_args_text = kw.value.value.strip()
elif isinstance(kw.value, ast.Name):
custom_args_text = var_to_string.get(kw.value.id, "").strip()
break
if decorator_line is None: # No @auto_docstring decorator found
continue
# Extract info for this decorated item
kind = "class" if isinstance(node, ast.ClassDef) else "function"
body_start_line = node.body[0].lineno if node.body else node.lineno + 1
# Extract function arguments (skip self, *args, **kwargs)
arg_names = []
has_init = False
init_def_line = None
is_model_output = False
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
# For functions, extract args directly
arg_names = _extract_function_args(node)
elif isinstance(node, ast.ClassDef):
# For classes, look for __init__ method and check if it's a ModelOutput
# Check if class inherits from ModelOutput
for base in node.bases:
if isinstance(base, ast.Name) and "ModelOutput" in base.id:
is_model_output = True
break
# Look for __init__ method in the class body
for class_item in node.body:
if isinstance(class_item, ast.FunctionDef) and class_item.name == "__init__":
has_init = True
init_def_line = class_item.lineno
arg_names = _extract_function_args(class_item)
# Update body_start_line to be the __init__ body start
body_start_line = class_item.body[0].lineno if class_item.body else class_item.lineno + 1
break
decorated_items.append(
DecoratedItem(
decorator_line=decorator_line,
def_line=node.lineno,
kind=kind,
body_start_line=body_start_line,
args=arg_names,
custom_args_text=custom_args_text,
has_init=has_init,
init_def_line=init_def_line,
is_model_output=is_model_output,
)
)
return sorted(decorated_items, key=lambda x: x.decorator_line)
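# Illustrative usage (not part of the original checker): parse a single modeling file and list every
# @auto_docstring target it contains. The path below is only an example.
def _demo_build_ast_indexes(path="src/transformers/models/bert/modeling_bert.py"):
    with open(path, "r", encoding="utf-8") as f:
        source = f.read()
    # Each DecoratedItem records where the decorator, the definition and the (init) signature live.
    return [(item.kind, item.def_line, item.args) for item in _build_ast_indexes(source)]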
def update_file_with_new_docstrings(
candidate_file,
lines,
decorated_items: list[DecoratedItem],
source: str,
overwrite=False,
):
"""
    For a given file, generate the updated docstrings for all @auto_docstring candidates, write the new content
    when `overwrite` is True, and return the collected warnings.
"""
if not decorated_items:
return [], [], []
missing_docstring_args_warnings = []
fill_docstring_args_warnings = []
docstring_args_ro_remove_warnings = []
# Build new file content by processing decorated items and unchanged sections
content_base_file_new_lines = []
last_line_added = 0 # Track the last line we've already added to output (0-based)
for index, item in enumerate(decorated_items):
def_line_0 = item.def_line - 1 # Convert to 0-based
# Parse custom_args if present
custom_args_dict = {}
if item.custom_args_text:
custom_args_dict, _ = parse_docstring(item.custom_args_text)
# Generate new docstring based on kind
if item.kind == "function":
(
new_docstring,
sig_line_end,
docstring_end,
missing_docstring_args,
fill_docstring_args,
docstring_args_ro_remove,
) = generate_new_docstring_for_function(lines, item, custom_args_dict)
else: # class
(
new_docstring,
sig_line_end,
docstring_end,
missing_docstring_args,
fill_docstring_args,
docstring_args_ro_remove,
) = generate_new_docstring_for_class(lines, item, custom_args_dict, source)
# If sig_line_end is None, this item couldn't be processed (e.g., class with no __init__)
# In this case, we don't modify anything and just continue to the next item
if sig_line_end is None:
continue
# Add all lines from last processed line up to current def line
content_base_file_new_lines += lines[last_line_added:def_line_0]
# Collect warnings
for arg in missing_docstring_args:
missing_docstring_args_warnings.append(f" - {arg} line {def_line_0}")
for arg in fill_docstring_args:
fill_docstring_args_warnings.append(f" - {arg} line {def_line_0}")
for arg in docstring_args_ro_remove:
docstring_args_ro_remove_warnings.append(f" - {arg} line {def_line_0}")
# Add lines from current def through signature
content_base_file_new_lines += lines[def_line_0:sig_line_end]
# Add new docstring if generated
if new_docstring:
content_base_file_new_lines += new_docstring.split("\n")
# Update last_line_added to skip the old docstring
last_line_added = (docstring_end + 1) if docstring_end is not None else sig_line_end
# Add any remaining lines after the last decorated item
content_base_file_new_lines += lines[last_line_added:]
content_base_file_new = "\n".join(content_base_file_new_lines)
if overwrite:
with open(candidate_file, "w", encoding="utf-8") as f:
f.write(content_base_file_new)
return (
missing_docstring_args_warnings,
fill_docstring_args_warnings,
docstring_args_ro_remove_warnings,
)
def check_auto_docstrings(overwrite: bool = False, check_all: bool = False):
"""
    Check docstrings of all public objects that are decorated with `@auto_docstring`.
This function orchestrates the process by finding relevant files, scanning for decorators,
generating new docstrings, and updating files as needed.
"""
# 1. Find all model files to check
matching_files = find_matching_model_files(check_all)
if matching_files is None:
return
# 2. Find files that contain the @auto_docstring decorator
auto_docstrings_files = find_files_with_auto_docstring(matching_files)
# 3. For each file, update docstrings for all candidates
for candidate_file in auto_docstrings_files:
with open(candidate_file, "r", encoding="utf-8") as f:
content = f.read()
lines = content.split("\n")
# Parse file once to find all @auto_docstring decorated items
decorated_items = _build_ast_indexes(content)
if not decorated_items:
continue
# Update docstrings for all decorated items
missing_docstring_args_warnings, fill_docstring_args_warnings, docstring_args_ro_remove_warnings = (
update_file_with_new_docstrings(
candidate_file,
lines,
decorated_items,
content,
overwrite=overwrite,
)
)
if missing_docstring_args_warnings:
if not overwrite:
print(
"Some docstrings are missing. Run `make fix-copies` or `python utils/check_docstrings.py --fix_and_overwrite` to generate the docstring templates where needed."
)
print(f"[ERROR] Missing docstring for the following arguments in {candidate_file}:")
for warning in missing_docstring_args_warnings:
print(warning)
if docstring_args_ro_remove_warnings:
if not overwrite:
print(
"Some docstrings are redundant with the ones in `auto_docstring.py` and will be removed. Run `make fix-copies` or `python utils/check_docstrings.py --fix_and_overwrite` to remove the redundant docstrings."
)
print(f"[ERROR] Redundant docstring for the following arguments in {candidate_file}:")
for warning in docstring_args_ro_remove_warnings:
print(warning)
if fill_docstring_args_warnings:
print(f"[ERROR] Docstring needs to be filled for the following arguments in {candidate_file}:")
for warning in fill_docstring_args_warnings:
print(warning)
if missing_docstring_args_warnings or docstring_args_ro_remove_warnings or fill_docstring_args_warnings:
raise ValueError(
"There was at least one problem when checking docstrings of objects decorated with @auto_docstring."
)
def check_docstrings(overwrite: bool = False, check_all: bool = False):
"""
Check docstrings of all public objects that are callables and are documented. By default, only checks the diff.
Args:
overwrite (`bool`, *optional*, defaults to `False`):
Whether to fix inconsistencies or not.
check_all (`bool`, *optional*, defaults to `False`):
Whether to check all files.
"""
module_diff_files = None
if not check_all:
module_diff_files = set()
repo = Repo(PATH_TO_REPO)
# Diff from index to unstaged files
for modified_file_diff in repo.index.diff(None):
if modified_file_diff.a_path.startswith("src/transformers"):
module_diff_files.add(modified_file_diff.a_path)
# Diff from index to `main`
for modified_file_diff in repo.index.diff(repo.refs.main.commit):
if modified_file_diff.a_path.startswith("src/transformers"):
module_diff_files.add(modified_file_diff.a_path)
# quick escape route: if there are no module files in the diff, skip this check
if len(module_diff_files) == 0:
return
print(" Checking docstrings in the following files:" + "\n - " + "\n - ".join(module_diff_files))
failures = []
hard_failures = []
to_clean = []
for name in dir(transformers):
# Skip objects that are private or not documented.
if (
any(name.startswith(prefix) for prefix in OBJECT_TO_IGNORE_PREFIXES)
or ignore_undocumented(name)
or name in OBJECTS_TO_IGNORE
):
continue
obj = getattr(transformers, name)
if not callable(obj) or not isinstance(obj, type) or getattr(obj, "__doc__", None) is None:
continue
# If we are checking against the diff, we skip objects that are not part of the diff.
if module_diff_files is not None:
object_file = find_source_file(getattr(transformers, name))
object_file_relative_path = "src/" + str(object_file).split("/src/")[1]
if object_file_relative_path not in module_diff_files:
continue
# Check docstring
try:
result = match_docstring_with_signature(obj)
if result is not None:
old_doc, new_doc = result
else:
old_doc, new_doc = None, None
except Exception as e:
print(e)
hard_failures.append(name)
continue
if old_doc != new_doc:
if overwrite:
fix_docstring(obj, old_doc, new_doc)
else:
failures.append(name)
elif not overwrite and new_doc is not None and ("<fill_type>" in new_doc or "<fill_docstring>" in new_doc):
to_clean.append(name)
# Deal with errors
error_message = ""
if len(hard_failures) > 0:
error_message += (
"The argument part of the docstrings of the following objects could not be processed, check they are "
"properly formatted."
)
error_message += "\n" + "\n".join([f"- {name}" for name in hard_failures])
if len(failures) > 0:
error_message += (
"The following objects docstrings do not match their signature. Run `make fix-copies` to fix this. "
"In some cases, this error may be raised incorrectly by the docstring checker. If you think this is the "
"case, you can manually check the docstrings and then add the object name to `OBJECTS_TO_IGNORE` in "
"`utils/check_docstrings.py`."
)
error_message += "\n" + "\n".join([f"- {name}" for name in failures])
if len(to_clean) > 0:
error_message += (
"The following objects docstrings contain templates you need to fix: search for `<fill_type>` or "
"`<fill_docstring>`."
)
error_message += "\n" + "\n".join([f"- {name}" for name in to_clean])
if len(error_message) > 0:
error_message = "There was at least one problem when checking docstrings of public objects.\n" + error_message
raise ValueError(error_message)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
parser.add_argument(
"--check_all", action="store_true", help="Whether to check all files. By default, only checks the diff"
)
args = parser.parse_args()
check_auto_docstrings(overwrite=args.fix_and_overwrite, check_all=args.check_all)
check_docstrings(overwrite=args.fix_and_overwrite, check_all=args.check_all)
|
DecoratedItem
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py
|
{
"start": 4707,
"end": 5100
}
|
class ____(
typing.Iterator[int]
): # Y022 Use "collections.abc.Iterator[T]" instead of "typing.Iterator[T]" (PEP 585 syntax)
def __iter__(self) -> Iterator[int]:
... # Y034 "__iter__" methods in classes like "BadIterator2" usually return "self" at runtime. Consider using "typing_extensions.Self" in "BadIterator2.__iter__", e.g. "def __iter__(self) -> Self: ..."
|
BadIterator2
|
python
|
mlflow__mlflow
|
mlflow/genai/judges/tools/types.py
|
{
"start": 747,
"end": 1070
}
|
class ____:
"""Information about a single span."""
span_id: str
name: str
span_type: str
start_time_ms: float
end_time_ms: float
duration_ms: float
parent_id: str | None
status: SpanStatus
is_root: bool
attribute_names: list[str]
@experimental(version="3.5.0")
@dataclass
|
SpanInfo
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/installers.py
|
{
"start": 9485,
"end": 10870
}
|
class ____(RegexLexer):
"""
Lexer that highlights debian sources.list files.
.. versionadded:: 0.7
"""
name = 'Debian Sourcelist'
aliases = ['sourceslist', 'sources.list', 'debsources']
filenames = ['sources.list']
mimetype = ['application/x-debian-sourceslist']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment),
(r'^(deb(?:-src)?)(\s+)',
bygroups(Keyword, Text), 'distribution')
],
'distribution': [
(r'#.*?$', Comment, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\s$[]+', String),
(r'\[', String.Other, 'escaped-distribution'),
(r'\$', String),
(r'\s+', Text, 'components')
],
'escaped-distribution': [
(r'\]', String.Other, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\]$]+', String.Other),
(r'\$', String.Other)
],
'components': [
(r'#.*?$', Comment, '#pop:2'),
(r'$', Text, '#pop:2'),
(r'\s+', Text),
(r'\S+', Keyword.Pseudo),
]
}
def analyse_text(text):
for line in text.splitlines():
line = line.strip()
if line.startswith('deb ') or line.startswith('deb-src '):
return True
|
SourcesListLexer
|
python
|
walkccc__LeetCode
|
solutions/2862. Maximum Element-Sum of a Complete Subset of Indices/2862.py
|
{
"start": 0,
"end": 453
}
|
class ____:
def maximumSum(self, nums: list[int]) -> int:
ans = 0
oddPowerToSum = collections.Counter()
def divideSquares(val: int) -> int:
for num in range(2, val + 1):
while val % (num * num) == 0:
val //= (num * num)
return val
for i, num in enumerate(nums):
oddPower = divideSquares(i + 1)
oddPowerToSum[oddPower] += num
ans = max(ans, oddPowerToSum[oddPower])
return ans
|
Solution
|
python
|
streamlit__streamlit
|
lib/streamlit/testing/v1/element_tree.py
|
{
"start": 15068,
"end": 15603
}
|
class ____(Element):
proto: ArrowProto = field(repr=False)
def __init__(self, proto: ArrowProto, root: ElementTree) -> None:
self.key = None
self.proto = proto
self.root = root
self.type = "arrow_data_frame"
@property
def value(self) -> PandasDataframe:
return dataframe_util.convert_arrow_bytes_to_pandas_df(self.proto.data)
SingleDateValue: TypeAlias = date | datetime
DateValue: TypeAlias = SingleDateValue | Sequence[SingleDateValue] | None
@dataclass(repr=False)
|
Dataframe
|
python
|
django__django
|
tests/staticfiles_tests/test_storage.py
|
{
"start": 26475,
"end": 26985
}
|
class ____(CollectionTestCase):
run_collectstatic_in_setUp = False
def test_collectstatistic_no_post_process_replaced_paths(self):
stdout = StringIO()
self.run_collectstatic(verbosity=1, stdout=stdout)
self.assertIn("post-processed", stdout.getvalue())
@override_settings(
STORAGES={
**settings.STORAGES,
STATICFILES_STORAGE_ALIAS: {
"BACKEND": "staticfiles_tests.storage.SimpleStorage",
},
}
)
|
TestCollectionNoPostProcessReplacedPaths
|
python
|
keras-team__keras
|
keras/src/quantizers/quantizers.py
|
{
"start": 23319,
"end": 33630
}
|
class ____(Quantizer):
"""A class that handles the quantization of weights using GPTQ method.
This class provides methods to find quantization parameters (scale and zero)
for a given tensor and can be used to quantize weights in a GPTQ context.
Args:
weight_bits: (int) The number of bits to quantize to (e.g., 4).
per_channel: (bool) A flag indicating whether quantization is
applied per-channel (`True`) or per-tensor (`False`).
Defaults to `False`.
symmetric: (bool) A flag indicating whether symmetric (`True`) or
asymmetric (`False`) quantization is used. Defaults to `False`.
group_size: (int) The size of weight groups for quantization. A
value of -1 indicates that grouping is not used.
Defaults to -1.
"""
def __init__(
self,
config=GPTQConfig(tokenizer=None, dataset=None),
compute_dtype="float32",
):
Quantizer.__init__(self)
self.weight_bits = config.weight_bits
self.per_channel = config.per_channel
self.symmetric = config.symmetric
self.group_size = config.group_size
self.compute_dtype = compute_dtype
# These are now determined later by `find_params`
self.scale = None
self.zero = None
self.maxq = None
def find_params(self, input_tensor, weight=True):
"""Finds quantization parameters (scale and zero) for a given tensor."""
self.scale, self.zero, self.maxq = compute_quantization_parameters(
input_tensor,
bits=self.weight_bits,
symmetric=self.symmetric,
per_channel=self.per_channel,
group_size=self.group_size,
weight=weight,
compute_dtype=self.compute_dtype,
)
return self.scale, self.zero, self.maxq
def get_config(self):
config = super().get_config()
config.update(
{
"weight_bits": self.weight_bits,
"per_channel": self.per_channel,
"symmetric": self.symmetric,
"group_size": self.group_size,
}
)
return config
@classmethod
def from_config(cls, config):
gptq = GPTQConfig(
tokenizer=None,
dataset=None,
weight_bits=config["weight_bits"],
per_channel=config["per_channel"],
symmetric=config["symmetric"],
group_size=config["group_size"],
)
return cls(gptq)
def compute_quantization_parameters(
x,
*,
bits,
symmetric=False,
per_channel=False,
group_size=-1,
weight=False,
compute_dtype="float32",
):
"""
Computes the scale and zero-point for quantization.
This function calculates the scale and zero-point required for quantizing
a given tensor `x` based on the specified parameters. It supports grouped,
per-channel, per-tensor, symmetric, and asymmetric quantization - along
with any combinations of these.
Args:
x: KerasTensor. The input tensor to quantize.
bits: int. The number of bits to quantize to (e.g., 4).
symmetric: bool. Whether to use symmetric quantization.
per_channel: bool. Whether to quantize per channel.
group_size: int. The group size for quantization.
weight: bool. Whether the input tensor is a weight tensor.
Returns:
scale: KerasTensor. The scale tensor for quantization.
zero: KerasTensor. The zero tensor for quantization.
maxq: scalar. The maximum quantization value.
"""
if x is None:
raise ValueError(f"Input tensor {x} cannot be None.")
# For weights, we typically expect at least a 2D tensor.
if weight and len(x.shape) < 2:
raise ValueError(
f"Input weight tensor {x} must have a rank of at "
f"least 2, but got rank {len(x.shape)}."
)
if ops.size(x) == 0:
raise ValueError("Input tensor 'x' cannot be empty.")
original_shape = x.shape
if per_channel:
if weight:
if group_size != -1:
input_reshaped = ops.reshape(x, [-1, group_size])
else:
input_reshaped = ops.reshape(x, [original_shape[0], -1])
else: # per-tensor
input_reshaped = ops.reshape(x, [1, -1])
# Find min/max values
min_values = ops.min(input_reshaped, axis=1)
max_values = ops.max(input_reshaped, axis=1)
# Apply symmetric quantization logic if enabled
if symmetric:
max_values = ops.maximum(ops.abs(min_values), max_values)
min_values = ops.where(
ops.less(min_values, 0), ops.negative(max_values), min_values
)
# Ensure range is not zero to avoid division errors
zero_range = ops.equal(min_values, max_values)
min_values = ops.where(zero_range, ops.subtract(min_values, 1), min_values)
max_values = ops.where(zero_range, ops.add(max_values, 1), max_values)
maxq = ops.cast(ops.subtract(ops.power(2, bits), 1), compute_dtype)
# Calculate scale and zero-point
scale = ops.divide(ops.subtract(max_values, min_values), maxq)
if symmetric:
zero = ops.full_like(scale, ops.divide(ops.add(maxq, 1), 2))
else:
zero = ops.round(ops.divide(ops.negative(min_values), scale))
# Ensure scale is non-zero
scale = ops.where(ops.less_equal(scale, 0), 1e-8, scale)
if weight:
# Per-channel, non-grouped case: simple reshape is correct.
if per_channel and group_size == -1:
scale = ops.reshape(scale, [-1, 1])
zero = ops.reshape(zero, [-1, 1])
elif not per_channel:
num_rows = original_shape[0]
scale = ops.tile(ops.reshape(scale, (1, 1)), (num_rows, 1))
zero = ops.tile(ops.reshape(zero, (1, 1)), (num_rows, 1))
if per_channel:
scale = ops.reshape(scale, [-1, 1])
zero = ops.reshape(zero, [-1, 1])
zero = ops.cast(zero, "uint8")
return scale, zero, maxq
def quantize_with_zero_point(input_tensor, scale, zero, maxq):
"""Quantize a float tensor into discrete levels [0, maxq] using
per-tensor/per-channel/grouped scaling.
Returns `q` (same dtype as inputs/scales; float is fine) where values are in
[0, maxq].
Args:
input_tensor: KerasTensor. The input tensor to quantize.
scale: KerasTensor. The scale tensor for quantization.
zero: KerasTensor. The zero tensor for quantization.
maxq: KerasTensor. The maximum quantization value.
Returns:
KerasTensor. The quantized tensor.
"""
# Guard against divide-by-zero
epsilon = ops.cast(1e-8, dtype=scale.dtype)
safe_scale = ops.where(ops.equal(scale, 0), epsilon, scale)
quantized_tensor = ops.round(
ops.add(
ops.divide(input_tensor, safe_scale), ops.cast(zero, scale.dtype)
)
)
quantized_tensor = ops.clip(quantized_tensor, 0, maxq)
return quantized_tensor
def dequantize_with_zero_point(input_tensor, scale, zero):
"""
Dequantizes a quantized tensor using the provided scale and zero tensors.
Args:
input_tensor: KerasTensor. The quantized tensor to dequantize.
scale: KerasTensor. The scale tensor for dequantization.
zero: KerasTensor. The zero tensor for dequantization.
Returns:
KerasTensor. The dequantized tensor.
"""
return ops.multiply(
scale, ops.subtract(input_tensor, ops.cast(zero, scale.dtype))
)
def quantize_with_sz_map(weights_matrix, scale, zero, g_idx, maxq):
"""Quantize the weight matrix from group params.
This function uses the provided scale and zero tensors to quantize the
input weights_matrix according to the group indices. It maps each column
of the weights_matrix to its corresponding group parameters and performs
the quantization operation.
Args:
weights_matrix: 2D tensor of shape [out_features, in_features].
scale: Per-group scale tensor of shape [out_features, n_groups].
zero: Per-group zero-point tensor of shape [out_features, n_groups].
g_idx: Integer tensor of shape [in_features,] mapping each column to
its group index.
maxq: Scalar (float) representing the maximum integer quantization
level (e.g., 2^bits - 1).
Returns:
A tensor with the same shape as `weights_matrix` containing the
quantized weights produced using the provided group parameters.
"""
groups = ops.cast(g_idx, "int32")
scale_cols = ops.take(scale, groups, axis=1) # [out_features, in_features]
zero_cols = ops.take(zero, groups, axis=1) # [out_features, in_features]
# Quantize elementwise, then cast to int
return quantize_with_zero_point(weights_matrix, scale_cols, zero_cols, maxq)
def dequantize_with_sz_map(weights_matrix, scale, zero, g_idx):
"""Rebuild a dequantized weight matrix from group params.
This function uses the provided scale and zero tensors to dequantize the
input weights_matrix according to the group indices. It maps each column
of the weights_matrix to its corresponding group parameters and performs
the dequantization operation.
Args:
weights_matrix: 2D tensor of shape [out_features, in_features].
scale: Per-group scale tensor of shape [out_features, n_groups].
zero: Per-group zero-point tensor of shape [out_features, n_groups].
g_idx: Integer tensor of shape [in_features,] mapping each column to
its group index.
maxq: Scalar (float) representing the maximum integer quantization
level (e.g., 2^bits - 1).
Returns:
A tensor with the same shape as `weights_matrix` containing the
dequantized weights produced using the provided group parameters.
"""
# Map group indices to scales and zeros
groups = ops.cast(g_idx, "int32")
scales_mapped = ops.take(scale, groups, axis=1)
zeros_mapped = ops.take(zero, groups, axis=1)
zeros_mapped = ops.cast(zeros_mapped, scales_mapped.dtype)
quantized = ops.multiply(
ops.subtract(weights_matrix, zeros_mapped), scales_mapped
)
return quantized
|
GPTQQuantizer
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/paramNames1.py
|
{
"start": 1598,
"end": 2116
}
|
class ____(type):
def __new__(mcls): ...
# This should not generate a error because the class derives
# from type and is assumed to be a metaclass.
def foo1(cls):
return 3
# This should generate an error.
def foo2(mcls):
return 3
def foo3(self):
return 3
@classmethod
def foo4(cls):
return 3
@classmethod
def foo5(metacls):
return 3
# This should generate an error.
@classmethod
def foo6(bar):
return 3
|
Metaclass
|
python
|
etianen__django-reversion
|
tests/test_app/tests/test_commands.py
|
{
"start": 2669,
"end": 3055
}
|
class ____(TestModelMixin, TestBase):
databases = {"default", "postgres"}
def testCreateInitialRevisionsModelDb(self):
obj = TestModel.objects.db_manager("postgres").create()
self.callCommand("createinitialrevisions", model_db="postgres")
self.assertSingleRevision((obj,), comment="Initial version.", model_db="postgres")
|
CreateInitialRevisionsModelDbTest
|
python
|
getsentry__sentry
|
src/sentry/statistical_detectors/base.py
|
{
"start": 543,
"end": 1175
}
|
class ____(ABC):
@classmethod
@abstractmethod
def from_redis_dict(cls, data: Any) -> DetectorState: ...
@abstractmethod
def to_redis_dict(self) -> Mapping[str | bytes, bytes | float | int | str]: ...
@abstractmethod
def should_auto_resolve(self, target: float, rel_threshold: float) -> bool: ...
@abstractmethod
def should_escalate(
self, baseline: float, regressed: float, min_change: float, rel_threshold: float
) -> bool: ...
@classmethod
@abstractmethod
def empty(cls) -> DetectorState: ...
@abstractmethod
def get_moving_avg(self) -> float: ...
|
DetectorState
|
python
|
getsentry__sentry
|
src/sentry/auth/manager.py
|
{
"start": 378,
"end": 1334
}
|
class ____:
def __init__(self) -> None:
self.__values: dict[str, type[Provider]] = {}
def __iter__(self) -> Iterator[tuple[str, type[Provider]]]:
yield from self.__values.items()
def get(self, key: str, **kwargs: Any) -> Provider:
try:
cls = self.__values[key]
except KeyError:
raise ProviderNotRegistered(key)
return cls(**kwargs)
def exists(self, key: str) -> bool:
return key in self.__values
def register(self, cls: type[Provider]) -> None:
self.__values[cls.key] = cls
def unregister(self, cls: type[Provider]) -> None:
try:
if self.__values[cls.key] != cls:
# don't allow unregistering of arbitrary provider
raise ProviderNotRegistered(cls.key)
except KeyError:
# we gracefully handle a missing provider
return
del self.__values[cls.key]
|
ProviderManager
|
python
|
PyCQA__pylint
|
tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_ex_returned.py
|
{
"start": 236,
"end": 386
}
|
class ____:
"""__getnewargs_ex__ returns <type 'tuple'>"""
def __getnewargs_ex__(self):
return ((1,), {"2": "2"})
|
FirstGoodGetNewArgsEx
|
python
|
numpy__numpy
|
numpy/polynomial/tests/test_hermite_e.py
|
{
"start": 6188,
"end": 10115
}
|
class ____:
def test_hermeint(self):
# check exceptions
assert_raises(TypeError, herme.hermeint, [0], .5)
assert_raises(ValueError, herme.hermeint, [0], -1)
assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
assert_raises(ValueError, herme.hermeint, [0], lbnd=[0])
assert_raises(ValueError, herme.hermeint, [0], scl=[0])
assert_raises(TypeError, herme.hermeint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
k = [0] * (i - 2) + [1]
res = herme.hermeint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [1 / scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i])
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herme.hermeval(-1, hermeint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [2 / scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1)
res = herme.hermeint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k])
res = herme.hermeint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
res = herme.hermeint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c) for c in c2d])
res = herme.hermeint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
res = herme.hermeint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
|
TestIntegral
|
python
|
apache__airflow
|
providers/apache/pinot/tests/unit/apache/pinot/hooks/test_pinot.py
|
{
"start": 7452,
"end": 10151
}
|
class ____:
def setup_method(self):
self.conn = conn = mock.MagicMock()
self.conn.host = "host"
self.conn.port = "1000"
self.conn.login = ""
self.conn.password = ""
self.conn.conn_type = "http"
self.conn.extra_dejson = {"endpoint": "query/sql"}
self.cur = mock.MagicMock(rowcount=0)
self.conn.cursor.return_value = self.cur
self.conn.__enter__.return_value = self.cur
self.conn.__exit__.return_value = None
class TestPinotDBApiHook(PinotDbApiHook):
def get_conn(self):
return conn
def get_connection(self, conn_id):
return conn
self.db_hook = TestPinotDBApiHook
def test_get_uri(self):
"""
Test on getting a pinot connection uri
"""
db_hook = self.db_hook()
assert db_hook.get_uri() == "http://host:1000/query/sql"
def test_get_conn(self):
"""
Test on getting a pinot connection
"""
conn = self.db_hook().get_conn()
assert conn.host == "host"
assert conn.port == "1000"
assert conn.conn_type == "http"
assert conn.extra_dejson.get("endpoint") == "query/sql"
def test_get_records(self):
statement = "SQL"
result_sets = [("row1",), ("row2",)]
self.cur.fetchall.return_value = result_sets
assert result_sets == self.db_hook().get_records(statement)
def test_get_first(self):
statement = "SQL"
result_sets = [("row1",), ("row2",)]
self.cur.fetchone.return_value = result_sets[0]
assert result_sets[0] == self.db_hook().get_first(statement)
def test_get_df_pandas(self):
statement = "SQL"
column = "col"
result_sets = [("row1",), ("row2",)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook().get_df(statement, df_type="pandas")
assert column == df.columns[0]
for i, item in enumerate(result_sets):
assert item[0] == df.values.tolist()[i][0]
def test_get_df_polars(self):
statement = "SQL"
column = "col"
result_sets = [("row1",), ("row2",)]
mock_execute = mock.MagicMock()
mock_execute.description = [(column, None, None, None, None, None, None)]
mock_execute.fetchall.return_value = result_sets
self.cur.execute.return_value = mock_execute
df = self.db_hook().get_df(statement, df_type="polars")
assert column == df.columns[0]
assert result_sets[0][0] == df.row(0)[0]
assert result_sets[1][0] == df.row(1)[0]
|
TestPinotDbApiHook
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/coercions.py
|
{
"start": 20067,
"end": 20515
}
|
class ____(RoleImpl):
__slots__ = ()
def _implicit_coercions(
self,
element: Any,
resolved: Any,
argname: Optional[str] = None,
**kw: Any,
) -> Any:
if isinstance(element, ExecutableOption):
return element
else:
self._raise_for_expected(element, argname, resolved)
def _literal_coercion(self, element, **kw):
return element
|
ExecutableOptionImpl
|
python
|
redis__redis-py
|
redis/commands/core.py
|
{
"start": 226391,
"end": 226962
}
|
class ____(ScriptCommands):
async def script_debug(self, *args) -> None:
return super().script_debug()
def register_script(
self: "redis.asyncio.client.Redis",
script: ScriptTextT,
) -> AsyncScript:
"""
Register a Lua ``script`` specifying the ``keys`` it will touch.
Returns a Script object that is callable and hides the complexity of
deal with scripts, keys, and shas. This is the preferred way to work
with Lua scripts.
"""
return AsyncScript(self, script)
|
AsyncScriptCommands
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/dviread.py
|
{
"start": 48623,
"end": 53486
}
|
class ____:
@cache # A singleton.
def __new__(cls):
self = object.__new__(cls)
self._proc = self._new_proc()
return self
def _new_proc(self):
return subprocess.Popen(
["luatex", "--luaonly", str(cbook._get_data_path("kpsewhich.lua"))],
# mktexpk logs to stderr; suppress that.
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
# Store generated pk fonts in our own cache.
env={"MT_VARTEXFONTS": str(Path(mpl.get_cachedir(), "vartexfonts")),
**os.environ})
def search(self, filename):
if self._proc.poll() is not None: # Dead, restart it.
self._proc = self._new_proc()
self._proc.stdin.write(os.fsencode(filename) + b"\n")
self._proc.stdin.flush()
out = self._proc.stdout.readline().rstrip()
return None if out == b"nil" else os.fsdecode(out)
@lru_cache
def find_tex_file(filename):
"""
Find a file in the texmf tree using kpathsea_.
The kpathsea library, provided by most existing TeX distributions, both
on Unix-like systems and on Windows (MikTeX), is invoked via a long-lived
luatex process if luatex is installed, or via kpsewhich otherwise.
.. _kpathsea: https://www.tug.org/kpathsea/
Parameters
----------
filename : str or path-like
Raises
------
FileNotFoundError
If the file is not found.
"""
# we expect these to always be ascii encoded, but use utf-8
# out of caution
if isinstance(filename, bytes):
filename = filename.decode('utf-8', errors='replace')
try:
lk = _LuatexKpsewhich()
except FileNotFoundError:
lk = None # Fallback to directly calling kpsewhich, as below.
if lk:
path = lk.search(filename)
else:
if sys.platform == 'win32':
# On Windows only, kpathsea can use utf-8 for cmd args and output.
# The `command_line_encoding` environment variable is set to force
# it to always use utf-8 encoding. See Matplotlib issue #11848.
kwargs = {'env': {**os.environ, 'command_line_encoding': 'utf-8'},
'encoding': 'utf-8'}
else: # On POSIX, run through the equivalent of os.fsdecode().
kwargs = {'env': {**os.environ},
'encoding': sys.getfilesystemencoding(),
'errors': 'surrogateescape'}
kwargs['env'].update(
MT_VARTEXFONTS=str(Path(mpl.get_cachedir(), "vartexfonts")))
try:
path = cbook._check_and_log_subprocess(
['kpsewhich', '-mktex=pk', filename], _log, **kwargs,
).rstrip('\n')
except (FileNotFoundError, RuntimeError):
path = None
if path:
return path
else:
raise FileNotFoundError(
f"Matplotlib's TeX implementation searched for a file named "
f"{filename!r} in your texmf tree, but could not find it")
@lru_cache
def _fontfile(cls, suffix, texname):
return cls(find_tex_file(texname + suffix))
_tfmfile = partial(_fontfile, Tfm, ".tfm")
_vffile = partial(_fontfile, Vf, ".vf")
if __name__ == '__main__':
import itertools
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("filename")
parser.add_argument("dpi", nargs="?", type=float, default=None)
parser.add_argument("-d", "--debug", action="store_true")
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
def _print_fields(*args):
print(" ".join(map("{:>11}".format, args)))
with Dvi(args.filename, args.dpi) as dvi:
for page in dvi:
print(f"=== NEW PAGE === "
f"(w: {page.width}, h: {page.height}, d: {page.descent})")
print("--- GLYPHS ---")
for font, group in itertools.groupby(page.text, lambda text: text.font):
font_name = (font.texname.decode("utf8") if os.name == "nt"
else os.fsdecode(font.texname))
if isinstance(font._metrics, Tfm):
print(f"font: {font_name} at {font.resolve_path()}")
else:
print(f"font: {font_name}")
print(f"scale: {font._scale / 2 ** 20}")
_print_fields("x", "y", "glyph", "chr", "w")
for text in group:
_print_fields(text.x, text.y, text.glyph,
text._as_unicode_or_name(), text.width)
if page.boxes:
print("--- BOXES ---")
_print_fields("x", "y", "h", "w")
for box in page.boxes:
_print_fields(box.x, box.y, box.height, box.width)
|
_LuatexKpsewhich
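As a usage note for ``find_tex_file``: it goes through the long-lived luatex helper when one can be started and falls back to plain ``kpsewhich`` otherwise. A small sketch, assuming a working TeX installation; ``cmr10.tfm`` is just a commonly available metrics file:

```python
from matplotlib.dviread import find_tex_file

try:
    # Resolve a file name through kpathsea; raises FileNotFoundError
    # if nothing matching is found in the texmf tree.
    print(find_tex_file("cmr10.tfm"))
except FileNotFoundError as exc:
    print(f"not found: {exc}")
```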
|
python
|
crytic__slither
|
slither/detectors/attributes/locked_ether.py
|
{
"start": 529,
"end": 4861
}
|
class ____(AbstractDetector): # pylint: disable=too-many-nested-blocks
ARGUMENT = "locked-ether"
HELP = "Contracts that lock ether"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether"
WIKI_TITLE = "Contracts that lock Ether"
WIKI_DESCRIPTION = "Contract with a `payable` function, but without a withdrawal capacity."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
pragma solidity 0.4.24;
contract Locked{
function receive() payable public{
}
}
```
Every Ether sent to `Locked` will be lost."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Remove the payable attribute or add a withdraw function."
@staticmethod
def do_no_send_ether(contract: Contract) -> bool:
functions = contract.all_functions_called
to_explore = functions
explored = []
while to_explore: # pylint: disable=too-many-nested-blocks
functions = to_explore
explored += to_explore
to_explore = []
for function in functions:
calls = [ir.function.name for ir in function.internal_calls]
if "suicide(address)" in calls or "selfdestruct(address)" in calls:
return False
for node in function.nodes:
for ir in node.irs:
if isinstance(
ir,
(Send, Transfer, HighLevelCall, LowLevelCall, NewContract),
):
if ir.call_value and ir.call_value != 0:
return False
if isinstance(ir, (LowLevelCall)) and ir.function_name in [
"delegatecall",
"callcode",
]:
return False
if isinstance(ir, SolidityCall):
call_can_send_ether = ir.function in [
SolidityFunction(
"delegatecall(uint256,uint256,uint256,uint256,uint256,uint256)"
),
SolidityFunction(
"callcode(uint256,uint256,uint256,uint256,uint256,uint256,uint256)"
),
SolidityFunction(
"call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)"
),
]
nonzero_call_value = call_can_send_ether and (
not isinstance(ir.arguments[2], Constant)
or ir.arguments[2].value != 0
)
if nonzero_call_value:
return False
                        # If this is a new internal call or library call,
                        # add it to the list to explore.
                        # InternalCall is included to follow internal calls in libraries.
if isinstance(ir, (InternalCall, LibraryCall)):
if not ir.function in explored:
to_explore.append(ir.function)
return True
def _detect(self) -> List[Output]:
results = []
for contract in self.compilation_unit.contracts_derived:
if contract.is_signature_only():
continue
funcs_payable = [function for function in contract.functions if function.payable]
if funcs_payable:
if self.do_no_send_ether(contract):
info: DETECTOR_INFO = ["Contract locking ether found:\n"]
info += ["\tContract ", contract, " has payable functions:\n"]
for function in funcs_payable:
info += ["\t - ", function, "\n"]
info += "\tBut does not have a function to withdraw the ether\n"
json = self.generate_result(info)
results.append(json)
return results
|
LockedEther
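For context, a hedged sketch of running a detector like this through Slither's Python API; the contract path is a placeholder, and ``register_detector``/``run_detectors`` are assumed from Slither's plugin interface rather than shown in this record:

```python
from slither.slither import Slither
from slither.detectors.attributes.locked_ether import LockedEther

# Analyze a Solidity file (path is illustrative; requires a matching solc).
slither = Slither("Locked.sol")

# Register the detector and run it; each detector yields JSON-like results.
slither.register_detector(LockedEther)
for detector_results in slither.run_detectors():
    for finding in detector_results:
        print(finding["description"])
```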
|
python
|
charliermarsh__ruff
|
python/ruff-ecosystem/ruff_ecosystem/main.py
|
{
"start": 4571,
"end": 5009
}
|
class ____(json.JSONEncoder):
def default(self, o: object):
if isinstance(o, Serializable):
return o.jsonable()
if dataclasses.is_dataclass(o):
return dataclasses.asdict(o)
if isinstance(o, set):
return tuple(o)
if isinstance(o, Path):
return str(o)
if isinstance(o, Exception):
return str(o)
return super().default(o)
|
JSONEncoder
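Because this encoder only overrides ``default``, it plugs directly into the standard library via the ``cls`` argument. A minimal sketch with stand-in data; the import path is inferred from the record's file path:

```python
import dataclasses
import json
from pathlib import Path

from ruff_ecosystem.main import JSONEncoder  # assumed import path


@dataclasses.dataclass
class Example:
    name: str
    source: Path


payload = {
    "example": Example(name="demo", source=Path("a.py")),  # dataclass -> dict
    "seen": {"x", "y"},                                     # set -> tuple
    "error": ValueError("boom"),                            # exception -> str
}
print(json.dumps(payload, cls=JSONEncoder, sort_keys=True))
```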
|
python
|
PrefectHQ__prefect
|
tests/events/server/test_in_memory_ordering.py
|
{
"start": 22200,
"end": 23044
}
|
class ____:
def test_get_task_run_recorder_causal_ordering(self):
"""Test that the factory function returns the correct scoped instance."""
from prefect.server.events.ordering import get_task_run_recorder_causal_ordering
CausalOrdering.clear_all_scopes()
# Get instance from factory function
ordering1 = get_task_run_recorder_causal_ordering()
assert ordering1.scope == "task-run-recorder"
assert isinstance(ordering1, CausalOrdering)
# Multiple calls should return the same instance
ordering2 = get_task_run_recorder_causal_ordering()
assert ordering1 is ordering2
# Direct instantiation with same scope should return same instance
ordering3 = CausalOrdering(scope="task-run-recorder")
assert ordering1 is ordering3
|
TestFactoryFunction
|
python
|
graphql-python__graphene
|
graphene/relay/connection.py
|
{
"start": 2367,
"end": 2429
}
|
class ____(ObjectTypeOptions):
node = None
|
ConnectionOptions
|
python
|
pyca__cryptography
|
src/cryptography/x509/extensions.py
|
{
"start": 39109,
"end": 41006
}
|
class ____(ExtensionType):
oid = ExtensionOID.PRIVATE_KEY_USAGE_PERIOD
def __init__(
self,
not_before: datetime.datetime | None,
not_after: datetime.datetime | None,
) -> None:
if (
not isinstance(not_before, datetime.datetime)
and not_before is not None
):
raise TypeError("not_before must be a datetime.datetime or None")
if (
not isinstance(not_after, datetime.datetime)
and not_after is not None
):
raise TypeError("not_after must be a datetime.datetime or None")
if not_before is None and not_after is None:
raise ValueError(
"At least one of not_before and not_after must not be None"
)
if (
not_before is not None
and not_after is not None
and not_before > not_after
):
raise ValueError("not_before must be before not_after")
self._not_before = not_before
self._not_after = not_after
@property
def not_before(self) -> datetime.datetime | None:
return self._not_before
@property
def not_after(self) -> datetime.datetime | None:
return self._not_after
def __repr__(self) -> str:
return (
f"<PrivateKeyUsagePeriod(not_before={self.not_before}, "
f"not_after={self.not_after})>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, PrivateKeyUsagePeriod):
return NotImplemented
return (
self.not_before == other.not_before
and self.not_after == other.not_after
)
def __hash__(self) -> int:
return hash((self.not_before, self.not_after))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
|
PrivateKeyUsagePeriod
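A short construction sketch that exercises the validation in ``__init__``; it assumes a cryptography release recent enough to expose ``PrivateKeyUsagePeriod`` under ``cryptography.x509``:

```python
import datetime

from cryptography import x509

period = x509.PrivateKeyUsagePeriod(
    not_before=datetime.datetime(2024, 1, 1),
    not_after=datetime.datetime(2026, 1, 1),
)
print(period)

# Passing None for both bounds (or not_before > not_after) raises immediately.
try:
    x509.PrivateKeyUsagePeriod(not_before=None, not_after=None)
except ValueError as exc:
    print(exc)
```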
|
python
|
getsentry__sentry
|
src/sentry/issues/run.py
|
{
"start": 633,
"end": 3812
}
|
class ____(ProcessingStrategyFactory[KafkaPayload]):
def __init__(
self,
max_batch_size: int,
max_batch_time: int,
# not needed in batched-parallel mode
num_processes: int | None = None,
input_block_size: int | None = None,
output_block_size: int | None = None,
mode: Literal["batched-parallel", "parallel"] | None = None,
):
super().__init__()
self.max_batch_size = max_batch_size
self.max_batch_time = max_batch_time
self.input_block_size = input_block_size
self.output_block_size = output_block_size
self.batched = mode == "batched-parallel"
# either use multi-process pool or a thread pool
if self.batched:
self.worker: ThreadPoolExecutor | None = ThreadPoolExecutor()
self.pool: MultiprocessingPool | None = None
else:
# make sure num_processes is not None
assert num_processes is not None
self.pool = MultiprocessingPool(num_processes)
self.worker = None
def create_parallel_worker(
self,
commit: Commit,
) -> ProcessingStrategy[KafkaPayload]:
assert self.pool is not None
return run_task_with_multiprocessing(
function=process_message,
next_step=CommitOffsets(commit),
max_batch_size=self.max_batch_size,
max_batch_time=self.max_batch_time,
pool=self.pool,
input_block_size=self.input_block_size,
output_block_size=self.output_block_size,
)
def create_batched_parallel_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]:
assert self.worker is not None
batch_processor = RunTask(
function=functools.partial(process_batch, self.worker),
next_step=CommitOffsets(commit),
)
return BatchStep(
max_batch_size=self.max_batch_size,
max_batch_time=self.max_batch_time,
next_step=batch_processor,
)
def create_with_partitions(
self,
commit: Commit,
partitions: Mapping[Partition, int],
) -> ProcessingStrategy[KafkaPayload]:
if self.batched:
return self.create_batched_parallel_worker(commit)
else:
return self.create_parallel_worker(commit)
def shutdown(self) -> None:
if self.pool:
self.pool.close()
if self.worker:
self.worker.shutdown()
def process_message(message: Message[KafkaPayload]) -> None:
from sentry.issues.occurrence_consumer import _process_message
try:
payload = orjson.loads(message.payload.value)
_process_message(payload)
except Exception:
logger.exception("failed to process message payload")
def process_batch(worker: ThreadPoolExecutor, messages: Message[ValuesBatch[KafkaPayload]]) -> None:
from sentry.issues.occurrence_consumer import process_occurrence_batch
try:
process_occurrence_batch(worker, messages)
except Exception:
logger.exception("failed to process batch payload")
|
OccurrenceStrategyFactory
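The factory's central decision is choosing a thread pool for ``batched-parallel`` mode and a multiprocessing pool otherwise. A standard-library sketch of the same pattern; the names here are illustrative, not Sentry's or Arroyo's API:

```python
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor


def make_executor(mode: str, num_processes: int | None = None) -> Executor:
    """Threads for batched work, a process pool for CPU-bound parallel work."""
    if mode == "batched-parallel":
        return ThreadPoolExecutor()
    assert num_processes is not None, "parallel mode needs a process count"
    return ProcessPoolExecutor(max_workers=num_processes)


executor = make_executor("batched-parallel")
print(list(executor.map(str.upper, ["a", "b"])))
executor.shutdown()
```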
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/objects/table_node_mapping.py
|
{
"start": 402,
"end": 549
}
|
class ____(BaseModel):
"""Lightweight representation of a SQL table."""
table_name: str
context_str: Optional[str] = None
|
SQLTableSchema
|
python
|
getsentry__sentry
|
src/sentry/hybridcloud/rpc/__init__.py
|
{
"start": 628,
"end": 892
}
|
class ____(Enum):
def __eq__(self, other: Any) -> bool:
value = other
if isinstance(other, Enum):
value = other.value
return self.value == value
def __hash__(self) -> int:
return hash(self.value)
|
ValueEqualityEnum
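The point of this mixin is that members compare equal to (and hash like) their raw values. A self-contained sketch using a local copy of the class so it runs without Sentry installed:

```python
from enum import Enum
from typing import Any


class ValueEqualityEnum(Enum):  # local copy of the mixin shown above
    def __eq__(self, other: Any) -> bool:
        value = other.value if isinstance(other, Enum) else other
        return self.value == value

    def __hash__(self) -> int:
        return hash(self.value)


class Color(ValueEqualityEnum):
    RED = "red"


print(Color.RED == "red")    # True: compares against the raw value
print(Color.RED in {"red"})  # True: hashes like the raw value too
```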
|
python
|
sympy__sympy
|
sympy/physics/control/lti.py
|
{
"start": 144005,
"end": 161725
}
|
class ____(MIMOLinearTimeInvariant):
r"""
A class for representing closed-loop feedback interconnection between two
MIMO input/output systems.
Parameters
==========
sys1 : MIMOSeries, TransferFunctionMatrix, StateSpaceBase
The MIMO system placed on the feedforward path.
sys2 : MIMOSeries, TransferFunctionMatrix, StateSpaceBase
The system placed on the feedback path
(often a feedback controller).
sign : int, optional
The sign of feedback. Can either be ``1``
(for positive feedback) or ``-1`` (for negative feedback).
Default value is `-1`.
Raises
======
ValueError
When ``sys1`` and ``sys2`` are not using the
same complex variable of the Laplace transform or z-transform.
        The forward path model should have the same number of inputs/outputs
        as the feedback path has outputs/inputs.
        When the product of ``sys1`` and ``sys2`` is not a square matrix.
When the equivalent MIMO system is not invertible.
TypeError
When either ``sys1`` or ``sys2`` is not a ``MIMOSeries``,
``TransferFunctionMatrix`` or a ``StateSpaceBase`` object.
Examples
========
>>> from sympy import Matrix, pprint
>>> from sympy.abc import s
>>> from sympy.physics.control.lti import StateSpace, TransferFunctionMatrix, MIMOFeedback
>>> plant_mat = Matrix([[1, 1/s], [0, 1]])
>>> controller_mat = Matrix([[10, 0], [0, 10]]) # Constant Gain
>>> plant = TransferFunctionMatrix.from_Matrix(plant_mat, s)
>>> controller = TransferFunctionMatrix.from_Matrix(controller_mat, s)
>>> feedback = MIMOFeedback(plant, controller) # Negative Feedback (default)
>>> pprint(feedback, use_unicode=False)
/ [1 1] [10 0 ] \-1 [1 1]
| [- -] [-- - ] | [- -]
| [1 s] [1 1 ] | [1 s]
|I + [ ] *[ ] | * [ ]
| [0 1] [0 10] | [0 1]
| [- -] [- --] | [- -]
\ [1 1]{t} [1 1 ]{t}/ [1 1]{t}
To get the equivalent system matrix, use either ``doit`` or ``rewrite`` method.
>>> pprint(feedback.doit(), use_unicode=False)
[1 1 ]
[-- -----]
[11 121*s]
[ ]
[0 1 ]
[- -- ]
[1 11 ]{t}
To negate the ``MIMOFeedback`` object, use ``-`` operator.
>>> neg_feedback = -feedback
>>> pprint(neg_feedback.doit(), use_unicode=False)
[-1 -1 ]
[--- -----]
[11 121*s]
[ ]
[ 0 -1 ]
[ - --- ]
[ 1 11 ]{t}
``MIMOFeedback`` can also be used to connect MIMO ``StateSpace`` systems.
>>> A1 = Matrix([[4, 1], [2, -3]])
>>> B1 = Matrix([[5, 2], [-3, -3]])
>>> C1 = Matrix([[2, -4], [0, 1]])
>>> D1 = Matrix([[3, 2], [1, -1]])
>>> A2 = Matrix([[-3, 4, 2], [-1, -3, 0], [2, 5, 3]])
>>> B2 = Matrix([[1, 4], [-3, -3], [-2, 1]])
>>> C2 = Matrix([[4, 2, -3], [1, 4, 3]])
>>> D2 = Matrix([[-2, 4], [0, 1]])
>>> ss1 = StateSpace(A1, B1, C1, D1)
>>> ss2 = StateSpace(A2, B2, C2, D2)
>>> F1 = MIMOFeedback(ss1, ss2)
>>> F1
MIMOFeedback(StateSpace(Matrix([
[4, 1],
[2, -3]]), Matrix([
[ 5, 2],
[-3, -3]]), Matrix([
[2, -4],
[0, 1]]), Matrix([
[3, 2],
[1, -1]])), StateSpace(Matrix([
[-3, 4, 2],
[-1, -3, 0],
[ 2, 5, 3]]), Matrix([
[ 1, 4],
[-3, -3],
[-2, 1]]), Matrix([
[4, 2, -3],
[1, 4, 3]]), Matrix([
[-2, 4],
[ 0, 1]])), -1)
``doit()`` can be used to find ``StateSpace`` equivalent for the system containing ``StateSpace`` objects.
>>> F1.doit()
StateSpace(Matrix([
[ 3, -3/4, -15/4, -37/2, -15],
[ 7/2, -39/8, 9/8, 39/4, 9],
[ 3, -41/4, -45/4, -51/2, -19],
[-9/2, 129/8, 73/8, 171/4, 36],
[-3/2, 47/8, 31/8, 85/4, 18]]), Matrix([
[-1/4, 19/4],
[ 3/8, -21/8],
[ 1/4, 29/4],
[ 3/8, -93/8],
[ 5/8, -35/8]]), Matrix([
[ 1, -15/4, -7/4, -21/2, -9],
[1/2, -13/8, -13/8, -19/4, -3]]), Matrix([
[-1/4, 11/4],
[ 1/8, 9/8]]))
See Also
========
Feedback, MIMOSeries, MIMOParallel
"""
def __new__(cls, sys1, sys2, sign=-1):
if not isinstance(sys1,
(TransferFunctionMatrix, MIMOSeries, StateSpaceBase)):
raise TypeError("Unsupported type for `sys1` in MIMO Feedback.")
if not isinstance(sys2,
(TransferFunctionMatrix, MIMOSeries, StateSpaceBase)):
raise TypeError("Unsupported type for `sys2` in MIMO Feedback.")
if sys1.num_inputs != sys2.num_outputs or \
sys1.num_outputs != sys2.num_inputs:
raise ValueError(filldedent("""
Product of `sys1` and `sys2` must
yield a square matrix."""))
if sign not in (-1, 1):
raise ValueError(filldedent("""
Unsupported type for feedback. `sign` arg should
either be 1 (positive feedback loop) or -1
(negative feedback loop)."""))
obj = super().__new__(cls, sys1, sys2, _sympify(sign))
if sys1.is_StateSpace_object or sys2.is_StateSpace_object:
obj.is_StateSpace_object = True
else:
if not _is_invertible(sys1, sys2, sign):
raise ValueError("Non-Invertible system inputted.")
obj.is_StateSpace_object = False
if not obj.is_StateSpace_object and sys1.var != sys2.var:
raise ValueError(filldedent("""
Both `sys1` and `sys2` should be using the
same complex variable."""))
_check_time_compatibility([sys1, sys2])
obj._is_continuous = sys1.is_continuous
return obj
@property
def sys1(self):
r"""
Returns the system placed on the feedforward path of the MIMO feedback interconnection.
Examples
========
>>> from sympy import pprint
>>> from sympy.abc import s
>>> from sympy.physics.control.lti import TransferFunction, TransferFunctionMatrix, MIMOFeedback
>>> tf1 = TransferFunction(s**2 + s + 1, s**2 - s + 1, s)
>>> tf2 = TransferFunction(1, s, s)
>>> tf3 = TransferFunction(1, 1, s)
>>> sys1 = TransferFunctionMatrix([[tf1, tf2], [tf2, tf1]])
>>> sys2 = TransferFunctionMatrix([[tf3, tf3], [tf3, tf2]])
>>> F_1 = MIMOFeedback(sys1, sys2, 1)
>>> F_1.sys1
TransferFunctionMatrix(((TransferFunction(s**2 + s + 1, s**2 - s + 1, s), TransferFunction(1, s, s)), (TransferFunction(1, s, s), TransferFunction(s**2 + s + 1, s**2 - s + 1, s))))
>>> pprint(_, use_unicode=False)
[ 2 ]
[s + s + 1 1 ]
[---------- - ]
[ 2 s ]
[s - s + 1 ]
[ ]
[ 2 ]
[ 1 s + s + 1]
[ - ----------]
[ s 2 ]
[ s - s + 1]{t}
"""
return self.args[0]
@property
def sys2(self):
r"""
Returns the feedback controller of the MIMO feedback interconnection.
Examples
========
>>> from sympy import pprint
>>> from sympy.abc import s
>>> from sympy.physics.control.lti import TransferFunction, TransferFunctionMatrix, MIMOFeedback
>>> tf1 = TransferFunction(s**2, s**3 - s + 1, s)
>>> tf2 = TransferFunction(1, s, s)
>>> tf3 = TransferFunction(1, 1, s)
>>> sys1 = TransferFunctionMatrix([[tf1, tf2], [tf2, tf1]])
>>> sys2 = TransferFunctionMatrix([[tf1, tf3], [tf3, tf2]])
>>> F_1 = MIMOFeedback(sys1, sys2)
>>> F_1.sys2
TransferFunctionMatrix(((TransferFunction(s**2, s**3 - s + 1, s), TransferFunction(1, 1, s)), (TransferFunction(1, 1, s), TransferFunction(1, s, s))))
>>> pprint(_, use_unicode=False)
[ 2 ]
[ s 1]
[---------- -]
[ 3 1]
[s - s + 1 ]
[ ]
[ 1 1]
[ - -]
[ 1 s]{t}
"""
return self.args[1]
@property
def var(self):
r"""
Returns the complex variable of the Laplace transform used by all
the transfer functions involved in the MIMO feedback loop.
Examples
========
>>> from sympy.abc import p
>>> from sympy.physics.control.lti import TransferFunction, TransferFunctionMatrix, MIMOFeedback
>>> tf1 = TransferFunction(p, 1 - p, p)
>>> tf2 = TransferFunction(1, p, p)
>>> tf3 = TransferFunction(1, 1, p)
>>> sys1 = TransferFunctionMatrix([[tf1, tf2], [tf2, tf1]])
>>> sys2 = TransferFunctionMatrix([[tf1, tf3], [tf3, tf2]])
>>> F_1 = MIMOFeedback(sys1, sys2, 1) # Positive feedback
>>> F_1.var
p
"""
return self.sys1.var
@property
def sign(self):
r"""
Returns the type of feedback interconnection of two models. ``1``
for Positive and ``-1`` for Negative.
"""
return self.args[2]
@property
def sensitivity(self):
r"""
Returns the sensitivity function matrix of the feedback loop.
Sensitivity of a closed-loop system is the ratio of change
in the open loop gain to the change in the closed loop gain.
.. note::
This method would not return the complementary
sensitivity function.
Examples
========
>>> from sympy import pprint
>>> from sympy.abc import p
>>> from sympy.physics.control.lti import TransferFunction, TransferFunctionMatrix, MIMOFeedback
>>> tf1 = TransferFunction(p, 1 - p, p)
>>> tf2 = TransferFunction(1, p, p)
>>> tf3 = TransferFunction(1, 1, p)
>>> sys1 = TransferFunctionMatrix([[tf1, tf2], [tf2, tf1]])
>>> sys2 = TransferFunctionMatrix([[tf1, tf3], [tf3, tf2]])
>>> F_1 = MIMOFeedback(sys1, sys2, 1) # Positive feedback
>>> F_2 = MIMOFeedback(sys1, sys2) # Negative feedback
>>> pprint(F_1.sensitivity, use_unicode=False)
[ 4 3 2 5 4 2 ]
[- p + 3*p - 4*p + 3*p - 1 p - 2*p + 3*p - 3*p + 1 ]
[---------------------------- -----------------------------]
[ 4 3 2 5 4 3 2 ]
[ p + 3*p - 8*p + 8*p - 3 p + 3*p - 8*p + 8*p - 3*p]
[ ]
[ 4 3 2 3 2 ]
[ p - p - p + p 3*p - 6*p + 4*p - 1 ]
[ -------------------------- -------------------------- ]
[ 4 3 2 4 3 2 ]
[ p + 3*p - 8*p + 8*p - 3 p + 3*p - 8*p + 8*p - 3 ]
>>> pprint(F_2.sensitivity, use_unicode=False)
[ 4 3 2 5 4 2 ]
[p - 3*p + 2*p + p - 1 p - 2*p + 3*p - 3*p + 1]
[------------------------ --------------------------]
[ 4 3 5 4 2 ]
[ p - 3*p + 2*p - 1 p - 3*p + 2*p - p ]
[ ]
[ 4 3 2 4 3 ]
[ p - p - p + p 2*p - 3*p + 2*p - 1 ]
[ ------------------- --------------------- ]
[ 4 3 4 3 ]
[ p - 3*p + 2*p - 1 p - 3*p + 2*p - 1 ]
"""
_sys1_mat = self.sys1.doit()._expr_mat
_sys2_mat = self.sys2.doit()._expr_mat
return (eye(self.sys1.num_inputs) - \
self.sign*_sys1_mat*_sys2_mat).inv()
@property
def num_inputs(self):
"""Returns the number of inputs of the system."""
return self.sys1.num_inputs
@property
def num_outputs(self):
"""Returns the number of outputs of the system."""
return self.sys1.num_outputs
def doit(self, cancel=True, expand=False, **hints):
r"""
Returns the resultant transfer function matrix obtained by the
feedback interconnection.
Examples
========
>>> from sympy import pprint
>>> from sympy.abc import s
>>> from sympy.physics.control.lti import TransferFunction, TransferFunctionMatrix, MIMOFeedback
>>> tf1 = TransferFunction(s, 1 - s, s)
>>> tf2 = TransferFunction(1, s, s)
>>> tf3 = TransferFunction(5, 1, s)
>>> tf4 = TransferFunction(s - 1, s, s)
>>> tf5 = TransferFunction(0, 1, s)
>>> sys1 = TransferFunctionMatrix([[tf1, tf2], [tf3, tf4]])
>>> sys2 = TransferFunctionMatrix([[tf3, tf5], [tf5, tf5]])
>>> F_1 = MIMOFeedback(sys1, sys2, 1)
>>> pprint(F_1, use_unicode=False)
/ [ s 1 ] [5 0] \-1 [ s 1 ]
| [----- - ] [- -] | [----- - ]
| [1 - s s ] [1 1] | [1 - s s ]
|I - [ ] *[ ] | * [ ]
| [ 5 s - 1] [0 0] | [ 5 s - 1]
| [ - -----] [- -] | [ - -----]
\ [ 1 s ]{t} [1 1]{t}/ [ 1 s ]{t}
>>> pprint(F_1.doit(), use_unicode=False)
[ -s s - 1 ]
[------- ----------- ]
[6*s - 1 s*(6*s - 1) ]
[ ]
[5*s - 5 (s - 1)*(6*s + 24)]
[------- ------------------]
[6*s - 1 s*(6*s - 1) ]{t}
If the user wants the resultant ``TransferFunctionMatrix`` object without
canceling the common factors then the ``cancel`` kwarg should be passed ``False``.
>>> pprint(F_1.doit(cancel=False), use_unicode=False)
[ s*(s - 1) s - 1 ]
[ ----------------- ----------- ]
[ (1 - s)*(6*s - 1) s*(6*s - 1) ]
[ ]
[s*(25*s - 25) + 5*(1 - s)*(6*s - 1) s*(s - 1)*(6*s - 1) + s*(25*s - 25)]
[----------------------------------- -----------------------------------]
[ (1 - s)*(6*s - 1) 2 ]
[ s *(6*s - 1) ]{t}
If the user wants the expanded form of the resultant transfer function matrix,
the ``expand`` kwarg should be passed as ``True``.
>>> pprint(F_1.doit(expand=True), use_unicode=False)
[ -s s - 1 ]
[------- -------- ]
[6*s - 1 2 ]
[ 6*s - s ]
[ ]
[ 2 ]
[5*s - 5 6*s + 18*s - 24]
[------- ----------------]
[6*s - 1 2 ]
[ 6*s - s ]{t}
"""
if self.is_StateSpace_object:
ss_class = StateSpace if self.is_continuous else DiscreteStateSpace
sys1_ss = self.sys1.doit().rewrite(ss_class)
sys2_ss = self.sys2.doit().rewrite(ss_class)
A1, B1, C1, D1 = sys1_ss.A, sys1_ss.B, sys1_ss.C, sys1_ss.D
A2, B2, C2, D2 = sys2_ss.A, sys2_ss.B, sys2_ss.C, sys2_ss.D
# Create identity matrices
I_inputs = eye(self.num_inputs)
I_outputs = eye(self.num_outputs)
# Compute F and its inverse
F = I_inputs - self.sign * D2 * D1
E = F.inv()
# Compute intermediate matrices
E_D2 = E * D2
E_C2 = E * C2
T1 = I_outputs + self.sign * D1 * E_D2
T2 = I_inputs + self.sign * E_D2 * D1
A = Matrix.vstack(
Matrix.hstack(A1 + self.sign * B1 * E_D2 * C1, self.sign * B1 * E_C2),
Matrix.hstack(B2 * T1 * C1, A2 + self.sign * B2 * D1 * E_C2)
)
B = Matrix.vstack(B1 * T2, B2 * D1 * T2)
C = Matrix.hstack(T1 * C1, self.sign * D1 * E_C2)
D = D1 * T2
return create_state_space(A, B, C, D, self.sampling_time)
_mat = self.sensitivity * self.sys1.doit()._expr_mat
_resultant_tfm = _to_TFM(_mat, self.var, self.sampling_time)
if cancel:
_resultant_tfm = _resultant_tfm.simplify()
if expand:
_resultant_tfm = _resultant_tfm.expand()
return _resultant_tfm
def _eval_rewrite_as_TransferFunctionMatrix(self, sys1, sys2, sign, **kwargs):
return self.doit()
def __neg__(self):
return MIMOFeedback(-self.sys1, -self.sys2, self.sign)
@property
def sampling_time(self):
return self.sys1.sampling_time
def _to_TFM(mat, var, sampling_time):
"""Private method to convert ImmutableMatrix to TransferFunctionMatrix
efficiently"""
if sampling_time == 0:
to_tf = lambda expr: \
TransferFunction.from_rational_expression(expr,var)
else:
to_tf = lambda expr: \
DiscreteTransferFunction.from_rational_expression(expr,var, sampling_time)
arg = [[to_tf(expr) for expr in row] for row in mat.tolist()]
return TransferFunctionMatrix(arg)
|
MIMOFeedback
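For reference, the transfer-matrix branch of ``doit`` computes the textbook closed-loop expression through the ``sensitivity`` property: with feedforward matrix G1, feedback matrix G2 and sign k (default -1),

```latex
T(s) = \underbrace{\bigl(I - k\,G_1(s)\,G_2(s)\bigr)^{-1}}_{\text{sensitivity}}\, G_1(s)
```

so negative feedback (k = -1) gives the familiar (I + G1 G2)^{-1} G1 shown in the class docstring.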
|
python
|
huggingface__transformers
|
src/transformers/models/beit/modeling_beit.py
|
{
"start": 41714,
"end": 43156
}
|
class ____(nn.Module):
"""
Pyramid Pooling Module (PPM) used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
align_corners (bool): align_corners argument of F.interpolate.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, pool_scales: tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
super().__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.blocks = []
for i, pool_scale in enumerate(pool_scales):
block = BeitPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
self.blocks.append(block)
self.add_module(str(i), block)
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
ppm_outs = []
for ppm in self.blocks:
ppm_out = ppm(x)
upsampled_ppm_out = nn.functional.interpolate(
ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
)
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
|
BeitPyramidPoolingModule
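A quick shape check, assuming the module's internal classes can still be imported from ``transformers.models.beit.modeling_beit`` (they are private, so the path may shift between releases); channel counts and spatial size are arbitrary:

```python
import torch

from transformers.models.beit.modeling_beit import BeitPyramidPoolingModule

ppm = BeitPyramidPoolingModule(
    pool_scales=(1, 2, 3, 6), in_channels=32, channels=16, align_corners=False
)
features = torch.randn(2, 32, 14, 14)  # (batch, in_channels, height, width)
outs = ppm(features)
# One output per pooling scale, each upsampled back to the input resolution.
print([tuple(o.shape) for o in outs])  # four tensors of shape (2, 16, 14, 14)
```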
|
python
|
getsentry__sentry
|
src/sentry/users/services/user/model.py
|
{
"start": 4807,
"end": 4875
}
|
class ____(TypedDict):
user_id: int
email: str
|
UserIdEmailArgs
|
python
|
getsentry__sentry
|
src/sentry/notifications/utils/actions.py
|
{
"start": 161,
"end": 920
}
|
class ____:
name: str
# Optional label. This falls back to name.
label: str | None = None
type: Literal["button", "select"] = "button"
# If this is a button type, a url is required.
url: str | None = None
# If this is a select type, the selected value.
value: str | None = None
# Denotes the type of action, used for routing
action_id: str | None = None
style: Literal["primary", "danger", "default"] | None = None
# TODO(mgaeta): Refactor this to be provider-agnostic
selected_options: Sequence[Mapping[str, Any]] | None = None
option_groups: Sequence[Mapping[str, Any]] | None = None
block_id: str | None = None
elements: Sequence[Mapping[str, Any]] | None = None
@dataclass
|
MessageAction
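Constructing an action is just filling in the fields above; a hedged sketch, assuming the class is the dataclass its field layout suggests and that the import path follows the record's file path (the URL is a placeholder):

```python
from sentry.notifications.utils.actions import MessageAction

# A link-style button: per the field comments, button actions need a url.
resolve_action = MessageAction(
    name="resolve",
    label="Resolve Issue",
    type="button",
    url="https://example.invalid/issues/123/resolve",
    style="primary",
)
print(resolve_action.label or resolve_action.name)
```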
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/message_create_params.py
|
{
"start": 10398,
"end": 10675
}
|
class ____(MessageCreateParamsBase, total=False):
stream: Literal[False]
"""Whether to incrementally stream the response using server-sent events.
See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.
"""
|
MessageCreateParamsNonStreaming
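Since this is a ``TypedDict`` with ``total=False``, it is used as a plain dict. A sketch assuming the base class's usual required fields (``model``, ``max_tokens``, ``messages``), which this record does not show; the model name is illustrative:

```python
from anthropic.types import MessageCreateParamsNonStreaming

params: MessageCreateParamsNonStreaming = {
    "model": "claude-sonnet-4-5",  # placeholder model name
    "max_tokens": 256,
    "messages": [{"role": "user", "content": "Say hello."}],
    "stream": False,  # the field this non-streaming variant pins down
}
print(params["stream"])
```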
|
python
|
ray-project__ray
|
python/ray/train/tests/test_iter_torch_batches_gpu.py
|
{
"start": 6297,
"end": 6681
}
|
class ____(PandasBatchCollateFn):
"""Collate function that returns id and value as a tuple of tensors."""
def __call__(self, batch: pd.DataFrame) -> Tuple[torch.Tensor, torch.Tensor]:
tensor_dict = convert_ndarray_batch_to_torch_tensor_batch(
batch.to_dict("series")
)
return tensor_dict["id"], tensor_dict["value"]
|
TuplePandasBatchCollateFn
|
python
|
pypa__warehouse
|
tests/unit/metrics/test_event_handlers.py
|
{
"start": 7283,
"end": 7783
}
|
class ____:
@pytest.mark.parametrize(
("matched_route", "route_tag"),
[(None, "route:null"), (pretend.stub(name="foo"), "route:foo")],
)
def test_emits_metric(self, pyramid_request, metrics, matched_route, route_tag):
pyramid_request.matched_route = matched_route
on_before_retry(pretend.stub(request=pyramid_request))
assert metrics.increment.calls == [
pretend.call("pyramid.request.retry", tags=[route_tag])
]
|
TestOnBeforeRetry
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/button_test.py
|
{
"start": 3200,
"end": 27180
}
|
class ____(DeltaGeneratorTestCase):
"""Test ability to marshall button protos."""
def test_button(self):
"""Test that it can be called."""
st.button("the label")
c = self.get_delta_from_queue().new_element.button
assert c.label == "the label"
assert not c.default
assert c.form_id == ""
assert c.type == "secondary"
assert not c.is_form_submitter
assert not c.disabled
@parameterized.expand(
[
(name, command, type_)
for name, command in get_button_command_matrix()
if name != "page_link"
for type_ in ["primary", "secondary", "tertiary"]
]
)
def test_type(self, name: str, command: Callable[..., Any], type_: str):
"""Test that it can be called with type param."""
command(type=type_)
c = getattr(self.get_delta_from_queue().new_element, name)
assert c.type == type_
@parameterized.expand(
[
(name, command, icon)
for name, command in get_button_command_matrix()
for icon in ["⚡", ":material/thumb_up:"]
]
)
def test_icon(self, name: str, command: Callable[..., Any], icon: str):
"""Test that it can be called with an icon."""
command(icon=icon)
c = getattr(self.get_delta_from_queue().new_element, name)
assert c.icon == icon
@parameterized.expand(get_button_command_matrix())
def test_just_disabled(self, name: str, command: Callable[..., Any]):
"""Test that it can be called with disabled param."""
command(disabled=True)
c = getattr(self.get_delta_from_queue().new_element, name)
assert c.disabled
@parameterized.expand(
[
(name, command)
for name, command in get_button_command_matrix()
if name in {"button", "download_button", "link_button"}
]
)
def test_shortcut_serialization(
self, name: str, command: Callable[..., Any]
) -> None:
"""Test that shortcuts are serialized for supported buttons."""
command(shortcut="Ctrl+K")
proto = getattr(self.get_delta_from_queue().new_element, name)
assert proto.shortcut == "ctrl+k"
def test_cmd_shortcut_alias(self) -> None:
"""Test that Cmd shortcuts are normalized."""
st.button("the label", shortcut="Cmd+O")
proto = self.get_delta_from_queue().new_element.button
assert proto.shortcut == "cmd+o"
@parameterized.expand(
[
(name, command)
for name, command in get_button_command_matrix()
if name in {"button", "download_button", "link_button"}
]
)
def test_shortcut_ignores_case_and_whitespace(
self, name: str, command: Callable[..., Any]
) -> None:
"""Test that shortcuts ignore casing and extraneous whitespace."""
command(shortcut=" CtRl + OptIon + ShIfT + N ")
proto = getattr(self.get_delta_from_queue().new_element, name)
assert proto.shortcut == "ctrl+alt+shift+n"
@parameterized.expand(
[
(name, command)
for name, command in get_button_command_matrix()
if name in {"button", "download_button", "link_button"}
]
)
def test_modifier_only_shortcuts_raise(
self, name: str, command: Callable[..., Any]
) -> None:
"""Test that modifier-only shortcuts raise an exception."""
with pytest.raises(StreamlitAPIException):
command(shortcut="ctrl")
with pytest.raises(StreamlitAPIException):
command(shortcut=" shift ")
@parameterized.expand(
[
("upper_r", "R"),
("lower_r", "r"),
("shift_r", "Shift+R"),
("ctrl_c", "Ctrl+C"),
("cmd_c", "cmd+c"),
]
)
def test_reserved_shortcuts_raise(self, _name: str, shortcut: str) -> None:
"""Test that reserved shortcuts raise an exception."""
with pytest.raises(StreamlitAPIException):
st.button("reserved", shortcut=shortcut)
def test_invalid_shortcut_raises(self) -> None:
"""Test that invalid shortcuts raise an exception."""
with pytest.raises(StreamlitAPIException):
st.button("invalid", shortcut="A+B")
def test_stable_id_button_with_key(self):
"""Test that the button ID is stable when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
st.button(
label="Label 1",
key="button_key",
help="Help 1",
type="secondary",
disabled=False,
width="content",
on_click=lambda: st.write("Button clicked"),
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
)
c1 = self.get_delta_from_queue().new_element.button
id1 = c1.id
st.button(
label="Label 2",
key="button_key",
help="Help 2",
type="primary",
disabled=True,
width="stretch",
on_click=lambda: st.write("Other button clicked"),
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
)
c2 = self.get_delta_from_queue().new_element.button
id2 = c2.id
assert id1 == id2
def test_stable_id_download_button_with_key(self):
"""Test that the download button ID is stable when a key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
st.download_button(
label="Label 1",
data="data1",
file_name="file1.txt",
mime="text/plain",
key="download_button_key",
help="Help 1",
type="secondary",
disabled=False,
width="content",
on_click=lambda: st.write("Button clicked"),
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
)
c1 = self.get_delta_from_queue().new_element.download_button
id1 = c1.id
st.download_button(
label="Label 2",
data="data2",
file_name="file2.txt",
mime="text/csv",
key="download_button_key",
help="Help 2",
type="primary",
disabled=True,
width="stretch",
on_click=lambda: st.write("Other button clicked"),
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
)
c2 = self.get_delta_from_queue().new_element.download_button
id2 = c2.id
assert id1 == id2
def test_use_container_width_true(self):
"""Test use_container_width=True is mapped to width='stretch'."""
for button_type, button_func, width in get_button_command_matrix(
test_params=["stretch", "content", 200]
):
with self.subTest(button_type, width=width):
button_func(
label=f"test_use_container_width_true {button_type} {width}",
use_container_width=True,
width=width,
)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
with self.subTest("no width"):
for button_type, button_func in get_button_command_matrix():
with self.subTest(button_type):
button_func(use_container_width=True)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
def test_use_container_width_false(self):
"""Test use_container_width=False is mapped to width='content'."""
for button_type, button_func, width in get_button_command_matrix(
test_params=[
"stretch",
"content",
200,
]
):
with self.subTest(button_type, width=width):
button_func(
label=f"test_use_container_width_false {button_type} {width}",
use_container_width=False,
width=width,
)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
with self.subTest("no width"):
for button_type, button_func in get_button_command_matrix():
with self.subTest(button_type):
button_func(use_container_width=False)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_cached_widget_replay_warning(self):
"""Test that a warning is shown when this widget is used inside a cached function."""
st.cache_data(lambda: st.button("the label"))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
def test_button_width_content(self):
"""Test button elements with width set to content."""
for button_type, button_func in get_button_command_matrix():
with self.subTest(button_type):
button_func(width="content")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_button_width_stretch(self):
"""Test button elements with width set to stretch."""
for button_type, button_func in get_button_command_matrix():
with self.subTest(button_type):
button_func(width="stretch")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
def test_button_width_pixels(self):
"""Test button elements with width set to pixels."""
test_cases = get_button_command_matrix()
for button_type, button_func in test_cases:
with self.subTest(f"{button_type} with fixed width"):
button_func(width=200)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert el.width_config.pixel_width == 200
def test_button_width_default(self):
"""Test button elements use content width by default."""
for button_type, button_func in get_button_command_matrix():
with self.subTest(button_type):
button_func()
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_button_invalid_width(self):
"""Test button elements with invalid width values."""
test_cases = get_button_command_matrix(
test_params=["invalid", -100, 0, 100.5, None]
)
for button_type, button_func, width in test_cases:
with self.subTest(f"{button_type} with width {width}"):
with pytest.raises(StreamlitAPIException):
button_func(width=width)
@parameterized.expand(
[
(name, command)
for name, command in get_button_command_matrix()
if name != "page_link"
]
)
def test_invalid_type(self, name: str, command: Callable[..., Any]):
"""Test with invalid type parameter."""
with pytest.raises(StreamlitAPIException) as exc_info:
command(type="invalid")
assert 'must be "primary", "secondary", or "tertiary"' in str(exc_info.value)
@parameterized.expand(
[
(name, command, "help text 1")
for name, command in get_button_command_matrix()
]
+ [
(
name,
command,
"""
This is a multiline help text.
It should be dedented properly.
""",
)
for name, command in get_button_command_matrix()
]
)
def test_with_help(self, name: str, command: Callable[..., Any], help_text: str):
"""Test with help parameter."""
command(help=help_text)
c = getattr(self.get_delta_from_queue().new_element, name)
assert c.help == dedent(help_text)
def test_download_button_in_form(self):
"""Test that download_button raises error when used in form."""
with st.form("test_form"):
with pytest.raises(StreamlitAPIException) as exc_info:
st.download_button("test", data="data")
assert "can't be used in an `st.form()`" in str(exc_info.value)
def test_button_serde_serialize(self):
"""Test ButtonSerde serialize method."""
from streamlit.elements.widgets.button import ButtonSerde
serde = ButtonSerde()
# Test serialization with True value
assert serde.serialize(True) is True
# Test serialization with False value
assert serde.serialize(False) is False
# Test serialization with non-boolean values
assert serde.serialize(1) is True
assert serde.serialize(0) is False
assert serde.serialize("") is False
assert serde.serialize("text") is True
def test_page_link_in_sidebar(self):
"""Test that page_link in sidebar uses stretch width."""
# Create a mock sidebar that returns True for in_sidebar
with patch("streamlit.elements.widgets.button.in_sidebar", return_value=True):
st.sidebar.page_link("https://example.com", label="Test", width="content")
el = self.get_delta_from_queue().new_element
# Even though we specified content, it should be stretch in sidebar
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
def test_page_link_with_path_object(self):
"""Test page_link with pathlib.Path object."""
# Create a mock context with pages
ctx = MagicMock()
ctx.main_script_path = "/app/main.py"
ctx.pages_manager.get_pages.return_value = {
"page1": {
"script_path": "/app/pages/page1.py",
"page_name": "Page 1",
"url_pathname": "page1",
"page_script_hash": "hash123",
}
}
with patch(
"streamlit.elements.widgets.button.get_script_run_ctx", return_value=ctx
):
with patch(
"streamlit.file_util.get_main_script_directory", return_value="/app"
):
with patch("os.path.realpath", return_value="/app/pages/page1.py"):
st.page_link(Path("pages/page1.py"))
c = self.get_delta_from_queue().new_element.page_link
assert c.page == "page1"
assert c.page_script_hash == "hash123"
assert c.label == "Page 1"
def test_page_link_page_not_found(self):
"""Test page_link with non-existent page."""
ctx = MagicMock()
ctx.main_script_path = "/app/main.py"
ctx.pages_manager.get_pages.return_value = {}
with patch(
"streamlit.elements.widgets.button.get_script_run_ctx", return_value=ctx
):
with patch(
"streamlit.file_util.get_main_script_directory", return_value="/app"
):
with patch(
"os.path.realpath", return_value="/app/pages/nonexistent.py"
):
with pytest.raises(StreamlitPageNotFoundError):
st.page_link("pages/nonexistent.py")
def test_page_link_with_streamlit_page(self):
"""Test page_link with StreamlitPage object."""
# Create a StreamlitPage manually without going through the constructor
# that checks for file existence
page = MagicMock(spec=StreamlitPage)
page._page = Path("/app/page.py")
page._title = "Test Page"
page._icon = "🏠"
page._url_path = "test-page"
page._script_hash = "test_hash"
page._default = False
page.title = "Test Page"
page.icon = "🏠"
page.url_path = "test-page"
ctx = MagicMock()
with patch(
"streamlit.elements.widgets.button.get_script_run_ctx", return_value=ctx
):
st.page_link(page)
c = self.get_delta_from_queue().new_element.page_link
assert c.page == "test-page"
assert c.page_script_hash == "test_hash"
assert c.label == "Test Page"
assert c.icon == "🏠"
def test_marshall_file_with_text_io(self):
"""Test marshall_file with TextIOWrapper."""
# Create a TextIOWrapper
text_io = io.TextIOWrapper(io.BytesIO(), encoding="utf-8", write_through=True)
text_io.write("Hello, World!")
text_io.seek(0)
proto = DownloadButtonProto()
marshall_file("test_coords", text_io, proto, None)
# The mock runtime in DeltaGeneratorTestCase creates a real MediaFileManager
assert proto.url.startswith("/media/")
assert proto.url.endswith(".txt")
def test_marshall_file_with_bytes_io(self):
"""Test marshall_file with BytesIO."""
bytes_data = io.BytesIO(b"Binary data")
proto = DownloadButtonProto()
marshall_file("test_coords", bytes_data, proto, None)
# The mock runtime in DeltaGeneratorTestCase creates a real MediaFileManager
assert proto.url.startswith("/media/")
assert proto.url.endswith(".bin")
def test_marshall_file_with_buffered_reader(self):
"""Test marshall_file with BufferedReader."""
# Create a temporary file to test BufferedReader
with tempfile.NamedTemporaryFile(mode="wb", delete=False) as tmp:
tmp.write(b"Test data")
tmp_path = tmp.name
try:
with open(tmp_path, "rb") as f:
proto = DownloadButtonProto()
marshall_file("test_coords", f, proto, None)
# The mock runtime in DeltaGeneratorTestCase creates a real MediaFileManager
assert proto.url.startswith("/media/")
assert proto.url.endswith(".bin")
finally:
os.unlink(tmp_path)
def test_marshall_file_with_raw_io(self):
"""Test marshall_file with RawIOBase."""
# Create a custom RawIOBase for testing
class MockRawIO(io.RawIOBase):
def __init__(self, data):
self.data = data
self.pos = 0
def read(self, size=-1):
if size == -1:
result = self.data[self.pos :]
self.pos = len(self.data)
else:
result = self.data[self.pos : self.pos + size]
self.pos += len(result)
return result
def seek(self, pos, whence=0):
if whence == 0:
self.pos = pos
return self.pos
raw_io = MockRawIO(b"Raw IO data")
proto = DownloadButtonProto()
marshall_file("test_coords", raw_io, proto, None)
# The mock runtime in DeltaGeneratorTestCase creates a real MediaFileManager
assert proto.url.startswith("/media/")
assert proto.url.endswith(".bin")
def test_marshall_file_invalid_data_type(self):
"""Test marshall_file with invalid data type."""
proto = DownloadButtonProto()
with pytest.raises(StreamlitAPIException) as exc_info:
marshall_file("test_coords", {"invalid": "data"}, proto, None)
assert "Invalid binary data format" in str(exc_info.value)
def test_marshall_file_with_runtime(self):
"""Test marshall_file with runtime exists."""
# Mock runtime.exists() to return True
mock_runtime = MagicMock()
mock_runtime.exists.return_value = True
mock_instance = MagicMock()
mock_instance.media_file_mgr.add.return_value = "/media/test_file"
mock_runtime.get_instance.return_value = mock_instance
with patch("streamlit.elements.widgets.button.runtime", mock_runtime):
proto = DownloadButtonProto()
marshall_file("test_coords", "test data", proto, None, "test.txt")
assert proto.url == "/media/test_file"
mock_instance.media_file_mgr.add.assert_called_once()
def test_marshall_file_empty_raw_io(self):
"""Test marshall_file with RawIOBase that returns None."""
class EmptyRawIO(io.RawIOBase):
def read(self, size=-1):
return None
def seek(self, pos, whence=0):
return 0
raw_io = EmptyRawIO()
proto = DownloadButtonProto()
# The mock runtime in DeltaGeneratorTestCase creates a real MediaFileManager
# so the URL will be populated
marshall_file("test_coords", raw_io, proto, None)
# Empty data should result in an empty file being added
assert proto.url.startswith("/media/")
assert proto.url.endswith(".bin")
def test_download_button_on_click_rerun(self):
"""Test download_button with on_click='rerun'."""
st.download_button("test", data="data", on_click="rerun")
c = self.get_delta_from_queue().new_element.download_button
assert c.ignore_rerun is False
def test_download_button_on_click_none(self):
"""Test download_button with on_click=None (should behave like 'rerun')."""
st.download_button("test", data="data", on_click=None)
c = self.get_delta_from_queue().new_element.download_button
assert c.ignore_rerun is False
def test_download_button_on_click_callback(self):
"""Test download_button with callback function."""
def callback():
pass
st.download_button("test", data="data", on_click=callback)
c = self.get_delta_from_queue().new_element.download_button
assert c.ignore_rerun is False
|
ButtonTest
|
python
|
GoogleCloudPlatform__python-docs-samples
|
dataflow/flex-templates/pipeline_with_dependencies/src/my_package/my_transforms.py
|
{
"start": 719,
"end": 925
}
|
class ____(beam.DoFn):
"""Parses each line of input text into words."""
def process(self, element: str) -> Iterable[str]:
return re.findall(r"[\w\']+", element, re.UNICODE)
|
WordExtractingDoFn
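Wired into a pipeline, the DoFn yields one element per extracted word. A minimal local-runner sketch; the sample sentences are arbitrary and the import path follows the record's file layout:

```python
import apache_beam as beam

from my_package.my_transforms import WordExtractingDoFn

with beam.Pipeline() as pipeline:
    (
        pipeline
        | beam.Create(["The quick brown fox", "jumps over the lazy dog"])
        | beam.ParDo(WordExtractingDoFn())  # one output element per word
        | beam.Map(print)
    )
```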
|
python
|
celery__celery
|
t/smoke/tests/test_tasks.py
|
{
"start": 594,
"end": 4040
}
|
class ____(SuiteOperations):
@pytest.fixture
def default_worker_app(self, default_worker_app: Celery) -> Celery:
app = default_worker_app
app.conf.worker_prefetch_multiplier = 1
app.conf.worker_concurrency = 1
return app
@pytest.mark.parametrize(
"method,expected_error",
[
(TaskTermination.Method.SIGKILL, WorkerLostError),
(TaskTermination.Method.SYSTEM_EXIT, WorkerLostError),
(TaskTermination.Method.DELAY_TIMEOUT, TimeLimitExceeded),
# Exhausting the memory messes up the CI environment
# (TaskTermination.Method.EXHAUST_MEMORY, WorkerLostError),
],
)
def test_child_process_respawn(
self,
celery_setup: CeleryTestSetup,
method: TaskTermination.Method,
expected_error: Exception,
):
pinfo_before = celery_setup.worker.get_running_processes_info(
["pid", "name"],
filters={"name": "celery"},
)
with pytest.raises(expected_error):
self.apply_self_termination_task(celery_setup.worker, method).get()
# Allowing the worker to respawn the child process before we continue
@retry(
stop=stop_after_attempt(42),
wait=wait_fixed(0.1),
reraise=True,
)
def wait_for_two_celery_processes():
pinfo_current = celery_setup.worker.get_running_processes_info(
["pid", "name"],
filters={"name": "celery"},
)
if len(pinfo_current) != 2:
assert False, f"Child process did not respawn with method: {method.name}"
wait_for_two_celery_processes()
pinfo_after = celery_setup.worker.get_running_processes_info(
["pid", "name"],
filters={"name": "celery"},
)
pids_before = {item["pid"] for item in pinfo_before}
pids_after = {item["pid"] for item in pinfo_after}
assert len(pids_before | pids_after) == 3
@pytest.mark.parametrize(
"method,expected_log,expected_exception_msg",
[
(
TaskTermination.Method.SIGKILL,
"Worker exited prematurely: signal 9 (SIGKILL)",
None,
),
(
TaskTermination.Method.SYSTEM_EXIT,
"Worker exited prematurely: exitcode 1",
None,
),
(
TaskTermination.Method.DELAY_TIMEOUT,
"Hard time limit (2s) exceeded for t.smoke.tasks.self_termination_delay_timeout",
"TimeLimitExceeded(2,)",
),
# Exhausting the memory messes up the CI environment
# (
# TaskTermination.Method.EXHAUST_MEMORY,
# "Worker exited prematurely: signal 9 (SIGKILL)",
# None,
# ),
],
)
def test_terminated_task_logs_correct_error(
self,
celery_setup: CeleryTestSetup,
method: TaskTermination.Method,
expected_log: str,
expected_exception_msg: str | None,
):
try:
self.apply_self_termination_task(celery_setup.worker, method).get()
except Exception as err:
            assert (expected_exception_msg or expected_log) in str(err)
celery_setup.worker.assert_log_exists(expected_log)
|
test_task_termination
|
python
|
scipy__scipy
|
scipy/cluster/tests/test_vq.py
|
{
"start": 6585,
"end": 10174
}
|
class ____:
def test_py_vq(self, xp):
initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
# label1.dtype varies between int32 and int64 over platforms
label1 = py_vq(xp.asarray(X), xp.asarray(initc))[0]
xp_assert_equal(label1, xp.asarray(LABEL1, dtype=xp.int64),
check_dtype=False)
@pytest.mark.skipif(SCIPY_ARRAY_API,
reason='`np.matrix` unsupported in array API mode')
def test_py_vq_matrix(self):
initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
# label1.dtype varies between int32 and int64 over platforms
label1 = py_vq(matrix(X), matrix(initc))[0]
assert_array_equal(label1, LABEL1)
def test_vq(self, xp):
initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
label1, _ = _vq.vq(X, initc)
assert_array_equal(label1, LABEL1)
_, _ = vq(xp.asarray(X), xp.asarray(initc))
@pytest.mark.skipif(SCIPY_ARRAY_API,
reason='`np.matrix` unsupported in array API mode')
def test_vq_matrix(self):
initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
label1, _ = _vq.vq(matrix(X), matrix(initc))
assert_array_equal(label1, LABEL1)
_, _ = vq(matrix(X), matrix(initc))
def test_vq_1d(self, xp):
# Test special rank 1 vq algo, python implementation.
data = X[:, 0]
initc = data[:3]
a, b = _vq.vq(data, initc)
data = xp.asarray(data)
initc = xp.asarray(initc)
ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
# ta.dtype varies between int32 and int64 over platforms
xp_assert_equal(ta, xp.asarray(a, dtype=xp.int64), check_dtype=False)
xp_assert_equal(tb, xp.asarray(b))
def test__vq_sametype(self):
a = np.asarray([1.0, 2.0])
b = a.astype(np.float32)
assert_raises(TypeError, _vq.vq, a, b)
def test__vq_invalid_type(self):
a = np.asarray([1, 2], dtype=int)
assert_raises(TypeError, _vq.vq, a, a)
def test_vq_large_nfeat(self, xp):
X = np.random.rand(20, 20)
code_book = np.random.rand(3, 20)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(
xp.asarray(X), xp.asarray(code_book)
)
xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
# codes1.dtype varies between int32 and int64 over platforms
xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
X = X.astype(np.float32)
code_book = code_book.astype(np.float32)
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(
xp.asarray(X), xp.asarray(code_book)
)
xp_assert_close(dis1, xp.asarray(dis0, dtype=xp.float64), rtol=1e-5)
# codes1.dtype varies between int32 and int64 over platforms
xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
def test_vq_large_features(self, xp):
X = np.random.rand(10, 5) * 1000000
code_book = np.random.rand(2, 5) * 1000000
codes0, dis0 = _vq.vq(X, code_book)
codes1, dis1 = py_vq(
xp.asarray(X), xp.asarray(code_book)
)
xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
# codes1.dtype varies between int32 and int64 over platforms
xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
# Whole class skipped on GPU for now;
# once pdist/cdist are hooked up for CuPy, more tests will work
@make_xp_test_case(kmeans, kmeans2)
|
TestVq
|
python
|
astropy__astropy
|
astropy/units/tests/test_quantity_non_ufuncs.py
|
{
"start": 85332,
"end": 92730
}
|
class ____:
tested_module = np.lib.recfunctions
@classmethod
def setup_class(cls):
cls.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
cls.pv_t_dtype = np.dtype(
[("pv", np.dtype([("pp", "f8"), ("vv", "f8")])), ("t", "f8")]
)
cls.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], cls.pv_dtype)
cls.pv_t = np.array(
[((4.0, 2.5), 0.0), ((5.0, 5.0), 1.0), ((6.0, 7.5), 2.0)], cls.pv_t_dtype
)
cls.pv_unit = u.StructuredUnit((u.km, u.km / u.s), ("p", "v"))
cls.pv_t_unit = u.StructuredUnit((cls.pv_unit, u.s), ("pv", "t"))
cls.q_pv = cls.pv << cls.pv_unit
cls.q_pv_t = cls.pv_t << cls.pv_t_unit
def test_structured_to_unstructured(self):
# can't unstructure something with incompatible units
with pytest.raises(u.UnitConversionError, match="'m'"):
rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))
# it works if all the units are equal
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)
# also if the units are convertible
struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [0, 0, 600] * u.eV)
struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
with u.add_enabled_equivalencies(u.mass_energy()):
unstruct = rfn.structured_to_unstructured(struct)
u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)
# and if the dtype is nested
struct = [(5, (400.0, 3e6))] * u.Unit("m, (cm, um)")
unstruct = rfn.structured_to_unstructured(struct)
assert_array_equal(unstruct, [[5, 4, 3]] * u.m)
# For the other tests of ``structured_to_unstructured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``
def test_unstructured_to_structured(self):
unstruct = [1, 2, 3] * u.m
dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])
# It works.
struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
assert struct.unit == u.Unit("(m, m, m)")
assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)
# Can't structure something that's already structured.
with pytest.raises(ValueError, match="arr must have at least one dimension"):
rfn.unstructured_to_structured(struct, dtype=dtype)
        # For the other tests of ``unstructured_to_structured``, see
# ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``
def test_merge_arrays_repeat_dtypes(self):
# Cannot merge things with repeat dtypes.
q1 = u.Quantity([(1,)], dtype=[("f1", float)])
q2 = u.Quantity([(1,)], dtype=[("f1", float)])
with pytest.raises(ValueError, match="field 'f1' occurs more than once"):
rfn.merge_arrays((q1, q2))
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays(self, flatten):
"""Test `numpy.lib.recfunctions.merge_arrays`."""
# Merge single normal array.
arr = rfn.merge_arrays(self.q_pv["p"], flatten=flatten)
assert_array_equal(arr["f0"], self.q_pv["p"])
assert arr.unit == (u.km,)
# Merge single structured array.
arr = rfn.merge_arrays(self.q_pv, flatten=flatten)
assert_array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
# Merge 1-element tuple.
arr = rfn.merge_arrays((self.q_pv,), flatten=flatten)
assert np.array_equal(arr, self.q_pv)
assert arr.unit == (u.km, u.km / u.s)
def test_merge_array_nested_structure(self):
# Merge 2-element tuples without flattening.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t))
assert_array_equal(arr["f0"], self.q_pv)
assert_array_equal(arr["f1"], self.q_pv_t)
assert arr.unit == ((u.km, u.km / u.s), ((u.km, u.km / u.s), u.s))
# For a structured array, all elements should be treated as dimensionless.
arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value))
expected_value = rfn.merge_arrays((self.q_pv["p"].value, self.q_pv.value))
assert_array_equal(arr.value, expected_value)
assert arr.unit == u.Unit((self.q_pv["p"].unit, (u.one, u.one)))
def test_merge_arrays_flatten_nested_structure(self):
# Merge 2-element tuple, flattening it.
arr = rfn.merge_arrays((self.q_pv, self.q_pv_t), flatten=True)
assert_array_equal(arr["p"], self.q_pv["p"])
assert_array_equal(arr["v"], self.q_pv["v"])
assert_array_equal(arr["pp"], self.q_pv_t["pv"]["pp"])
assert_array_equal(arr["vv"], self.q_pv_t["pv"]["vv"])
assert_array_equal(arr["t"], self.q_pv_t["t"])
assert arr.unit == (u.km, u.km / u.s, u.km, u.km / u.s, u.s)
# For a structured array, all elements should be treated as dimensionless.
arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value), flatten=True)
expected_value = rfn.merge_arrays(
(self.q_pv["p"].value, self.q_pv.value), flatten=True
)
assert_array_equal(arr.value, expected_value)
assert arr.unit == u.Unit((self.q_pv["p"].unit, u.one, u.one))
def test_merge_arrays_asrecarray(self):
with pytest.raises(ValueError, match="asrecarray=True is not supported."):
rfn.merge_arrays(self.q_pv, asrecarray=True)
def test_merge_arrays_usemask(self):
with pytest.raises(ValueError, match="usemask=True is not supported."):
rfn.merge_arrays(self.q_pv, usemask=True)
@pytest.mark.parametrize("flatten", [True, False])
def test_merge_arrays_str(self, flatten):
with pytest.raises(
TypeError, match="the Quantity implementation cannot handle"
):
rfn.merge_arrays((self.q_pv, np.array(["a", "b", "c"])), flatten=flatten)
all_wrapped_functions = get_wrapped_functions(
np, np.fft, np.linalg, np.lib.recfunctions
)
if NUMPY_LT_2_2:
# ref https://github.com/numpy/numpy/issues/27451
all_wrapped_functions |= SUPPORTED_NEP35_FUNCTIONS
tested_functions = get_covered_functions(locals())
untested_functions = set()
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
rec_functions = {
rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
rfn.stack_arrays, rfn.find_duplicates,
rfn.recursive_fill_fields, rfn.require_fields,
} # fmt: skip
untested_functions |= rec_functions
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped_functions == (tested_functions | untested_functions)
|
TestRecFunctions
|
python
|
scikit-learn__scikit-learn
|
sklearn/svm/_classes.py
|
{
"start": 52030,
"end": 58252
}
|
class ____(RegressorMixin, BaseLibSVM):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
nu : float, default=0.5
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
C : float, default=1.0
Penalty parameter C of the error term. For an intuitive visualization
of the effects of scaling the regularization parameter C, see
:ref:`sphx_glr_auto_examples_svm_plot_svm_scale_c.py`.
kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \
default='rbf'
Specifies the kernel type to be used in the algorithm.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
        For an intuitive visualization of different kernel types, see
        :ref:`sphx_glr_auto_examples_svm_plot_svm_regression.py`.
degree : int, default=3
Degree of the polynomial kernel function ('poly').
Must be non-negative. Ignored by all other kernels.
gamma : {'scale', 'auto'} or float, default='scale'
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
- if ``gamma='scale'`` (default) is passed then it uses
1 / (n_features * X.var()) as value of gamma,
- if 'auto', uses 1 / n_features
- if float, must be non-negative.
.. versionchanged:: 0.22
The default value of ``gamma`` changed from 'auto' to 'scale'.
coef0 : float, default=0.0
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : bool, default=True
Whether to use the shrinking heuristic.
See the :ref:`User Guide <shrinking_svm>`.
tol : float, default=1e-3
Tolerance for stopping criterion.
cache_size : float, default=200
Specify the size of the kernel cache (in MB).
verbose : bool, default=False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, default=-1
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
coef_ : ndarray of shape (1, n_features)
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
dual_coef_ : ndarray of shape (1, n_SV)
Coefficients of the support vector in the decision function.
fit_status_ : int
0 if correctly fitted, 1 otherwise (will raise warning)
intercept_ : ndarray of shape (1,)
Constants in decision function.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations run by the optimization routine to fit the model.
.. versionadded:: 1.1
n_support_ : ndarray of shape (1,), dtype=int32
Number of support vectors.
shape_fit_ : tuple of int of shape (n_dimensions_of_X,)
Array dimensions of training vector ``X``.
support_ : ndarray of shape (n_SV,)
Indices of support vectors.
support_vectors_ : ndarray of shape (n_SV, n_features)
Support vectors.
See Also
--------
NuSVC : Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR : Epsilon Support Vector Machine for regression implemented with
libsvm.
References
----------
.. [1] `LIBSVM: A Library for Support Vector Machines
<http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf>`_
.. [2] `Platt, John (1999). "Probabilistic Outputs for Support Vector
Machines and Comparisons to Regularized Likelihood Methods"
<https://citeseerx.ist.psu.edu/doc_view/pid/42e5ed832d4310ce4378c44d05570439df28a393>`_
Examples
--------
>>> from sklearn.svm import NuSVR
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> regr = make_pipeline(StandardScaler(), NuSVR(C=1.0, nu=0.1))
>>> regr.fit(X, y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('nusvr', NuSVR(nu=0.1))])
"""
_impl = "nu_svr"
_parameter_constraints: dict = {**BaseLibSVM._parameter_constraints}
for unused_param in ["class_weight", "epsilon", "probability", "random_state"]:
_parameter_constraints.pop(unused_param)
def __init__(
self,
*,
nu=0.5,
C=1.0,
kernel="rbf",
degree=3,
gamma="scale",
coef0=0.0,
shrinking=True,
tol=1e-3,
cache_size=200,
verbose=False,
max_iter=-1,
):
super().__init__(
kernel=kernel,
degree=degree,
gamma=gamma,
coef0=coef0,
tol=tol,
C=C,
nu=nu,
epsilon=0.0,
shrinking=shrinking,
probability=False,
cache_size=cache_size,
class_weight=None,
verbose=verbose,
max_iter=max_iter,
random_state=None,
)
|
NuSVR
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/triggers/comprehend.py
|
{
"start": 1100,
"end": 2531
}
|
class ____(AwsBaseWaiterTrigger):
"""
Trigger when a Comprehend pii entities detection job is complete.
:param job_id: The id of the Comprehend pii entities detection job.
:param waiter_delay: The amount of time in seconds to wait between attempts. (default: 120)
:param waiter_max_attempts: The maximum number of attempts to be made. (default: 75)
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
*,
job_id: str,
waiter_delay: int = 120,
waiter_max_attempts: int = 75,
aws_conn_id: str | None = "aws_default",
) -> None:
super().__init__(
serialized_fields={"job_id": job_id},
waiter_name="pii_entities_detection_job_complete",
waiter_args={"JobId": job_id},
failure_message="Comprehend start pii entities detection job failed.",
status_message="Status of Comprehend start pii entities detection job is",
status_queries=["PiiEntitiesDetectionJobProperties.JobStatus"],
return_key="job_id",
return_value=job_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return ComprehendHook(aws_conn_id=self.aws_conn_id)
|
ComprehendPiiEntitiesDetectionJobCompletedTrigger
|
python
|
django-import-export__django-import-export
|
tests/core/migrations/0010_uuidbook.py
|
{
"start": 104,
"end": 636
}
|
class ____(migrations.Migration):
dependencies = [
("core", "0009_auto_20211111_0807"),
]
operations = [
migrations.CreateModel(
name="UUIDBook",
fields=[
(
"id",
models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False
),
),
("name", models.CharField(max_length=100, verbose_name="Book name")),
],
),
]
|
Migration
|
python
|
davidhalter__jedi
|
jedi/inference/gradual/typing.py
|
{
"start": 13911,
"end": 13957
}
|
class ____(BaseTypingInstance):
pass
|
Generic
|
python
|
sympy__sympy
|
sympy/functions/elementary/miscellaneous.py
|
{
"start": 9694,
"end": 21258
}
|
class ____(Expr, LatticeOp):
def __new__(cls, *args, **assumptions):
from sympy.core.parameters import global_parameters
evaluate = assumptions.pop('evaluate', global_parameters.evaluate)
args = (sympify(arg) for arg in args)
# first standard filter, for cls.zero and cls.identity
# also reshape Max(a, Max(b, c)) to Max(a, b, c)
if evaluate:
try:
args = frozenset(cls._new_args_filter(args))
except ShortCircuit:
return cls.zero
# remove redundant args that are easily identified
args = cls._collapse_arguments(args, **assumptions)
# find local zeros
args = cls._find_localzeros(args, **assumptions)
args = frozenset(args)
if not args:
return cls.identity
if len(args) == 1:
return list(args).pop()
# base creation
obj = Expr.__new__(cls, *ordered(args), **assumptions)
obj._argset = args
return obj
@classmethod
def _collapse_arguments(cls, args, **assumptions):
"""Remove redundant args.
Examples
========
>>> from sympy import Min, Max
>>> from sympy.abc import a, b, c, d, e
Any arg in parent that appears in any
parent-like function in any of the flat args
of parent can be removed from that sub-arg:
>>> Min(a, Max(b, Min(a, c, d)))
Min(a, Max(b, Min(c, d)))
If the arg of parent appears in an opposite-than parent
function in any of the flat args of parent that function
can be replaced with the arg:
>>> Min(a, Max(b, Min(c, d, Max(a, e))))
Min(a, Max(b, Min(a, c, d)))
"""
if not args:
return args
args = list(ordered(args))
if cls == Min:
other = Max
else:
other = Min
# find global comparable max of Max and min of Min if a new
# value is being introduced in these args at position 0 of
# the ordered args
if args[0].is_number:
sifted = mins, maxs = [], []
for i in args:
for v in walk(i, Min, Max):
if v.args[0].is_comparable:
sifted[isinstance(v, Max)].append(v)
small = Min.identity
for i in mins:
v = i.args[0]
if v.is_number and (v < small) == True:
small = v
big = Max.identity
for i in maxs:
v = i.args[0]
if v.is_number and (v > big) == True:
big = v
# at the point when this function is called from __new__,
# there may be more than one numeric arg present since
# local zeros have not been handled yet, so look through
# more than the first arg
if cls == Min:
for arg in args:
if not arg.is_number:
break
if (arg < small) == True:
small = arg
elif cls == Max:
for arg in args:
if not arg.is_number:
break
if (arg > big) == True:
big = arg
T = None
if cls == Min:
if small != Min.identity:
other = Max
T = small
elif big != Max.identity:
other = Min
T = big
if T is not None:
# remove numerical redundancy
for i in range(len(args)):
a = args[i]
if isinstance(a, other):
a0 = a.args[0]
if ((a0 > T) if other == Max else (a0 < T)) == True:
args[i] = cls.identity
# remove redundant symbolic args
def do(ai, a):
if not isinstance(ai, (Min, Max)):
return ai
cond = a in ai.args
if not cond:
return ai.func(*[do(i, a) for i in ai.args],
evaluate=False)
if isinstance(ai, cls):
return ai.func(*[do(i, a) for i in ai.args if i != a],
evaluate=False)
return a
for i, a in enumerate(args):
args[i + 1:] = [do(ai, a) for ai in args[i + 1:]]
# factor out common elements as for
# Min(Max(x, y), Max(x, z)) -> Max(x, Min(y, z))
# and vice versa when swapping Min/Max -- do this only for the
# easy case where all functions contain something in common;
# trying to find some optimal subset of args to modify takes
# too long
def factor_minmax(args):
is_other = lambda arg: isinstance(arg, other)
other_args, remaining_args = sift(args, is_other, binary=True)
if not other_args:
return args
# Min(Max(x, y, z), Max(x, y, u, v)) -> {x,y}, ({z}, {u,v})
arg_sets = [set(arg.args) for arg in other_args]
common = set.intersection(*arg_sets)
if not common:
return args
new_other_args = list(common)
arg_sets_diff = [arg_set - common for arg_set in arg_sets]
# If any set is empty after removing common then all can be
# discarded e.g. Min(Max(a, b, c), Max(a, b)) -> Max(a, b)
if all(arg_sets_diff):
other_args_diff = [other(*s, evaluate=False) for s in arg_sets_diff]
new_other_args.append(cls(*other_args_diff, evaluate=False))
other_args_factored = other(*new_other_args, evaluate=False)
return remaining_args + [other_args_factored]
if len(args) > 1:
args = factor_minmax(args)
return args
@classmethod
def _new_args_filter(cls, arg_sequence):
"""
Generator filtering args.
first standard filter, for cls.zero and cls.identity.
Also reshape ``Max(a, Max(b, c))`` to ``Max(a, b, c)``,
and check arguments for comparability
"""
for arg in arg_sequence:
# pre-filter, checking comparability of arguments
if not isinstance(arg, Expr) or arg.is_extended_real is False or (
arg.is_number and
not arg.is_comparable):
raise ValueError("The argument '%s' is not comparable." % arg)
if arg == cls.zero:
raise ShortCircuit(arg)
elif arg == cls.identity:
continue
elif arg.func == cls:
yield from arg.args
else:
yield arg
@classmethod
def _find_localzeros(cls, values, **options):
"""
Sequentially allocate values to localzeros.
When a value is identified as being more extreme than another member it
replaces that member; if this is never true, then the value is simply
appended to the localzeros.
"""
localzeros = set()
for v in values:
is_newzero = True
localzeros_ = list(localzeros)
for z in localzeros_:
if id(v) == id(z):
is_newzero = False
else:
con = cls._is_connected(v, z)
if con:
is_newzero = False
if con is True or con == cls:
localzeros.remove(z)
localzeros.update([v])
if is_newzero:
localzeros.update([v])
return localzeros
@classmethod
def _is_connected(cls, x, y):
"""
Check if x and y are connected somehow.
"""
for i in range(2):
if x == y:
return True
t, f = Max, Min
for op in "><":
for j in range(2):
try:
if op == ">":
v = x >= y
else:
v = x <= y
except TypeError:
return False # non-real arg
if not v.is_Relational:
return t if v else f
t, f = f, t
x, y = y, x
x, y = y, x # run next pass with reversed order relative to start
# simplification can be expensive, so be conservative
# in what is attempted
x = factor_terms(x - y)
y = S.Zero
return False
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
for a in self.args:
i += 1
da = a.diff(s)
if da.is_zero:
continue
try:
df = self.fdiff(i)
except ArgumentIndexError:
df = super().fdiff(i)
l.append(df * da)
return Add(*l)
def _eval_rewrite_as_Abs(self, *args, **kwargs):
from sympy.functions.elementary.complexes import Abs
s = (args[0] + self.func(*args[1:]))/2
d = abs(args[0] - self.func(*args[1:]))/2
return (s + d if isinstance(self, Max) else s - d).rewrite(Abs)
def evalf(self, n=15, **options):
return self.func(*[a.evalf(n, **options) for a in self.args])
def n(self, *args, **kwargs):
return self.evalf(*args, **kwargs)
_eval_is_algebraic = lambda s: _torf(i.is_algebraic for i in s.args)
_eval_is_antihermitian = lambda s: _torf(i.is_antihermitian for i in s.args)
_eval_is_commutative = lambda s: _torf(i.is_commutative for i in s.args)
_eval_is_complex = lambda s: _torf(i.is_complex for i in s.args)
_eval_is_composite = lambda s: _torf(i.is_composite for i in s.args)
_eval_is_even = lambda s: _torf(i.is_even for i in s.args)
_eval_is_finite = lambda s: _torf(i.is_finite for i in s.args)
_eval_is_hermitian = lambda s: _torf(i.is_hermitian for i in s.args)
_eval_is_imaginary = lambda s: _torf(i.is_imaginary for i in s.args)
_eval_is_infinite = lambda s: _torf(i.is_infinite for i in s.args)
_eval_is_integer = lambda s: _torf(i.is_integer for i in s.args)
_eval_is_irrational = lambda s: _torf(i.is_irrational for i in s.args)
_eval_is_negative = lambda s: _torf(i.is_negative for i in s.args)
_eval_is_noninteger = lambda s: _torf(i.is_noninteger for i in s.args)
_eval_is_nonnegative = lambda s: _torf(i.is_nonnegative for i in s.args)
_eval_is_nonpositive = lambda s: _torf(i.is_nonpositive for i in s.args)
_eval_is_nonzero = lambda s: _torf(i.is_nonzero for i in s.args)
_eval_is_odd = lambda s: _torf(i.is_odd for i in s.args)
_eval_is_polar = lambda s: _torf(i.is_polar for i in s.args)
_eval_is_positive = lambda s: _torf(i.is_positive for i in s.args)
_eval_is_prime = lambda s: _torf(i.is_prime for i in s.args)
_eval_is_rational = lambda s: _torf(i.is_rational for i in s.args)
_eval_is_real = lambda s: _torf(i.is_real for i in s.args)
_eval_is_extended_real = lambda s: _torf(i.is_extended_real for i in s.args)
_eval_is_transcendental = lambda s: _torf(i.is_transcendental for i in s.args)
_eval_is_zero = lambda s: _torf(i.is_zero for i in s.args)
|
MinMaxBase
|
python
|
getsentry__sentry
|
src/sentry/integrations/bitbucket_server/integration.py
|
{
"start": 7963,
"end": 9315
}
|
class ____:
"""
Complete the OAuth dance by exchanging our request token
into an access token.
"""
@method_decorator(csrf_exempt)
def dispatch(self, request: HttpRequest, pipeline: IntegrationPipeline) -> HttpResponseBase:
with IntegrationPipelineViewEvent(
IntegrationPipelineViewType.OAUTH_CALLBACK,
IntegrationDomain.SOURCE_CODE_MANAGEMENT,
BitbucketServerIntegrationProvider.key,
).capture() as lifecycle:
config = pipeline.fetch_state("installation_data")
assert config is not None
client = BitbucketServerSetupClient(
config.get("url"),
config.get("consumer_key"),
config.get("private_key"),
config.get("verify_ssl"),
)
try:
access_token = client.get_access_token(
pipeline.fetch_state("request_token"), request.GET["oauth_token"]
)
pipeline.bind_state("access_token", access_token)
return pipeline.next_step()
except ApiError as error:
lifecycle.record_failure(str(error))
return pipeline.error(
f"Could not fetch an access token from Bitbucket. {str(error)}"
)
|
OAuthCallbackView
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/map/_layer.py
|
{
"start": 235,
"end": 24279
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.map"
_path_str = "layout.map.layer"
_valid_props = {
"below",
"circle",
"color",
"coordinates",
"fill",
"line",
"maxzoom",
"minzoom",
"name",
"opacity",
"source",
"sourceattribution",
"sourcelayer",
"sourcetype",
"symbol",
"templateitemname",
"type",
"visible",
}
@property
def below(self):
"""
Determines if the layer will be inserted before the layer with
the specified ID. If omitted or set to '', the layer will be
inserted above every existing layer.
The 'below' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["below"]
@below.setter
def below(self, val):
self["below"] = val
@property
def circle(self):
"""
The 'circle' property is an instance of Circle
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.map.layer.Circle`
- A dict of string/value properties that will be passed
to the Circle constructor
Returns
-------
plotly.graph_objs.layout.map.layer.Circle
"""
return self["circle"]
@circle.setter
def circle(self, val):
self["circle"] = val
@property
def color(self):
"""
Sets the primary layer color. If `type` is "circle", color
corresponds to the circle color (map.layer.paint.circle-color)
If `type` is "line", color corresponds to the line color
(map.layer.paint.line-color) If `type` is "fill", color
corresponds to the fill color (map.layer.paint.fill-color) If
`type` is "symbol", color corresponds to the icon color
(map.layer.paint.icon-color)
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coordinates(self):
"""
Sets the coordinates array contains [longitude, latitude] pairs
for the image corners listed in clockwise order: top left, top
right, bottom right, bottom left. Only has an effect for
"image" `sourcetype`.
The 'coordinates' property accepts values of any type
Returns
-------
Any
"""
return self["coordinates"]
@coordinates.setter
def coordinates(self, val):
self["coordinates"] = val
@property
def fill(self):
"""
The 'fill' property is an instance of Fill
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.map.layer.Fill`
- A dict of string/value properties that will be passed
to the Fill constructor
Returns
-------
plotly.graph_objs.layout.map.layer.Fill
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.map.layer.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.layout.map.layer.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def maxzoom(self):
"""
Sets the maximum zoom level (map.layer.maxzoom). At zoom levels
equal to or greater than the maxzoom, the layer will be hidden.
The 'maxzoom' property is a number and may be specified as:
- An int or float in the interval [0, 24]
Returns
-------
int|float
"""
return self["maxzoom"]
@maxzoom.setter
def maxzoom(self, val):
self["maxzoom"] = val
@property
def minzoom(self):
"""
Sets the minimum zoom level (map.layer.minzoom). At zoom levels
less than the minzoom, the layer will be hidden.
The 'minzoom' property is a number and may be specified as:
- An int or float in the interval [0, 24]
Returns
-------
int|float
"""
return self["minzoom"]
@minzoom.setter
def minzoom(self, val):
self["minzoom"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the layer. If `type` is "circle", opacity
corresponds to the circle opacity (map.layer.paint.circle-
opacity) If `type` is "line", opacity corresponds to the line
opacity (map.layer.paint.line-opacity) If `type` is "fill",
opacity corresponds to the fill opacity (map.layer.paint.fill-
opacity) If `type` is "symbol", opacity corresponds to the
icon/text opacity (map.layer.paint.text-opacity)
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def source(self):
"""
Sets the source data for this layer (map.layer.source). When
`sourcetype` is set to "geojson", `source` can be a URL to a
GeoJSON or a GeoJSON object. When `sourcetype` is set to
"vector" or "raster", `source` can be a URL or an array of tile
URLs. When `sourcetype` is set to "image", `source` can be a
URL to an image.
The 'source' property accepts values of any type
Returns
-------
Any
"""
return self["source"]
@source.setter
def source(self, val):
self["source"] = val
@property
def sourceattribution(self):
"""
Sets the attribution for this source.
The 'sourceattribution' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["sourceattribution"]
@sourceattribution.setter
def sourceattribution(self, val):
self["sourceattribution"] = val
@property
def sourcelayer(self):
"""
Specifies the layer to use from a vector tile source
(map.layer.source-layer). Required for "vector" source type
that supports multiple layers.
The 'sourcelayer' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["sourcelayer"]
@sourcelayer.setter
def sourcelayer(self, val):
self["sourcelayer"] = val
@property
def sourcetype(self):
"""
Sets the source type for this layer, that is the type of the
layer data.
The 'sourcetype' property is an enumeration that may be specified as:
- One of the following enumeration values:
['geojson', 'vector', 'raster', 'image']
Returns
-------
Any
"""
return self["sourcetype"]
@sourcetype.setter
def sourcetype(self, val):
self["sourcetype"] = val
@property
def symbol(self):
"""
The 'symbol' property is an instance of Symbol
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.map.layer.Symbol`
- A dict of string/value properties that will be passed
to the Symbol constructor
Returns
-------
plotly.graph_objs.layout.map.layer.Symbol
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def type(self):
"""
Sets the layer type, that is the how the layer data set in
`source` will be rendered With `sourcetype` set to "geojson",
the following values are allowed: "circle", "line", "fill" and
"symbol". but note that "line" and "fill" are not compatible
with Point GeoJSON geometries. With `sourcetype` set to
"vector", the following values are allowed: "circle", "line",
"fill" and "symbol". With `sourcetype` set to "raster" or
"image", only the "raster" value is allowed.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['circle', 'line', 'fill', 'symbol', 'raster']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def visible(self):
"""
Determines whether this layer is displayed
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def _prop_descriptions(self):
return """\
below
Determines if the layer will be inserted before the
layer with the specified ID. If omitted or set to '',
the layer will be inserted above every existing layer.
circle
:class:`plotly.graph_objects.layout.map.layer.Circle`
instance or dict with compatible properties
color
Sets the primary layer color. If `type` is "circle",
color corresponds to the circle color
(map.layer.paint.circle-color) If `type` is "line",
color corresponds to the line color
(map.layer.paint.line-color) If `type` is "fill", color
corresponds to the fill color (map.layer.paint.fill-
color) If `type` is "symbol", color corresponds to the
icon color (map.layer.paint.icon-color)
coordinates
Sets the coordinates array contains [longitude,
latitude] pairs for the image corners listed in
clockwise order: top left, top right, bottom right,
bottom left. Only has an effect for "image"
`sourcetype`.
fill
:class:`plotly.graph_objects.layout.map.layer.Fill`
instance or dict with compatible properties
line
:class:`plotly.graph_objects.layout.map.layer.Line`
instance or dict with compatible properties
maxzoom
Sets the maximum zoom level (map.layer.maxzoom). At
zoom levels equal to or greater than the maxzoom, the
layer will be hidden.
minzoom
Sets the minimum zoom level (map.layer.minzoom). At
zoom levels less than the minzoom, the layer will be
hidden.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the layer. If `type` is "circle",
opacity corresponds to the circle opacity
(map.layer.paint.circle-opacity) If `type` is "line",
opacity corresponds to the line opacity
(map.layer.paint.line-opacity) If `type` is "fill",
opacity corresponds to the fill opacity
(map.layer.paint.fill-opacity) If `type` is "symbol",
opacity corresponds to the icon/text opacity
(map.layer.paint.text-opacity)
source
Sets the source data for this layer (map.layer.source).
When `sourcetype` is set to "geojson", `source` can be
a URL to a GeoJSON or a GeoJSON object. When
`sourcetype` is set to "vector" or "raster", `source`
can be a URL or an array of tile URLs. When
`sourcetype` is set to "image", `source` can be a URL
to an image.
sourceattribution
Sets the attribution for this source.
sourcelayer
Specifies the layer to use from a vector tile source
(map.layer.source-layer). Required for "vector" source
type that supports multiple layers.
sourcetype
Sets the source type for this layer, that is the type
of the layer data.
symbol
:class:`plotly.graph_objects.layout.map.layer.Symbol`
instance or dict with compatible properties
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Sets the layer type, that is the how the layer data set
in `source` will be rendered With `sourcetype` set to
"geojson", the following values are allowed: "circle",
"line", "fill" and "symbol". but note that "line" and
"fill" are not compatible with Point GeoJSON
geometries. With `sourcetype` set to "vector", the
following values are allowed: "circle", "line", "fill"
and "symbol". With `sourcetype` set to "raster" or
"image", only the "raster" value is allowed.
visible
Determines whether this layer is displayed
"""
def __init__(
self,
arg=None,
below=None,
circle=None,
color=None,
coordinates=None,
fill=None,
line=None,
maxzoom=None,
minzoom=None,
name=None,
opacity=None,
source=None,
sourceattribution=None,
sourcelayer=None,
sourcetype=None,
symbol=None,
templateitemname=None,
type=None,
visible=None,
**kwargs,
):
"""
Construct a new Layer object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.map.Layer`
below
Determines if the layer will be inserted before the
layer with the specified ID. If omitted or set to '',
the layer will be inserted above every existing layer.
circle
:class:`plotly.graph_objects.layout.map.layer.Circle`
instance or dict with compatible properties
color
Sets the primary layer color. If `type` is "circle",
color corresponds to the circle color
(map.layer.paint.circle-color) If `type` is "line",
color corresponds to the line color
(map.layer.paint.line-color) If `type` is "fill", color
corresponds to the fill color (map.layer.paint.fill-
color) If `type` is "symbol", color corresponds to the
icon color (map.layer.paint.icon-color)
coordinates
Sets the coordinates array contains [longitude,
latitude] pairs for the image corners listed in
clockwise order: top left, top right, bottom right,
bottom left. Only has an effect for "image"
`sourcetype`.
fill
:class:`plotly.graph_objects.layout.map.layer.Fill`
instance or dict with compatible properties
line
:class:`plotly.graph_objects.layout.map.layer.Line`
instance or dict with compatible properties
maxzoom
Sets the maximum zoom level (map.layer.maxzoom). At
zoom levels equal to or greater than the maxzoom, the
layer will be hidden.
minzoom
Sets the minimum zoom level (map.layer.minzoom). At
zoom levels less than the minzoom, the layer will be
hidden.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the layer. If `type` is "circle",
opacity corresponds to the circle opacity
(map.layer.paint.circle-opacity) If `type` is "line",
opacity corresponds to the line opacity
(map.layer.paint.line-opacity) If `type` is "fill",
opacity corresponds to the fill opacity
(map.layer.paint.fill-opacity) If `type` is "symbol",
opacity corresponds to the icon/text opacity
(map.layer.paint.text-opacity)
source
Sets the source data for this layer (map.layer.source).
When `sourcetype` is set to "geojson", `source` can be
a URL to a GeoJSON or a GeoJSON object. When
`sourcetype` is set to "vector" or "raster", `source`
can be a URL or an array of tile URLs. When
`sourcetype` is set to "image", `source` can be a URL
to an image.
sourceattribution
Sets the attribution for this source.
sourcelayer
Specifies the layer to use from a vector tile source
(map.layer.source-layer). Required for "vector" source
type that supports multiple layers.
sourcetype
Sets the source type for this layer, that is the type
of the layer data.
symbol
:class:`plotly.graph_objects.layout.map.layer.Symbol`
instance or dict with compatible properties
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Sets the layer type, that is the how the layer data set
in `source` will be rendered With `sourcetype` set to
"geojson", the following values are allowed: "circle",
"line", "fill" and "symbol". but note that "line" and
"fill" are not compatible with Point GeoJSON
geometries. With `sourcetype` set to "vector", the
following values are allowed: "circle", "line", "fill"
and "symbol". With `sourcetype` set to "raster" or
"image", only the "raster" value is allowed.
visible
Determines whether this layer is displayed
Returns
-------
Layer
"""
super().__init__("layers")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.map.Layer
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.map.Layer`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("below", arg, below)
self._set_property("circle", arg, circle)
self._set_property("color", arg, color)
self._set_property("coordinates", arg, coordinates)
self._set_property("fill", arg, fill)
self._set_property("line", arg, line)
self._set_property("maxzoom", arg, maxzoom)
self._set_property("minzoom", arg, minzoom)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("source", arg, source)
self._set_property("sourceattribution", arg, sourceattribution)
self._set_property("sourcelayer", arg, sourcelayer)
self._set_property("sourcetype", arg, sourcetype)
self._set_property("symbol", arg, symbol)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("type", arg, type)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Layer
|
python
|
django__django
|
tests/admin_changelist/models.py
|
{
"start": 818,
"end": 1014
}
|
class ____(models.Model):
name = models.CharField(max_length=20)
file = models.FileField(upload_to="documents/", blank=True, null=True)
url = models.URLField(blank=True, null=True)
|
Genre
|
python
|
catalyst-team__catalyst
|
catalyst/data/sampler.py
|
{
"start": 3556,
"end": 8599
}
|
class ____(Sampler):
"""
    This kind of sampler can be used for both metric learning and classification tasks.
    BatchSampler with the following strategy for a dataset with C unique classes:
    - Selection of `num_classes` out of the C classes for each batch
    - Selection of `num_samples` instances for each class in the batch
    The epoch ends after `num_batches`.
    So, the batch size is `num_classes` * `num_samples`.
One of the purposes of this sampler is to be used for
forming triplets and pos/neg pairs inside the batch.
To guarante existance of these pairs in the batch,
`num_classes` and `num_samples` should be > 1. (1)
This type of sampling can be found in the classical paper of Person Re-Id,
where P (`num_classes`) equals 32 and K (`num_samples`) equals 4:
`In Defense of the Triplet Loss for Person Re-Identification`_.
Args:
        labels: list of class labels for each element in the dataset
num_classes: number of classes in a batch, should be > 1
num_samples: number of instances of each class in a batch, should be > 1
num_batches: number of batches in epoch
(default = len(labels) // (num_classes * num_samples))
.. _In Defense of the Triplet Loss for Person Re-Identification:
https://arxiv.org/abs/1703.07737
Python API examples:
.. code-block:: python
import os
from torch import nn, optim
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.data import ToTensor, BatchBalanceClassSampler
from catalyst.contrib.datasets import MNIST
train_data = MNIST(os.getcwd(), train=True, download=True)
train_labels = train_data.targets.cpu().numpy().tolist()
train_sampler = BatchBalanceClassSampler(
train_labels, num_classes=10, num_samples=4)
valid_data = MNIST(os.getcwd(), train=False)
loaders = {
"train": DataLoader(train_data, batch_sampler=train_sampler),
"valid": DataLoader(valid_data, batch_size=32),
}
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.02)
runner = dl.SupervisedRunner()
# model training
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
num_epochs=1,
logdir="./logs",
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
verbose=True,
)
"""
def __init__(
self,
labels: Union[List[int], np.ndarray],
num_classes: int,
num_samples: int,
num_batches: int = None,
):
"""Sampler initialisation."""
super().__init__(labels)
classes = set(labels)
assert isinstance(num_classes, int) and isinstance(num_samples, int)
assert (1 < num_classes <= len(classes)) and (1 < num_samples)
assert all(
n > 1 for n in Counter(labels).values()
), "Each class shoud contain at least 2 instances to fit (1)"
labels = np.array(labels)
self._labels = list(set(labels.tolist()))
self._num_classes = num_classes
self._num_samples = num_samples
self._batch_size = self._num_classes * self._num_samples
self._num_batches = num_batches or len(labels) // self._batch_size
self.lbl2idx = {
label: np.arange(len(labels))[labels == label].tolist()
for label in set(labels)
}
@property
def batch_size(self) -> int:
"""
Returns:
this value should be used in DataLoader as batch size
"""
return self._batch_size
@property
def batches_in_epoch(self) -> int:
"""
Returns:
number of batches in an epoch
"""
return self._num_batches
def __len__(self) -> int:
"""
Returns:
number of samples in an epoch
"""
return self._num_batches # * self._batch_size
def __iter__(self) -> Iterator[int]:
"""
Returns:
            indices for sampling dataset elements during an epoch
"""
indices = []
for _ in range(self._num_batches):
batch_indices = []
classes_for_batch = random.sample(self._labels, self._num_classes)
while self._num_classes != len(set(classes_for_batch)):
classes_for_batch = random.sample(self._labels, self._num_classes)
for cls_id in classes_for_batch:
replace_flag = self._num_samples > len(self.lbl2idx[cls_id])
batch_indices += np.random.choice(
self.lbl2idx[cls_id], self._num_samples, replace=replace_flag
).tolist()
indices.append(batch_indices)
return iter(indices)
|
BatchBalanceClassSampler
|
python
|
pypa__pipenv
|
pipenv/vendor/importlib_metadata/__init__.py
|
{
"start": 7357,
"end": 8967
}
|
class ____(tuple):
"""
An immutable collection of selectable EntryPoint objects.
"""
__slots__ = ()
def __getitem__(self, name: str) -> EntryPoint: # type: ignore[override] # Work with str instead of int
"""
Get the EntryPoint in self matching name.
"""
try:
return next(iter(self.select(name=name)))
except StopIteration:
raise KeyError(name)
def __repr__(self):
"""
Repr with classname and tuple constructor to
signal that we deviate from regular tuple behavior.
"""
return '%s(%r)' % (self.__class__.__name__, tuple(self))
def select(self, **params) -> EntryPoints:
"""
Select entry points from self that match the
given parameters (typically group and/or name).
"""
return EntryPoints(ep for ep in self if py39.ep_matches(ep, **params))
@property
def names(self) -> Set[str]:
"""
Return the set of all names of all entry points.
"""
return {ep.name for ep in self}
@property
def groups(self) -> Set[str]:
"""
Return the set of all groups of all entry points.
"""
return {ep.group for ep in self}
@classmethod
def _from_text_for(cls, text, dist):
return cls(ep._for(dist) for ep in cls._from_text(text))
@staticmethod
def _from_text(text):
return (
EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
for item in Sectioned.section_pairs(text or '')
)
|
EntryPoints
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/c/_ast.py
|
{
"start": 20008,
"end": 21031
}
|
class ____(ASTExpression):
def __init__(self, typ: ASTType, expr: ASTExpression) -> None:
self.typ = typ
self.expr = expr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTCastExpr):
return NotImplemented
return self.typ == other.typ and self.expr == other.expr
def __hash__(self) -> int:
return hash((self.typ, self.expr))
def _stringify(self, transform: StringifyTransform) -> str:
res = ['(']
res.extend((
transform(self.typ),
')',
transform(self.expr),
))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_punctuation('(', '(')
self.typ.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
self.expr.describe_signature(signode, mode, env, symbol)
|
ASTCastExpr
|
python
|
joke2k__faker
|
faker/providers/automotive/de_CH/__init__.py
|
{
"start": 48,
"end": 1151
}
|
class ____(AutomotiveProvider):
"""Implement automotive provider for ``de_CH`` locale.
Sources:
- https://de.wikipedia.org/wiki/Kontrollschild_(Schweiz)#Kantone
"""
__canton = (
("AG", "%## ###"),
("AR", "%# ###"),
("AI", "%# ###"),
("BL", "%## ###"),
("BS", "%## ###"),
("BE", "%## ###"),
("FR", "%## ###"),
("GE", "%## ###"),
("GL", "%# ###"),
("GR", "%## ###"),
("JU", "%# ###"),
("LU", "%## ###"),
("NE", "%## ###"),
("NW", "%# ###"),
("OW", "%# ###"),
("SH", "%# ###"),
("SZ", "%## ###"),
("SO", "%## ###"),
("SG", "%## ###"),
("TI", "%## ###"),
("TG", "%## ###"),
("UR", "%# ###"),
("VD", "%## ###"),
("VS", "%## ###"),
("ZG", "%## ###"),
("ZH", "%## ###"),
)
def license_plate(self) -> str:
"""Generate a license plate."""
plate: tuple = self.random_element(self.__canton)
return f"{plate[0]}-{self.numerify(plate[1])}".strip()
|
Provider
|