Columns and types (a minimal row-handling sketch follows the last example below):

| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
python
|
mlflow__mlflow
|
mlflow/store/tracking/dbmodels/models.py
|
{
"start": 61084,
"end": 63077
}
|
class ____(Base):
"""
DB model for storing scorer version information. These are recorded in
``scorer_versions`` table.
"""
__tablename__ = "scorer_versions"
scorer_id = Column(
String(36), ForeignKey("scorers.scorer_id", ondelete="CASCADE"), nullable=False
)
"""
Scorer ID: `String` (limit 36 characters). *Foreign Key* into ``scorers`` table.
"""
scorer_version = Column(Integer, nullable=False)
"""
Scorer version: `Integer`. Part of *Primary Key* for ``scorer_versions`` table.
"""
serialized_scorer = Column(Text, nullable=False)
"""
Serialized scorer data: `Text`. Contains the serialized scorer object.
"""
creation_time = Column(BigInteger(), default=get_current_time_millis)
"""
Creation time of scorer version: `BigInteger`. Automatically set to current time when created.
"""
# Relationship to the parent scorer
scorer = relationship("SqlScorer", backref=backref("scorer_versions", cascade="all"))
"""
SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlScorer`.
"""
__table_args__ = (
PrimaryKeyConstraint("scorer_id", "scorer_version", name="scorer_version_pk"),
Index(f"index_{__tablename__}_scorer_id", "scorer_id"),
)
def __repr__(self):
return f"<SqlScorerVersion ({self.scorer_id}, {self.scorer_version})>"
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
Returns:
mlflow.entities.ScorerVersion.
"""
from mlflow.entities.scorer import ScorerVersion
return ScorerVersion(
experiment_id=str(self.scorer.experiment_id),
scorer_name=self.scorer.scorer_name,
scorer_version=self.scorer_version,
serialized_scorer=self.serialized_scorer,
creation_time=self.creation_time,
scorer_id=self.scorer_id,
)
|
SqlScorerVersion
|
python
|
python-poetry__poetry
|
tests/repositories/fixtures/distribution_hashes.py
|
{
"start": 277,
"end": 14366
}
|
class ____:
sha256: str = ""
md5: str = ""
KNOWN_DISTRIBUTION_HASHES = {
"SQLAlchemy-1.2.12.tar.gz": DistributionHash(
"b5a127599b3f27847fba6119de0fcb70832a8041b103701a708b7c7d044faa38",
"4a2617b5254748828d09349fc4eff6bd",
),
"Twisted-18.9.0.tar.bz2": DistributionHash(
"4335327da58be11dd6e482ec6b85eb055bcc953a9570cd59e7840a2ce9419a8e",
"35ff4705ea90a76bf972ff3b229546ca",
),
"attrs-17.4.0-py2.py3-none-any.whl": DistributionHash(
"1fbfc10ebc8c876dcbab17f016b80ae1a4f0c1413461a695871427960795beb4",
"7fe37931797b16c7fa158017457a9ea9",
),
"attrs-17.4.0.tar.gz": DistributionHash(
"eb7536a1e6928190b3008c5b350bdf9850d619fff212341cd096f87a27a5e564",
"c03e5b3608d9071fbd098850d8922668",
),
"black-19.10b0-py36-none-any.whl": DistributionHash(
"13001c5b7dbc81137164b43137320a1785e95ce84e4db849279786877ac6d7f6",
"acc537b0f3f7ebf575616490d7cc14f4",
),
"black-19.10b0.tar.gz": DistributionHash(
"6cada614d5d2132698c6d5fff384657273d922c4fffa6a2f0de9e03e25b8913a",
"c383543109a66a5a99113e6326db5251",
),
"black-21.11b0-py3-none-any.whl": DistributionHash(
"38f6ad54069912caf2fa2d4f25d0c5dedef4b2338a0cb545dbe2fdf54a6a8891",
"92942a9efabf8e321a11360667ad2494",
),
"black-21.11b0.tar.gz": DistributionHash(
"f23c482185d842e2f19d506e55c004061167e3c677c063ecd721042c62086ada",
"f01267bf2613f825dd6684629c1c829e",
),
"cleo-1.0.0a5-py3-none-any.whl": DistributionHash(
"d0cfea878b77be28be027033e6af419b705abe47278067a7c3a298f39cf825c5",
"19ed7de77063e8f16bc459276ccbe197",
),
"cleo-1.0.0a5.tar.gz": DistributionHash(
"88f0a4275a17f2ab4d013786b8b9522d4c60bd37d8fc9b3def0fb27f4ac1e694",
"92e181952976e09b9d1c583da6c3e2fc",
),
"clikit-0.2.4-py2.py3-none-any.whl": DistributionHash(
"27316bf6382b04be8fb2f60c85d538fd2b2b03f0f1eba5c88f7d7eddbefc2778",
"93a51e8bf259c29692e51a7cbca6d664",
),
"clikit-0.2.4.tar.gz": DistributionHash(
"0fdd41e86e8b118a8b1e94ef2835925ada541d481c9b3b2fc635fa68713e6125",
"f7cdbad3508038a04561f646aae68146",
),
"colorama-0.3.9-py2.py3-none-any.whl": DistributionHash(
"78a441d2e984c790526cdef1cfd8415a366979ef5b3186771a055b35886953bf",
"8021c861015b5f590be41190bc3f8eed",
),
"colorama-0.3.9.tar.gz": DistributionHash(
"4c5a15209723ce1330a5c193465fe221098f761e9640d823a2ce7c03f983137f",
"8323a5b84fdf7ad810804e51fc256b39",
),
"demo-0.1.0-py2.py3-none-any.whl": DistributionHash(
"70e704135718fffbcbf61ed1fc45933cfd86951a744b681000eaaa75da31f17a",
"15507846fd4299596661d0197bfb4f90",
),
"demo-0.1.0.tar.gz": DistributionHash(
"9fa123ad707a5c6c944743bf3e11a0e80d86cb518d3cf25320866ca3ef43e2ad",
"d1912c917363a64e127318655f7d1fe7",
),
"demo-0.1.2-py2.py3-none-any.whl": DistributionHash(
"55dde4e6828081de7a1e429f33180459c333d9da593db62a3d75a8f5e505dde1",
"53b4e10d2bfa81a4206221c4b87843d9",
),
"demo_invalid_record-0.1.0-py2.py3-none-any.whl": DistributionHash(
"d1e5a3f18f24a2ad3717c6f9c55f8c26060f39b2cddf28b18c355786728cb4dd",
"18041168d415370d5019ec7e2b1ed0b5",
),
"demo_invalid_record2-0.1.0-py2.py3-none-any.whl": DistributionHash(
"e730fca385b52e77fc58d73812f0dc236fad489ef6026716d1a4317ab4738c3c",
"a21ee67e833f50e9f0ecdfe1c0484b93",
),
"demo_metadata_version_23-0.1.0-py2.py3-none-any.whl": DistributionHash(
"7592aa158137726d9579e5d4347bd03a88f9fc82e11061303215feaaf000d32c",
"434114a36f986671d132033e130f26b7",
),
"demo_metadata_version_24-0.1.0-py2.py3-none-any.whl": DistributionHash(
"f0d306c48d665e4a0051c660cc39f5ed7b7d51427050bfbca525e95d9fad2587",
"c0cbc2e5f2736a487ff960a8c39defbe",
),
"demo_metadata_version_299-0.1.0-py2.py3-none-any.whl": DistributionHash(
"9678f9e59454a281bf7780661d719122766111dc9432ad20823ce6569d10edb2",
"2eb53ee23408e65de909e20d9575afe3",
),
"demo_metadata_version_unknown-0.1.0-py2.py3-none-any.whl": DistributionHash(
"d716cd66546468ec3d4d40f4a4ecc813e3e4c661e155ecbc3a932f47d46d6e05",
"749f823ff755a2f46bfb5ab25fdf9810",
),
"demo_missing_dist_info-0.1.0-py2.py3-none-any.whl": DistributionHash(
"cf8eaade81dd1db42f60c0e9c8610c1c12006baa9f7ad994b1c2bae92ea4b426",
"da33c6088e72fbaaf873999606767353",
),
"demo_no_pkg_info-0.1.0.tar.gz": DistributionHash(
"f1e2a977c506dfb6b43495e2ffeee618b90029bac92fcb3038a53268197afa0c",
"eeaf257d6b2c3b01def567751b21c1e8",
),
"discord.py-2.0.0-py3-none-any.whl": DistributionHash(
"25b9739ba456622655203a0925b354c0ba96ac6c740562e7c37791c2f6b594fb",
"65394fc868632423cedb6be7259db970",
),
"discord.py-2.0.0.tar.gz": DistributionHash(
"b86fa9dd562684f7a52564e6dfe0216f6c172a009c0d86b8dea8bdd6ffa6b1f4",
"6c0505a6032342b29f31f9979f37d277",
),
"futures-3.2.0-py2-none-any.whl": DistributionHash(
"41353b36198757a766cfc82dc9b60e88ecb28e543dd92473b2cc74fc7bf205af",
"f81c5c27f3ba2efc008cc96363a81c5e",
),
"futures-3.2.0.tar.gz": DistributionHash(
"baf0d469c9e541b747986b7404cd63a5496955bd0c43a3cc068c449b09b7d4a4",
"40eb168dab84e606df3fdb7e67fe27b7",
),
"hbmqtt-0.9.6.tar.gz": DistributionHash(
"379f1d9044997c69308ac2e01621c817b5394e1fbe0696e62538ae2dd0aa7e07",
"b284e3118882f169aa618a856cd91c5f",
),
"ipython-5.7.0-py2-none-any.whl": DistributionHash(
"4608e3e0500fe8142659d149891400fc0b9fa250051814b569457ae4688943dc",
"20da5e0b1f79dccb37f033a885d798d7",
),
"ipython-5.7.0-py3-none-any.whl": DistributionHash(
"4292c026552a77b2edc0543941516eddd6fe1a4b681a76ac40b3f585d2fca76f",
"2844fa01618fe27ab99ad455d605b47d",
),
"ipython-5.7.0.tar.gz": DistributionHash(
"4e7fb265e0264498bd0d62c6261936a658bf3d38beb8a7b10cd2c6327c62ac2a",
"01f2808ebe78ff2f28dc39be3aa635ca",
),
"ipython-7.5.0-py3-none-any.whl": DistributionHash(
"1b4c76bf1e8dd9067a4f5ab4695d4c5ad81c30d7d06f7592f4c069c389e37f37",
"f40ea889fb7adf989760c5e7a38bd112",
),
"ipython-7.5.0.tar.gz": DistributionHash(
"cd2a17ac273fea8bf8953118a2d83bad94f592f0db3e83fff9129a1842e36dbe",
"0e8c1d7c14f309f6cd2dfd4e48e75cb1",
),
"isodate-0.7.0-py3-none-any.whl": DistributionHash(
"04505f97eb100b66dff1239859e6e04ab913714c453d6ab9591adbf418285847",
"1af9e3ee3f5669186356afd2dbe7ce81",
),
"isodate-0.7.0.tar.gz": DistributionHash(
"167c3615c0bd2e498c9bae7a1aba5863a17e52299aafd89f17a3a091187dca74",
"5668b7b7120797f03330363000afc35a",
),
"isort-4.3.4-py2-none-any.whl": DistributionHash(
"383c39c10b5db83e8d150ac5b84d74bda96e3a1b06a30257f022dcbcd21f54b9",
"42bccda292eca3c91eadf3eb781a224f",
),
"isort-4.3.4-py3-none-any.whl": DistributionHash(
"5668dce9fb48544c57ed626982e190c8ea99e3a612850453e9c3b193b9fa2edc",
"6c3b582d7782633ec23917b00a97a2fe",
),
"isort-4.3.4.tar.gz": DistributionHash(
"234ad07e1e2780c27fa56364eefa734bee991b0d744337ef7e7ce3d5b1b59f39",
"9244631852cf8bd8559f7ab78bf4ec78",
),
"jupyter-1.0.0-py2.py3-none-any.whl": DistributionHash(
"1de1f2be45629dd6f7f9558e2385ddf6901849699ef1044c52d171a9b520a420",
"27f411f164e0878104d76d868127f76f",
),
"jupyter-1.0.0.tar.gz": DistributionHash(
"3ef1e86ba0556ea5922b846416a41acfd2625830d996c7d06d80c90bed1dc193",
"78acaec88533ea6b6e761e7d086a1d04",
),
"jupyter-1.0.0.zip": DistributionHash(
"4a855b9717c3ea24fd8ca4fd91ab5995894aecc4d20e7f39c28786a2c1869fae",
"7b7a957694a73ac0c19fe46c216c0ea0",
),
"more-itertools-4.1.0.tar.gz": DistributionHash(
"bab2dc6f4be8f9a4a72177842c5283e2dff57c167439a03e3d8d901e854f0f2e",
"bf351a1050242ce3af7e475a4da1a26b",
),
"more_itertools-4.1.0-py2-none-any.whl": DistributionHash(
"0f461c2cd4ec16611396f9ee57f40433de3d59e95475d84c0c829cde02f746cd",
"703e1e0922de1f11823da60af1488b7a",
),
"more_itertools-4.1.0-py3-none-any.whl": DistributionHash(
"580b6002d1f28feb5bcb8303278d59cf17dfbd19a63a5c2375112dae72c9bf98",
"ae17a45d13e9dc319794c40fa739c38f",
),
"pastel-0.1.0-py3-none-any.whl": DistributionHash(
"754d192c088e256d52a3f825c3b9e14252d5adc70f53656453f6431e50a70b99",
"cf7c53ab0a5d7e7c721425b24b486124",
),
"pastel-0.1.0.tar.gz": DistributionHash(
"22f14474c4120b37c54ac2173b49b0ac1de9283ca714be6eb3ea8b39296285a9",
"43ea5f07660f630da18ae1827f5b4333",
),
"pluggy-0.6.0-py2-none-any.whl": DistributionHash(
"f5f767d398f18aa177976bf9c4d0c05d96487a7d8f07062251585803aaf56246",
"095eed084713c9b2a9a01520485e20fb",
),
"pluggy-0.6.0-py3-none-any.whl": DistributionHash(
"d34798b80853ab688de1a3ca5b99ba4de91c459c19c76a555dc939979ae67eb0",
"2b6dc266f54023dfb26726686ee6b227",
),
"pluggy-0.6.0.tar.gz": DistributionHash(
"a982e208d054867661d27c6d2a86b17ba05fbb6b1bdc01f42660732dd107f865",
"ef8a88abcd501afd47cb22245fe4315a",
),
"poetry_core-1.5.0-py3-none-any.whl": DistributionHash(
"e216b70f013c47b82a72540d34347632c5bfe59fd54f5fe5d51f6a68b19aaf84",
"be7589b4902793e66d7d979bd8581591",
),
"poetry_core-1.5.0.tar.gz": DistributionHash(
"0ae8d28caf5c12ec1714b16d2e7157ddd52397ea6bfdeba5a9432e449a0184da",
"3f9b36a7a94cd235bfd5f05794828445",
),
"poetry_core-2.0.1-py3-none-any.whl": DistributionHash(
"a3c7009536522cda4eb0fb3805c9dc935b5537f8727dd01efb9c15e51a17552b",
"a52cf4beef0de009e0a9a36c9e6962f5",
),
"poetry_core-2.0.1.tar.gz": DistributionHash(
"d2acdaec3b93dc1ab43adaeb0e9a8a6a6b3701c4535b5baab4b718ab12c8993c",
"1b1bb959cd760ac509de9b38ae67fc3b",
),
"py-1.5.3-py2.py3-none-any.whl": DistributionHash(
"ef4a94f47156178e42ef8f2b131db420e0f4b6aa0b3936b6dbde6ad6487476a5",
"b316b380701661cb67732ecdaef30eeb",
),
"py-1.5.3.tar.gz": DistributionHash(
"2df2c513c3af11de15f58189ba5539ddc4768c6f33816dc5c03950c8bd6180fa",
"623e80cfc06df930414a9ce4bf0fd6c9",
),
"pytest-3.5.0-py2.py3-none-any.whl": DistributionHash(
"427b4582bda18e92ad1967e8b1e071e2c53e6cb7e3e5f090cb3ca443455be23f",
"4a8651dec151e76f283bf59e333286f9",
),
"pytest-3.5.0.tar.gz": DistributionHash(
"677b1d6decd29c041fe64276f29f79fbe66e40c59e445eb251366b4a8ab8bf68",
"ccd78dac54112045f561c4df86631f19",
),
"pytest-3.5.1-py2.py3-none-any.whl": DistributionHash(
"d327df3686046c5b374a9776d9e11606f7dba6fb3db5cf5d60ebc78a31e0768e",
"1e81fba94885bef80170545d045924eb",
),
"pytest-3.5.1.tar.gz": DistributionHash(
"b8fe151f3e181801dd38583a1c03818fbc662a8fce96c9063a0af624613e78f8",
"961104636090457187851ccb9ef0f677",
),
"python-language-server-0.21.2.tar.gz": DistributionHash(
"91b564e092f3135b2bac70dbd23d283da5ad50269766a76648787b69fe702c7e",
"677602ec38bc1c7b72de6128d90d846b",
),
"requests-2.18.4-py2.py3-none-any.whl": DistributionHash(
"098be851f30be5bcb2c7537798d44314f576e53818ba9def25141ae4dce8b25d",
"e770e65750c42f40b97b0ed738d0f859",
),
"requests-2.18.4.tar.gz": DistributionHash(
"ec62f7e0e9d4814656b0172dbd592fea06127c6556ff5651eb5d2c8768671fd4",
"942a6a383dc94da90cf58f5adcf028a4",
),
"setuptools-67.6.1-py3-none-any.whl": DistributionHash(
"e728ca814a823bf7bf60162daf9db95b93d532948c4c0bea762ce62f60189078",
"3b5b846e000da033d54eeaaf7915126e",
),
"setuptools-67.6.1.tar.gz": DistributionHash(
"a737d365c957dd3fced9ddd246118e95dce7a62c3dc49f37e7fdd9e93475d785",
"ee2562f783544d1f95022c906dd3cf98",
),
"six-1.11.0-py2.py3-none-any.whl": DistributionHash(
"534e9875e44a507adec601c29b3cbd2ca6dae7df92bf3dd20c7289b2f99f7466",
"9500094701f7201ddd065c60abcefef1",
),
"six-1.11.0.tar.gz": DistributionHash(
"268a4ccb159c1a2d2c79336b02e75058387b0cdbb4cea2f07846a758f48a356d",
"25d3568604f921dd23532b88a0ce17e7",
),
"tomlkit-0.5.2-py2.py3-none-any.whl": DistributionHash(
"dea8ff39e9e2170f1b2f465520482eec71e7909cfff53dcb076b585d50f8ccc8",
"4045c5f6848fbc93c38df2296a441f07",
),
"tomlkit-0.5.2.tar.gz": DistributionHash(
"4a226ccf11ee5a2e76bfc185747b54ee7718706aeb3aabb981327249dbe2b1d4",
"7c31987ef6fba2cd64715cae27fade64",
),
"tomlkit-0.5.3-py2.py3-none-any.whl": DistributionHash(
"35f06da5835e85f149a4701d43e730adcc09f1b362e5fc2300d77bdd26280908",
"3a90c70a5067d5727110838094ab8674",
),
"tomlkit-0.5.3.tar.gz": DistributionHash(
"e2f785651609492c771d9887ccb2369d891d16595d2d97972e2cbe5e8fb3439f",
"cdbdc302a184d1f1e38d5e0810e3b212",
),
"wheel-0.40.0-py3-none-any.whl": DistributionHash(
"d236b20e7cb522daf2390fa84c55eea81c5c30190f90f29ae2ca1ad8355bf247",
"517d39f133bd7b1ff17caf09784b7543",
),
"wheel-0.40.0.tar.gz": DistributionHash(
"5cb7e75751aa82e1b7db3fd52f5a9d59e7b06905630bed135793295931528740",
"5f175a8d693f74878964d4fd29729ab7",
),
"zipp-3.5.0-py3-none-any.whl": DistributionHash(
"ec508cd5a3ed3d126293cafb34611469f2aef7342f575c3b6e072b995dc9da1f",
"da62cbd850ba32ba93817aab0f03a855",
),
"zipp-3.5.0.tar.gz": DistributionHash(
"239d50954a15aa4b283023f18dc451ba811fb4d263f4dd6855642e4d1c80cc9f",
"16bf2a24fae340052e8565c264d21092",
),
}
@pytest.fixture
def dist_hash_getter() -> DistributionHashGetter:
def get_hash(name: str) -> DistributionHash:
return KNOWN_DISTRIBUTION_HASHES.get(name, DistributionHash())
return get_hash
|
DistributionHash
|
python
|
numba__numba
|
numba/tests/test_mixed_tuple_unroller.py
|
{
"start": 48347,
"end": 57418
}
|
class ____(TestCase):
def test_invalid_use_of_unroller(self):
@njit
def foo():
x = (10, 20)
r = 0
for a in literal_unroll(x, x):
r += a
return r
with self.assertRaises(errors.UnsupportedError) as raises:
foo()
self.assertIn(
"literal_unroll takes one argument, found 2",
str(raises.exception),
)
def test_non_constant_list(self):
@njit
def foo(y):
x = [10, y]
r = 0
for a in literal_unroll(x):
r += a
return r
with self.assertRaises(errors.UnsupportedError) as raises:
foo(10)
self.assertIn(
("Found non-constant value at position 1 in a list argument to "
"literal_unroll"),
str(raises.exception)
)
@unittest.skip("numba.literally not supported yet")
def test_literally_constant_list(self):
# FAIL. May need to consider it in a future PR
from numba import literally
@njit
def foo(y):
x = [10, literally(y)]
r = 0
for a in literal_unroll(x):
r += a
return r
# Found non-constant value at position 1 in a list argument to
# literal_unroll
foo(12)
@njit
def bar():
return foo(12)
# Found non-constant value at position 1 in a list argument to
# literal_unroll
bar()
@unittest.skip("inlining of foo doesn't have const prop so y isn't const")
def test_inlined_unroll_list(self):
@njit(inline='always')
def foo(y):
x = [10, y]
r = 0
for a in literal_unroll(x):
r += a
return r
@njit
def bar():
return foo(12)
self.assertEqual(bar(), 10 + 12)
def test_unroll_tuple_arg(self):
@njit
def foo(y):
x = (10, y)
r = 0
for a in literal_unroll(x):
r += a
return r
self.assertEqual(foo(12), foo.py_func(12))
self.assertEqual(foo(1.2), foo.py_func(1.2))
def test_unroll_tuple_arg2(self):
@njit
def foo(x):
r = 0
for a in literal_unroll(x):
r += a
return r
self.assertEqual(foo((12, 1.2)), foo.py_func((12, 1.2)))
self.assertEqual(foo((12, 1.2)), foo.py_func((12, 1.2)))
def test_unroll_tuple_alias(self):
@njit
def foo():
x = (10, 1.2)
out = 0
for i in literal_unroll(x):
j = i
k = j
out += j + k + i
return out
self.assertEqual(foo(), foo.py_func())
def test_unroll_tuple_nested(self):
@njit
def foo():
x = ((10, 1.2), (1j, 3.))
out = 0
for i in literal_unroll(x):
for j in (i):
out += j
return out
with self.assertRaises(errors.TypingError) as raises:
foo()
self.assertIn("getiter", str(raises.exception))
re = r".*Tuple\(int[0-9][0-9], float64\).*"
self.assertRegex(str(raises.exception), re)
def test_unroll_tuple_of_dict(self):
@njit
def foo():
x = {}
x["a"] = 1
x["b"] = 2
y = {}
y[3] = "c"
y[4] = "d"
for it in literal_unroll((x, y)):
for k, v in it.items():
print(k, v)
with captured_stdout() as stdout:
foo()
lines = stdout.getvalue().splitlines()
self.assertEqual(
lines,
['a 1', 'b 2', '3 c', '4 d'],
)
def test_unroll_named_tuple(self):
ABC = namedtuple('ABC', ['a', 'b', 'c'])
@njit
def foo():
abc = ABC(1, 2j, 3.4)
out = 0
for i in literal_unroll(abc):
out += i
return out
self.assertEqual(foo(), foo.py_func())
def test_unroll_named_tuple_arg(self):
ABC = namedtuple('ABC', ['a', 'b', 'c'])
@njit
def foo(x):
out = 0
for i in literal_unroll(x):
out += i
return out
abc = ABC(1, 2j, 3.4)
self.assertEqual(foo(abc), foo.py_func(abc))
def test_unroll_named_unituple(self):
ABC = namedtuple('ABC', ['a', 'b', 'c'])
@njit
def foo():
abc = ABC(1, 2, 3)
out = 0
for i in literal_unroll(abc):
out += i
return out
self.assertEqual(foo(), foo.py_func())
def test_unroll_named_unituple_arg(self):
ABC = namedtuple('ABC', ['a', 'b', 'c'])
@njit
def foo(x):
out = 0
for i in literal_unroll(x):
out += i
return out
abc = ABC(1, 2, 3)
self.assertEqual(foo(abc), foo.py_func(abc))
def test_unroll_global_tuple(self):
@njit
def foo():
out = 0
for i in literal_unroll(_X_GLOBAL):
out += i
return out
self.assertEqual(foo(), foo.py_func())
def test_unroll_freevar_tuple(self):
x = (10, 11)
@njit
def foo():
out = 0
for i in literal_unroll(x):
out += i
return out
self.assertEqual(foo(), foo.py_func())
def test_unroll_function_tuple(self):
@njit
def a():
return 1
@njit
def b():
return 2
x = (a, b)
@njit
def foo():
out = 0
for f in literal_unroll(x):
out += f()
return out
self.assertEqual(foo(), foo.py_func())
def test_unroll_indexing_list(self):
# See issue #5477
@njit
def foo(cont):
i = 0
acc = 0
normal_list = [a for a in cont]
heter_tuple = ('a', 25, 0.23, None)
for item in literal_unroll(heter_tuple):
acc += normal_list[i]
i += 1
print(item)
return i, acc
data = [j for j in range(4)]
# send stdout to nowhere, just check return values
with captured_stdout():
self.assertEqual(foo(data), foo.py_func(data))
# now capture stdout for jit function and check
with captured_stdout() as stdout:
foo(data)
lines = stdout.getvalue().splitlines()
self.assertEqual(
lines,
['a', '25', '0.23', 'None'],
)
def test_unroller_as_freevar(self):
mixed = (np.ones((1,)), np.ones((1, 1)), np.ones((1, 1, 1)))
from numba import literal_unroll as freevar_unroll
@njit
def foo():
out = 0
for i in freevar_unroll(mixed):
out += i.ndim
return out
self.assertEqual(foo(), foo.py_func())
def test_unroll_with_non_conformant_loops_present(self):
# See issue #8311
@njit('(Tuple((int64, float64)),)')
def foo(tup):
for t in literal_unroll(tup):
pass
x = 1
while x == 1:
x = 0
def test_literal_unroll_legalize_var_names01(self):
# See issue #8939
test = np.array([(1, 2), (2, 3)], dtype=[("a1", "f8"), ("a2", "f8")])
fields = tuple(test.dtype.fields.keys())
@njit
def foo(arr):
res = 0
for k in literal_unroll(fields):
res = res + np.abs(arr[k]).sum()
return res
self.assertEqual(foo(test), 8.0)
def test_literal_unroll_legalize_var_names02(self):
# See issue #8939
test = np.array([(1, 2), (2, 3)],
dtype=[("a1[0]", "f8"), ("a2[1]", "f8")])
fields = tuple(test.dtype.fields.keys())
@njit
def foo(arr):
res = 0
for k in literal_unroll(fields):
res = res + np.abs(arr[k]).sum()
return res
self.assertEqual(foo(test), 8.0)
def capture(real_pass):
""" Returns a compiler pass that captures the mutation state reported
by the pass used in the argument"""
@register_pass(mutates_CFG=False, analysis_only=True)
class ResultCapturer(AnalysisPass):
_name = "capture_%s" % real_pass._name
_real_pass = real_pass
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
result = real_pass().run_pass(state)
mutation_results = state.metadata.setdefault('mutation_results', {})
mutation_results[real_pass] = result
return result
return ResultCapturer
|
TestMore
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/dagster_run.py
|
{
"start": 24408,
"end": 24545
}
|
class ____(NamedTuple):
tag_key: str
tag_values: list[str]
bucket_limit: Optional[int]
@public
@record(kw_only=False)
|
TagBucket
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1585734,
"end": 1585904
}
|
class ____(sgqlc.types.Union):
"""Types that can represent a repository ruleset bypass actor."""
__schema__ = github_schema
__types__ = (App, Team)
|
BypassActor
|
python
|
jazzband__django-waffle
|
waffle/migrations/0001_initial.py
|
{
"start": 105,
"end": 4472
}
|
class ____(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Flag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='The human/computer readable name.', unique=True, max_length=100)),
('everyone', models.NullBooleanField(help_text='Flip this flag on (Yes) or off (No) for everyone, overriding all other settings. Leave as Unknown to use normally.')),
('percent', models.DecimalField(help_text='A number between 0.0 and 99.9 to indicate a percentage of users for whom this flag will be active.', null=True, max_digits=3, decimal_places=1, blank=True)),
('testing', models.BooleanField(default=False, help_text='Allow this flag to be set for a session for user testing.')),
('superusers', models.BooleanField(default=True, help_text='Flag always active for superusers?')),
('staff', models.BooleanField(default=False, help_text='Flag always active for staff?')),
('authenticated', models.BooleanField(default=False, help_text='Flag always active for authenticate users?')),
('languages', models.TextField(default='', help_text='Activate this flag for users with one of these languages (comma separated list)', blank=True)),
('rollout', models.BooleanField(default=False, help_text='Activate roll-out mode?')),
('note', models.TextField(help_text='Note where this Flag is used.', blank=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, help_text='Date when this Flag was created.', db_index=True)),
('modified', models.DateTimeField(default=django.utils.timezone.now, help_text='Date when this Flag was last modified.')),
('groups', models.ManyToManyField(help_text='Activate this flag for these user groups.', to='auth.Group', blank=True)),
('users', models.ManyToManyField(help_text='Activate this flag for these users.', to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Sample',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='The human/computer readable name.', unique=True, max_length=100)),
('percent', models.DecimalField(help_text='A number between 0.0 and 100.0 to indicate a percentage of the time this sample will be active.', max_digits=4, decimal_places=1)),
('note', models.TextField(help_text='Note where this Sample is used.', blank=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, help_text='Date when this Sample was created.', db_index=True)),
('modified', models.DateTimeField(default=django.utils.timezone.now, help_text='Date when this Sample was last modified.')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Switch',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='The human/computer readable name.', unique=True, max_length=100)),
('active', models.BooleanField(default=False, help_text='Is this flag active?')),
('note', models.TextField(help_text='Note where this Switch is used.', blank=True)),
('created', models.DateTimeField(default=django.utils.timezone.now, help_text='Date when this Switch was created.', db_index=True)),
('modified', models.DateTimeField(default=django.utils.timezone.now, help_text='Date when this Switch was last modified.')),
],
options={
'verbose_name_plural': 'Switches',
},
bases=(models.Model,),
),
]
|
Migration
|
python
|
h5py__h5py
|
h5py/tests/test_datatype.py
|
{
"start": 470,
"end": 1091
}
|
class ____(TestCase):
"""
Feature: repr() works sensibly on datatype objects
"""
def test_repr(self):
""" repr() on datatype objects """
name = make_name()
self.f[name] = np.dtype('S10')
dt = self.f[name]
self.assertIsInstance(repr(dt), str)
if is_main_thread():
self.f.close()
self.assertIsInstance(repr(dt), str)
def test_appropriate_low_level_id(self):
" Binding a group to a non-TypeID identifier fails with ValueError "
with self.assertRaises(ValueError):
Datatype(self.f['/'].id)
|
TestCreation
|
python
|
getsentry__sentry
|
src/sentry/monitors/types.py
|
{
"start": 905,
"end": 1086
}
|
class ____(TypedDict):
"""
See `CheckinItem` for definition
"""
ts: str
partition: int
message: CheckIn
payload: CheckinPayload
@dataclass
|
CheckinItemData
|
python
|
django__django
|
tests/validation/models.py
|
{
"start": 5677,
"end": 6107
}
|
class ____(models.Model):
name = models.CharField(max_length=255)
color = models.CharField(max_length=32)
rank = models.IntegerField()
class Meta:
constraints = [
models.UniqueConstraint(
fields=["name", "color"], name="name_color_uniq_validation"
),
models.UniqueConstraint(fields=["rank"], name="rank_uniq_validation"),
]
|
UniqueConstraintProduct
|
python
|
huggingface__transformers
|
tests/models/superpoint/test_modeling_superpoint.py
|
{
"start": 4095,
"end": 9483
}
|
class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (SuperPointForKeypointDetection,) if is_torch_available() else ()
test_resize_embeddings = False
has_attentions = False
from_pretrained_id = "magic-leap-community/superpoint"
def setUp(self):
self.model_tester = SuperPointModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=SuperPointConfig,
has_text_modality=False,
hidden_size=37,
common_properties=["encoder_hidden_sizes", "decoder_hidden_size"],
)
def test_config(self):
self.config_tester.run_common_tests()
@is_flaky(description="The `indices` computed with `topk()` in `top_k_keypoints` is not stable.")
def test_batching_equivalence(self):
super().test_batching_equivalence()
@unittest.skip(reason="SuperPointForKeypointDetection does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="SuperPointForKeypointDetection does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="SuperPointForKeypointDetection does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
@unittest.skip(reason="SuperPointForKeypointDetection does not support training")
def test_training(self):
pass
@unittest.skip(reason="SuperPointForKeypointDetection does not support training")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="SuperPointForKeypointDetection does not support training")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="SuperPointForKeypointDetection does not support training")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="SuperPoint does not output any loss term in the forward pass")
def test_retain_grad_hidden_states_attentions(self):
pass
def test_keypoint_detection(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_keypoint_detection(*config_and_inputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
# SuperPoint's feature maps are of shape (batch_size, num_channels, width, height)
for i, conv_layer_size in enumerate(self.model_tester.encoder_hidden_sizes[:-1]):
self.assertListEqual(
list(hidden_states[i].shape[-3:]),
[
conv_layer_size,
self.model_tester.image_height // (2 ** (i + 1)),
self.model_tester.image_width // (2 ** (i + 1)),
],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@slow
def test_model_from_pretrained(self):
model = SuperPointForKeypointDetection.from_pretrained(self.from_pretrained_id)
self.assertIsNotNone(model)
def test_forward_labels_should_be_none(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
model_inputs = self._prepare_for_class(inputs_dict, model_class)
# Provide an arbitrary sized Tensor as labels to model inputs
model_inputs["labels"] = torch.rand((128, 128))
with self.assertRaises(ValueError) as cm:
model(**model_inputs)
self.assertEqual(ValueError, cm.exception.__class__)
def prepare_imgs():
image1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000004016.png")
return [image1, image2]
@require_torch
@require_vision
|
SuperPointModelTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/encode-and-decode-tinyurl.py
|
{
"start": 45,
"end": 1044
}
|
class ____(object):
def __init__(self):
self.__random_length = 6
self.__tiny_url = "http://tinyurl.com/"
self.__alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.__lookup = {}
def encode(self, longUrl):
"""Encodes a URL to a shortened URL.
:type longUrl: str
:rtype: str
"""
def getRand():
rand = []
for _ in xrange(self.__random_length):
rand += self.__alphabet[random.randint(0, len(self.__alphabet)-1)]
return "".join(rand)
key = getRand()
while key in self.__lookup:
key = getRand()
self.__lookup[key] = longUrl
return self.__tiny_url + key
def decode(self, shortUrl):
"""Decodes a shortened URL to its original URL.
:type shortUrl: str
:rtype: str
"""
return self.__lookup[shortUrl[len(self.__tiny_url):]]
from hashlib import sha256
|
Codec
|
python
|
yandexdataschool__Practical_RL
|
week04_approx_rl/dqn/atari_wrappers.py
|
{
"start": 715,
"end": 2593
}
|
class ____(Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
super().__init__(env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, terminated, truncated, info = self.env.step(action)
self.was_real_done = terminated or truncated
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
terminated = True
self.lives = lives
return obs, reward, terminated, truncated, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs, info = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, terminated, truncated, info = self.env.step(0)
# The no-op step can lead to a game over, so we need to check it again
# to see if we should reset the environment and avoid the
# monitor.py `RuntimeError: Tried to step environment that needs reset`
if terminated or truncated:
obs, info = self.env.reset(**kwargs)
self.lives = self.env.unwrapped.ale.lives()
return obs, info
|
EpisodicLifeEnv
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/execution_time/comms.py
|
{
"start": 24823,
"end": 25082
}
|
class ____(TISkippedDownstreamTasksStatePayload):
"""Update state of downstream tasks within a task instance to 'skipped', while updating current task to success state."""
type: Literal["SkipDownstreamTasks"] = "SkipDownstreamTasks"
|
SkipDownstreamTasks
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 112487,
"end": 112982
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("project_id", "repository_id", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
|
LinkRepositoryToProjectInput
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/actions.py
|
{
"start": 35516,
"end": 37539
}
|
class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create an artifact."""
key: Optional[ArtifactKey] = Field(
default=None, description="An optional unique reference key for this artifact."
)
type: Optional[str] = Field(
default=None,
description=(
"An identifier that describes the shape of the data field. e.g. 'result',"
" 'table', 'markdown'"
),
)
description: Optional[str] = Field(
default=None, description="A markdown-enabled description of the artifact."
)
data: Optional[Union[Dict[str, Any], Any]] = Field(
default=None,
description=(
"Data associated with the artifact, e.g. a result.; structure depends on"
" the artifact type."
),
)
metadata_: Optional[
Annotated[dict[str, str], AfterValidator(validate_max_metadata_length)]
] = Field(
default=None,
description=(
"User-defined artifact metadata. Content must be string key and value"
" pairs."
),
)
flow_run_id: Optional[UUID] = Field(
default=None, description="The flow run associated with the artifact."
)
task_run_id: Optional[UUID] = Field(
default=None, description="The task run associated with the artifact."
)
@classmethod
def from_result(cls, data: Any | dict[str, Any]) -> "ArtifactCreate":
artifact_info: dict[str, Any] = dict()
if isinstance(data, dict):
artifact_key = data.pop("artifact_key", None)
if artifact_key:
artifact_info["key"] = artifact_key
artifact_type = data.pop("artifact_type", None)
if artifact_type:
artifact_info["type"] = artifact_type
description = data.pop("artifact_description", None)
if description:
artifact_info["description"] = description
return cls(data=data, **artifact_info)
|
ArtifactCreate
|
python
|
scipy__scipy
|
scipy/sparse/_csr.py
|
{
"start": 14779,
"end": 18601
}
|
class ____(spmatrix, _csr_base):
"""
Compressed Sparse Row matrix.
This can be instantiated in several ways:
csr_matrix(D)
where D is a 2-D ndarray
csr_matrix(S)
with another sparse array or matrix S (equivalent to S.tocsr())
csr_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
csr_matrix((data, indices, indptr), [shape=(M, N)])
is the standard CSR representation where the column indices for
row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
If the shape parameter is not supplied, the matrix dimensions
are inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
size
data
CSR format data array of the matrix
indices
CSR format index array of the matrix
indptr
CSR format index pointer array of the matrix
has_sorted_indices
has_canonical_format
T
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the CSR format
- efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
- efficient row slicing
- fast matrix vector products
Disadvantages of the CSR format
- slow column slicing operations (consider CSC)
- changes to the sparsity structure are expensive (consider LIL or DOK)
Canonical Format
- Within each row, indices are sorted by column.
- There are no duplicate entries.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> csr_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 0, 1, 2, 2, 2])
>>> col = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
Duplicate entries are summed together:
>>> row = np.array([0, 1, 2, 0])
>>> col = np.array([0, 1, 1, 0])
>>> data = np.array([1, 2, 4, 8])
>>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[9, 0, 0],
[0, 2, 0],
[0, 4, 0]])
As an example of how to construct a CSR matrix incrementally,
the following snippet builds a term-document matrix from texts:
>>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
>>> indptr = [0]
>>> indices = []
>>> data = []
>>> vocabulary = {}
>>> for d in docs:
... for term in d:
... index = vocabulary.setdefault(term, len(vocabulary))
... indices.append(index)
... data.append(1)
... indptr.append(len(indices))
...
>>> csr_matrix((data, indices, indptr), dtype=int).toarray()
array([[2, 1, 0, 0],
[0, 1, 1, 1]])
"""
|
csr_matrix
|
python
|
django__django
|
tests/get_earliest_or_latest/tests.py
|
{
"start": 164,
"end": 7223
}
|
class ____(TestCase):
"""Tests for the earliest() and latest() objects methods"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._article_get_latest_by = Article._meta.get_latest_by
def tearDown(self):
Article._meta.get_latest_by = self._article_get_latest_by
def test_earliest(self):
# Because no Articles exist yet, earliest() raises ArticleDoesNotExist.
with self.assertRaises(Article.DoesNotExist):
Article.objects.earliest()
a1 = Article.objects.create(
headline="Article 1",
pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1),
)
a2 = Article.objects.create(
headline="Article 2",
pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28),
)
a3 = Article.objects.create(
headline="Article 3",
pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 8, 27),
)
a4 = Article.objects.create(
headline="Article 4",
pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30),
)
# Get the earliest Article.
self.assertEqual(Article.objects.earliest(), a1)
# Get the earliest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).earliest(), a2
)
# Pass a custom field name to earliest() to change the field that's
# used to determine the earliest object.
self.assertEqual(Article.objects.earliest("expire_date"), a2)
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).earliest(
"expire_date"
),
a2,
)
# earliest() overrides any other ordering specified on the query.
# Refs #11283.
self.assertEqual(Article.objects.order_by("id").earliest(), a1)
# Error is raised if the user forgot to add a get_latest_by
# in the Model.Meta
Article.objects.model._meta.get_latest_by = None
with self.assertRaisesMessage(
ValueError,
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta.",
):
Article.objects.earliest()
# Earliest publication date, earliest expire date.
self.assertEqual(
Article.objects.filter(pub_date=datetime(2005, 7, 28)).earliest(
"pub_date", "expire_date"
),
a4,
)
# Earliest publication date, latest expire date.
self.assertEqual(
Article.objects.filter(pub_date=datetime(2005, 7, 28)).earliest(
"pub_date", "-expire_date"
),
a3,
)
# Meta.get_latest_by may be a tuple.
Article.objects.model._meta.get_latest_by = ("pub_date", "expire_date")
self.assertEqual(
Article.objects.filter(pub_date=datetime(2005, 7, 28)).earliest(), a4
)
def test_earliest_sliced_queryset(self):
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].earliest()
def test_latest(self):
# Because no Articles exist yet, latest() raises ArticleDoesNotExist.
with self.assertRaises(Article.DoesNotExist):
Article.objects.latest()
a1 = Article.objects.create(
headline="Article 1",
pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1),
)
a2 = Article.objects.create(
headline="Article 2",
pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28),
)
a3 = Article.objects.create(
headline="Article 3",
pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 8, 27),
)
a4 = Article.objects.create(
headline="Article 4",
pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30),
)
# Get the latest Article.
self.assertEqual(Article.objects.latest(), a4)
# Get the latest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__lt=datetime(2005, 7, 27)).latest(), a1
)
# Pass a custom field name to latest() to change the field that's used
# to determine the latest object.
self.assertEqual(Article.objects.latest("expire_date"), a1)
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).latest(
"expire_date"
),
a3,
)
# latest() overrides any other ordering specified on the query
# (#11283).
self.assertEqual(Article.objects.order_by("id").latest(), a4)
# Error is raised if get_latest_by isn't in Model.Meta.
Article.objects.model._meta.get_latest_by = None
with self.assertRaisesMessage(
ValueError,
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta.",
):
Article.objects.latest()
# Latest publication date, latest expire date.
self.assertEqual(
Article.objects.filter(pub_date=datetime(2005, 7, 27)).latest(
"pub_date", "expire_date"
),
a3,
)
# Latest publication date, earliest expire date.
self.assertEqual(
Article.objects.filter(pub_date=datetime(2005, 7, 27)).latest(
"pub_date", "-expire_date"
),
a2,
)
# Meta.get_latest_by may be a tuple.
Article.objects.model._meta.get_latest_by = ("pub_date", "expire_date")
self.assertEqual(
Article.objects.filter(pub_date=datetime(2005, 7, 27)).latest(), a3
)
def test_latest_sliced_queryset(self):
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].latest()
def test_latest_manual(self):
# You can still use latest() with a model that doesn't have
# "get_latest_by" set -- just pass in the field name manually.
Person.objects.create(name="Ralph", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(name="Stephanie", birthday=datetime(1960, 2, 3))
msg = (
"earliest() and latest() require either fields as positional arguments "
"or 'get_latest_by' in the model's Meta."
)
with self.assertRaisesMessage(ValueError, msg):
Person.objects.latest()
self.assertEqual(Person.objects.latest("birthday"), p2)
|
EarliestOrLatestTests
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 550519,
"end": 550955
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("CreatedPullRequestReviewContribution", graphql_name="node")
"""The item at the end of the edge."""
|
CreatedPullRequestReviewContributionEdge
|
python
|
walkccc__LeetCode
|
solutions/2696. Minimum String Length After Removing Substrings/2696.py
|
{
"start": 0,
"end": 325
}
|
class ____:
def minLength(self, s: str) -> int:
stack = []
def match(c: str) -> bool:
return stack and stack[-1] == c
for c in s:
if c == 'B' and match('A'):
stack.pop()
elif c == 'D' and match('C'):
stack.pop()
else:
stack.append(c)
return len(stack)
|
Solution
|
python
|
uqfoundation__dill
|
dill/_dill.py
|
{
"start": 36717,
"end": 91730
}
|
class ____(dict):
def __ror__(self, a):
return a
_dictproxy_helper_instance = _dictproxy_helper()
__d = {}
try:
# In CPython 3.9 and later, this trick can be used to exploit the
# implementation of the __or__ function of MappingProxyType to get the true
# mapping referenced by the proxy. It may work for other implementations,
# but is not guaranteed.
MAPPING_PROXY_TRICK = __d is (DictProxyType(__d) | _dictproxy_helper_instance)
except Exception:
MAPPING_PROXY_TRICK = False
del __d
# _CELL_REF and _CELL_EMPTY are used to stay compatible with versions of dill
# whose _create_cell functions do not have a default value.
# _CELL_REF can be safely removed entirely (replaced by empty tuples for calls
# to _create_cell) once breaking changes are allowed.
_CELL_REF = None
_CELL_EMPTY = Sentinel('_CELL_EMPTY')
def _create_cell(contents=None):
if contents is not _CELL_EMPTY:
value = contents
return (lambda: value).__closure__[0]
def _create_weakref(obj, *args):
from weakref import ref
if obj is None: # it's dead
from collections import UserDict
return ref(UserDict(), *args)
return ref(obj, *args)
def _create_weakproxy(obj, callable=False, *args):
from weakref import proxy
if obj is None: # it's dead
if callable: return proxy(lambda x:x, *args)
from collections import UserDict
return proxy(UserDict(), *args)
return proxy(obj, *args)
def _eval_repr(repr_str):
return eval(repr_str)
def _create_array(f, args, state, npdict=None):
#array = numpy.core.multiarray._reconstruct(*args)
array = f(*args)
array.__setstate__(state)
if npdict is not None: # we also have saved state in __dict__
array.__dict__.update(npdict)
return array
def _create_dtypemeta(scalar_type):
if NumpyDType is True: __hook__() # a bit hacky I think
if scalar_type is None:
return NumpyDType
return type(NumpyDType(scalar_type))
def _create_namedtuple(name, fieldnames, modulename, defaults=None):
class_ = _import_module(modulename + '.' + name, safe=True)
if class_ is not None:
return class_
import collections
t = collections.namedtuple(name, fieldnames, defaults=defaults, module=modulename)
return t
def _create_capsule(pointer, name, context, destructor):
attr_found = False
try:
# based on https://github.com/python/cpython/blob/f4095e53ab708d95e019c909d5928502775ba68f/Objects/capsule.c#L209-L231
uname = name.decode('utf8')
for i in range(1, uname.count('.')+1):
names = uname.rsplit('.', i)
try:
module = __import__(names[0])
except ImportError:
pass
obj = module
for attr in names[1:]:
obj = getattr(obj, attr)
capsule = obj
attr_found = True
break
except Exception:
pass
if attr_found:
if _PyCapsule_IsValid(capsule, name):
return capsule
raise UnpicklingError("%s object exists at %s but a PyCapsule object was expected." % (type(capsule), name))
else:
#warnings.warn('Creating a new PyCapsule %s for a C data structure that may not be present in memory. Segmentation faults or other memory errors are possible.' % (name,), UnpicklingWarning)
capsule = _PyCapsule_New(pointer, name, destructor)
_PyCapsule_SetContext(capsule, context)
return capsule
def _getattr(objclass, name, repr_str):
# hack to grab the reference directly
try: #XXX: works only for __builtin__ ?
attr = repr_str.split("'")[3]
return eval(attr+'.__dict__["'+name+'"]')
except Exception:
try:
attr = objclass.__dict__
if type(attr) is DictProxyType:
if sys.hexversion > 0x30f00a0 and name in ('__weakref__','__dict__'):
attr = _dictproxy_helper.__dict__[name]
else:
attr = attr[name]
else:
attr = getattr(objclass,name)
except (AttributeError, KeyError):
attr = getattr(objclass,name)
return attr
def _get_attr(self, name):
# stop recursive pickling
return getattr(self, name, None) or getattr(__builtin__, name)
def _import_module(import_name, safe=False):
try:
if import_name.startswith('__runtime__.'):
return sys.modules[import_name]
elif '.' in import_name:
items = import_name.split('.')
module = '.'.join(items[:-1])
obj = items[-1]
submodule = getattr(__import__(module, None, None, [obj]), obj)
if isinstance(submodule, (ModuleType, type)):
return submodule
return __import__(import_name, None, None, [obj])
else:
return __import__(import_name)
except (ImportError, AttributeError, KeyError):
if safe:
return None
raise
# https://github.com/python/cpython/blob/a8912a0f8d9eba6d502c37d522221f9933e976db/Lib/pickle.py#L322-L333
def _getattribute(obj, name):
for subpath in name.split('.'):
if subpath == '<locals>':
raise AttributeError("Can't get local attribute {!r} on {!r}"
.format(name, obj))
try:
parent = obj
obj = getattr(obj, subpath)
except AttributeError:
raise AttributeError("Can't get attribute {!r} on {!r}"
.format(name, obj))
return obj, parent
def _locate_function(obj, pickler=None):
module_name = getattr(obj, '__module__', None)
if module_name in ['__main__', None] or \
pickler and is_dill(pickler, child=False) and pickler._session and module_name == pickler._main.__name__:
return False
if hasattr(obj, '__qualname__'):
module = _import_module(module_name, safe=True)
try:
found, _ = _getattribute(module, obj.__qualname__)
return found is obj
except AttributeError:
return False
else:
found = _import_module(module_name + '.' + obj.__name__, safe=True)
return found is obj
def _setitems(dest, source):
for k, v in source.items():
dest[k] = v
def _save_with_postproc(pickler, reduction, is_pickler_dill=None, obj=Getattr.NO_DEFAULT, postproc_list=None):
if obj is Getattr.NO_DEFAULT:
obj = Reduce(reduction) # pragma: no cover
if is_pickler_dill is None:
is_pickler_dill = is_dill(pickler, child=True)
if is_pickler_dill:
# assert id(obj) not in pickler._postproc, str(obj) + ' already pushed on stack!'
# if not hasattr(pickler, 'x'): pickler.x = 0
# print(pickler.x*' ', 'push', obj, id(obj), pickler._recurse)
# pickler.x += 1
if postproc_list is None:
postproc_list = []
# Recursive object not supported. Default to a global instead.
if id(obj) in pickler._postproc:
name = '%s.%s ' % (obj.__module__, getattr(obj, '__qualname__', obj.__name__)) if hasattr(obj, '__module__') else ''
warnings.warn('Cannot pickle %r: %shas recursive self-references that trigger a RecursionError.' % (obj, name), PicklingWarning)
pickler.save_global(obj)
return
pickler._postproc[id(obj)] = postproc_list
# TODO: Use state_setter in Python 3.8 to allow for faster cPickle implementations
pickler.save_reduce(*reduction, obj=obj)
if is_pickler_dill:
# pickler.x -= 1
# print(pickler.x*' ', 'pop', obj, id(obj))
postproc = pickler._postproc.pop(id(obj))
# assert postproc_list == postproc, 'Stack tampered!'
for reduction in reversed(postproc):
if reduction[0] is _setitems:
# use the internal machinery of pickle.py to speedup when
# updating a dictionary in postproc
dest, source = reduction[1]
if source:
pickler.write(pickler.get(pickler.memo[id(dest)][0]))
if sys.hexversion < 0x30e00a1:
pickler._batch_setitems(iter(source.items()))
else:
pickler._batch_setitems(iter(source.items()), obj=obj)
else:
# Updating with an empty dictionary. Same as doing nothing.
continue
else:
pickler.save_reduce(*reduction)
# pop None created by calling preprocessing step off stack
pickler.write(POP)
#@register(CodeType)
#def save_code(pickler, obj):
# logger.trace(pickler, "Co: %s", obj)
# pickler.save_reduce(_unmarshal, (marshal.dumps(obj),), obj=obj)
# logger.trace(pickler, "# Co")
# return
# The following function is based on 'save_codeobject' from 'cloudpickle'
# Copyright (c) 2012, Regents of the University of California.
# Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_.
# License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE
@register(CodeType)
def save_code(pickler, obj):
logger.trace(pickler, "Co: %s", obj)
if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname,
obj.co_firstlineno, obj.co_linetable, obj.co_endlinetable,
obj.co_columntable, obj.co_exceptiontable, obj.co_freevars,
obj.co_cellvars
)
elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args)
with warnings.catch_warnings():
if not OLD312a7: # issue 597
warnings.filterwarnings('ignore', category=DeprecationWarning)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname,
obj.co_firstlineno, obj.co_linetable, obj.co_exceptiontable,
obj.co_freevars, obj.co_cellvars
)
elif hasattr(obj, "co_qualname"): # pypy 3.11 7.3.19+ (17 args)
args = (
obj.co_lnotab, obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname,
obj.co_firstlineno, obj.co_linetable, obj.co_freevars,
obj.co_cellvars
)
elif hasattr(obj, "co_linetable"): # python 3.10 (16 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_linetable, obj.co_freevars,
obj.co_cellvars
)
elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args)
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
obj.co_cellvars
)
else: # python 3.7 (15 args)
args = (
obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals,
obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts,
obj.co_names, obj.co_varnames, obj.co_filename,
obj.co_name, obj.co_firstlineno, obj.co_lnotab,
obj.co_freevars, obj.co_cellvars
)
pickler.save_reduce(_create_code, args, obj=obj)
logger.trace(pickler, "# Co")
return
def _repr_dict(obj):
"""Make a short string representation of a dictionary."""
return "<%s object at %#012x>" % (type(obj).__name__, id(obj))
@register(dict)
def save_module_dict(pickler, obj):
if is_dill(pickler, child=False) and obj == pickler._main.__dict__ and \
not (pickler._session and pickler._first_pass):
logger.trace(pickler, "D1: %s", _repr_dict(obj)) # obj
pickler.write(bytes('c__builtin__\n__main__\n', 'UTF-8'))
logger.trace(pickler, "# D1")
elif (not is_dill(pickler, child=False)) and (obj == _main_module.__dict__):
logger.trace(pickler, "D3: %s", _repr_dict(obj)) # obj
pickler.write(bytes('c__main__\n__dict__\n', 'UTF-8')) #XXX: works in general?
logger.trace(pickler, "# D3")
elif '__name__' in obj and obj != _main_module.__dict__ \
and type(obj['__name__']) is str \
and obj is getattr(_import_module(obj['__name__'],True), '__dict__', None):
logger.trace(pickler, "D4: %s", _repr_dict(obj)) # obj
pickler.write(bytes('c%s\n__dict__\n' % obj['__name__'], 'UTF-8'))
logger.trace(pickler, "# D4")
else:
logger.trace(pickler, "D2: %s", _repr_dict(obj)) # obj
if is_dill(pickler, child=False) and pickler._session:
            # we only care about the session on the first pass through
pickler._first_pass = False
StockPickler.save_dict(pickler, obj)
logger.trace(pickler, "# D2")
return
if not OLD310 and MAPPING_PROXY_TRICK:
def save_dict_view(dicttype):
def save_dict_view_for_function(func):
def _save_dict_view(pickler, obj):
logger.trace(pickler, "Dkvi: <%s>", obj)
mapping = obj.mapping | _dictproxy_helper_instance
pickler.save_reduce(func, (mapping,), obj=obj)
logger.trace(pickler, "# Dkvi")
return _save_dict_view
return [
(funcname, save_dict_view_for_function(getattr(dicttype, funcname)))
for funcname in ('keys', 'values', 'items')
]
else:
# The following functions are based on 'cloudpickle'
# https://github.com/cloudpipe/cloudpickle/blob/5d89947288a18029672596a4d719093cc6d5a412/cloudpickle/cloudpickle.py#L922-L940
# Copyright (c) 2012, Regents of the University of California.
# Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_.
# License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE
def save_dict_view(dicttype):
def save_dict_keys(pickler, obj):
logger.trace(pickler, "Dk: <%s>", obj)
dict_constructor = _shims.Reduce(dicttype.fromkeys, (list(obj),))
pickler.save_reduce(dicttype.keys, (dict_constructor,), obj=obj)
logger.trace(pickler, "# Dk")
def save_dict_values(pickler, obj):
logger.trace(pickler, "Dv: <%s>", obj)
dict_constructor = _shims.Reduce(dicttype, (enumerate(obj),))
pickler.save_reduce(dicttype.values, (dict_constructor,), obj=obj)
logger.trace(pickler, "# Dv")
def save_dict_items(pickler, obj):
logger.trace(pickler, "Di: <%s>", obj)
pickler.save_reduce(dicttype.items, (dicttype(obj),), obj=obj)
logger.trace(pickler, "# Di")
return (
('keys', save_dict_keys),
('values', save_dict_values),
('items', save_dict_items)
)
for __dicttype in (
dict,
OrderedDict
):
__obj = __dicttype()
for __funcname, __savefunc in save_dict_view(__dicttype):
__tview = type(getattr(__obj, __funcname)())
if __tview not in Pickler.dispatch:
Pickler.dispatch[__tview] = __savefunc
del __dicttype, __obj, __funcname, __tview, __savefunc
@register(ClassType)
def save_classobj(pickler, obj): #FIXME: enable pickler._byref
if not _locate_function(obj, pickler):
logger.trace(pickler, "C1: %s", obj)
pickler.save_reduce(ClassType, (obj.__name__, obj.__bases__,
obj.__dict__), obj=obj)
#XXX: or obj.__dict__.copy()), obj=obj) ?
logger.trace(pickler, "# C1")
else:
logger.trace(pickler, "C2: %s", obj)
name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
StockPickler.save_global(pickler, obj, name=name)
logger.trace(pickler, "# C2")
return
@register(typing._GenericAlias)
def save_generic_alias(pickler, obj):
args = obj.__args__
if type(obj.__reduce__()) is str:
logger.trace(pickler, "Ga0: %s", obj)
StockPickler.save_global(pickler, obj, name=obj.__reduce__())
logger.trace(pickler, "# Ga0")
elif obj.__origin__ is tuple and (not args or args == ((),)):
logger.trace(pickler, "Ga1: %s", obj)
pickler.save_reduce(_create_typing_tuple, (args,), obj=obj)
logger.trace(pickler, "# Ga1")
else:
logger.trace(pickler, "Ga2: %s", obj)
StockPickler.save_reduce(pickler, *obj.__reduce__(), obj=obj)
logger.trace(pickler, "# Ga2")
return
if ThreadHandleType:
@register(ThreadHandleType)
def save_thread_handle(pickler, obj):
logger.trace(pickler, "Th: %s", obj)
pickler.save_reduce(_create_thread_handle, (obj.ident, obj.is_done()), obj=obj)
logger.trace(pickler, "# Th")
return
@register(LockType) #XXX: copied Thread will have new Event (due to new Lock)
def save_lock(pickler, obj):
logger.trace(pickler, "Lo: %s", obj)
pickler.save_reduce(_create_lock, (obj.locked(),), obj=obj)
logger.trace(pickler, "# Lo")
return
@register(RLockType)
def save_rlock(pickler, obj):
logger.trace(pickler, "RL: %s", obj)
r = obj.__repr__() # don't use _release_save as it unlocks the lock
count = int(r.split('count=')[1].split()[0].rstrip('>'))
owner = int(r.split('owner=')[1].split()[0])
pickler.save_reduce(_create_rlock, (count,owner,), obj=obj)
logger.trace(pickler, "# RL")
return
#@register(SocketType) #FIXME: causes multiprocess test_pickling FAIL
def save_socket(pickler, obj):
logger.trace(pickler, "So: %s", obj)
pickler.save_reduce(*reduce_socket(obj))
logger.trace(pickler, "# So")
return
def _save_file(pickler, obj, open_):
if obj.closed:
position = 0
else:
obj.flush()
if obj in (sys.__stdout__, sys.__stderr__, sys.__stdin__):
position = -1
else:
position = obj.tell()
if is_dill(pickler, child=True) and pickler._fmode == FILE_FMODE:
f = open_(obj.name, "r")
fdata = f.read()
f.close()
else:
fdata = ""
if is_dill(pickler, child=True):
strictio = pickler._strictio
fmode = pickler._fmode
else:
strictio = False
fmode = 0 # HANDLE_FMODE
pickler.save_reduce(_create_filehandle, (obj.name, obj.mode, position,
obj.closed, open_, strictio,
fmode, fdata), obj=obj)
return
@register(FileType) #XXX: in 3.x has buffer=0, needs different _create?
@register(BufferedReaderType)
@register(BufferedWriterType)
@register(TextWrapperType)
def save_file(pickler, obj):
logger.trace(pickler, "Fi: %s", obj)
f = _save_file(pickler, obj, open)
logger.trace(pickler, "# Fi")
return f
if BufferedRandomType:
@register(BufferedRandomType)
def save_file(pickler, obj):
logger.trace(pickler, "Fi: %s", obj)
f = _save_file(pickler, obj, open)
logger.trace(pickler, "# Fi")
return f
if PyTextWrapperType:
@register(PyBufferedReaderType)
@register(PyBufferedWriterType)
@register(PyTextWrapperType)
def save_file(pickler, obj):
logger.trace(pickler, "Fi: %s", obj)
f = _save_file(pickler, obj, _open)
logger.trace(pickler, "# Fi")
return f
if PyBufferedRandomType:
@register(PyBufferedRandomType)
def save_file(pickler, obj):
logger.trace(pickler, "Fi: %s", obj)
f = _save_file(pickler, obj, _open)
logger.trace(pickler, "# Fi")
return f
# The following two functions are based on 'saveCStringIoInput'
# and 'saveCStringIoOutput' from spickle
# Copyright (c) 2011 by science+computing ag
# License: http://www.apache.org/licenses/LICENSE-2.0
if InputType:
@register(InputType)
def save_stringi(pickler, obj):
logger.trace(pickler, "Io: %s", obj)
if obj.closed:
value = ''; position = 0
else:
value = obj.getvalue(); position = obj.tell()
pickler.save_reduce(_create_stringi, (value, position, \
obj.closed), obj=obj)
logger.trace(pickler, "# Io")
return
@register(OutputType)
def save_stringo(pickler, obj):
logger.trace(pickler, "Io: %s", obj)
if obj.closed:
value = ''; position = 0
else:
value = obj.getvalue(); position = obj.tell()
pickler.save_reduce(_create_stringo, (value, position, \
obj.closed), obj=obj)
logger.trace(pickler, "# Io")
return
if LRUCacheType is not None:
from functools import lru_cache
@register(LRUCacheType)
def save_lru_cache(pickler, obj):
logger.trace(pickler, "LRU: %s", obj)
if OLD39:
kwargs = obj.cache_info()
args = (kwargs.maxsize,)
else:
kwargs = obj.cache_parameters()
args = (kwargs['maxsize'], kwargs['typed'])
if args != lru_cache.__defaults__:
wrapper = Reduce(lru_cache, args, is_callable=True)
else:
wrapper = lru_cache
pickler.save_reduce(wrapper, (obj.__wrapped__,), obj=obj)
logger.trace(pickler, "# LRU")
return
@register(SuperType)
def save_super(pickler, obj):
logger.trace(pickler, "Su: %s", obj)
pickler.save_reduce(super, (obj.__thisclass__, obj.__self__), obj=obj)
logger.trace(pickler, "# Su")
return
if IS_PYPY:
@register(MethodType)
def save_instancemethod0(pickler, obj):
code = getattr(obj.__func__, '__code__', None)
if code is not None and type(code) is not CodeType \
and getattr(obj.__self__, obj.__name__) == obj:
# Some PyPy builtin functions have no module name
logger.trace(pickler, "Me2: %s", obj)
# TODO: verify that this works for all PyPy builtin methods
pickler.save_reduce(getattr, (obj.__self__, obj.__name__), obj=obj)
logger.trace(pickler, "# Me2")
return
logger.trace(pickler, "Me1: %s", obj)
pickler.save_reduce(MethodType, (obj.__func__, obj.__self__), obj=obj)
logger.trace(pickler, "# Me1")
return
else:
@register(MethodType)
def save_instancemethod0(pickler, obj):
logger.trace(pickler, "Me1: %s", obj)
pickler.save_reduce(MethodType, (obj.__func__, obj.__self__), obj=obj)
logger.trace(pickler, "# Me1")
return
if not IS_PYPY:
@register(MemberDescriptorType)
@register(GetSetDescriptorType)
@register(MethodDescriptorType)
@register(WrapperDescriptorType)
@register(ClassMethodDescriptorType)
def save_wrapper_descriptor(pickler, obj):
logger.trace(pickler, "Wr: %s", obj)
pickler.save_reduce(_getattr, (obj.__objclass__, obj.__name__,
obj.__repr__()), obj=obj)
logger.trace(pickler, "# Wr")
return
else:
@register(MemberDescriptorType)
@register(GetSetDescriptorType)
def save_wrapper_descriptor(pickler, obj):
logger.trace(pickler, "Wr: %s", obj)
pickler.save_reduce(_getattr, (obj.__objclass__, obj.__name__,
obj.__repr__()), obj=obj)
logger.trace(pickler, "# Wr")
return
@register(CellType)
def save_cell(pickler, obj):
try:
f = obj.cell_contents
except ValueError: # cell is empty
logger.trace(pickler, "Ce3: %s", obj)
# _shims._CELL_EMPTY is defined in _shims.py to support PyPy 2.7.
# It unpickles to a sentinel object _dill._CELL_EMPTY, also created in
# _shims.py. This object is not present in Python 3 because the cell's
# contents can be deleted in newer versions of Python. The reduce object
# will instead unpickle to None if unpickled in Python 3.
# When breaking changes are made to dill, (_shims._CELL_EMPTY,) can
        # be replaced by () OR the delattr function can be removed depending on
        # whichever is more convenient.
pickler.save_reduce(_create_cell, (_shims._CELL_EMPTY,), obj=obj)
# Call the function _delattr on the cell's cell_contents attribute
# The result of this function call will be None
pickler.save_reduce(_shims._delattr, (obj, 'cell_contents'))
# pop None created by calling _delattr off stack
pickler.write(POP)
logger.trace(pickler, "# Ce3")
return
if is_dill(pickler, child=True):
if id(f) in pickler._postproc:
# Already seen. Add to its postprocessing.
postproc = pickler._postproc[id(f)]
else:
# Haven't seen it. Add to the highest possible object and set its
# value as late as possible to prevent cycle.
postproc = next(iter(pickler._postproc.values()), None)
if postproc is not None:
logger.trace(pickler, "Ce2: %s", obj)
# _CELL_REF is defined in _shims.py to support older versions of
# dill. When breaking changes are made to dill, (_CELL_REF,) can
# be replaced by ()
pickler.save_reduce(_create_cell, (_CELL_REF,), obj=obj)
postproc.append((_shims._setattr, (obj, 'cell_contents', f)))
logger.trace(pickler, "# Ce2")
return
logger.trace(pickler, "Ce1: %s", obj)
pickler.save_reduce(_create_cell, (f,), obj=obj)
logger.trace(pickler, "# Ce1")
return
if MAPPING_PROXY_TRICK:
@register(DictProxyType)
def save_dictproxy(pickler, obj):
logger.trace(pickler, "Mp: %s", _repr_dict(obj)) # obj
mapping = obj | _dictproxy_helper_instance
pickler.save_reduce(DictProxyType, (mapping,), obj=obj)
logger.trace(pickler, "# Mp")
return
else:
@register(DictProxyType)
def save_dictproxy(pickler, obj):
logger.trace(pickler, "Mp: %s", _repr_dict(obj)) # obj
pickler.save_reduce(DictProxyType, (obj.copy(),), obj=obj)
logger.trace(pickler, "# Mp")
return
@register(SliceType)
def save_slice(pickler, obj):
logger.trace(pickler, "Sl: %s", obj)
pickler.save_reduce(slice, (obj.start, obj.stop, obj.step), obj=obj)
logger.trace(pickler, "# Sl")
return
@register(XRangeType)
@register(EllipsisType)
@register(NotImplementedType)
def save_singleton(pickler, obj):
logger.trace(pickler, "Si: %s", obj)
pickler.save_reduce(_eval_repr, (obj.__repr__(),), obj=obj)
logger.trace(pickler, "# Si")
return
def _proxy_helper(obj): # a dead proxy returns a reference to None
"""get memory address of proxy's reference object"""
_repr = repr(obj)
try: _str = str(obj)
except ReferenceError: # it's a dead proxy
return id(None)
if _str == _repr: return id(obj) # it's a repr
try: # either way, it's a proxy from here
address = int(_str.rstrip('>').split(' at ')[-1], base=16)
except ValueError: # special case: proxy of a 'type'
if not IS_PYPY:
address = int(_repr.rstrip('>').split(' at ')[-1], base=16)
else:
objects = iter(gc.get_objects())
for _obj in objects:
if repr(_obj) == _str: return id(_obj)
# all bad below... nothing found so throw ReferenceError
msg = "Cannot reference object for proxy at '%s'" % id(obj)
raise ReferenceError(msg)
return address
def _locate_object(address, module=None):
"""get object located at the given memory address (inverse of id(obj))"""
special = [None, True, False] #XXX: more...?
for obj in special:
if address == id(obj): return obj
if module:
objects = iter(module.__dict__.values())
else: objects = iter(gc.get_objects())
for obj in objects:
if address == id(obj): return obj
# all bad below... nothing found so throw ReferenceError or TypeError
try: address = hex(address)
except TypeError:
raise TypeError("'%s' is not a valid memory address" % str(address))
raise ReferenceError("Cannot reference object at '%s'" % address)
@register(ReferenceType)
def save_weakref(pickler, obj):
refobj = obj()
logger.trace(pickler, "R1: %s", obj)
#refobj = ctypes.pythonapi.PyWeakref_GetObject(obj) # dead returns "None"
pickler.save_reduce(_create_weakref, (refobj,), obj=obj)
logger.trace(pickler, "# R1")
return
@register(ProxyType)
@register(CallableProxyType)
def save_weakproxy(pickler, obj):
# Must do string substitution here and use %r to avoid ReferenceError.
logger.trace(pickler, "R2: %r" % obj)
refobj = _locate_object(_proxy_helper(obj))
pickler.save_reduce(_create_weakproxy, (refobj, callable(obj)), obj=obj)
logger.trace(pickler, "# R2")
return
def _is_builtin_module(module):
if not hasattr(module, "__file__"): return True
if module.__file__ is None: return False
    # If a module's file path starts with one of the sys prefixes below, it is a
    # builtin/stdlib module, so it should always be pickled as a reference.
names = ["base_prefix", "base_exec_prefix", "exec_prefix", "prefix", "real_prefix"]
rp = os.path.realpath
# See https://github.com/uqfoundation/dill/issues/566
return (
any(
module.__file__.startswith(getattr(sys, name))
or rp(module.__file__).startswith(rp(getattr(sys, name)))
for name in names
if hasattr(sys, name)
)
or module.__file__.endswith(EXTENSION_SUFFIXES)
or 'site-packages' in module.__file__
)
def _is_imported_module(module):
return getattr(module, '__loader__', None) is not None or module in sys.modules.values()
@register(ModuleType)
def save_module(pickler, obj):
if False: #_use_diff:
if obj.__name__.split('.', 1)[0] != "dill":
try:
changed = diff.whats_changed(obj, seen=pickler._diff_cache)[0]
            except RuntimeError: # module was not memoised by diff, probably part of dill
pass
else:
logger.trace(pickler, "M2: %s with diff", obj)
logger.info("Diff: %s", changed.keys())
pickler.save_reduce(_import_module, (obj.__name__,), obj=obj,
state=changed)
logger.trace(pickler, "# M2")
return
logger.trace(pickler, "M1: %s", obj)
pickler.save_reduce(_import_module, (obj.__name__,), obj=obj)
logger.trace(pickler, "# M1")
else:
builtin_mod = _is_builtin_module(obj)
is_session_main = is_dill(pickler, child=True) and obj is pickler._main
if (obj.__name__ not in ("builtins", "dill", "dill._dill") and not builtin_mod
or is_session_main):
logger.trace(pickler, "M1: %s", obj)
# Hack for handling module-type objects in load_module().
mod_name = obj.__name__ if _is_imported_module(obj) else '__runtime__.%s' % obj.__name__
# Second references are saved as __builtin__.__main__ in save_module_dict().
main_dict = obj.__dict__.copy()
for item in ('__builtins__', '__loader__'):
main_dict.pop(item, None)
for item in IPYTHON_SINGLETONS: #pragma: no cover
if getattr(main_dict.get(item), '__module__', '').startswith('IPython'):
del main_dict[item]
pickler.save_reduce(_import_module, (mod_name,), obj=obj, state=main_dict)
logger.trace(pickler, "# M1")
elif obj.__name__ == "dill._dill":
logger.trace(pickler, "M2: %s", obj)
pickler.save_global(obj, name="_dill")
logger.trace(pickler, "# M2")
else:
logger.trace(pickler, "M2: %s", obj)
pickler.save_reduce(_import_module, (obj.__name__,), obj=obj)
logger.trace(pickler, "# M2")
return
# The following function is based on '_extract_class_dict' from 'cloudpickle'
# Copyright (c) 2012, Regents of the University of California.
# Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_.
# License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE
def _get_typedict_type(cls, clsdict, attrs, postproc_list):
"""Retrieve a copy of the dict of a class without the inherited methods"""
if len(cls.__bases__) == 1:
inherited_dict = cls.__bases__[0].__dict__
else:
inherited_dict = {}
for base in reversed(cls.__bases__):
inherited_dict.update(base.__dict__)
to_remove = []
for name, value in dict.items(clsdict):
try:
base_value = inherited_dict[name]
if value is base_value and hasattr(value, '__qualname__'):
to_remove.append(name)
except KeyError:
pass
for name in to_remove:
dict.pop(clsdict, name)
if issubclass(type(cls), type):
clsdict.pop('__dict__', None)
clsdict.pop('__weakref__', None)
# clsdict.pop('__prepare__', None)
return clsdict, attrs
def _get_typedict_abc(obj, _dict, attrs, postproc_list):
if hasattr(abc, '_get_dump'):
(registry, _, _, _) = abc._get_dump(obj)
register = obj.register
postproc_list.extend((register, (reg(),)) for reg in registry)
elif hasattr(obj, '_abc_registry'):
registry = obj._abc_registry
register = obj.register
postproc_list.extend((register, (reg,)) for reg in registry)
else:
        raise PicklingError("Cannot find registry of ABC %s" % (obj,))
if '_abc_registry' in _dict:
_dict.pop('_abc_registry', None)
_dict.pop('_abc_cache', None)
_dict.pop('_abc_negative_cache', None)
# _dict.pop('_abc_negative_cache_version', None)
else:
_dict.pop('_abc_impl', None)
return _dict, attrs
@register(TypeType)
def save_type(pickler, obj, postproc_list=None):
if obj in _typemap:
logger.trace(pickler, "T1: %s", obj)
# if obj in _incedental_types:
# warnings.warn('Type %r may only exist on this implementation of Python and cannot be unpickled in other implementations.' % (obj,), PicklingWarning)
pickler.save_reduce(_load_type, (_typemap[obj],), obj=obj)
logger.trace(pickler, "# T1")
elif obj.__bases__ == (tuple,) and all([hasattr(obj, attr) for attr in ('_fields','_asdict','_make','_replace')]):
# special case: namedtuples
logger.trace(pickler, "T6: %s", obj)
obj_name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
if obj.__name__ != obj_name:
if postproc_list is None:
postproc_list = []
postproc_list.append((setattr, (obj, '__qualname__', obj_name)))
if not obj._field_defaults:
_save_with_postproc(pickler, (_create_namedtuple, (obj.__name__, obj._fields, obj.__module__)), obj=obj, postproc_list=postproc_list)
else:
defaults = [obj._field_defaults[field] for field in obj._fields if field in obj._field_defaults]
_save_with_postproc(pickler, (_create_namedtuple, (obj.__name__, obj._fields, obj.__module__, defaults)), obj=obj, postproc_list=postproc_list)
logger.trace(pickler, "# T6")
return
    # special cases: NoneType, NotImplementedType, EllipsisType, EnumMeta, etc.
elif obj is type(None):
logger.trace(pickler, "T7: %s", obj)
#XXX: pickler.save_reduce(type, (None,), obj=obj)
pickler.write(GLOBAL + b'__builtin__\nNoneType\n')
logger.trace(pickler, "# T7")
elif obj is NotImplementedType:
logger.trace(pickler, "T7: %s", obj)
pickler.save_reduce(type, (NotImplemented,), obj=obj)
logger.trace(pickler, "# T7")
elif obj is EllipsisType:
logger.trace(pickler, "T7: %s", obj)
pickler.save_reduce(type, (Ellipsis,), obj=obj)
logger.trace(pickler, "# T7")
elif obj is EnumMeta:
logger.trace(pickler, "T7: %s", obj)
pickler.write(GLOBAL + b'enum\nEnumMeta\n')
logger.trace(pickler, "# T7")
elif obj is ExceptHookArgsType: #NOTE: must be after NoneType for pypy
logger.trace(pickler, "T7: %s", obj)
pickler.write(GLOBAL + b'threading\nExceptHookArgs\n')
logger.trace(pickler, "# T7")
else:
_byref = getattr(pickler, '_byref', None)
obj_recursive = id(obj) in getattr(pickler, '_postproc', ())
incorrectly_named = not _locate_function(obj, pickler)
if not _byref and not obj_recursive and incorrectly_named: # not a function, but the name was held over
if postproc_list is None:
postproc_list = []
# thanks to Tom Stepleton pointing out pickler._session unneeded
logger.trace(pickler, "T2: %s", obj)
_dict, attrs = _get_typedict_type(obj, obj.__dict__.copy(), None, postproc_list) # copy dict proxy to a dict
#print (_dict)
#print ("%s\n%s" % (type(obj), obj.__name__))
#print ("%s\n%s" % (obj.__bases__, obj.__dict__))
slots = _dict.get('__slots__', ())
if type(slots) == str:
# __slots__ accepts a single string
slots = (slots,)
for name in slots:
_dict.pop(name, None)
if isinstance(obj, abc.ABCMeta):
logger.trace(pickler, "ABC: %s", obj)
_dict, attrs = _get_typedict_abc(obj, _dict, attrs, postproc_list)
logger.trace(pickler, "# ABC")
qualname = getattr(obj, '__qualname__', None)
if attrs is not None:
for k, v in attrs.items():
postproc_list.append((setattr, (obj, k, v)))
# TODO: Consider using the state argument to save_reduce?
if qualname is not None:
postproc_list.append((setattr, (obj, '__qualname__', qualname)))
if not hasattr(obj, '__orig_bases__'):
_save_with_postproc(pickler, (_create_type, (
type(obj), obj.__name__, obj.__bases__, _dict
)), obj=obj, postproc_list=postproc_list)
else:
# This case will always work, but might be overkill.
_metadict = {
'metaclass': type(obj)
}
if _dict:
_dict_update = PartialType(_setitems, source=_dict)
else:
_dict_update = None
_save_with_postproc(pickler, (new_class, (
obj.__name__, obj.__orig_bases__, _metadict, _dict_update
)), obj=obj, postproc_list=postproc_list)
logger.trace(pickler, "# T2")
else:
obj_name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
logger.trace(pickler, "T4: %s", obj)
if incorrectly_named:
warnings.warn(
"Cannot locate reference to %r." % (obj,),
PicklingWarning,
stacklevel=3,
)
if obj_recursive:
warnings.warn(
"Cannot pickle %r: %s.%s has recursive self-references that "
"trigger a RecursionError." % (obj, obj.__module__, obj_name),
PicklingWarning,
stacklevel=3,
)
#print (obj.__dict__)
#print ("%s\n%s" % (type(obj), obj.__name__))
#print ("%s\n%s" % (obj.__bases__, obj.__dict__))
StockPickler.save_global(pickler, obj, name=obj_name)
logger.trace(pickler, "# T4")
return
@register(property)
@register(abc.abstractproperty)
def save_property(pickler, obj):
logger.trace(pickler, "Pr: %s", obj)
pickler.save_reduce(type(obj), (obj.fget, obj.fset, obj.fdel, obj.__doc__),
obj=obj)
logger.trace(pickler, "# Pr")
@register(staticmethod)
@register(classmethod)
@register(abc.abstractstaticmethod)
@register(abc.abstractclassmethod)
def save_classmethod(pickler, obj):
logger.trace(pickler, "Cm: %s", obj)
orig_func = obj.__func__
# if type(obj.__dict__) is dict:
# if obj.__dict__:
# state = obj.__dict__
# else:
# state = None
# else:
# state = (None, {'__dict__', obj.__dict__})
pickler.save_reduce(type(obj), (orig_func,), obj=obj)
logger.trace(pickler, "# Cm")
@register(FunctionType)
def save_function(pickler, obj):
if not _locate_function(obj, pickler):
if type(obj.__code__) is not CodeType:
# Some PyPy builtin functions have no module name, and thus are not
# able to be located
module_name = getattr(obj, '__module__', None)
if module_name is None:
module_name = __builtin__.__name__
module = _import_module(module_name, safe=True)
_pypy_builtin = False
try:
found, _ = _getattribute(module, obj.__qualname__)
if getattr(found, '__func__', None) is obj:
_pypy_builtin = True
except AttributeError:
pass
if _pypy_builtin:
logger.trace(pickler, "F3: %s", obj)
pickler.save_reduce(getattr, (found, '__func__'), obj=obj)
logger.trace(pickler, "# F3")
return
logger.trace(pickler, "F1: %s", obj)
_recurse = getattr(pickler, '_recurse', None)
_postproc = getattr(pickler, '_postproc', None)
_main_modified = getattr(pickler, '_main_modified', None)
_original_main = getattr(pickler, '_original_main', __builtin__)#'None'
postproc_list = []
if _recurse:
# recurse to get all globals referred to by obj
from .detect import globalvars
globs_copy = globalvars(obj, recurse=True, builtin=True)
# Add the name of the module to the globs dictionary to prevent
# the duplication of the dictionary. Pickle the unpopulated
# globals dictionary and set the remaining items after the function
# is created to correctly handle recursion.
globs = {'__name__': obj.__module__}
else:
globs_copy = obj.__globals__
# If the globals is the __dict__ from the module being saved as a
# session, substitute it by the dictionary being actually saved.
if _main_modified and globs_copy is _original_main.__dict__:
globs_copy = getattr(pickler, '_main', _original_main).__dict__
globs = globs_copy
# If the globals is a module __dict__, do not save it in the pickle.
elif globs_copy is not None and obj.__module__ is not None and \
getattr(_import_module(obj.__module__, True), '__dict__', None) is globs_copy:
globs = globs_copy
else:
globs = {'__name__': obj.__module__}
if globs_copy is not None and globs is not globs_copy:
# In the case that the globals are copied, we need to ensure that
# the globals dictionary is updated when all objects in the
# dictionary are already created.
glob_ids = {id(g) for g in globs_copy.values()}
for stack_element in _postproc:
if stack_element in glob_ids:
_postproc[stack_element].append((_setitems, (globs, globs_copy)))
break
else:
postproc_list.append((_setitems, (globs, globs_copy)))
closure = obj.__closure__
state_dict = {}
for fattrname in ('__doc__', '__kwdefaults__', '__annotations__'):
fattr = getattr(obj, fattrname, None)
if fattr is not None:
state_dict[fattrname] = fattr
if obj.__qualname__ != obj.__name__:
state_dict['__qualname__'] = obj.__qualname__
if '__name__' not in globs or obj.__module__ != globs['__name__']:
state_dict['__module__'] = obj.__module__
state = obj.__dict__
if type(state) is not dict:
state_dict['__dict__'] = state
state = None
if state_dict:
state = state, state_dict
_save_with_postproc(pickler, (_create_function, (
obj.__code__, globs, obj.__name__, obj.__defaults__,
closure
), state), obj=obj, postproc_list=postproc_list)
# Lift closure cell update to earliest function (#458)
if _postproc:
topmost_postproc = next(iter(_postproc.values()), None)
if closure and topmost_postproc:
for cell in closure:
possible_postproc = (setattr, (cell, 'cell_contents', obj))
try:
topmost_postproc.remove(possible_postproc)
except ValueError:
continue
# Change the value of the cell
pickler.save_reduce(*possible_postproc)
# pop None created by calling preprocessing step off stack
pickler.write(POP)
logger.trace(pickler, "# F1")
else:
logger.trace(pickler, "F2: %s", obj)
name = getattr(obj, '__qualname__', getattr(obj, '__name__', None))
StockPickler.save_global(pickler, obj, name=name)
logger.trace(pickler, "# F2")
return
if HAS_CTYPES and hasattr(ctypes, 'pythonapi'):
_PyCapsule_New = ctypes.pythonapi.PyCapsule_New
_PyCapsule_New.argtypes = (ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p)
_PyCapsule_New.restype = ctypes.py_object
_PyCapsule_GetPointer = ctypes.pythonapi.PyCapsule_GetPointer
_PyCapsule_GetPointer.argtypes = (ctypes.py_object, ctypes.c_char_p)
_PyCapsule_GetPointer.restype = ctypes.c_void_p
_PyCapsule_GetDestructor = ctypes.pythonapi.PyCapsule_GetDestructor
_PyCapsule_GetDestructor.argtypes = (ctypes.py_object,)
_PyCapsule_GetDestructor.restype = ctypes.c_void_p
_PyCapsule_GetContext = ctypes.pythonapi.PyCapsule_GetContext
_PyCapsule_GetContext.argtypes = (ctypes.py_object,)
_PyCapsule_GetContext.restype = ctypes.c_void_p
_PyCapsule_GetName = ctypes.pythonapi.PyCapsule_GetName
_PyCapsule_GetName.argtypes = (ctypes.py_object,)
_PyCapsule_GetName.restype = ctypes.c_char_p
_PyCapsule_IsValid = ctypes.pythonapi.PyCapsule_IsValid
_PyCapsule_IsValid.argtypes = (ctypes.py_object, ctypes.c_char_p)
_PyCapsule_IsValid.restype = ctypes.c_bool
_PyCapsule_SetContext = ctypes.pythonapi.PyCapsule_SetContext
_PyCapsule_SetContext.argtypes = (ctypes.py_object, ctypes.c_void_p)
_PyCapsule_SetDestructor = ctypes.pythonapi.PyCapsule_SetDestructor
_PyCapsule_SetDestructor.argtypes = (ctypes.py_object, ctypes.c_void_p)
_PyCapsule_SetName = ctypes.pythonapi.PyCapsule_SetName
_PyCapsule_SetName.argtypes = (ctypes.py_object, ctypes.c_char_p)
_PyCapsule_SetPointer = ctypes.pythonapi.PyCapsule_SetPointer
_PyCapsule_SetPointer.argtypes = (ctypes.py_object, ctypes.c_void_p)
#from _socket import CAPI as _testcapsule
_testcapsule_name = b'dill._dill._testcapsule'
_testcapsule = _PyCapsule_New(
ctypes.cast(_PyCapsule_New, ctypes.c_void_p),
ctypes.c_char_p(_testcapsule_name),
None
)
PyCapsuleType = type(_testcapsule)
@register(PyCapsuleType)
def save_capsule(pickler, obj):
logger.trace(pickler, "Cap: %s", obj)
name = _PyCapsule_GetName(obj)
#warnings.warn('Pickling a PyCapsule (%s) does not pickle any C data structures and could cause segmentation faults or other memory errors when unpickling.' % (name,), PicklingWarning)
pointer = _PyCapsule_GetPointer(obj, name)
context = _PyCapsule_GetContext(obj)
destructor = _PyCapsule_GetDestructor(obj)
pickler.save_reduce(_create_capsule, (pointer, name, context, destructor), obj=obj)
logger.trace(pickler, "# Cap")
_incedental_reverse_typemap['PyCapsuleType'] = PyCapsuleType
_reverse_typemap['PyCapsuleType'] = PyCapsuleType
_incedental_types.add(PyCapsuleType)
else:
_testcapsule = None
@register(ContextType)
def save_context(pickler, obj):
logger.trace(pickler, "Cx: %s", obj)
pickler.save_reduce(ContextType, tuple(obj.items()), obj=obj)
logger.trace(pickler, "# Cx")
#############################
# A quick fix for issue #500
# This should be removed when a better solution is found.
if hasattr(dataclasses, "_HAS_DEFAULT_FACTORY_CLASS"):
@register(dataclasses._HAS_DEFAULT_FACTORY_CLASS)
def save_dataclasses_HAS_DEFAULT_FACTORY_CLASS(pickler, obj):
logger.trace(pickler, "DcHDF: %s", obj)
pickler.write(GLOBAL + b"dataclasses\n_HAS_DEFAULT_FACTORY\n")
logger.trace(pickler, "# DcHDF")
if hasattr(dataclasses, "MISSING"):
@register(type(dataclasses.MISSING))
def save_dataclasses_MISSING_TYPE(pickler, obj):
logger.trace(pickler, "DcM: %s", obj)
pickler.write(GLOBAL + b"dataclasses\nMISSING\n")
logger.trace(pickler, "# DcM")
if hasattr(dataclasses, "KW_ONLY"):
@register(type(dataclasses.KW_ONLY))
def save_dataclasses_KW_ONLY_TYPE(pickler, obj):
logger.trace(pickler, "DcKWO: %s", obj)
pickler.write(GLOBAL + b"dataclasses\nKW_ONLY\n")
logger.trace(pickler, "# DcKWO")
if hasattr(dataclasses, "_FIELD_BASE"):
@register(dataclasses._FIELD_BASE)
def save_dataclasses_FIELD_BASE(pickler, obj):
logger.trace(pickler, "DcFB: %s", obj)
pickler.write(GLOBAL + b"dataclasses\n" + obj.name.encode() + b"\n")
logger.trace(pickler, "# DcFB")
#############################
# quick sanity checking
def pickles(obj,exact=False,safe=False,**kwds):
"""
Quick check if object pickles with dill.
If *exact=True* then an equality test is done to check if the reconstructed
object matches the original object.
    If *safe=True* then any exception raised while copying signals that the
    object is not picklable; otherwise only pickling errors will be trapped.
    Additional keyword arguments are the same as for :func:`dumps` and :func:`loads`.
"""
if safe: exceptions = (Exception,) # RuntimeError, ValueError
else:
exceptions = (TypeError, AssertionError, NotImplementedError, PicklingError, UnpicklingError)
try:
pik = copy(obj, **kwds)
#FIXME: should check types match first, then check content if "exact"
try:
#FIXME: should be "(pik == obj).all()" for numpy comparison, though that'll fail if shapes differ
result = bool(pik.all() == obj.all())
except (AttributeError, TypeError):
warnings.filterwarnings('ignore') #FIXME: be specific
result = pik == obj
if warnings.filters: del warnings.filters[0]
if hasattr(result, 'toarray'): # for unusual types like sparse matrix
result = result.toarray().all()
if result: return True
if not exact:
result = type(pik) == type(obj)
if result: return result
# class instances might have been dumped with byref=False
return repr(type(pik)) == repr(type(obj)) #XXX: InstanceType?
return False
except exceptions:
return False
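# Illustrative usage sketch for `pickles` (not part of dill itself; a hedged
# example of the function above, not its definitive behavior):
#   >>> import dill
#   >>> dill.pickles([1, 2, 3])        # simple containers round-trip
#   True
#   >>> dill.pickles(lambda x: x + 1)  # without exact=True a matching type suffices
#   True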
def check(obj, *args, **kwds):
"""
Check pickling of an object across another process.
*python* is the path to the python interpreter (defaults to sys.executable)
Set *verbose=True* to print the unpickled object in the other process.
Additional keyword arguments are as :func:`dumps` and :func:`loads`.
"""
# == undocumented ==
# python -- the string path or executable name of the selected python
# verbose -- if True, be verbose about printing warning messages
# all other args and kwds are passed to dill.dumps #FIXME: ignore on load
verbose = kwds.pop('verbose', False)
python = kwds.pop('python', None)
if python is None:
import sys
python = sys.executable
    # type check: the python executable path must be a string
    if not isinstance(python, str):
        raise TypeError("python executable path must be a string, got %r" % type(python))
import subprocess
fail = True
try:
_obj = dumps(obj, *args, **kwds)
fail = False
finally:
if fail and verbose:
print("DUMP FAILED")
#FIXME: fails if python interpreter path contains spaces
# Use the following instead (which also processes the 'ignore' keyword):
# ignore = kwds.pop('ignore', None)
# unpickle = "dill.loads(%s, ignore=%s)"%(repr(_obj), repr(ignore))
# cmd = [python, "-c", "import dill; print(%s)"%unpickle]
# msg = "SUCCESS" if not subprocess.call(cmd) else "LOAD FAILED"
msg = "%s -c import dill; print(dill.loads(%s))" % (python, repr(_obj))
msg = "SUCCESS" if not subprocess.call(msg.split(None,2)) else "LOAD FAILED"
if verbose:
print(msg)
return
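# Illustrative usage sketch for `check` (not part of dill itself): it dumps the
# object in this process and loads it in a fresh interpreter via subprocess.
#   >>> import dill
#   >>> def squared(x): return x * x       # hypothetical example object
#   >>> dill.check(squared, verbose=True)  # prints the unpickled object in the other process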
# use to protect against missing attributes
def is_dill(pickler, child=None):
"check the dill-ness of your pickler"
if child is False or not hasattr(pickler.__class__, 'mro'):
return 'dill' in pickler.__module__
return Pickler in pickler.__class__.mro()
def _extend():
"""extend pickle with all of dill's registered types"""
# need to have pickle not choke on _main_module? use is_dill(pickler)
for t,func in Pickler.dispatch.items():
try:
StockPickler.dispatch[t] = func
except Exception: #TypeError, PicklingError, UnpicklingError
logger.trace(pickler, "skip: %s", t)
return
del diff, _use_diff, use_diff
# EOF
|
_dictproxy_helper
|
python
|
PyCQA__pylint
|
tests/functional/n/no/no_member_augassign.py
|
{
"start": 302,
"end": 386
}
|
class ____:
value: int
obj_b = B()
obj_b.value = 1 + obj_b.value # [no-member]
|
B
|
python
|
django__django
|
django/contrib/gis/db/models/lookups.py
|
{
"start": 4376,
"end": 4612
}
|
class ____(GISLookup):
"""
The 'overlaps_above' operator returns true if A's bounding box overlaps or
is above B's bounding box.
"""
lookup_name = "overlaps_above"
@BaseSpatialField.register_lookup
|
OverlapsAboveLookup
|
python
|
django__django
|
tests/model_forms/models.py
|
{
"start": 13328,
"end": 13659
}
|
class ____(models.Model):
title = models.CharField(max_length=30)
_should_error = False
def __setattr__(self, key, value):
if self._should_error is True:
raise ValidationError(message={key: "Cannot set attribute"}, code="invalid")
super().__setattr__(key, value)
|
StrictAssignmentFieldSpecific
|
python
|
doocs__leetcode
|
solution/2400-2499/2424.Longest Uploaded Prefix/Solution.py
|
{
"start": 0,
"end": 404
}
|
class ____:
def __init__(self, n: int):
self.r = 0
self.s = set()
def upload(self, video: int) -> None:
self.s.add(video)
while self.r + 1 in self.s:
self.r += 1
def longest(self) -> int:
return self.r
# Your LUPrefix object will be instantiated and called as such:
# obj = LUPrefix(n)
# obj.upload(video)
# param_2 = obj.longest()
|
LUPrefix
|
python
|
ray-project__ray
|
python/ray/data/_internal/logical/operators/read_operator.py
|
{
"start": 551,
"end": 7413
}
|
class ____(
AbstractMap,
SourceOperator,
LogicalOperatorSupportsProjectionPushdown,
LogicalOperatorSupportsPredicatePushdown,
):
"""Logical operator for read."""
# TODO: make this a frozen dataclass. https://github.com/ray-project/ray/issues/55747
def __init__(
self,
datasource: Datasource,
datasource_or_legacy_reader: Union[Datasource, Reader],
parallelism: int,
num_outputs: Optional[int] = None,
ray_remote_args: Optional[Dict[str, Any]] = None,
concurrency: Optional[int] = None,
):
super().__init__(
name=f"Read{datasource.get_name()}",
input_op=None,
num_outputs=num_outputs,
ray_remote_args=ray_remote_args,
)
self._datasource = datasource
self._datasource_or_legacy_reader = datasource_or_legacy_reader
self._parallelism = parallelism
self._concurrency = concurrency
self._detected_parallelism = None
def output_data(self):
return None
def set_detected_parallelism(self, parallelism: int):
"""
Set the true parallelism that should be used during execution. This
should be specified by the user or detected by the optimizer.
"""
self._detected_parallelism = parallelism
def get_detected_parallelism(self) -> int:
"""
Get the true parallelism that should be used during execution.
"""
return self._detected_parallelism
def estimated_num_outputs(self) -> Optional[int]:
return self._num_outputs or self._estimate_num_outputs()
def infer_metadata(self) -> BlockMetadata:
"""A ``BlockMetadata`` that represents the aggregate metadata of the outputs.
This method gets metadata from the read tasks. It doesn't trigger any actual
execution.
"""
return self._cached_output_metadata.metadata
def infer_schema(self):
return self._cached_output_metadata.schema
def _estimate_num_outputs(self) -> Optional[int]:
metadata = self._cached_output_metadata.metadata
target_max_block_size = DataContext.get_current().target_max_block_size
# In either case of
# - Total byte-size estimate not available
# - Target max-block-size not being configured
#
# We fallback to estimating number of outputs to be equivalent to the
# number of input files being read (if any)
if metadata.size_bytes is None or target_max_block_size is None:
            # NOTE: If there are no input files specified, return the count (could be 0)
return (
len(metadata.input_files) if metadata.input_files is not None else None
)
# Otherwise, estimate total number of blocks from estimated total
# byte size
return math.ceil(metadata.size_bytes / target_max_block_size)
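    # Illustrative arithmetic only (not part of this operator): with an estimated
    # size_bytes of 1 GiB and a target_max_block_size of 128 MiB, the estimate is
    # math.ceil((1 * 1024**3) / (128 * 1024**2)) == 8 output blocks.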
@functools.cached_property
def _cached_output_metadata(self) -> "BlockMetadataWithSchema":
# Legacy datasources might not implement `get_read_tasks`.
if self._datasource.should_create_reader:
empty_meta = BlockMetadata(None, None, None, None)
return BlockMetadataWithSchema(metadata=empty_meta, schema=None)
# HACK: Try to get a single read task to get the metadata.
read_tasks = self._datasource.get_read_tasks(1)
if len(read_tasks) == 0:
# If there are no read tasks, the dataset is probably empty.
empty_meta = BlockMetadata(None, None, None, None)
return BlockMetadataWithSchema(metadata=empty_meta, schema=None)
# `get_read_tasks` isn't guaranteed to return exactly one read task.
metadata = [read_task.metadata for read_task in read_tasks]
if all(meta.num_rows is not None for meta in metadata):
num_rows = sum(meta.num_rows for meta in metadata)
original_num_rows = num_rows
# Apply per-block limit if set
if self._per_block_limit is not None:
num_rows = min(num_rows, self._per_block_limit)
else:
num_rows = None
original_num_rows = None
if all(meta.size_bytes is not None for meta in metadata):
size_bytes = sum(meta.size_bytes for meta in metadata)
# Pro-rate the byte size if we applied a row limit
if (
self._per_block_limit is not None
and original_num_rows is not None
and original_num_rows > 0
):
size_bytes = int(size_bytes * (num_rows / original_num_rows))
else:
size_bytes = None
input_files = []
for meta in metadata:
if meta.input_files is not None:
input_files.extend(meta.input_files)
meta = BlockMetadata(
num_rows=num_rows,
size_bytes=size_bytes,
input_files=input_files,
exec_stats=None,
)
schemas = [
read_task.schema for read_task in read_tasks if read_task.schema is not None
]
from ray.data._internal.util import unify_schemas_with_validation
schema = None
if schemas:
schema = unify_schemas_with_validation(schemas)
return BlockMetadataWithSchema(metadata=meta, schema=schema)
def supports_projection_pushdown(self) -> bool:
return self._datasource.supports_projection_pushdown()
def get_projection_map(self) -> Optional[Dict[str, str]]:
return self._datasource.get_projection_map()
def apply_projection(
self,
projection_map: Optional[Dict[str, str]],
) -> "Read":
clone = copy.copy(self)
projected_datasource = self._datasource.apply_projection(projection_map)
clone._datasource = projected_datasource
clone._datasource_or_legacy_reader = projected_datasource
return clone
def get_column_renames(self) -> Optional[Dict[str, str]]:
return self._datasource.get_column_renames()
def supports_predicate_pushdown(self) -> bool:
return self._datasource.supports_predicate_pushdown()
def get_current_predicate(self) -> Optional[Expr]:
return self._datasource.get_current_predicate()
def apply_predicate(self, predicate_expr: Expr) -> "Read":
predicated_datasource = self._datasource.apply_predicate(predicate_expr)
clone = copy.copy(self)
clone._datasource = predicated_datasource
clone._datasource_or_legacy_reader = predicated_datasource
return clone
def can_modify_num_rows(self) -> bool:
        # NOTE: Returns true, since most readers expand their input
        # and produce many rows for every single row of the input.
return True
|
Read
|
python
|
mlflow__mlflow
|
mlflow/cli/__init__.py
|
{
"start": 1463,
"end": 37666
}
|
class ____(click.Group):
def get_command(self, ctx, cmd_name):
# `mlflow ui` is an alias for `mlflow server`
cmd_name = "server" if cmd_name == "ui" else cmd_name
return super().get_command(ctx, cmd_name)
def _load_env_file(ctx: click.Context, param: click.Parameter, value: str | None) -> str | None:
"""
Click callback to load environment variables from a dotenv file.
This function is designed to be used as an eager callback for the --env-file option,
ensuring that environment variables are loaded before any command execution.
"""
if value is not None:
env_path = Path(value)
if not env_path.exists():
raise click.BadParameter(f"Environment file '{value}' does not exist.")
# Load the environment file
# override=False means existing environment variables take precedence
load_dotenv(env_path, override=False)
# Log that we've loaded the env file (using click.echo for CLI output)
click.echo(f"Loaded environment variables from: {value}")
return value
@click.group(cls=AliasedGroup)
@click.version_option(version=version.VERSION)
@click.option(
"--env-file",
type=click.Path(exists=False),
callback=_load_env_file,
expose_value=True,
is_eager=True,
help="Load environment variables from a dotenv file before executing the command. "
"Variables in the file will be loaded but won't override existing environment variables.",
)
def cli(env_file):
pass
@cli.command()
@click.argument("uri")
@click.option(
"--entry-point",
"-e",
metavar="NAME",
default="main",
help="Entry point within project. [default: main]. If the entry point is not found, "
"attempts to run the project file with the specified name as a script, "
"using 'python' to run .py files and the default shell (specified by "
"environment variable $SHELL) to run .sh files",
)
@click.option(
"--version",
"-v",
metavar="VERSION",
help="Version of the project to run, as a Git commit reference for Git projects.",
)
@click.option(
"--param-list",
"-P",
metavar="NAME=VALUE",
multiple=True,
help="A parameter for the run, of the form -P name=value. Provided parameters that "
"are not in the list of parameters for an entry point will be passed to the "
"corresponding entry point as command-line arguments in the form `--name value`",
)
@click.option(
"--docker-args",
"-A",
metavar="NAME=VALUE",
multiple=True,
help="A `docker run` argument or flag, of the form -A name=value (e.g. -A gpus=all) "
"or -A name (e.g. -A t). The argument will then be passed as "
"`docker run --name value` or `docker run --name` respectively. ",
)
@click.option(
"--experiment-name",
envvar=MLFLOW_EXPERIMENT_NAME.name,
help="Name of the experiment under which to launch the run. If not "
"specified, 'experiment-id' option will be used to launch run.",
)
@click.option(
"--experiment-id",
envvar=MLFLOW_EXPERIMENT_ID.name,
type=click.STRING,
help="ID of the experiment under which to launch the run.",
)
# TODO: Add tracking server argument once we have it working.
@click.option(
"--backend",
"-b",
metavar="BACKEND",
default="local",
help="Execution backend to use for run. Supported values: 'local', 'databricks', "
"kubernetes (experimental). Defaults to 'local'. If running against "
"Databricks, will run against a Databricks workspace determined as follows: "
"if a Databricks tracking URI of the form 'databricks://profile' has been set "
"(e.g. by setting the MLFLOW_TRACKING_URI environment variable), will run "
"against the workspace specified by <profile>. Otherwise, runs against the "
"workspace specified by the default Databricks CLI profile. See "
"https://github.com/databricks/databricks-cli for more info on configuring a "
"Databricks CLI profile.",
)
@click.option(
"--backend-config",
"-c",
metavar="FILE",
help="Path to JSON file (must end in '.json') or JSON string which will be passed "
"as config to the backend. The exact content which should be "
"provided is different for each execution backend and is documented "
"at https://www.mlflow.org/docs/latest/projects.html.",
)
@cli_args.ENV_MANAGER_PROJECTS
@click.option(
"--storage-dir",
envvar="MLFLOW_TMP_DIR",
help="Only valid when ``backend`` is local. "
"MLflow downloads artifacts from distributed URIs passed to parameters of "
"type 'path' to subdirectories of storage_dir.",
)
@click.option(
"--run-id",
metavar="RUN_ID",
help="If specified, the given run ID will be used instead of creating a new run. "
"Note: this argument is used internally by the MLflow project APIs "
"and should not be specified.",
)
@click.option(
"--run-name",
metavar="RUN_NAME",
help="The name to give the MLflow Run associated with the project execution. If not specified, "
"the MLflow Run name is left unset.",
)
@click.option(
"--build-image",
is_flag=True,
default=False,
show_default=True,
help=(
"Only valid for Docker projects. If specified, build a new Docker image that's based on "
"the image specified by the `image` field in the MLproject file, and contains files in the "
"project directory."
),
)
def run(
uri,
entry_point,
version,
param_list,
docker_args,
experiment_name,
experiment_id,
backend,
backend_config,
env_manager,
storage_dir,
run_id,
run_name,
build_image,
):
"""
Run an MLflow project from the given URI.
For local runs, the run will block until it completes.
Otherwise, the project will run asynchronously.
If running locally (the default), the URI can be either a Git repository URI or a local path.
If running on Databricks, the URI must be a Git repository.
By default, Git projects run in a new working directory with the given parameters, while
local projects run from the project's root directory.
"""
if experiment_id is not None and experiment_name is not None:
raise click.UsageError("Specify only one of 'experiment-name' or 'experiment-id' options.")
param_dict = _user_args_to_dict(param_list)
args_dict = _user_args_to_dict(docker_args, argument_type="A")
if backend_config is not None and os.path.splitext(backend_config)[-1] != ".json":
try:
backend_config = json.loads(backend_config)
except ValueError as e:
raise click.UsageError(f"Invalid backend config JSON. Parse error: {e}") from e
if backend == "kubernetes":
if backend_config is None:
raise click.UsageError("Specify 'backend_config' when using kubernetes mode.")
try:
projects.run(
uri,
entry_point,
version,
experiment_name=experiment_name,
experiment_id=experiment_id,
parameters=param_dict,
docker_args=args_dict,
backend=backend,
backend_config=backend_config,
env_manager=env_manager,
storage_dir=storage_dir,
synchronous=backend in ("local", "kubernetes") or backend is None,
run_id=run_id,
run_name=run_name,
build_image=build_image,
)
except projects.ExecutionException as e:
_logger.error("=== %s ===", e)
sys.exit(1)
def _user_args_to_dict(arguments, argument_type="P"):
user_dict = {}
for arg in arguments:
split = arg.split("=", maxsplit=1)
# Docker arguments such as `t` don't require a value -> set to True if specified
if len(split) == 1 and argument_type == "A":
name = split[0]
value = True
elif len(split) == 2:
name = split[0]
value = split[1]
else:
raise click.UsageError(
f"Invalid format for -{argument_type} parameter: '{arg}'. "
f"Use -{argument_type} name=value."
)
if name in user_dict:
raise click.UsageError(f"Repeated parameter: '{name}'")
user_dict[name] = value
return user_dict
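# Illustrative only (not part of the MLflow CLI): given click-captured values
# such as ("alpha=0.5", "epochs=10"), _user_args_to_dict returns
# {"alpha": "0.5", "epochs": "10"}; with argument_type="A", a bare flag such as
# "t" maps to True, and a repeated name raises a UsageError.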
def _validate_server_args(
ctx=None,
gunicorn_opts=None,
workers=None,
waitress_opts=None,
uvicorn_opts=None,
allowed_hosts=None,
cors_allowed_origins=None,
x_frame_options=None,
disable_security_middleware=None,
):
if sys.platform == "win32":
if gunicorn_opts is not None:
raise NotImplementedError(
"gunicorn is not supported on Windows, cannot specify --gunicorn-opts"
)
num_server_opts_specified = sum(
1 for opt in [gunicorn_opts, waitress_opts, uvicorn_opts] if opt is not None
)
if num_server_opts_specified > 1:
raise click.UsageError(
"Cannot specify multiple server options. Choose one of: "
"'--gunicorn-opts', '--waitress-opts', or '--uvicorn-opts'."
)
using_flask_only = gunicorn_opts is not None or waitress_opts is not None
# NB: Only check for security params that are explicitly passed via CLI (not env vars)
# This allows Docker containers to set env vars while using gunicorn
from click.core import ParameterSource
security_params_specified = False
if ctx:
security_params_specified = any(
[
ctx.get_parameter_source("allowed_hosts") == ParameterSource.COMMANDLINE,
ctx.get_parameter_source("cors_allowed_origins") == ParameterSource.COMMANDLINE,
(
ctx.get_parameter_source("disable_security_middleware")
== ParameterSource.COMMANDLINE
),
]
)
if using_flask_only and security_params_specified:
raise click.UsageError(
"Security middleware parameters (--allowed-hosts, --cors-allowed-origins, "
"--disable-security-middleware) are only supported with "
"the default uvicorn server. They cannot be used with --gunicorn-opts or "
"--waitress-opts. To use security features, run without specifying a server "
"option (uses uvicorn by default) or explicitly use --uvicorn-opts."
)
def _validate_static_prefix(ctx, param, value):
"""
Validate that the static_prefix option starts with a "/" and does not end in a "/".
Conforms to the callback interface of click documented at
http://click.pocoo.org/5/options/#callbacks-for-validation.
"""
if value is not None:
if not value.startswith("/"):
raise UsageError("--static-prefix must begin with a '/'.")
if value.endswith("/"):
raise UsageError("--static-prefix should not end with a '/'.")
return value
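# Illustrative only (not part of the MLflow CLI): _validate_static_prefix accepts
# a value such as "/mlflow" unchanged, while "mlflow" (missing leading slash) or
# "/mlflow/" (trailing slash) raises a UsageError.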
@cli.command()
@click.pass_context
@click.option(
"--backend-store-uri",
envvar="MLFLOW_BACKEND_STORE_URI",
metavar="PATH",
default=None,
help="URI to which to persist experiment and run data. Acceptable URIs are "
"SQLAlchemy-compatible database connection strings "
"(e.g. 'sqlite:///path/to/file.db') or local filesystem URIs "
"(e.g. 'file:///absolute/path/to/directory'). By default, data will be logged "
"to the ./mlruns directory.",
)
@click.option(
"--registry-store-uri",
envvar="MLFLOW_REGISTRY_STORE_URI",
metavar="URI",
default=None,
help="URI to which to persist registered models. Acceptable URIs are "
"SQLAlchemy-compatible database connection strings (e.g. 'sqlite:///path/to/file.db'). "
"If not specified, `backend-store-uri` is used.",
)
@click.option(
"--default-artifact-root",
envvar="MLFLOW_DEFAULT_ARTIFACT_ROOT",
metavar="URI",
default=None,
help="Directory in which to store artifacts for any new experiments created. For tracking "
"server backends that rely on SQL, this option is required in order to store artifacts. "
"Note that this flag does not impact already-created experiments with any previous "
"configuration of an MLflow server instance. "
f"By default, data will be logged to the {DEFAULT_ARTIFACTS_URI} uri proxy if "
"the --serve-artifacts option is enabled. Otherwise, the default location will "
f"be {DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH}.",
)
@cli_args.SERVE_ARTIFACTS
@click.option(
"--artifacts-only",
envvar="MLFLOW_ARTIFACTS_ONLY",
is_flag=True,
default=False,
help="If specified, configures the mlflow server to be used only for proxied artifact serving. "
"With this mode enabled, functionality of the mlflow tracking service (e.g. run creation, "
"metric logging, and parameter logging) is disabled. The server will only expose "
"endpoints for uploading, downloading, and listing artifacts. "
"Default: False",
)
@cli_args.ARTIFACTS_DESTINATION
@cli_args.HOST
@cli_args.PORT
@cli_args.WORKERS
@cli_args.ALLOWED_HOSTS
@cli_args.CORS_ALLOWED_ORIGINS
@cli_args.DISABLE_SECURITY_MIDDLEWARE
@cli_args.X_FRAME_OPTIONS
@click.option(
"--static-prefix",
envvar="MLFLOW_STATIC_PREFIX",
default=None,
callback=_validate_static_prefix,
help="A prefix which will be prepended to the path of all static paths.",
)
@click.option(
"--gunicorn-opts",
envvar="MLFLOW_GUNICORN_OPTS",
default=None,
help="Additional command line options forwarded to gunicorn processes.",
)
@click.option(
"--waitress-opts", default=None, help="Additional command line options for waitress-serve."
)
@click.option(
"--uvicorn-opts",
envvar="MLFLOW_UVICORN_OPTS",
default=None,
help="Additional command line options forwarded to uvicorn processes (used by default).",
)
@click.option(
"--expose-prometheus",
envvar="MLFLOW_EXPOSE_PROMETHEUS",
default=None,
help="Path to the directory where metrics will be stored. If the directory "
"doesn't exist, it will be created. "
"Activate prometheus exporter to expose metrics on /metrics endpoint.",
)
@click.option(
"--app-name",
default=None,
type=click.Choice([e.name for e in get_entry_points("mlflow.app")]),
show_default=True,
help=(
"Application name to be used for the tracking server. "
"If not specified, 'mlflow.server:app' will be used."
),
)
@click.option(
"--dev",
is_flag=True,
default=False,
show_default=True,
help=(
"If enabled, run the server with debug logging and auto-reload. "
"Should only be used for development purposes. "
"Cannot be used with '--gunicorn-opts' or '--uvicorn-opts'. "
"Unsupported on Windows."
),
)
def server(
ctx,
backend_store_uri,
registry_store_uri,
default_artifact_root,
serve_artifacts,
artifacts_only,
artifacts_destination,
host,
port,
workers,
allowed_hosts,
cors_allowed_origins,
disable_security_middleware,
x_frame_options,
static_prefix,
gunicorn_opts,
waitress_opts,
expose_prometheus,
app_name,
dev,
uvicorn_opts,
):
"""
Run the MLflow tracking server with built-in security middleware.
The server listens on http://localhost:5000 by default and only accepts connections
from the local machine. To let the server accept connections from other machines, you will need
to pass ``--host 0.0.0.0`` to listen on all network interfaces
(or a specific interface address).
See https://mlflow.org/docs/latest/tracking/server-security.html for detailed documentation
and guidance on security configurations for the MLflow tracking server.
"""
from mlflow.server import _run_server
from mlflow.server.handlers import initialize_backend_stores
# Get env_file from parent context
env_file = ctx.parent.params.get("env_file") if ctx.parent else None
if dev:
if is_windows():
raise click.UsageError("'--dev' is not supported on Windows.")
if gunicorn_opts:
raise click.UsageError("'--dev' and '--gunicorn-opts' cannot be specified together.")
if uvicorn_opts:
raise click.UsageError("'--dev' and '--uvicorn-opts' cannot be specified together.")
if app_name:
raise click.UsageError(
"'--dev' cannot be used with '--app-name'. Development mode with auto-reload "
"is only supported for the default MLflow tracking server."
)
uvicorn_opts = "--reload --log-level debug"
_validate_server_args(
ctx=ctx,
gunicorn_opts=gunicorn_opts,
workers=workers,
waitress_opts=waitress_opts,
uvicorn_opts=uvicorn_opts,
allowed_hosts=allowed_hosts,
cors_allowed_origins=cors_allowed_origins,
x_frame_options=x_frame_options,
disable_security_middleware=disable_security_middleware,
)
if disable_security_middleware:
os.environ["MLFLOW_SERVER_DISABLE_SECURITY_MIDDLEWARE"] = "true"
else:
if allowed_hosts:
os.environ["MLFLOW_SERVER_ALLOWED_HOSTS"] = allowed_hosts
if allowed_hosts == "*":
click.echo(
"WARNING: Accepting ALL hosts. "
"This may leave the server vulnerable to DNS rebinding attacks."
)
if cors_allowed_origins:
os.environ["MLFLOW_SERVER_CORS_ALLOWED_ORIGINS"] = cors_allowed_origins
if cors_allowed_origins == "*":
click.echo(
"WARNING: Allowing ALL origins for CORS. "
"This allows ANY website to access your MLflow data. "
"This configuration is only recommended for local development."
)
if x_frame_options:
os.environ["MLFLOW_SERVER_X_FRAME_OPTIONS"] = x_frame_options
if not backend_store_uri:
backend_store_uri = _get_default_tracking_uri()
click.echo(f"Backend store URI not provided. Using {backend_store_uri}")
if not registry_store_uri:
registry_store_uri = backend_store_uri
click.echo(f"Registry store URI not provided. Using {registry_store_uri}")
default_artifact_root = resolve_default_artifact_root(
serve_artifacts, default_artifact_root, backend_store_uri
)
artifacts_only_config_validation(artifacts_only, backend_store_uri)
try:
initialize_backend_stores(backend_store_uri, registry_store_uri, default_artifact_root)
except Exception as e:
_logger.error("Error initializing backend store")
_logger.exception(e)
sys.exit(1)
if disable_security_middleware:
click.echo(
"[MLflow] WARNING: Security middleware is DISABLED. "
"Your MLflow server is vulnerable to various attacks.",
err=True,
)
elif not allowed_hosts and not cors_allowed_origins:
click.echo(
"[MLflow] Security middleware enabled with default settings (localhost-only). "
"To allow connections from other hosts, use --host 0.0.0.0 and configure "
"--allowed-hosts and --cors-allowed-origins.",
err=True,
)
else:
parts = ["[MLflow] Security middleware enabled"]
if allowed_hosts:
hosts_list = allowed_hosts.split(",")[:3]
if len(allowed_hosts.split(",")) > 3:
hosts_list.append(f"and {len(allowed_hosts.split(',')) - 3} more")
parts.append(f"Allowed hosts: {', '.join(hosts_list)}")
if cors_allowed_origins:
origins_list = cors_allowed_origins.split(",")[:3]
if len(cors_allowed_origins.split(",")) > 3:
origins_list.append(f"and {len(cors_allowed_origins.split(',')) - 3} more")
parts.append(f"CORS origins: {', '.join(origins_list)}")
click.echo(". ".join(parts) + ".", err=True)
try:
_run_server(
file_store_path=backend_store_uri,
registry_store_uri=registry_store_uri,
default_artifact_root=default_artifact_root,
serve_artifacts=serve_artifacts,
artifacts_only=artifacts_only,
artifacts_destination=artifacts_destination,
host=host,
port=port,
static_prefix=static_prefix,
workers=workers,
gunicorn_opts=gunicorn_opts,
waitress_opts=waitress_opts,
expose_prometheus=expose_prometheus,
app_name=app_name,
uvicorn_opts=uvicorn_opts,
env_file=env_file,
)
except ShellCommandException:
eprint("Running the mlflow server failed. Please see the logs above for details.")
sys.exit(1)
@cli.command(short_help="Permanently delete runs in the `deleted` lifecycle stage.")
@click.option(
"--older-than",
default=None,
help="Optional. Remove run(s) older than the specified time limit. "
"Specify a string in #d#h#m#s format. Float values are also supported. "
"For example: --older-than 1d2h3m4s, --older-than 1.2d3h4m5s",
)
@click.option(
"--backend-store-uri",
metavar="PATH",
default=None,
help="URI of the backend store from which to delete runs. Acceptable URIs are "
"SQLAlchemy-compatible database connection strings "
"(e.g. 'sqlite:///path/to/file.db') or local filesystem URIs "
"(e.g. 'file:///absolute/path/to/directory'). By default, data will be deleted "
"from the ./mlruns directory.",
)
@click.option(
"--artifacts-destination",
envvar="MLFLOW_ARTIFACTS_DESTINATION",
metavar="URI",
default=None,
help=(
"The base artifact location from which to resolve artifact upload/download/list requests "
"(e.g. 's3://my-bucket'). This option only applies when the tracking server is configured "
"to stream artifacts and the experiment's artifact root location is http or "
"mlflow-artifacts URI. Otherwise, the default artifact location will be used."
),
)
@click.option(
"--run-ids",
default=None,
help="Optional comma separated list of runs to be permanently deleted. If run ids"
" are not specified, data is removed for all runs in the `deleted`"
" lifecycle stage.",
)
@click.option(
"--experiment-ids",
default=None,
help="Optional comma separated list of experiments to be permanently deleted including "
"all of their associated runs. If experiment ids are not specified, data is removed for all "
"experiments in the `deleted` lifecycle stage.",
)
@click.option(
"--logged-model-ids",
default=None,
help="Optional comma separated list of logged model IDs to be permanently deleted."
" If logged model IDs are not specified, data is removed for all logged models in the `deleted`"
" lifecycle stage.",
)
@click.option(
"--tracking-uri",
default=os.environ.get("MLFLOW_TRACKING_URI"),
help="Tracking URI to use for deleting 'deleted' runs e.g. http://127.0.0.1:8080",
)
def gc(
older_than,
backend_store_uri,
artifacts_destination,
run_ids,
experiment_ids,
logged_model_ids,
tracking_uri,
):
"""
Permanently delete runs in the `deleted` lifecycle stage from the specified backend store.
This command deletes all artifacts and metadata associated with the specified runs.
If the provided artifact URL is invalid, the artifact deletion will be bypassed,
and the gc process will continue.
.. attention::
If you are running an MLflow tracking server with artifact proxying enabled,
you **must** set the ``MLFLOW_TRACKING_URI`` environment variable before running
this command. Otherwise, the ``gc`` command will not be able to resolve
artifact URIs and will not be able to delete the associated artifacts.
**What gets deleted:**
This command permanently removes:
- **Run metadata**: Parameters, metrics, tags, and all other run information from the
backend store
- **Artifacts**: All files stored in the run's artifact location (models, plots, data
files, etc.)
- **Experiment metadata**: When deleting experiments, removes the experiment record and
all associated data
.. note::
This command only considers lifecycle stage and the specified deletion criteria.
It does **not** check for pinned runs, registered models, or tags. Pinning is a
UI-only feature that has no effect on garbage collection. Runs must be in the
`deleted` lifecycle stage before they can be permanently deleted.
**Examples:**
.. code-block:: bash
# Delete all runs that have been in the deleted state for more than 30 days
mlflow gc --older-than 30d
# Delete specific runs by ID (they must be in deleted state)
mlflow gc --run-ids 'run1,run2,run3'
# Delete all runs in specific experiments (experiments must be in deleted state)
mlflow gc --experiment-ids 'exp1,exp2'
# Combine criteria: delete runs older than 7 days in specific experiments
mlflow gc --older-than 7d --experiment-ids 'exp1,exp2'
"""
from mlflow.utils.time import get_current_time_millis
backend_store = _get_store(backend_store_uri, artifacts_destination)
skip_experiments = False
skip_logged_models = False
if not hasattr(backend_store, "_hard_delete_run"):
raise MlflowException(
"This cli can only be used with a backend that allows hard-deleting runs"
)
if not hasattr(backend_store, "_hard_delete_experiment"):
warnings.warn(
"The specified backend does not allow hard-deleting experiments. Experiments"
" will be skipped.",
FutureWarning,
stacklevel=2,
)
skip_experiments = True
if not hasattr(backend_store, "_hard_delete_logged_model"):
warnings.warn(
"The specified backend does not allow hard-deleting logged models. Logged models"
" will be skipped.",
FutureWarning,
stacklevel=2,
)
skip_logged_models = True
time_delta = 0
if older_than is not None:
regex = re.compile(
r"^((?P<days>[\.\d]+?)d)?((?P<hours>[\.\d]+?)h)?((?P<minutes>[\.\d]+?)m)"
r"?((?P<seconds>[\.\d]+?)s)?$"
)
parts = regex.match(older_than)
if parts is None:
raise MlflowException(
f"Could not parse any time information from '{older_than}'. "
"Examples of valid strings: '8h', '2d8h5m20s', '2m4s'",
error_code=INVALID_PARAMETER_VALUE,
)
time_params = {name: float(param) for name, param in parts.groupdict().items() if param}
time_delta = int(timedelta(**time_params).total_seconds() * 1000)
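        # Illustrative: "--older-than 1d2h" matches as {"days": 1.0, "hours": 2.0},
        # i.e. timedelta(days=1, hours=2) -> 93_600_000 ms.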
if tracking_uri:
set_tracking_uri(tracking_uri)
if not is_tracking_uri_set():
raise MlflowException(
"Tracking URL is not set. Please set MLFLOW_TRACKING_URI environment variable "
"or provide --tracking-uri cli option."
)
deleted_run_ids_older_than = backend_store._get_deleted_runs(older_than=time_delta)
run_ids = run_ids.split(",") if run_ids else deleted_run_ids_older_than
deleted_logged_model_ids = (
backend_store._get_deleted_logged_models() if not skip_logged_models else []
)
deleted_logged_model_ids_older_than = (
backend_store._get_deleted_logged_models(older_than=time_delta)
if not skip_logged_models
else []
)
logged_model_ids = (
logged_model_ids.split(",") if logged_model_ids else deleted_logged_model_ids_older_than
)
time_threshold = get_current_time_millis() - time_delta
if not skip_experiments:
if experiment_ids:
experiment_ids = experiment_ids.split(",")
experiments = [backend_store.get_experiment(id) for id in experiment_ids]
# Ensure that the specified experiments are soft-deleted
active_experiment_ids = [
e.experiment_id for e in experiments if e.lifecycle_stage != LifecycleStage.DELETED
]
if active_experiment_ids:
raise MlflowException(
f"Experiments {active_experiment_ids} are not in the deleted lifecycle stage. "
"Only experiments in the deleted lifecycle stage can be hard-deleted.",
error_code=INVALID_PARAMETER_VALUE,
)
# Ensure that the specified experiments are old enough
if older_than:
non_old_experiment_ids = [
e.experiment_id
for e in experiments
if e.last_update_time is None or e.last_update_time >= time_threshold
]
if non_old_experiment_ids:
raise MlflowException(
f"Experiments {non_old_experiment_ids} are not older than the required"
f"age. Only experiments older than {older_than} can be deleted.",
error_code=INVALID_PARAMETER_VALUE,
)
else:
filter_string = f"last_update_time < {time_threshold}" if older_than else None
def fetch_experiments(token=None):
page = backend_store.search_experiments(
view_type=ViewType.DELETED_ONLY,
filter_string=filter_string,
page_token=token,
)
return (page + fetch_experiments(page.token)) if page.token else page
experiment_ids = [exp.experiment_id for exp in fetch_experiments()]
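        # Page through deleted runs in the selected experiments; successive pages are
        # concatenated recursively via "+" until no page_token remains.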
def fetch_runs(token=None):
page = backend_store.search_runs(
experiment_ids=experiment_ids,
filter_string="",
run_view_type=ViewType.DELETED_ONLY,
page_token=token,
)
return (page + fetch_runs(page.token)) if page.token else page
run_ids.extend([run.info.run_id for run in fetch_runs()])
for run_id in set(run_ids):
run = backend_store.get_run(run_id)
if run.info.lifecycle_stage != LifecycleStage.DELETED:
raise MlflowException(
f"Run {run_id} is not in `deleted` lifecycle stage. Only runs in"
" `deleted` lifecycle stage can be deleted."
)
# raise MlflowException if run_id is newer than older_than parameter
if older_than and run_id not in deleted_run_ids_older_than:
raise MlflowException(
f"Run {run_id} is not older than the required age. "
f"Only runs older than {older_than} can be deleted.",
error_code=INVALID_PARAMETER_VALUE,
)
artifact_repo = get_artifact_repository(run.info.artifact_uri)
try:
artifact_repo.delete_artifacts()
except InvalidUrlException as iue:
click.echo(
click.style(
f"An exception {iue!r} was raised during the deletion of a model artifact",
fg="yellow",
)
)
click.echo(
click.style(
f"Unable to resolve the provided artifact URL: '{artifact_repo}'. "
"The gc process will continue and bypass artifact deletion. "
"Please ensure that the artifact exists "
"and consider manually deleting any unused artifacts. ",
fg="yellow",
),
)
backend_store._hard_delete_run(run_id)
click.echo(f"Run with ID {run_id} has been permanently deleted.")
if not skip_logged_models:
for model_id in set(logged_model_ids):
if model_id not in deleted_logged_model_ids:
raise MlflowException(
f"Logged model {model_id} is not in `deleted` lifecycle stage. "
"Only logged models in `deleted` lifecycle stage can be deleted."
)
if older_than and model_id not in deleted_logged_model_ids_older_than:
raise MlflowException(
f"Logged model {model_id} is not older than the required age. "
f"Only logged models older than {older_than} can be deleted.",
error_code=INVALID_PARAMETER_VALUE,
)
logged_model = backend_store.get_logged_model(model_id, allow_deleted=True)
artifact_repo = get_artifact_repository(logged_model.artifact_location)
try:
artifact_repo.delete_artifacts()
except InvalidUrlException as iue:
click.echo(
click.style(
f"An exception {iue!r} was raised during the deletion of a model artifact",
fg="yellow",
)
)
click.echo(
click.style(
f"Unable to resolve the provided artifact URL: '{artifact_repo}'. "
"The gc process will continue and bypass artifact deletion. "
"Please ensure that the artifact exists "
"and consider manually deleting any unused artifacts. ",
fg="yellow",
),
)
backend_store._hard_delete_logged_model(model_id)
click.echo(f"Logged model with ID {model_id} has been permanently deleted.")
if not skip_experiments:
for experiment_id in experiment_ids:
backend_store._hard_delete_experiment(experiment_id)
click.echo(f"Experiment with ID {experiment_id} has been permanently deleted.")
@cli.command(short_help="Prints out useful information for debugging issues with MLflow.")
@click.option(
"--mask-envs",
is_flag=True,
help=(
"If set (the default behavior without setting this flag is not to obfuscate information), "
'mask the MLflow environment variable values (e.g. `"MLFLOW_ENV_VAR": "***"`) '
"in the output to prevent leaking sensitive information."
),
)
def doctor(mask_envs):
mlflow.doctor(mask_envs)
cli.add_command(mlflow.deployments.cli.commands)
cli.add_command(mlflow.experiments.commands)
cli.add_command(mlflow.store.artifact.cli.commands)
cli.add_command(mlflow.runs.commands)
cli.add_command(mlflow.db.commands)
# Add traces CLI commands
from mlflow.cli import traces
cli.add_command(traces.commands)
# Add scorers CLI commands
from mlflow.cli import scorers
cli.add_command(scorers.commands)
# Add AI commands CLI
cli.add_command(ai_commands.commands)
try:
from mlflow.mcp.cli import cli as mcp_cli
cli.add_command(mcp_cli)
except ImportError:
pass
# Add Claude Code integration commands
try:
import mlflow.claude_code.cli
cli.add_command(mlflow.claude_code.cli.commands)
except ImportError:
pass
# We are conditional loading these commands since the skinny client does
# not support them due to the pandas and numpy dependencies of MLflow Models
try:
import mlflow.models.cli
cli.add_command(mlflow.models.cli.commands)
except ImportError:
pass
try:
import mlflow.sagemaker.cli
cli.add_command(mlflow.sagemaker.cli.commands)
except ImportError:
pass
with contextlib.suppress(ImportError):
import mlflow.gateway.cli
cli.add_command(mlflow.gateway.cli.commands)
if __name__ == "__main__":
cli()
|
AliasedGroup
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/resources/beta/messages/messages.py
|
{
"start": 78307,
"end": 153637
}
|
class ____(AsyncAPIResource):
@cached_property
def batches(self) -> AsyncBatches:
return AsyncBatches(self._client)
@cached_property
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return AsyncMessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return AsyncMessagesWithStreamingResponse(self)
@overload
async def create(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[BetaJSONOutputFormatParam] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
stream: Literal[False] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
tools: Iterable[BetaToolUnionParam] | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BetaMessage:
"""
Send a structured list of input messages with text and/or image content, and the
model will generate the next message in the conversation.
The Messages API can be used for either single queries or stateless multi-turn
conversations.
Learn more about the Messages API in our
[user guide](https://docs.claude.com/en/docs/initial-setup)
Args:
max_tokens: The maximum number of tokens to generate before stopping.
Note that our models may stop _before_ reaching this maximum. This parameter
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
[models](https://docs.claude.com/en/docs/models-overview) for details.
messages: Input messages.
Our models are trained to operate on alternating `user` and `assistant`
conversational turns. When creating a new `Message`, you specify the prior
conversational turns with the `messages` parameter, and the model then generates
the next `Message` in the conversation. Consecutive `user` or `assistant` turns
in your request will be combined into a single turn.
Each input message must be an object with a `role` and `content`. You can
specify a single `user`-role message, or you can include multiple `user` and
`assistant` messages.
If the final message uses the `assistant` role, the response content will
continue immediately from the content in that message. This can be used to
constrain part of the model's response.
Example with a single `user` message:
```json
[{ "role": "user", "content": "Hello, Claude" }]
```
Example with multiple conversational turns:
```json
[
{ "role": "user", "content": "Hello there." },
{ "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
{ "role": "user", "content": "Can you explain LLMs in plain English?" }
]
```
Example with a partially-filled response from Claude:
```json
[
{
"role": "user",
"content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
},
{ "role": "assistant", "content": "The best answer is (" }
]
```
Each input message `content` may be either a single `string` or an array of
content blocks, where each block has a specific `type`. Using a `string` for
`content` is shorthand for an array of one content block of type `"text"`. The
following input messages are equivalent:
```json
{ "role": "user", "content": "Hello, Claude" }
```
```json
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
See [input examples](https://docs.claude.com/en/api/messages-examples).
Note that if you want to include a
[system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
top-level `system` parameter — there is no `"system"` role for input messages in
the Messages API.
There is a limit of 100,000 messages in a single request.
model: The model that will complete your prompt.\n\nSee
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
details and options.
container: Container identifier for reuse across requests.
context_management: Context management configuration.
This allows you to control how Claude manages context across multiple requests,
such as whether to clear function results or not.
mcp_servers: MCP servers to be utilized in this request
metadata: An object describing metadata about the request.
output_config: Configuration options for the model's output. Controls aspects like how much
effort the model puts into its response.
output_format: A schema to specify Claude's output format in responses.
service_tier: Determines whether to use priority capacity (if available) or standard capacity
for this request.
Anthropic offers different levels of service for your API requests. See
[service-tiers](https://docs.claude.com/en/api/service-tiers) for details.
stop_sequences: Custom text sequences that will cause the model to stop generating.
Our models will normally stop when they have naturally completed their turn,
which will result in a response `stop_reason` of `"end_turn"`.
If you want the model to stop generating when it encounters custom strings of
text, you can use the `stop_sequences` parameter. If the model encounters one of
the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
and the response `stop_sequence` value will contain the matched stop sequence.
stream: Whether to incrementally stream the response using server-sent events.
See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.
system: System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
[guide to system prompts](https://docs.claude.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
for analytical / multiple choice, and closer to `1.0` for creative and
generative tasks.
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
thinking: Configuration for enabling Claude's extended thinking.
When enabled, responses include `thinking` content blocks showing Claude's
thinking process before the final answer. Requires a minimum budget of 1,024
tokens and counts towards your `max_tokens` limit.
See
[extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
for details.
tool_choice: How the model should use the provided tools. The model can use a specific tool,
any available tool, decide by itself, or not use tools at all.
tools: Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
content blocks that represent the model's use of those tools. You can then run
those tools using the tool input generated by the model and then optionally
return results back to the model using `tool_result` content blocks.
There are two types of tools: **client tools** and **server tools**. The
behavior described below applies to client tools. For
[server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
see their individual documentation as each has its own behavior (e.g., the
[web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
Each tool definition includes:
- `name`: Name of the tool.
- `description`: Optional, but strongly-recommended description of the tool.
- `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the
tool `input` shape that the model will produce in `tool_use` output content
blocks.
For example, if you defined `tools` as:
```json
[
{
"name": "get_stock_price",
"description": "Get the current stock price for a given ticker symbol.",
"input_schema": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
}
},
"required": ["ticker"]
}
}
]
```
And then asked the model "What's the S&P 500 at today?", the model might produce
`tool_use` content blocks in the response like this:
```json
[
{
"type": "tool_use",
"id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"name": "get_stock_price",
"input": { "ticker": "^GSPC" }
}
]
```
You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
input, and return the following back to the model in a subsequent `user`
message:
```json
[
{
"type": "tool_result",
"tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"content": "259.75 USD"
}
]
```
Tools can be used for workflows that include running client-side tools and
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
[Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
Recommended for advanced use cases only. You usually only need to use
`temperature`.
top_p: Use nucleus sampling.
In nucleus sampling, we compute the cumulative distribution over all the options
for each subsequent token in decreasing probability order and cut it off once it
reaches a particular probability specified by `top_p`. You should either alter
`temperature` or `top_p`, but not both.
Recommended for advanced use cases only. You usually only need to use
`temperature`.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def create(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
stream: Literal[True],
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[BetaJSONOutputFormatParam] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
tools: Iterable[BetaToolUnionParam] | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncStream[BetaRawMessageStreamEvent]:
"""
Send a structured list of input messages with text and/or image content, and the
model will generate the next message in the conversation.
The Messages API can be used for either single queries or stateless multi-turn
conversations.
Learn more about the Messages API in our
[user guide](https://docs.claude.com/en/docs/initial-setup)
Args:
max_tokens: The maximum number of tokens to generate before stopping.
Note that our models may stop _before_ reaching this maximum. This parameter
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
[models](https://docs.claude.com/en/docs/models-overview) for details.
messages: Input messages.
Our models are trained to operate on alternating `user` and `assistant`
conversational turns. When creating a new `Message`, you specify the prior
conversational turns with the `messages` parameter, and the model then generates
the next `Message` in the conversation. Consecutive `user` or `assistant` turns
in your request will be combined into a single turn.
Each input message must be an object with a `role` and `content`. You can
specify a single `user`-role message, or you can include multiple `user` and
`assistant` messages.
If the final message uses the `assistant` role, the response content will
continue immediately from the content in that message. This can be used to
constrain part of the model's response.
Example with a single `user` message:
```json
[{ "role": "user", "content": "Hello, Claude" }]
```
Example with multiple conversational turns:
```json
[
{ "role": "user", "content": "Hello there." },
{ "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
{ "role": "user", "content": "Can you explain LLMs in plain English?" }
]
```
Example with a partially-filled response from Claude:
```json
[
{
"role": "user",
"content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
},
{ "role": "assistant", "content": "The best answer is (" }
]
```
Each input message `content` may be either a single `string` or an array of
content blocks, where each block has a specific `type`. Using a `string` for
`content` is shorthand for an array of one content block of type `"text"`. The
following input messages are equivalent:
```json
{ "role": "user", "content": "Hello, Claude" }
```
```json
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
See [input examples](https://docs.claude.com/en/api/messages-examples).
Note that if you want to include a
[system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
top-level `system` parameter — there is no `"system"` role for input messages in
the Messages API.
There is a limit of 100,000 messages in a single request.
model: The model that will complete your prompt.\n\nSee
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
details and options.
stream: Whether to incrementally stream the response using server-sent events.
See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.
container: Container identifier for reuse across requests.
context_management: Context management configuration.
This allows you to control how Claude manages context across multiple requests,
such as whether to clear function results or not.
mcp_servers: MCP servers to be utilized in this request
metadata: An object describing metadata about the request.
output_config: Configuration options for the model's output. Controls aspects like how much
effort the model puts into its response.
output_format: A schema to specify Claude's output format in responses.
service_tier: Determines whether to use priority capacity (if available) or standard capacity
for this request.
Anthropic offers different levels of service for your API requests. See
[service-tiers](https://docs.claude.com/en/api/service-tiers) for details.
stop_sequences: Custom text sequences that will cause the model to stop generating.
Our models will normally stop when they have naturally completed their turn,
which will result in a response `stop_reason` of `"end_turn"`.
If you want the model to stop generating when it encounters custom strings of
text, you can use the `stop_sequences` parameter. If the model encounters one of
the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
and the response `stop_sequence` value will contain the matched stop sequence.
system: System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
[guide to system prompts](https://docs.claude.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
for analytical / multiple choice, and closer to `1.0` for creative and
generative tasks.
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
thinking: Configuration for enabling Claude's extended thinking.
When enabled, responses include `thinking` content blocks showing Claude's
thinking process before the final answer. Requires a minimum budget of 1,024
tokens and counts towards your `max_tokens` limit.
See
[extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
for details.
tool_choice: How the model should use the provided tools. The model can use a specific tool,
any available tool, decide by itself, or not use tools at all.
tools: Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
content blocks that represent the model's use of those tools. You can then run
those tools using the tool input generated by the model and then optionally
return results back to the model using `tool_result` content blocks.
There are two types of tools: **client tools** and **server tools**. The
behavior described below applies to client tools. For
[server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
see their individual documentation as each has its own behavior (e.g., the
[web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
Each tool definition includes:
- `name`: Name of the tool.
- `description`: Optional, but strongly-recommended description of the tool.
- `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the
tool `input` shape that the model will produce in `tool_use` output content
blocks.
For example, if you defined `tools` as:
```json
[
{
"name": "get_stock_price",
"description": "Get the current stock price for a given ticker symbol.",
"input_schema": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
}
},
"required": ["ticker"]
}
}
]
```
And then asked the model "What's the S&P 500 at today?", the model might produce
`tool_use` content blocks in the response like this:
```json
[
{
"type": "tool_use",
"id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"name": "get_stock_price",
"input": { "ticker": "^GSPC" }
}
]
```
You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
input, and return the following back to the model in a subsequent `user`
message:
```json
[
{
"type": "tool_result",
"tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"content": "259.75 USD"
}
]
```
Tools can be used for workflows that include running client-side tools and
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
[Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
Recommended for advanced use cases only. You usually only need to use
`temperature`.
top_p: Use nucleus sampling.
In nucleus sampling, we compute the cumulative distribution over all the options
for each subsequent token in decreasing probability order and cut it off once it
reaches a particular probability specified by `top_p`. You should either alter
`temperature` or `top_p`, but not both.
Recommended for advanced use cases only. You usually only need to use
`temperature`.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def create(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
stream: bool,
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[BetaJSONOutputFormatParam] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
tools: Iterable[BetaToolUnionParam] | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BetaMessage | AsyncStream[BetaRawMessageStreamEvent]:
"""
Send a structured list of input messages with text and/or image content, and the
model will generate the next message in the conversation.
The Messages API can be used for either single queries or stateless multi-turn
conversations.
Learn more about the Messages API in our
[user guide](https://docs.claude.com/en/docs/initial-setup)
Args:
max_tokens: The maximum number of tokens to generate before stopping.
Note that our models may stop _before_ reaching this maximum. This parameter
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
[models](https://docs.claude.com/en/docs/models-overview) for details.
messages: Input messages.
Our models are trained to operate on alternating `user` and `assistant`
conversational turns. When creating a new `Message`, you specify the prior
conversational turns with the `messages` parameter, and the model then generates
the next `Message` in the conversation. Consecutive `user` or `assistant` turns
in your request will be combined into a single turn.
Each input message must be an object with a `role` and `content`. You can
specify a single `user`-role message, or you can include multiple `user` and
`assistant` messages.
If the final message uses the `assistant` role, the response content will
continue immediately from the content in that message. This can be used to
constrain part of the model's response.
Example with a single `user` message:
```json
[{ "role": "user", "content": "Hello, Claude" }]
```
Example with multiple conversational turns:
```json
[
{ "role": "user", "content": "Hello there." },
{ "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
{ "role": "user", "content": "Can you explain LLMs in plain English?" }
]
```
Example with a partially-filled response from Claude:
```json
[
{
"role": "user",
"content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
},
{ "role": "assistant", "content": "The best answer is (" }
]
```
Each input message `content` may be either a single `string` or an array of
content blocks, where each block has a specific `type`. Using a `string` for
`content` is shorthand for an array of one content block of type `"text"`. The
following input messages are equivalent:
```json
{ "role": "user", "content": "Hello, Claude" }
```
```json
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
See [input examples](https://docs.claude.com/en/api/messages-examples).
Note that if you want to include a
[system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
top-level `system` parameter — there is no `"system"` role for input messages in
the Messages API.
There is a limit of 100,000 messages in a single request.
model: The model that will complete your prompt.\n\nSee
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
details and options.
stream: Whether to incrementally stream the response using server-sent events.
See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.
container: Container identifier for reuse across requests.
context_management: Context management configuration.
This allows you to control how Claude manages context across multiple requests,
such as whether to clear function results or not.
mcp_servers: MCP servers to be utilized in this request
metadata: An object describing metadata about the request.
output_config: Configuration options for the model's output. Controls aspects like how much
effort the model puts into its response.
output_format: A schema to specify Claude's output format in responses.
service_tier: Determines whether to use priority capacity (if available) or standard capacity
for this request.
Anthropic offers different levels of service for your API requests. See
[service-tiers](https://docs.claude.com/en/api/service-tiers) for details.
stop_sequences: Custom text sequences that will cause the model to stop generating.
Our models will normally stop when they have naturally completed their turn,
which will result in a response `stop_reason` of `"end_turn"`.
If you want the model to stop generating when it encounters custom strings of
text, you can use the `stop_sequences` parameter. If the model encounters one of
the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
and the response `stop_sequence` value will contain the matched stop sequence.
system: System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
[guide to system prompts](https://docs.claude.com/en/docs/system-prompts).
temperature: Amount of randomness injected into the response.
Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
for analytical / multiple choice, and closer to `1.0` for creative and
generative tasks.
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
thinking: Configuration for enabling Claude's extended thinking.
When enabled, responses include `thinking` content blocks showing Claude's
thinking process before the final answer. Requires a minimum budget of 1,024
tokens and counts towards your `max_tokens` limit.
See
[extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
for details.
tool_choice: How the model should use the provided tools. The model can use a specific tool,
any available tool, decide by itself, or not use tools at all.
tools: Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
content blocks that represent the model's use of those tools. You can then run
those tools using the tool input generated by the model and then optionally
return results back to the model using `tool_result` content blocks.
There are two types of tools: **client tools** and **server tools**. The
behavior described below applies to client tools. For
[server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
see their individual documentation as each has its own behavior (e.g., the
[web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
Each tool definition includes:
- `name`: Name of the tool.
- `description`: Optional, but strongly-recommended description of the tool.
- `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the
tool `input` shape that the model will produce in `tool_use` output content
blocks.
For example, if you defined `tools` as:
```json
[
{
"name": "get_stock_price",
"description": "Get the current stock price for a given ticker symbol.",
"input_schema": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
}
},
"required": ["ticker"]
}
}
]
```
And then asked the model "What's the S&P 500 at today?", the model might produce
`tool_use` content blocks in the response like this:
```json
[
{
"type": "tool_use",
"id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"name": "get_stock_price",
"input": { "ticker": "^GSPC" }
}
]
```
You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
input, and return the following back to the model in a subsequent `user`
message:
```json
[
{
"type": "tool_result",
"tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"content": "259.75 USD"
}
]
```
Tools can be used for workflows that include running client-side tools and
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
[Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
Recommended for advanced use cases only. You usually only need to use
`temperature`.
top_p: Use nucleus sampling.
In nucleus sampling, we compute the cumulative distribution over all the options
for each subsequent token in decreasing probability order and cut it off once it
reaches a particular probability specified by `top_p`. You should either alter
`temperature` or `top_p`, but not both.
Recommended for advanced use cases only. You usually only need to use
`temperature`.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@required_args(["max_tokens", "messages", "model"], ["max_tokens", "messages", "model", "stream"])
async def create(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[BetaJSONOutputFormatParam] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
stream: Literal[False] | Literal[True] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
tools: Iterable[BetaToolUnionParam] | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BetaMessage | AsyncStream[BetaRawMessageStreamEvent]:
validate_output_format(output_format)
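        # When no explicit timeout is given and the client still uses the default,
        # derive a non-streaming timeout from max_tokens and the model's
        # non-streaming token cap (if known).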
if not stream and not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
timeout = self._client._calculate_nonstreaming_timeout(
max_tokens, MODEL_NONSTREAMING_TOKENS.get(model, None)
)
if model in DEPRECATED_MODELS:
warnings.warn(
f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
DeprecationWarning,
stacklevel=3,
)
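        # Requested betas are joined into a single comma-separated `anthropic-beta`
        # header; caller-supplied extra_headers are merged last and take precedence.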
extra_headers = {
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
**(extra_headers or {}),
}
return await self._post(
"/v1/messages?beta=true",
body=await async_maybe_transform(
{
"max_tokens": max_tokens,
"messages": messages,
"model": model,
"container": container,
"context_management": context_management,
"mcp_servers": mcp_servers,
"metadata": metadata,
"output_config": output_config,
"output_format": output_format,
"service_tier": service_tier,
"stop_sequences": stop_sequences,
"stream": stream,
"system": system,
"temperature": temperature,
"thinking": thinking,
"tool_choice": tool_choice,
"tools": tools,
"top_k": top_k,
"top_p": top_p,
},
message_create_params.MessageCreateParamsStreaming
if stream
else message_create_params.MessageCreateParamsNonStreaming,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=BetaMessage,
stream=stream or False,
stream_cls=AsyncStream[BetaRawMessageStreamEvent],
)
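    # Illustrative usage (model id is an example):
    #   message = await client.beta.messages.create(
    #       model="claude-sonnet-4-5",
    #       max_tokens=1024,
    #       messages=[{"role": "user", "content": "Hello, Claude"}],
    #   )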
async def parse(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[type[ResponseFormatT]] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
stream: Literal[False] | Literal[True] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
tools: Iterable[BetaToolUnionParam] | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ParsedBetaMessage[ResponseFormatT]:
if not stream and not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
timeout = self._client._calculate_nonstreaming_timeout(
max_tokens, MODEL_NONSTREAMING_TOKENS.get(model, None)
)
if model in DEPRECATED_MODELS:
warnings.warn(
f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
DeprecationWarning,
stacklevel=3,
)
betas = [beta for beta in betas] if is_given(betas) else []
if "structured-outputs-2025-11-13" not in betas:
# Ensure structured outputs beta is included for parse method
betas.append("structured-outputs-2025-11-13")
extra_headers = {
"X-Stainless-Helper": "beta.messages.parse",
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else NOT_GIVEN}),
**(extra_headers or {}),
}
transformed_output_format: Optional[message_create_params.OutputFormat] | NotGiven = NOT_GIVEN
if is_given(output_format) and output_format is not None:
adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
try:
schema = adapted_type.json_schema()
transformed_output_format = message_create_params.OutputFormat(
schema=transform_schema(schema), type="json_schema"
)
except pydantic.errors.PydanticSchemaGenerationError as e:
raise TypeError(
(
"Could not generate JSON schema for the given `output_format` type. "
"Use a type that works with `pydanitc.TypeAdapter`"
)
) from e
def parser(response: BetaMessage) -> ParsedBetaMessage[ResponseFormatT]:
return parse_response(
response=response,
output_format=cast(
ResponseFormatT,
output_format if is_given(output_format) and output_format is not None else NOT_GIVEN,
),
)
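        # `parser` runs as the post_parser: it converts the raw BetaMessage into a
        # ParsedBetaMessage, using `output_format` when one was given.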
return await self._post(
"/v1/messages?beta=true",
body=maybe_transform(
{
"max_tokens": max_tokens,
"messages": messages,
"model": model,
"container": container,
"context_management": context_management,
"mcp_servers": mcp_servers,
"output_config": output_config,
"metadata": metadata,
"output_format": transformed_output_format,
"service_tier": service_tier,
"stop_sequences": stop_sequences,
"stream": stream,
"system": system,
"temperature": temperature,
"thinking": thinking,
"tool_choice": tool_choice,
"tools": tools,
"top_k": top_k,
"top_p": top_p,
},
message_create_params.MessageCreateParamsNonStreaming,
),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
),
cast_to=cast(Type[ParsedBetaMessage[ResponseFormatT]], BetaMessage),
stream=False,
)
@overload
def tool_runner(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
tools: Iterable[BetaAsyncRunnableTool],
compaction_control: CompactionControl | Omit = omit,
max_iterations: int | Omit = omit,
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[type[ResponseFormatT]] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
stream: Literal[False] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> BetaAsyncToolRunner[ResponseFormatT]: ...
@overload
def tool_runner(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
tools: Iterable[BetaAsyncRunnableTool],
compaction_control: CompactionControl | Omit = omit,
stream: Literal[True],
max_iterations: int | Omit = omit,
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[type[ResponseFormatT]] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> BetaAsyncStreamingToolRunner[ResponseFormatT]: ...
@overload
def tool_runner(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
tools: Iterable[BetaAsyncRunnableTool],
compaction_control: CompactionControl | Omit = omit,
stream: bool,
max_iterations: int | Omit = omit,
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[type[ResponseFormatT]] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> BetaAsyncStreamingToolRunner[ResponseFormatT] | BetaAsyncToolRunner[ResponseFormatT]: ...
def tool_runner(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
tools: Iterable[BetaAsyncRunnableTool],
compaction_control: CompactionControl | Omit = omit,
max_iterations: int | Omit = omit,
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[type[ResponseFormatT]] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
stream: Literal[True] | Literal[False] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> BetaAsyncToolRunner[ResponseFormatT] | BetaAsyncStreamingToolRunner[ResponseFormatT]:
"""Create a Message stream"""
if model in DEPRECATED_MODELS:
warnings.warn(
f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
DeprecationWarning,
stacklevel=3,
)
extra_headers = {
"X-Stainless-Helper": "BetaToolRunner",
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else NOT_GIVEN}),
**(extra_headers or {}),
}
params = cast(
message_create_params.ParseMessageCreateParamsBase[ResponseFormatT],
{
"max_tokens": max_tokens,
"messages": messages,
"model": model,
"container": container,
"context_management": context_management,
"mcp_servers": mcp_servers,
"metadata": metadata,
"output_config": output_config,
"output_format": output_format,
"service_tier": service_tier,
"stop_sequences": stop_sequences,
"system": system,
"temperature": temperature,
"thinking": thinking,
"tool_choice": tool_choice,
"tools": [tool.to_dict() for tool in tools],
"top_k": top_k,
"top_p": top_p,
},
)
if stream:
return BetaAsyncStreamingToolRunner[ResponseFormatT](
tools=tools,
params=params,
options={
"extra_headers": extra_headers,
"extra_query": extra_query,
"extra_body": extra_body,
"timeout": timeout,
},
client=cast("AsyncAnthropic", self._client),
max_iterations=max_iterations if is_given(max_iterations) else None,
compaction_control=compaction_control if is_given(compaction_control) else None,
)
return BetaAsyncToolRunner[ResponseFormatT](
tools=tools,
params=params,
options={
"extra_headers": extra_headers,
"extra_query": extra_query,
"extra_body": extra_body,
"timeout": timeout,
},
client=cast("AsyncAnthropic", self._client),
max_iterations=max_iterations if is_given(max_iterations) else None,
compaction_control=compaction_control if is_given(compaction_control) else None,
)
def stream(
self,
*,
max_tokens: int,
messages: Iterable[BetaMessageParam],
model: ModelParam,
metadata: BetaMetadataParam | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[type[ResponseFormatT]] | Omit = omit,
container: Optional[message_create_params.Container] | Omit = omit,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
service_tier: Literal["auto", "standard_only"] | Omit = omit,
stop_sequences: SequenceNotStr[str] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
temperature: float | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
tools: Iterable[BetaToolUnionParam] | Omit = omit,
top_k: int | Omit = omit,
top_p: float | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> BetaAsyncMessageStreamManager[ResponseFormatT]:
if model in DEPRECATED_MODELS:
warnings.warn(
f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
DeprecationWarning,
stacklevel=3,
)
extra_headers = {
"X-Stainless-Helper-Method": "stream",
"X-Stainless-Stream-Helper": "beta.messages",
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else NOT_GIVEN}),
**(extra_headers or {}),
}
transformed_output_format: Optional[message_create_params.OutputFormat] | NotGiven = NOT_GIVEN
if is_given(output_format) and output_format is not None:
adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
try:
schema = adapted_type.json_schema()
transformed_output_format = message_create_params.OutputFormat(
schema=transform_schema(schema), type="json_schema"
)
except pydantic.errors.PydanticSchemaGenerationError as e:
raise TypeError(
(
"Could not generate JSON schema for the given `output_format` type. "
"Use a type that works with `pydanitc.TypeAdapter`"
)
) from e
request = self._post(
"/v1/messages",
body=maybe_transform(
{
"max_tokens": max_tokens,
"messages": messages,
"model": model,
"metadata": metadata,
"output_config": output_config,
"output_format": transformed_output_format,
"container": container,
"context_management": context_management,
"mcp_servers": mcp_servers,
"service_tier": service_tier,
"stop_sequences": stop_sequences,
"system": system,
"temperature": temperature,
"thinking": thinking,
"top_k": top_k,
"top_p": top_p,
"tools": tools,
"tool_choice": tool_choice,
"stream": True,
},
message_create_params.MessageCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=BetaMessage,
stream=True,
stream_cls=AsyncStream[BetaRawMessageStreamEvent],
)
return BetaAsyncMessageStreamManager(request, output_format=cast(ResponseFormatT, output_format))
async def count_tokens(
self,
*,
messages: Iterable[BetaMessageParam],
model: ModelParam,
context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
output_config: BetaOutputConfigParam | Omit = omit,
output_format: Optional[BetaJSONOutputFormatParam] | Omit = omit,
system: Union[str, Iterable[BetaTextBlockParam]] | Omit = omit,
thinking: BetaThinkingConfigParam | Omit = omit,
tool_choice: BetaToolChoiceParam | Omit = omit,
tools: Iterable[message_count_tokens_params.Tool] | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BetaMessageTokensCount:
"""
Count the number of tokens in a Message.
The Token Count API can be used to count the number of tokens in a Message,
including tools, images, and documents, without creating it.
Learn more about token counting in our
[user guide](https://docs.claude.com/en/docs/build-with-claude/token-counting)
Args:
messages: Input messages.
Our models are trained to operate on alternating `user` and `assistant`
conversational turns. When creating a new `Message`, you specify the prior
conversational turns with the `messages` parameter, and the model then generates
the next `Message` in the conversation. Consecutive `user` or `assistant` turns
in your request will be combined into a single turn.
Each input message must be an object with a `role` and `content`. You can
specify a single `user`-role message, or you can include multiple `user` and
`assistant` messages.
If the final message uses the `assistant` role, the response content will
continue immediately from the content in that message. This can be used to
constrain part of the model's response.
Example with a single `user` message:
```json
[{ "role": "user", "content": "Hello, Claude" }]
```
Example with multiple conversational turns:
```json
[
{ "role": "user", "content": "Hello there." },
{ "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
{ "role": "user", "content": "Can you explain LLMs in plain English?" }
]
```
Example with a partially-filled response from Claude:
```json
[
{
"role": "user",
"content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
},
{ "role": "assistant", "content": "The best answer is (" }
]
```
Each input message `content` may be either a single `string` or an array of
content blocks, where each block has a specific `type`. Using a `string` for
`content` is shorthand for an array of one content block of type `"text"`. The
following input messages are equivalent:
```json
{ "role": "user", "content": "Hello, Claude" }
```
```json
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
See [input examples](https://docs.claude.com/en/api/messages-examples).
Note that if you want to include a
[system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
top-level `system` parameter — there is no `"system"` role for input messages in
the Messages API.
There is a limit of 100,000 messages in a single request.
model: The model that will complete your prompt.\n\nSee
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
details and options.
context_management: Context management configuration.
This allows you to control how Claude manages context across multiple requests,
such as whether to clear function results or not.
mcp_servers: MCP servers to be utilized in this request
output_config: Configuration options for the model's output. Controls aspects like how much
effort the model puts into its response.
output_format: A schema to specify Claude's output format in responses.
system: System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
[guide to system prompts](https://docs.claude.com/en/docs/system-prompts).
thinking: Configuration for enabling Claude's extended thinking.
When enabled, responses include `thinking` content blocks showing Claude's
thinking process before the final answer. Requires a minimum budget of 1,024
tokens and counts towards your `max_tokens` limit.
See
[extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
for details.
tool_choice: How the model should use the provided tools. The model can use a specific tool,
any available tool, decide by itself, or not use tools at all.
tools: Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
content blocks that represent the model's use of those tools. You can then run
those tools using the tool input generated by the model and then optionally
return results back to the model using `tool_result` content blocks.
There are two types of tools: **client tools** and **server tools**. The
behavior described below applies to client tools. For
[server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
see their individual documentation as each has its own behavior (e.g., the
[web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
Each tool definition includes:
- `name`: Name of the tool.
- `description`: Optional, but strongly-recommended description of the tool.
- `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the
tool `input` shape that the model will produce in `tool_use` output content
blocks.
For example, if you defined `tools` as:
```json
[
{
"name": "get_stock_price",
"description": "Get the current stock price for a given ticker symbol.",
"input_schema": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
}
},
"required": ["ticker"]
}
}
]
```
And then asked the model "What's the S&P 500 at today?", the model might produce
`tool_use` content blocks in the response like this:
```json
[
{
"type": "tool_use",
"id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"name": "get_stock_price",
"input": { "ticker": "^GSPC" }
}
]
```
You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
input, and return the following back to the model in a subsequent `user`
message:
```json
[
{
"type": "tool_result",
"tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"content": "259.75 USD"
}
]
```
Tools can be used for workflows that include running client-side tools and
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["token-counting-2024-11-01"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "token-counting-2024-11-01", **(extra_headers or {})}
return await self._post(
"/v1/messages/count_tokens?beta=true",
body=await async_maybe_transform(
{
"messages": messages,
"model": model,
"context_management": context_management,
"mcp_servers": mcp_servers,
"output_config": output_config,
"output_format": output_format,
"system": system,
"thinking": thinking,
"tool_choice": tool_choice,
"tools": tools,
},
message_count_tokens_params.MessageCountTokensParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=BetaMessageTokensCount,
)
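# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the SDK source above).
# It shows how the async `count_tokens` helper defined above is typically
# invoked, and how an `output_format` type is reduced to a JSON schema the way
# `stream` does it. The model id is a placeholder and a valid ANTHROPIC_API_KEY
# is assumed to be available in the environment.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio
    from anthropic import AsyncAnthropic
    from pydantic import BaseModel, TypeAdapter
    class _Answer(BaseModel):  # hypothetical structured-output type
        city: str
        population: int
    # `stream` reduces `output_format=_Answer` to a JSON schema roughly like so:
    print(sorted(TypeAdapter(_Answer).json_schema()["properties"]))  # ['city', 'population']
    async def _count_tokens_example() -> None:
        client = AsyncAnthropic()  # reads ANTHROPIC_API_KEY from the environment
        count = await client.beta.messages.count_tokens(
            model="claude-sonnet-4-20250514",  # placeholder model id
            messages=[{"role": "user", "content": "Hello, Claude"}],
        )
        print(count.input_tokens)
    asyncio.run(_count_tokens_example())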
|
AsyncMessages
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/parallel_for/control_flow_ops_test.py
|
{
"start": 75041,
"end": 75777
}
|
class ____(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_dynamic_rnn(self):
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell, 3, 5,
7)
self.run_and_assert_equal(pfor_outputs, tf_outputs)
@test_util.run_v1_only("b/122612051")
def test_dynamic_lstm(self):
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicLSTMCell, 3, 5,
7)
self.run_and_assert_equal(pfor_outputs, tf_outputs)
# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
# conversion don't look good. Some of it seems like a lot of copies between host
# and device. Optimize that.
|
RNNTest
|
python
|
django__django
|
tests/admin_scripts/tests.py
|
{
"start": 96520,
"end": 115353
}
|
class ____(LiveServerTestCase, AdminScriptTestCase):
available_apps = [
"admin_scripts",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
]
def test_wrong_args(self):
"""
Passing the wrong kinds of arguments outputs an error and prints usage.
"""
out, err = self.run_django_admin(["startproject"])
self.assertNoOutput(out)
self.assertOutput(err, "usage:")
self.assertOutput(err, "You must provide a project name.")
def test_simple_project(self):
"Make sure the startproject management command creates a project"
args = ["startproject", "testproject"]
testproject_dir = os.path.join(self.test_dir, "testproject")
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
# running again..
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(
err,
"CommandError: 'testproject' conflicts with the name of an "
"existing Python module and cannot be used as a project name. "
"Please try another name.",
)
def test_invalid_project_name(self):
"""
Make sure the startproject management command validates a project name
"""
for bad_name in ("7testproject", "../testproject"):
with self.subTest(project_name=bad_name):
args = ["startproject", bad_name]
testproject_dir = os.path.join(self.test_dir, bad_name)
out, err = self.run_django_admin(args)
self.assertOutput(
err,
"Error: '%s' is not a valid project name. Please make "
"sure the name is a valid identifier." % bad_name,
)
self.assertFalse(os.path.exists(testproject_dir))
def test_importable_project_name(self):
"""
startproject validates that project name doesn't clash with existing
Python modules.
"""
bad_name = "os"
args = ["startproject", bad_name]
testproject_dir = os.path.join(self.test_dir, bad_name)
out, err = self.run_django_admin(args)
self.assertOutput(
err,
"CommandError: 'os' conflicts with the name of an existing "
"Python module and cannot be used as a project name. Please try "
"another name.",
)
self.assertFalse(os.path.exists(testproject_dir))
def test_command_does_not_import(self):
"""
startproject doesn't import modules (and cannot be fooled by a module
raising ImportError).
"""
bad_name = "raises_import_error"
args = ["startproject", bad_name]
testproject_dir = os.path.join(self.test_dir, bad_name)
with open(os.path.join(self.test_dir, "raises_import_error.py"), "w") as f:
f.write("raise ImportError")
out, err = self.run_django_admin(args)
self.assertOutput(
err,
"CommandError: 'raises_import_error' conflicts with the name of an "
"existing Python module and cannot be used as a project name. Please try "
"another name.",
)
self.assertNoOutput(out)
self.assertFalse(os.path.exists(testproject_dir))
def test_simple_project_different_directory(self):
"""
The startproject management command creates a project in a specific
directory.
"""
args = ["startproject", "testproject", "othertestproject"]
testproject_dir = os.path.join(self.test_dir, "othertestproject")
os.mkdir(testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(os.path.join(testproject_dir, "manage.py")))
# running again..
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(
err,
"already exists. Overlaying a project into an existing directory "
"won't replace conflicting files.",
)
def test_custom_project_template(self):
"""
The startproject management command is able to use a different project
template.
"""
template_path = os.path.join(custom_templates_dir, "project_template")
args = ["startproject", "--template", template_path, "customtestproject"]
testproject_dir = os.path.join(self.test_dir, "customtestproject")
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, "additional_dir")))
def test_custom_project_template_non_python_files_not_formatted(self):
template_path = os.path.join(custom_templates_dir, "project_template")
args = ["startproject", "--template", template_path, "customtestproject"]
testproject_dir = os.path.join(self.test_dir, "customtestproject")
_, err = self.run_django_admin(args)
self.assertNoOutput(err)
with open(
os.path.join(template_path, "additional_dir", "requirements.in")
) as f:
expected = f.read()
with open(
os.path.join(testproject_dir, "additional_dir", "requirements.in")
) as f:
result = f.read()
self.assertEqual(expected, result)
def test_template_dir_with_trailing_slash(self):
"Ticket 17475: Template dir passed has a trailing path separator"
template_path = os.path.join(custom_templates_dir, "project_template" + os.sep)
args = ["startproject", "--template", template_path, "customtestproject"]
testproject_dir = os.path.join(self.test_dir, "customtestproject")
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, "additional_dir")))
def test_custom_project_template_from_tarball_by_path(self):
"""
The startproject management command is able to use a different project
template from a tarball.
"""
template_path = os.path.join(custom_templates_dir, "project_template.tgz")
args = ["startproject", "--template", template_path, "tarballtestproject"]
testproject_dir = os.path.join(self.test_dir, "tarballtestproject")
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, "run.py")))
def test_custom_project_template_from_tarball_to_alternative_location(self):
"""
Startproject can use a project template from a tarball and create it in
a specified location.
"""
template_path = os.path.join(custom_templates_dir, "project_template.tgz")
args = [
"startproject",
"--template",
template_path,
"tarballtestproject",
"altlocation",
]
testproject_dir = os.path.join(self.test_dir, "altlocation")
os.mkdir(testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, "run.py")))
def test_custom_project_template_from_tarball_by_url(self):
"""
The startproject management command is able to use a different project
template from a tarball via a URL.
"""
template_url = "%s/custom_templates/project_template.tgz" % self.live_server_url
args = ["startproject", "--template", template_url, "urltestproject"]
testproject_dir = os.path.join(self.test_dir, "urltestproject")
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, "run.py")))
def test_custom_project_template_from_tarball_by_url_django_user_agent(self):
user_agent = None
def serve_template(request, *args, **kwargs):
nonlocal user_agent
user_agent = request.headers["User-Agent"]
return serve(request, *args, **kwargs)
old_urlpatterns = urls.urlpatterns[:]
try:
urls.urlpatterns += [
path(
"user_agent_check/<path:path>",
serve_template,
{"document_root": os.path.join(urls.here, "custom_templates")},
),
]
template_url = (
f"{self.live_server_url}/user_agent_check/project_template.tgz"
)
args = ["startproject", "--template", template_url, "urltestproject"]
_, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertIn("Django/%s" % get_version(), user_agent)
finally:
urls.urlpatterns = old_urlpatterns
def test_project_template_tarball_url(self):
"""
Startproject management command handles project template tar/zip balls
from non-canonical urls.
"""
template_url = (
"%s/custom_templates/project_template.tgz/" % self.live_server_url
)
args = ["startproject", "--template", template_url, "urltestproject"]
testproject_dir = os.path.join(self.test_dir, "urltestproject")
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, "run.py")))
def test_file_without_extension(self):
"""
Make sure the startproject management command is able to render custom
files
"""
template_path = os.path.join(custom_templates_dir, "project_template")
args = [
"startproject",
"--template",
template_path,
"customtestproject",
"-e",
"txt",
"-n",
"Procfile",
]
testproject_dir = os.path.join(self.test_dir, "customtestproject")
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, "additional_dir")))
base_path = os.path.join(testproject_dir, "additional_dir")
for f in ("Procfile", "additional_file.py", "requirements.txt"):
self.assertTrue(os.path.exists(os.path.join(base_path, f)))
with open(os.path.join(base_path, f)) as fh:
self.assertEqual(
fh.read().strip(), "# some file for customtestproject test project"
)
def test_custom_project_template_context_variables(self):
"Make sure template context variables are rendered with proper values"
template_path = os.path.join(custom_templates_dir, "project_template")
args = [
"startproject",
"--template",
template_path,
"another_project",
"project_dir",
]
testproject_dir = os.path.join(self.test_dir, "project_dir")
os.mkdir(testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, "manage.py")
with open(test_manage_py) as fp:
content = fp.read()
self.assertInAfterFormatting('project_name = "another_project"', content)
self.assertInAfterFormatting(
'project_directory = "%s"' % testproject_dir, content
)
def test_no_escaping_of_project_variables(self):
"Make sure template context variables are not html escaped"
# We're using a custom command so we need the alternate settings
self.write_settings("alternate_settings.py")
template_path = os.path.join(custom_templates_dir, "project_template")
args = [
"custom_startproject",
"--template",
template_path,
"another_project",
"project_dir",
"--extra",
"<&>",
"--settings=alternate_settings",
]
testproject_dir = os.path.join(self.test_dir, "project_dir")
os.mkdir(testproject_dir)
out, err = self.run_manage(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, "additional_dir", "extra.py")
with open(test_manage_py) as fp:
content = fp.read()
self.assertIn("<&>", content)
def test_custom_project_destination_missing(self):
"""
Create the directory when the provided destination directory doesn't
exist.
"""
template_path = os.path.join(custom_templates_dir, "project_template")
args = [
"startproject",
"--template",
template_path,
"yet_another_project",
"project_dir2",
]
testproject_dir = os.path.join(self.test_dir, "project_dir2")
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(testproject_dir))
def test_custom_project_template_with_non_ascii_templates(self):
"""
The startproject management command is able to render templates with
non-ASCII content.
"""
template_path = os.path.join(custom_templates_dir, "project_template")
args = [
"startproject",
"--template",
template_path,
"--extension=txt",
"customtestproject",
]
testproject_dir = os.path.join(self.test_dir, "customtestproject")
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
path = os.path.join(testproject_dir, "ticket-18091-non-ascii-template.txt")
with open(path, encoding="utf-8") as f:
self.assertEqual(
f.read().splitlines(False),
["Some non-ASCII text for testing ticket #18091:", "üäö €"],
)
def test_custom_project_template_hidden_directory_default_excluded(self):
"""Hidden directories are excluded by default."""
template_path = os.path.join(custom_templates_dir, "project_template")
args = [
"startproject",
"--template",
template_path,
"custom_project_template_hidden_directories",
"project_dir",
]
testproject_dir = os.path.join(self.test_dir, "project_dir")
os.mkdir(testproject_dir)
_, err = self.run_django_admin(args)
self.assertNoOutput(err)
hidden_dir = os.path.join(testproject_dir, ".hidden")
self.assertIs(os.path.exists(hidden_dir), False)
def test_custom_project_template_hidden_directory_included(self):
"""
Template context variables in hidden directories are rendered, if not
excluded.
"""
template_path = os.path.join(custom_templates_dir, "project_template")
project_name = "custom_project_template_hidden_directories_included"
args = [
"startproject",
"--template",
template_path,
project_name,
"project_dir",
"--exclude",
]
testproject_dir = os.path.join(self.test_dir, "project_dir")
os.mkdir(testproject_dir)
_, err = self.run_django_admin(args)
self.assertNoOutput(err)
render_py_path = os.path.join(testproject_dir, ".hidden", "render.py")
with open(render_py_path) as fp:
self.assertInAfterFormatting(
f"# The {project_name} should be rendered.",
fp.read(),
)
def test_custom_project_template_exclude_directory(self):
"""
Excluded directories (in addition to .git and __pycache__) are not
included in the project.
"""
template_path = os.path.join(custom_templates_dir, "project_template")
project_name = "custom_project_with_excluded_directories"
args = [
"startproject",
"--template",
template_path,
project_name,
"project_dir",
"--exclude",
"additional_dir",
"-x",
".hidden",
]
testproject_dir = os.path.join(self.test_dir, "project_dir")
os.mkdir(testproject_dir)
_, err = self.run_django_admin(args)
self.assertNoOutput(err)
excluded_directories = [
".hidden",
"additional_dir",
".git",
"__pycache__",
]
for directory in excluded_directories:
self.assertIs(
os.path.exists(os.path.join(testproject_dir, directory)),
False,
)
not_excluded = os.path.join(testproject_dir, project_name)
self.assertIs(os.path.exists(not_excluded), True)
@unittest.skipIf(
sys.platform == "win32",
"Windows only partially supports umasks and chmod.",
)
def test_honor_umask(self):
_, err = self.run_django_admin(["startproject", "testproject"], umask=0o077)
self.assertNoOutput(err)
testproject_dir = os.path.join(self.test_dir, "testproject")
self.assertIs(os.path.isdir(testproject_dir), True)
tests = [
(["manage.py"], 0o700),
(["testproject"], 0o700),
(["testproject", "settings.py"], 0o600),
]
for paths, expected_mode in tests:
file_path = os.path.join(testproject_dir, *paths)
with self.subTest(paths[-1]):
self.assertEqual(
stat.S_IMODE(os.stat(file_path).st_mode),
expected_mode,
)
def test_failure_to_format_code(self):
with AssertFormatterFailureCaughtContext(self) as ctx:
call_command(
"startapp",
"mynewapp",
directory=self.test_dir,
stdout=ctx.stdout,
stderr=ctx.stderr,
)
|
StartProject
|
python
|
huggingface__transformers
|
tests/models/gemma3/test_modeling_gemma3.py
|
{
"start": 20447,
"end": 49630
}
|
class ____(unittest.TestCase):
def setUp(self):
self.processor = Gemma3Processor.from_pretrained("google/gemma-3-4b-it", padding_side="left")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
self.messages = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{"type": "image", "url": url},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@require_deterministic_for_xpu
def test_model_4b_bf16(self):
model_id = "google/gemma-3-4b-it"
model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
inputs = self.processor.apply_chat_template(
self.messages,
tokenize=True,
return_dict=True,
return_tensors="pt",
add_generation_prompt=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with turquoise water and a blue sky in the background. It looks like a'],
("cuda", (8, 0)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear turquoise water and a blue sky in the background. It looks like'],
("cuda", (8, 6)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear blue water and a blue sky in the background. It looks like'],
("rocm", (9, 4)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear blue water and a blue sky in the background. It looks like'],
("rocm", (9, 5)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown and white cow standing on a sandy beach with turquoise water and a distant coastline in the background. It looks'],
}
) # fmt: skip
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
self.assertEqual(output_text, EXPECTED_TEXT)
@require_torch_large_accelerator
@require_deterministic_for_xpu
def test_model_4b_batch(self):
model_id = "google/gemma-3-4b-it"
model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
messages_2 = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
},
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
},
{"type": "text", "text": "Are these images identical?"},
],
},
]
inputs = self.processor.apply_chat_template(
[self.messages, messages_2],
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
add_generation_prompt=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = Expectations(
{
("xpu", 3):
[
'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with turquoise water and a blue sky in the background. It looks like a',
"user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Image 1:** Shows a brown",
],
("cuda", (8,0)):
[
'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear turquoise water and a blue sky in the background. It looks like',
"user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Image 1:** Shows a brown"
],
("cuda", (8,6)):
[
'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with clear turquoise water and a blue sky in the background. It looks like',
"user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Image 1:** Shows a brown"
],
("rocm", (9, 4)):
[
'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown cow standing on a sandy beach with turquoise water and a blue sky in the background. It looks like a',
"user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. \n\nHere's a breakdown of the differences:\n\n* **Image 1:** Shows a cow"
],
("rocm", (9, 5)):
[
'user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nCertainly! \n\nThe image shows a brown and white cow standing on a sandy beach next to a turquoise ocean. There are some clouds in the blue',
'user\nYou are a helpful assistant.\n\n\n\n\n\n\n\n\n\nAre these images identical?\nmodel\nNo, these images are not identical. They depict very different scenes. \n\n* **Image 1** shows a cow standing on a beach',
],
}
) # fmt: skip
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
self.assertEqual(output_text, EXPECTED_TEXT)
@require_torch_large_accelerator
def test_model_4b_crops(self):
model_id = "google/gemma-3-4b-it"
model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
crop_config = {
"images_kwargs": {
"do_pan_and_scan": True,
"pan_and_scan_max_num_crops": 448,
"pan_and_scan_min_crop_size": 32,
"pan_and_scan_min_ratio_to_activate": 0.3,
}
}
inputs = self.processor.apply_chat_template(
self.messages,
tokenize=True,
return_dict=True,
return_tensors="pt",
add_generation_prompt=True,
**crop_config,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="static")
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_NUM_IMAGES = 3 # one for the origin image and two crops of images
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): ["user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a bright blue sky with some white clouds in the"],
("cuda", (8, 0)): ["user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a blue sky with some white clouds in the background"],
("cuda", (8, 6)): ["user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a bright blue sky with some white clouds in the"],
("cuda", (9, 0)): ["user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a bright blue sky with some white clouds in the"],
("rocm", (9, 4)): ["user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a bright blue sky with some white clouds in the"],
("rocm", (9, 5)): ["user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a blue sky with some white clouds in the background"]
}
) # fmt: skip
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
self.assertEqual(len(inputs["pixel_values"]), EXPECTED_NUM_IMAGES)
self.assertEqual(output_text, EXPECTED_TEXT)
@require_torch_large_accelerator
@require_deterministic_for_xpu
def test_model_4b_batch_crops(self):
model_id = "google/gemma-3-4b-it"
model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
crop_config = {
"images_kwargs": {
"do_pan_and_scan": True,
"pan_and_scan_max_num_crops": 448,
"pan_and_scan_min_crop_size": 32,
"pan_and_scan_min_ratio_to_activate": 0.3,
}
}
messages_2 = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
},
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
},
{"type": "text", "text": "Are these images identical?"},
],
},
]
inputs = self.processor.apply_chat_template(
[self.messages, messages_2],
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
add_generation_prompt=True,
**crop_config,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="static")
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_NUM_IMAGES = 9 # 3 * (one for the origin image and two crops of images) = 9
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): [
"user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a bright blue sky with some white clouds in the",
'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nThe first image shows a cow on a beach, while the second image shows a street scene with a'],
("cuda", 7): [],
("cuda", (8,0)): [
"user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a blue sky with some white clouds in the background",
'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nThe first image shows a cow on a beach, while the second image shows a street scene with a'
],
("cuda", (8, 6)): [
"user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a bright blue sky with some white clouds in the",
'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nThe first image shows a cow on a beach, while the second image shows a street scene with a'
],
("rocm", (9, 4)) : [
"user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There's a bright blue sky with some white clouds in the",
'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nThe first image shows a cow on a beach, while the second image shows a street scene with a'
],
("rocm", (9, 5)) : [
'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown cow standing on a sandy beach next to a turquoise ocean. There are clouds in the blue sky above.',
'user\nYou are a helpful assistant.\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nHere is the original image \n\n\n\n and here are some crops to help you see better \n\n\n\n \n\n\n\nAre these images identical?\nmodel\nNo, the images are not identical. \n\nThe first image shows a cow on a beach, while the second image shows a street scene with a',
],
}
) # fmt: skip
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
self.assertEqual(len(inputs["pixel_values"]), EXPECTED_NUM_IMAGES)
self.assertEqual(output_text, EXPECTED_TEXT)
@require_torch_large_accelerator
def test_model_4b_multiimage(self):
model_id = "google/gemma-3-4b-it"
model = Gemma3ForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
messages = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
},
{"type": "text", "text": "What do you see here?"},
],
},
]
inputs = self.processor.apply_chat_template(
messages,
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
add_generation_prompt=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="static")
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image:\n\n**Overall Scene:**\n\nIt looks like a street scene in a city with"],
("cuda", (8, 0)): ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image:\n\n**Overall Scene:**\n\nIt looks like a street scene in a vibrant,"],
("cuda", (8, 6)): ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image:\n\n**Overall Scene:**\n\nIt appears to be a street scene in a city"],
("cuda", (9, 0)): ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image!\n\nHere's a description of the scene:\n\n* **Location:**"],
("rocm", (9, 4)): ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image:\n\n**Overall Scene:**\n\nIt appears to be a street scene in a vibrant"],
("rocm", (9, 5)): ["user\nYou are a helpful assistant.\n\n\n\n\n\nWhat do you see here?\nmodel\nOkay, let's break down what I see in this image:\n\n**Main Features:**\n\n* **Chinese Archway:** The most prominent"],
}
) # fmt: skip
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
self.assertEqual(output_text, EXPECTED_TEXT)
@require_deterministic_for_xpu
def test_model_1b_text_only(self):
model_id = "google/gemma-3-1b-it"
model = Gemma3ForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
inputs = tokenizer("Write a poem about Machine Learning.", return_tensors="pt").to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False, cache_implementation="static")
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a river deep,\nWith patterns hidden, secrets sleep.\nA neural net, a watchful eye,\nLearning'],
("cuda", 7): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a silent stream,\nInto the neural net, a waking dream.\nAlgorithms hum, a coded grace,\n'],
("cuda", 8): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a silent stream,\nInto the neural net, a waking dream.\nAlgorithms hum, a coded grace,\n'],
("rocm", (9, 4)): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a silent stream,\nInto the neural net, a waking dream.\nAlgorithms hum, a coded grace,\n'],
("rocm", (9, 5)): ['Write a poem about Machine Learning.\n\n---\n\nThe data flows, a river deep,\nWith patterns hidden, secrets sleep.\nA neural net, a watchful eye,\nLearning'],
}
) # fmt: skip
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
self.assertEqual(output_text, EXPECTED_TEXT)
# TODO: raushan FA2 generates gibberish for no reason, check later
@require_flash_attn
@require_torch_large_accelerator
@pytest.mark.flash_attn_test
def test_model_4b_flash_attn(self):
model_id = "google/gemma-3-4b-it"
model = Gemma3ForConditionalGeneration.from_pretrained(
model_id, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
).to(torch_device)
inputs = self.processor.apply_chat_template(
self.messages,
tokenize=True,
return_dict=True,
return_tensors="pt",
add_generation_prompt=True,
).to(torch_device)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach with turquoise water and a distant island in the background. It looks like a sunny day'],
("cuda", 7): [],
("cuda", 8): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach with turquoise water and a distant island in the background. It looks like a sunny day'],
("rocm", (9, 4)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach with turquoise water and a distant island in the background. It looks like a sunny day'],
("rocm", (9, 5)): ['user\nYou are a helpful assistant.\n\n\n\n\n\nWhat is shown in this image?\nmodel\nThe image shows a brown and white cow standing on a sandy beach with a turquoise ocean and a distant island in the background. It looks like a sunny'],
}
) # fmt: skip
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
self.assertEqual(output_text, EXPECTED_TEXT)
@parameterized.expand([("flash_attention_2",), ("sdpa",), ("eager",)])
def test_generation_beyond_sliding_window(self, attn_implementation: str):
"""Test that we can correctly generate beyond the sliding window. This is non trivial as
we need to correctly slice the attention mask in all cases (because we use a hybrid cache).
Outputs for every attention functions should be coherent and identical.
"""
model_id = "google/gemma-3-1b-it"
if attn_implementation == "flash_attention_2" and not is_flash_attn_2_available():
self.skipTest("FlashAttention2 is required for this test.")
input_text = [
"This is a nice place. " * 800 + "I really enjoy the scenery,", # This is larger than 4096 tokens
"A list of colors: red, blue", # This will almost all be padding tokens
]
        tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device)
model = AutoModelForCausalLM.from_pretrained(
model_id, attn_implementation=attn_implementation, dtype=torch.float16
).to(torch_device)
# Make sure prefill is larger than sliding window
input_size = inputs.input_ids.shape[-1]
self.assertTrue(input_size > model.config.sliding_window)
out = model.generate(**inputs, max_new_tokens=20, do_sample=False)[:, input_size:]
output_text = tokenizer.batch_decode(out)
EXPECTED_COMPLETIONS = [
" and I'm going to take a walk.\n\nI really enjoy the scenery, and I'",
", green, yellow, orange, purple, brown, black, white, gray.\n\nI'",
]
self.assertEqual(output_text, EXPECTED_COMPLETIONS)
@pytest.mark.torch_export_test
def test_export_text_only(self):
if not is_torch_greater_or_equal("2.6.0"):
self.skipTest(reason="This test requires torch >= 2.6 to run.")
from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
model_id = "google/gemma-3-1b-it"
model = AutoModelForCausalLM.from_pretrained(model_id)
self.assertEqual(model.config.cache_implementation, "hybrid")
# Export
model.eval()
exportable_module = TorchExportableModuleForDecoderOnlyLM(model, batch_size=1, max_cache_len=1024)
exported_program = exportable_module.export(
input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
)
logging.info(f"\nExported program: {exported_program}")
# Test generation with the exported model
prompt = "What is the capital of France?"
max_new_tokens_to_generate = 20
# Generate text with the exported model
tokenizer = AutoTokenizer.from_pretrained(model_id)
export_generated_text = TorchExportableModuleForDecoderOnlyLM.generate(
exported_program, tokenizer, prompt, max_new_tokens=max_new_tokens_to_generate
)
logging.info(f"\nExport generated texts: '{export_generated_text}'")
input_text = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
eager_outputs = model.generate(
**input_text,
max_new_tokens=max_new_tokens_to_generate,
do_sample=False, # Use greedy decoding to match the exported model
)
eager_generated_text = tokenizer.decode(eager_outputs[0], skip_special_tokens=True)
logging.info(f"\nEager generated texts: '{eager_generated_text}'")
self.assertEqual(export_generated_text, eager_generated_text)
def test_dynamic_sliding_window_is_default(self):
"""
Test that the dynamic sliding window cache (added in #40039) is the default cache implementation for Gemma3
models, despite the fact that Hub checkpoints may have `cache_implementation="hybrid"` (static sliding window).
"""
model_id = "google/gemma-3-1b-it"
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
        # the checkpoint config still advertises "hybrid", i.e. the static sliding-window cache
self.assertEqual(model.config.cache_implementation, "hybrid")
self.assertEqual(model.generation_config.cache_implementation, "hybrid")
tokenizer = AutoTokenizer.from_pretrained(model_id)
prompt = "What is the capital of France?"
model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
forward_outputs = model(**model_inputs)
self.assertIn("DynamicSlidingWindowLayer", str(forward_outputs.past_key_values))
generate_outputs = model.generate(
**model_inputs, max_new_tokens=2, do_sample=False, return_dict_in_generate=True
)
self.assertIn("DynamicSlidingWindowLayer", str(generate_outputs.past_key_values))
# If we manually specify the cache implementation = "hybrid", it will use the static sliding window cache
generate_outputs = model.generate(
**model_inputs,
max_new_tokens=2,
do_sample=False,
return_dict_in_generate=True,
cache_implementation="hybrid",
)
self.assertNotIn("DynamicSlidingWindowLayer", str(generate_outputs.past_key_values))
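# ---------------------------------------------------------------------------
# Hedged illustration (editor's addition, not part of the test file above).
# It sketches the memory bound a sliding-window KV cache targets: with a window
# of W tokens, each layer keeps at most W key/value entries no matter how long
# the prompt grows. Plain-Python sketch only; it is not a Transformers API.
# ---------------------------------------------------------------------------
def kv_entries_per_layer(prompt_len: int, sliding_window: int) -> int:
    # A full (static) cache keeps `prompt_len` entries; a sliding-window cache
    # keeps at most the `sliding_window` most recent ones.
    return min(prompt_len, sliding_window)
assert kv_entries_per_layer(prompt_len=8000, sliding_window=4096) == 4096
assert kv_entries_per_layer(prompt_len=100, sliding_window=4096) == 100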
|
Gemma3IntegrationTest
|
python
|
allegroai__clearml
|
clearml/model.py
|
{
"start": 107380,
"end": 107472
}
|
class ____(object):
def wait(self, *_: Any, **__: Any) -> bool:
return True
|
Waitable
|
python
|
numba__numba
|
numba/core/types/abstract.py
|
{
"start": 9641,
"end": 9724
}
|
class ____(Type):
"""
Base class for objects that support len()
"""
|
Sized
|
python
|
keon__algorithms
|
algorithms/linkedlist/add_two_numbers.py
|
{
"start": 398,
"end": 1792
}
|
class ____:
def __init__(self, x):
self.val = x
self.next = None
def add_two_numbers(left: Node, right: Node) -> Node:
head = Node(0)
current = head
sum = 0
while left or right:
print("adding: ", left.val, right.val)
sum //= 10
if left:
sum += left.val
left = left.next
if right:
sum += right.val
right = right.next
current.next = Node(sum % 10)
current = current.next
if sum // 10 == 1:
current.next = Node(1)
return head.next
def convert_to_list(number: int) -> Node:
"""
converts a positive integer into a (reversed) linked list.
    for example: given 112,
    the result is 2 -> 1 -> 1
"""
if number >= 0:
head = Node(0)
current = head
remainder = number % 10
quotient = number // 10
while quotient != 0:
current.next = Node(remainder)
current = current.next
remainder = quotient % 10
quotient //= 10
current.next = Node(remainder)
return head.next
else:
print("number must be positive!")
def convert_to_str(l: Node) -> str:
"""
converts the non-negative number list into a string.
"""
result = ""
while l:
result += str(l.val)
l = l.next
return result
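# A minimal usage sketch (an illustration, not part of the original module)
# showing how the helpers above fit together; the digit lists are reversed,
# so 342 + 465 = 807 prints as "708".
if __name__ == "__main__":
    left_list = convert_to_list(342)   # 2 -> 4 -> 3
    right_list = convert_to_list(465)  # 5 -> 6 -> 4
    total = add_two_numbers(left_list, right_list)
    print(convert_to_str(total))  # "708", i.e. 807 with digits reversed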
|
Node
|
python
|
ray-project__ray
|
python/ray/tune/tests/_test_trial_runner_callbacks.py
|
{
"start": 1765,
"end": 2172
}
|
class ____(RayTrialExecutor):
def __init__(self):
super().__init__()
self.next_future_result = None
def start_trial(self, trial: Trial):
trial.status = Trial.RUNNING
return True
def continue_training(self, trial: Trial):
pass
def get_next_executor_event(self, live_trials, next_trial_exists):
return self.next_future_result
|
_MockTrialExecutor
|
python
|
pytorch__pytorch
|
torch/_inductor/constant_folding.py
|
{
"start": 2510,
"end": 15243
}
|
class ____(torch.fx.Interpreter):
def __init__(
self,
gm: torch.fx.GraphModule,
skip_constructors: bool = False,
lifted_constant_names: Optional[list[str]] = None,
skip_folding_node_fn: Optional[Callable[[torch.fx.Node], bool]] = None,
) -> None:
super().__init__(gm)
self.node_replacements: dict[torch.fx.Node, Any] = {}
self.replaced_uses: dict[torch.fx.Node, int] = collections.Counter()
self.unknown_value = object()
self.skip_constructors: bool = skip_constructors
# overwrite this to deallocate env values if their only remaining use
# is the output
self.user_to_last_uses = self.node_to_last_non_output_use()
self.lifted_constant_names = lifted_constant_names
self.deferred_value = object()
self.skip_folding_node_fn = skip_folding_node_fn
def _support_dynamic_shape(self) -> bool:
        # ConstantFolder does not support dynamic shapes yet
return False
def _deduce_value(self, node: torch.fx.Node) -> Any:
if self.lifted_constant_names is None:
return super().run_node(node)
# if lifted_constant_names is passed in, no concrete value is available
# so we just check if all inputs have values
if self.skip_folding_node_fn is not None and self.skip_folding_node_fn(node):
return self.unknown_value
flattened_node_inps = pytree.arg_tree_leaves(*node.args, **node.kwargs)
for inp in flattened_node_inps:
if (
isinstance(inp, torch.fx.Node)
and inp.name not in (self.lifted_constant_names or ())
and self.env[inp] != self.deferred_value
):
return self.unknown_value
return self.deferred_value
def is_impure(self, node: torch.fx.node.Node) -> bool:
def is_woq_int8_pattern(node: torch.fx.node.Node) -> bool:
return (
node.target is torch.ops.prims.convert_element_type.default # type: ignore[return-value]
and isinstance(node.args[0], torch.fx.Node)
and "val" in node.args[0].meta
and node.args[0].meta["val"].dtype == torch.int8 # type: ignore[union-attr]
and node.args[1] == torch.bfloat16
)
if (
is_woq_int8_pattern(node)
or (
node.target is torch.ops.aten.permute.default
and len(node.users) == 1
and is_woq_int8_pattern(next(iter(node.users)))
)
) and is_const_source(
node.args[0], # type: ignore[arg-type]
self.lifted_constant_names,
):
# Case 1: int8_weight -> dq -> bf16_weight
# Case 2: int8_weight -> permute -> dq -> bf16_weight
return True
quant_registered = (
getattr(torch.ops.quantized_decomposed, "dequantize_per_channel", None)
is not None
)
if quant_registered and node.target in [
torch.ops.quantized_decomposed.dequantize_per_channel.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
torch.ops.quantized_decomposed.convert_element_type.no_fuse,
]:
            # For the pattern fp32_weight -> q -> dq,
            # we only fold fp32_weight -> q into int8_weight
            # and leave dq in the graph to be fused
return True
if node.target in _dont_constant_fold:
return True
return False
def node_to_last_non_output_use(self) -> dict[torch.fx.Node, list[torch.fx.Node]]:
last_non_output_use = collections.defaultdict(list)
seen_uses = OrderedSet[torch.fx.Node]()
output_node = next(iter(reversed(self.module.graph.nodes))) # type: ignore[arg-type, union-attr]
for node in reversed(self.module.graph.nodes): # type: ignore[arg-type, union-attr]
if node.target == "output":
continue
def add_use(inp: torch.fx.Node) -> None:
if inp in seen_uses:
return
seen_uses.add(inp)
last_non_output_use[node].append(inp)
# In-place is fine since we don't mutate
pytree.tree_map_only_(torch.fx.Node, add_use, (node.args, node.kwargs))
# if this node is only used in output, we want to gc it right away
if len(node.users) == 1 and output_node in node.users:
last_non_output_use[node].append(node)
return last_non_output_use
def run_node(self, node: torch.fx.Node) -> Any:
if node.target == "output":
            # because we remove nodes from env on their last non-output use,
            # re-define them now or we'll get an error in the interpreter
def set_env(arg: torch.fx.Node) -> None:
self.env[arg] = self.unknown_value
# In-place is fine since we don't mutate
pytree.tree_map_only_(torch.fx.Node, set_env, node.args)
return super().run_node(node)
args, kwargs = self.fetch_args_kwargs_from_env(node)
flattened_inputs = pytree.arg_tree_leaves(*args, **kwargs)
# We need to do this weird thing because in cases where flattened_inputs
# contains a ScriptObject, equality checking results in a type error if
# the types are different.
if any(
type(self.unknown_value) is type(input_) and self.unknown_value == input_
for input_ in flattened_inputs
):
return self.unknown_value
# TODO - fix errors with this
if (
node.op == "call_function"
and node.target is aten._efficientzerotensor.default
):
return self.unknown_value
# TODO - constant folding triton kernel returns the inputs -- fix this
if (
node.op == "call_function"
and node.name == "triton_kernel_wrapper_functional_proxy"
):
return self.unknown_value
# skip constructors, since inductor generates optimal code for them already
# and turning into tensor would result in an additional global memory read
# TODO - more complicated strategy
if (
self.skip_constructors
and not is_const_source(node, self.lifted_constant_names)
and not any(isinstance(e, torch.Tensor) for e in flattened_inputs)
):
return self.unknown_value
# All mutations should either be removed or on inputs which we did not make constant
if (
isinstance(node.target, torch._ops.OpOverload)
and torch.Tag.nondeterministic_seeded in node.target.tags
):
return self.unknown_value
if node.op == "call_function" and isinstance(
node.target, torch._ops.HigherOrderOperator
):
return self.unknown_value
out = self._deduce_value(node)
if isinstance(out, torch._C.ScriptObject):
return out
if out == self.unknown_value:
return self.unknown_value
if not is_const_source(node, self.lifted_constant_names) and (
isinstance(out, torch.Tensor) or out == self.deferred_value
):
if out != self.deferred_value and out.device.type == "meta":
return out
if not self.insertable_tensor_check(out):
return out
if self.is_impure(node):
return self.unknown_value
self.add_node_replacement(node, out)
flattened_node_inps = pytree.arg_tree_leaves(*node.args, **node.kwargs)
for n in flattened_node_inps:
if not isinstance(n, torch.fx.Node):
continue
self.replaced_uses[n] += 1
for to_delete in self.user_to_last_uses.get(node, []):
if self.replaced_uses[to_delete] == len(to_delete.users):
self.node_replacements.pop(to_delete, None)
return out
def insertable_tensor_check(self, tensor: torch.Tensor) -> bool:
return True
def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
self.node_replacements[node] = tensor
def run(self) -> Any: # type: ignore[override]
env: dict[torch.fx.Node, Any] = {}
self.insert_placerholder_values(env)
return super().run(initial_env=env)
def insert_placerholder_values(self, env: dict[torch.fx.Node, Any]) -> None:
for n in self.module.graph.find_nodes(op="placeholder"): # type: ignore[operator, union-attr]
env[n] = self.unknown_value # type: ignore[assignment]
if self.lifted_constant_names is None:
return
for n in self.module.graph.nodes: # type: ignore[union-attr]
if n.name in (self.lifted_constant_names or ()):
env[n] = self.deferred_value
def constant_fold(
gm: torch.fx.GraphModule,
constraint_fn: Optional[Callable[[torch.fx.Node], bool]] = None,
) -> None:
with torch.utils._python_dispatch._disable_current_modes():
cf = ConstantFolder(gm, skip_constructors=True)
cf.run()
for node, constant in cf.node_replacements.items():
if constraint_fn is not None and not constraint_fn(node):
continue
replace_node_with_constant(gm, node, constant)
erased_params = []
for node in gm.graph.find_nodes(op="get_attr"):
if len(node.users) == 0:
if hasattr(gm, node.target):
delattr(gm, node.target)
erased_params.append(node)
for node in erased_params:
gm.graph.erase_node(node)
gm.graph.eliminate_dead_code()
gm.graph.lint()
gm.recompile()
def constant_graph_tag(
gm: torch.fx.GraphModule,
skip_constructors: bool = True,
lifted_constant_names: Optional[list[str]] = None,
skip_folding_node_fn: Optional[Callable[[torch.fx.Node], bool]] = None,
) -> None:
with torch.utils._python_dispatch._disable_current_modes():
cf = ConstantFolder(
gm,
skip_constructors=skip_constructors,
lifted_constant_names=lifted_constant_names,
skip_folding_node_fn=skip_folding_node_fn,
)
cf.run()
for node in gm.graph.nodes:
if skip_folding_node_fn is not None and skip_folding_node_fn(node):
node.meta[META_TAG] = MODULE_TAG
continue
if (
is_const_source(node, lifted_constant_names)
or node in cf.node_replacements
or node in cf.replaced_uses
):
node.meta[META_TAG] = CONST_MODULE_TAG
else:
node.meta[META_TAG] = MODULE_TAG
def run_and_get_constant_graph(
gm: torch.fx.GraphModule,
skip_constructors: bool = True,
lifted_constant_names: Optional[list[str]] = None,
skip_folding_node_fn: Optional[Callable[[torch.fx.Node], bool]] = None,
) -> torch.fx.GraphModule:
"""
Construct a GraphModule which corresponds to the part which could be
constant folded in provided gm.
"""
constant_graph_tag(
gm, skip_constructors, lifted_constant_names, skip_folding_node_fn
)
def untag(node: torch.fx.Node) -> bool:
used_to_fold = False
for u in node.users:
if u.meta[META_TAG] == CONST_MODULE_TAG:
used_to_fold = True
break
if not used_to_fold:
node.meta[META_TAG] = MODULE_TAG
return used_to_fold
    # We rewrite the tags: if a constant is consumed directly, without any
    # folding opportunity, we keep it in the main gm.
for node in gm.graph.nodes:
if node.op == "get_attr" or (node.name in (lifted_constant_names or ())):
untag(node)
new_graph = torch.fx.Graph()
node_remapping: dict[torch.fx.Node, torch.fx.Node] = {}
output_nodes = []
for node in gm.graph.nodes:
if node.meta[META_TAG] == MODULE_TAG:
continue
new_node = new_graph.node_copy(node, lambda x: node_remapping[x])
node_remapping[node] = new_node
for user in node.users:
if user.meta[META_TAG] == MODULE_TAG:
output_nodes.append(new_node)
break
new_graph.output(tuple(output_nodes))
new_graph.lint()
new_gm = torch.fx.GraphModule(gm, new_graph)
return new_gm
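# A minimal, self-contained usage sketch (an illustration, not part of the file
# above), assuming `constant_fold` is importable from
# torch._inductor.constant_folding: the add on the frozen parameter is
# pre-computed and baked into the graph, leaving only the matmul at runtime.
if __name__ == "__main__":
    import torch
    from torch._inductor.constant_folding import constant_fold

    class _Demo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            # requires_grad=False so the folded value is a plain tensor
            self.w = torch.nn.Parameter(torch.randn(4, 4), requires_grad=False)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x @ (self.w + 1)  # `self.w + 1` does not depend on the input

    demo_gm = torch.fx.symbolic_trace(_Demo())
    constant_fold(demo_gm)
    print(demo_gm.graph)  # the `add` node has been replaced by a folded constant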
|
ConstantFolder
|
python
|
apache__airflow
|
task-sdk/tests/task_sdk/definitions/test_dag.py
|
{
"start": 24145,
"end": 24338
}
|
class ____(BaseOperator):
"""
An operator that does nothing.
Used to test Dag cycle detection.
"""
def execute(self, context: Context) -> None:
pass
|
DoNothingOperator
|
python
|
apache__avro
|
lang/py/avro/test/test_compatibility.py
|
{
"start": 14153,
"end": 37754
}
|
class ____(unittest.TestCase):
def test_simple_schema_promotion(self):
field_alias_reader = parse(
json.dumps(
{
"name": "foo",
"type": "record",
"fields": [{"type": "int", "name": "bar", "aliases": ["f1"]}],
}
)
)
record_alias_reader = parse(
json.dumps(
{
"name": "other",
"type": "record",
"fields": [{"type": "int", "name": "f1"}],
"aliases": ["foo"],
}
)
)
writer = parse(
json.dumps(
{
"name": "foo",
"type": "record",
"fields": [
{"type": "int", "name": "f1"},
{
"type": "string",
"name": "f2",
},
],
}
)
)
# alias testing
res = ReaderWriterCompatibilityChecker().get_compatibility(field_alias_reader, writer)
self.assertIs(res.compatibility, SchemaCompatibilityType.compatible, res.locations)
res = ReaderWriterCompatibilityChecker().get_compatibility(record_alias_reader, writer)
self.assertIs(res.compatibility, SchemaCompatibilityType.compatible, res.locations)
def test_schema_compatibility(self):
# testValidateSchemaPairMissingField
writer = parse(
json.dumps(
{
"type": SchemaType.RECORD,
"name": "Record",
"fields": [
{"name": "oldField1", "type": SchemaType.INT},
{"name": "oldField2", "type": SchemaType.STRING},
],
}
)
)
reader = parse(
json.dumps(
{
"type": SchemaType.RECORD,
"name": "Record",
"fields": [{"name": "oldField1", "type": SchemaType.INT}],
}
)
)
self.assertTrue(self.are_compatible(reader, writer))
# testValidateSchemaPairMissingSecondField
reader = parse(
json.dumps(
{
"type": SchemaType.RECORD,
"name": "Record",
"fields": [{"name": "oldField2", "type": SchemaType.STRING}],
}
)
)
self.assertTrue(self.are_compatible(reader, writer))
# testValidateSchemaPairAllFields
reader = parse(
json.dumps(
{
"type": SchemaType.RECORD,
"name": "Record",
"fields": [
{"name": "oldField1", "type": SchemaType.INT},
{"name": "oldField2", "type": SchemaType.STRING},
],
}
)
)
self.assertTrue(self.are_compatible(reader, writer))
# testValidateSchemaNewFieldWithDefault
reader = parse(
json.dumps(
{
"type": SchemaType.RECORD,
"name": "Record",
"fields": [
{"name": "oldField1", "type": SchemaType.INT},
{"name": "newField2", "type": SchemaType.INT, "default": 42},
],
}
)
)
self.assertTrue(self.are_compatible(reader, writer))
# testValidateSchemaNewField
reader = parse(
json.dumps(
{
"type": SchemaType.RECORD,
"name": "Record",
"fields": [
{"name": "oldField1", "type": SchemaType.INT},
{"name": "newField2", "type": SchemaType.INT},
],
}
)
)
self.assertFalse(self.are_compatible(reader, writer))
# testValidateArrayWriterSchema
writer = parse(json.dumps({"type": SchemaType.ARRAY, "items": {"type": SchemaType.STRING}}))
reader = parse(json.dumps({"type": SchemaType.ARRAY, "items": {"type": SchemaType.STRING}}))
self.assertTrue(self.are_compatible(reader, writer))
reader = parse(json.dumps({"type": SchemaType.MAP, "values": {"type": SchemaType.STRING}}))
self.assertFalse(self.are_compatible(reader, writer))
# testValidatePrimitiveWriterSchema
writer = parse(json.dumps({"type": SchemaType.STRING}))
reader = parse(json.dumps({"type": SchemaType.STRING}))
self.assertTrue(self.are_compatible(reader, writer))
reader = parse(json.dumps({"type": SchemaType.INT}))
self.assertFalse(self.are_compatible(reader, writer))
# testUnionReaderWriterSubsetIncompatibility
writer = parse(
json.dumps(
{
"name": "Record",
"type": "record",
"fields": [
{
"name": "f1",
"type": [
SchemaType.INT,
SchemaType.STRING,
SchemaType.LONG,
],
}
],
}
)
)
reader = parse(
json.dumps(
{
"name": "Record",
"type": SchemaType.RECORD,
"fields": [{"name": "f1", "type": [SchemaType.INT, SchemaType.STRING]}],
}
)
)
reader = reader.fields[0].type
writer = writer.fields[0].type
self.assertIsInstance(reader, UnionSchema)
self.assertIsInstance(writer, UnionSchema)
self.assertFalse(self.are_compatible(reader, writer))
# testReaderWriterCompatibility
compatible_reader_writer_test_cases = [
(BOOLEAN_SCHEMA, BOOLEAN_SCHEMA),
(INT_SCHEMA, INT_SCHEMA),
(LONG_SCHEMA, INT_SCHEMA),
(LONG_SCHEMA, LONG_SCHEMA),
(FLOAT_SCHEMA, INT_SCHEMA),
(FLOAT_SCHEMA, LONG_SCHEMA),
(DOUBLE_SCHEMA, LONG_SCHEMA),
(DOUBLE_SCHEMA, INT_SCHEMA),
(DOUBLE_SCHEMA, FLOAT_SCHEMA),
(STRING_SCHEMA, STRING_SCHEMA),
(BYTES_SCHEMA, BYTES_SCHEMA),
(STRING_SCHEMA, BYTES_SCHEMA),
(BYTES_SCHEMA, STRING_SCHEMA),
(INT_ARRAY_SCHEMA, INT_ARRAY_SCHEMA),
(LONG_ARRAY_SCHEMA, INT_ARRAY_SCHEMA),
(INT_MAP_SCHEMA, INT_MAP_SCHEMA),
(LONG_MAP_SCHEMA, INT_MAP_SCHEMA),
(ENUM1_AB_SCHEMA, ENUM1_AB_SCHEMA),
(ENUM1_ABC_SCHEMA, ENUM1_AB_SCHEMA),
# Union related pairs
(EMPTY_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
(FLOAT_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
(FLOAT_UNION_SCHEMA, INT_UNION_SCHEMA),
(FLOAT_UNION_SCHEMA, LONG_UNION_SCHEMA),
(FLOAT_UNION_SCHEMA, INT_LONG_UNION_SCHEMA),
(INT_UNION_SCHEMA, INT_UNION_SCHEMA),
(INT_STRING_UNION_SCHEMA, STRING_INT_UNION_SCHEMA),
(INT_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
(LONG_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
(LONG_UNION_SCHEMA, INT_UNION_SCHEMA),
(FLOAT_UNION_SCHEMA, INT_UNION_SCHEMA),
(DOUBLE_UNION_SCHEMA, INT_UNION_SCHEMA),
(FLOAT_UNION_SCHEMA, LONG_UNION_SCHEMA),
(DOUBLE_UNION_SCHEMA, LONG_UNION_SCHEMA),
(FLOAT_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
(DOUBLE_UNION_SCHEMA, FLOAT_UNION_SCHEMA),
(STRING_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
(STRING_UNION_SCHEMA, BYTES_UNION_SCHEMA),
(BYTES_UNION_SCHEMA, EMPTY_UNION_SCHEMA),
(BYTES_UNION_SCHEMA, STRING_UNION_SCHEMA),
(DOUBLE_UNION_SCHEMA, INT_FLOAT_UNION_SCHEMA),
# Readers capable of reading all branches of a union are compatible
(FLOAT_SCHEMA, INT_FLOAT_UNION_SCHEMA),
(LONG_SCHEMA, INT_LONG_UNION_SCHEMA),
(DOUBLE_SCHEMA, INT_FLOAT_UNION_SCHEMA),
(DOUBLE_SCHEMA, INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA),
# Special case of singleton unions:
(FLOAT_SCHEMA, FLOAT_UNION_SCHEMA),
(INT_UNION_SCHEMA, INT_SCHEMA),
(INT_SCHEMA, INT_UNION_SCHEMA),
# Fixed types
(FIXED_4_BYTES, FIXED_4_BYTES),
# Tests involving records:
(EMPTY_RECORD1, EMPTY_RECORD1),
(EMPTY_RECORD1, A_INT_RECORD1),
(A_INT_RECORD1, A_INT_RECORD1),
(A_DINT_RECORD1, A_INT_RECORD1),
(A_DINT_RECORD1, A_DINT_RECORD1),
(A_INT_RECORD1, A_DINT_RECORD1),
(A_LONG_RECORD1, A_INT_RECORD1),
(A_INT_RECORD1, A_INT_B_INT_RECORD1),
(A_DINT_RECORD1, A_INT_B_INT_RECORD1),
(A_INT_B_DINT_RECORD1, A_INT_RECORD1),
(A_DINT_B_DINT_RECORD1, EMPTY_RECORD1),
(A_DINT_B_DINT_RECORD1, A_INT_RECORD1),
(A_INT_B_INT_RECORD1, A_DINT_B_DINT_RECORD1),
(parse(json.dumps({"type": "null"})), parse(json.dumps({"type": "null"}))),
(NULL_SCHEMA, NULL_SCHEMA),
(ENUM_AB_ENUM_DEFAULT_A_RECORD, ENUM_ABC_ENUM_DEFAULT_A_RECORD),
(
ENUM_AB_FIELD_DEFAULT_A_ENUM_DEFAULT_B_RECORD,
ENUM_ABC_FIELD_DEFAULT_B_ENUM_DEFAULT_A_RECORD,
),
(NS_RECORD1, NS_RECORD2),
(WITHOUT_NAMESPACE_RECORD, WITH_NAMESPACE_RECORD),
]
for reader, writer in compatible_reader_writer_test_cases:
self.assertTrue(self.are_compatible(reader, writer))
def test_schema_compatibility_fixed_size_mismatch(self):
incompatible_fixed_pairs = [
(FIXED_4_BYTES, FIXED_8_BYTES, "expected: 8, found: 4", "/size"),
(FIXED_8_BYTES, FIXED_4_BYTES, "expected: 4, found: 8", "/size"),
(
A_DINT_B_DFIXED_8_BYTES_RECORD1,
A_DINT_B_DFIXED_4_BYTES_RECORD1,
"expected: 4, found: 8",
"/fields/1/type/size",
),
(
A_DINT_B_DFIXED_4_BYTES_RECORD1,
A_DINT_B_DFIXED_8_BYTES_RECORD1,
"expected: 8, found: 4",
"/fields/1/type/size",
),
]
for reader, writer, message, location in incompatible_fixed_pairs:
result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
self.assertIs(result.compatibility, SchemaCompatibilityType.incompatible)
self.assertIn(
location,
result.locations,
f"expected {location}, found {result}",
)
self.assertIn(
message,
result.messages,
f"expected {location}, found {result}",
)
def test_schema_compatibility_missing_enum_symbols(self):
incompatible_pairs = [
# str(set) representation
(ENUM1_AB_SCHEMA, ENUM1_ABC_SCHEMA, "{'C'}", "/symbols"),
(ENUM1_BC_SCHEMA, ENUM1_ABC_SCHEMA, "{'A'}", "/symbols"),
(
RECORD1_WITH_ENUM_AB,
RECORD1_WITH_ENUM_ABC,
"{'C'}",
"/fields/0/type/symbols",
),
]
for reader, writer, message, location in incompatible_pairs:
result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
self.assertIs(result.compatibility, SchemaCompatibilityType.incompatible)
self.assertIn(message, result.messages)
self.assertIn(location, result.locations)
def test_schema_compatibility_missing_union_branch(self):
incompatible_pairs = [
(
INT_UNION_SCHEMA,
INT_STRING_UNION_SCHEMA,
{"reader union lacking writer type: STRING"},
{"/1"},
),
(
STRING_UNION_SCHEMA,
INT_STRING_UNION_SCHEMA,
{"reader union lacking writer type: INT"},
{"/0"},
),
(
INT_UNION_SCHEMA,
UNION_INT_RECORD1,
{"reader union lacking writer type: RECORD"},
{"/1"},
),
(
INT_UNION_SCHEMA,
UNION_INT_RECORD2,
{"reader union lacking writer type: RECORD"},
{"/1"},
),
(
UNION_INT_RECORD1,
UNION_INT_RECORD2,
{"reader union lacking writer type: RECORD"},
{"/1"},
),
(
INT_UNION_SCHEMA,
UNION_INT_ENUM1_AB,
{"reader union lacking writer type: ENUM"},
{"/1"},
),
(
INT_UNION_SCHEMA,
UNION_INT_FIXED_4_BYTES,
{"reader union lacking writer type: FIXED"},
{"/1"},
),
(
INT_UNION_SCHEMA,
UNION_INT_BOOLEAN,
{"reader union lacking writer type: BOOLEAN"},
{"/1"},
),
(
INT_UNION_SCHEMA,
LONG_UNION_SCHEMA,
{"reader union lacking writer type: LONG"},
{"/0"},
),
(
INT_UNION_SCHEMA,
FLOAT_UNION_SCHEMA,
{"reader union lacking writer type: FLOAT"},
{"/0"},
),
(
INT_UNION_SCHEMA,
DOUBLE_UNION_SCHEMA,
{"reader union lacking writer type: DOUBLE"},
{"/0"},
),
(
INT_UNION_SCHEMA,
BYTES_UNION_SCHEMA,
{"reader union lacking writer type: BYTES"},
{"/0"},
),
(
INT_UNION_SCHEMA,
UNION_INT_ARRAY_INT,
{"reader union lacking writer type: ARRAY"},
{"/1"},
),
(
INT_UNION_SCHEMA,
UNION_INT_MAP_INT,
{"reader union lacking writer type: MAP"},
{"/1"},
),
(
INT_UNION_SCHEMA,
UNION_INT_NULL,
{"reader union lacking writer type: NULL"},
{"/1"},
),
(
INT_UNION_SCHEMA,
INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA,
{
"reader union lacking writer type: LONG",
"reader union lacking writer type: FLOAT",
"reader union lacking writer type: DOUBLE",
},
{"/1", "/2", "/3"},
),
(
A_DINT_B_DINT_UNION_RECORD1,
A_DINT_B_DINT_STRING_UNION_RECORD1,
{"reader union lacking writer type: STRING"},
{"/fields/1/type/1"},
),
]
for reader, writer, message, location in incompatible_pairs:
result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
self.assertIs(result.compatibility, SchemaCompatibilityType.incompatible)
self.assertEqual(result.messages, message)
self.assertEqual(result.locations, location)
def test_schema_compatibility_name_mismatch(self):
incompatible_pairs = [
(ENUM1_AB_SCHEMA, ENUM2_AB_SCHEMA, "expected: Enum2", "/name"),
(EMPTY_RECORD2, EMPTY_RECORD1, "expected: Record1", "/name"),
(FIXED_4_BYTES, FIXED_4_ANOTHER_NAME, "expected: AnotherName", "/name"),
(
A_DINT_B_DENUM_1_RECORD1,
A_DINT_B_DENUM_2_RECORD1,
"expected: Enum2",
"/fields/1/type/name",
),
]
for reader, writer, message, location in incompatible_pairs:
result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
self.assertIs(result.compatibility, SchemaCompatibilityType.incompatible)
self.assertIn(message, result.messages)
self.assertIn(location, result.locations)
def test_schema_compatibility_reader_field_missing_default_value(self):
incompatible_pairs = [
(A_INT_RECORD1, EMPTY_RECORD1, "a", "/fields/0"),
(A_INT_B_DINT_RECORD1, EMPTY_RECORD1, "a", "/fields/0"),
]
for reader, writer, message, location in incompatible_pairs:
result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
self.assertIs(result.compatibility, SchemaCompatibilityType.incompatible)
self.assertEqual(len(result.messages), 1)
self.assertEqual(len(result.locations), 1)
self.assertEqual(message, "".join(result.messages))
self.assertEqual(location, "".join(result.locations))
def test_schema_compatibility_type_mismatch(self):
incompatible_pairs = [
(
NULL_SCHEMA,
INT_SCHEMA,
"reader type: null not compatible with writer type: int",
"/",
),
(
NULL_SCHEMA,
LONG_SCHEMA,
"reader type: null not compatible with writer type: long",
"/",
),
(
BOOLEAN_SCHEMA,
INT_SCHEMA,
"reader type: boolean not compatible with writer type: int",
"/",
),
(
INT_SCHEMA,
NULL_SCHEMA,
"reader type: int not compatible with writer type: null",
"/",
),
(
INT_SCHEMA,
BOOLEAN_SCHEMA,
"reader type: int not compatible with writer type: boolean",
"/",
),
(
INT_SCHEMA,
LONG_SCHEMA,
"reader type: int not compatible with writer type: long",
"/",
),
(
INT_SCHEMA,
FLOAT_SCHEMA,
"reader type: int not compatible with writer type: float",
"/",
),
(
INT_SCHEMA,
DOUBLE_SCHEMA,
"reader type: int not compatible with writer type: double",
"/",
),
(
LONG_SCHEMA,
FLOAT_SCHEMA,
"reader type: long not compatible with writer type: float",
"/",
),
(
LONG_SCHEMA,
DOUBLE_SCHEMA,
"reader type: long not compatible with writer type: double",
"/",
),
(
FLOAT_SCHEMA,
DOUBLE_SCHEMA,
"reader type: float not compatible with writer type: double",
"/",
),
(
DOUBLE_SCHEMA,
STRING_SCHEMA,
"reader type: double not compatible with writer type: string",
"/",
),
(
FIXED_4_BYTES,
STRING_SCHEMA,
"reader type: fixed not compatible with writer type: string",
"/",
),
(
STRING_SCHEMA,
BOOLEAN_SCHEMA,
"reader type: string not compatible with writer type: boolean",
"/",
),
(
STRING_SCHEMA,
INT_SCHEMA,
"reader type: string not compatible with writer type: int",
"/",
),
(
BYTES_SCHEMA,
NULL_SCHEMA,
"reader type: bytes not compatible with writer type: null",
"/",
),
(
BYTES_SCHEMA,
INT_SCHEMA,
"reader type: bytes not compatible with writer type: int",
"/",
),
(
A_INT_RECORD1,
INT_SCHEMA,
"reader type: record not compatible with writer type: int",
"/",
),
(
INT_ARRAY_SCHEMA,
LONG_ARRAY_SCHEMA,
"reader type: int not compatible with writer type: long",
"/items",
),
(
INT_MAP_SCHEMA,
INT_ARRAY_SCHEMA,
"reader type: map not compatible with writer type: array",
"/",
),
(
INT_ARRAY_SCHEMA,
INT_MAP_SCHEMA,
"reader type: array not compatible with writer type: map",
"/",
),
(
INT_MAP_SCHEMA,
LONG_MAP_SCHEMA,
"reader type: int not compatible with writer type: long",
"/values",
),
(
INT_SCHEMA,
ENUM2_AB_SCHEMA,
"reader type: int not compatible with writer type: enum",
"/",
),
(
ENUM2_AB_SCHEMA,
INT_SCHEMA,
"reader type: enum not compatible with writer type: int",
"/",
),
(
FLOAT_SCHEMA,
INT_LONG_FLOAT_DOUBLE_UNION_SCHEMA,
"reader type: float not compatible with writer type: double",
"/",
),
(
LONG_SCHEMA,
INT_FLOAT_UNION_SCHEMA,
"reader type: long not compatible with writer type: float",
"/",
),
(
INT_SCHEMA,
INT_FLOAT_UNION_SCHEMA,
"reader type: int not compatible with writer type: float",
"/",
),
# (INT_LIST_RECORD, LONG_LIST_RECORD, "reader type: int not compatible with writer type: long", "/fields/0/type"),
(
NULL_SCHEMA,
INT_SCHEMA,
"reader type: null not compatible with writer type: int",
"/",
),
]
for reader, writer, message, location in incompatible_pairs:
result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
self.assertIs(result.compatibility, SchemaCompatibilityType.incompatible)
self.assertIn(message, result.messages)
self.assertIn(location, result.locations)
def are_compatible(self, reader: Schema, writer: Schema) -> bool:
        result = ReaderWriterCompatibilityChecker().get_compatibility(reader, writer)
        return result.compatibility is SchemaCompatibilityType.compatible
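# A standalone sketch (an illustration, not part of the test module above),
# reusing the same imports this test relies on (avro's `parse`, `json`,
# `ReaderWriterCompatibilityChecker`, `SchemaCompatibilityType`): a reader with
# a `long` field can consume data written with an `int` field (int -> long promotion).
if __name__ == "__main__":
    writer_schema = parse(
        json.dumps({"type": "record", "name": "R", "fields": [{"name": "f1", "type": "int"}]})
    )
    reader_schema = parse(
        json.dumps({"type": "record", "name": "R", "fields": [{"name": "f1", "type": "long"}]})
    )
    result = ReaderWriterCompatibilityChecker().get_compatibility(reader_schema, writer_schema)
    assert result.compatibility is SchemaCompatibilityType.compatible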
|
TestCompatibility
|
python
|
pandas-dev__pandas
|
pandas/core/window/expanding.py
|
{
"start": 727,
"end": 44356
}
|
class ____(RollingAndExpandingMixin):
"""
Provide expanding window calculations.
An expanding window yields the value of an aggregation statistic with all the data
available up to that point in time.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value;
otherwise, result is ``np.nan``.
method : str {'single', 'table'}, default 'single'
Execute the rolling operation per single column or row (``'single'``)
or over the entire object (``'table'``).
This argument is only implemented when specifying ``engine='numba'``
in the method call.
Returns
-------
pandas.api.typing.Expanding
An instance of Expanding for further expanding window calculations,
e.g. using the ``sum`` method.
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
See :ref:`Windowing Operations <window.expanding>` for further usage details
and examples.
Examples
--------
>>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
**min_periods**
Expanding sum with 1 vs 3 observations needed to calculate a value.
>>> df.expanding(1).sum()
B
0 0.0
1 1.0
2 3.0
3 3.0
4 7.0
>>> df.expanding(3).sum()
B
0 NaN
1 NaN
2 3.0
3 3.0
4 7.0
"""
_attributes: list[str] = ["min_periods", "method"]
def __init__(
self,
obj: NDFrame,
min_periods: int = 1,
method: str = "single",
selection=None,
) -> None:
super().__init__(
obj=obj,
min_periods=min_periods,
method=method,
selection=selection,
)
def _get_window_indexer(self) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
"""
return ExpandingIndexer()
def aggregate(self, func=None, *args, **kwargs):
"""
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a Series/Dataframe or when passed to
Series/Dataframe.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
See Also
--------
DataFrame.aggregate : Similar DataFrame method.
Series.aggregate : Similar Series method.
Notes
-----
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
A passed user-defined-function will be passed a Series for evaluation.
If ``func`` defines an index relabeling, ``axis`` must be ``0`` or ``index``.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
1 2 5 8
2 3 6 9
>>> df.ewm(alpha=0.5).mean()
A B C
0 1.000000 4.000000 7.000000
1 1.666667 4.666667 7.666667
2 2.428571 5.428571 8.428571
"""
return super().aggregate(func, *args, **kwargs)
agg = aggregate
def count(self, numeric_only: bool = False):
"""
Calculate the expanding count of non NaN observations.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.count : Aggregating count for Series.
DataFrame.count : Aggregating count for DataFrame.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser.expanding().count()
a 1.0
b 2.0
c 3.0
d 4.0
dtype: float64
"""
return super().count(numeric_only=numeric_only)
def apply(
self,
func: Callable[..., Any],
raw: bool = False,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
args: tuple[Any, ...] | None = None,
kwargs: dict[str, Any] | None = None,
):
"""
Calculate the expanding custom aggregation function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``. Can also accept a
Numba JIT function with ``engine='numba'`` specified.
raw : bool, default False
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
engine : str, default None
* ``'cython'`` : Runs rolling apply through C-extensions from cython.
* ``'numba'`` : Runs rolling apply through JIT compiled code from numba.
Only available when ``raw`` is set to ``True``.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to both the ``func`` and the ``apply`` rolling aggregation.
args : tuple, default None
Positional arguments to be passed into func.
kwargs : dict, default None
Keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.apply : Aggregating apply for Series.
DataFrame.apply : Aggregating apply for DataFrame.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser.expanding().apply(lambda s: s.max() - 2 * s.min())
a -1.0
b 0.0
c 1.0
d 2.0
dtype: float64
"""
return super().apply(
func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
@overload
def pipe(
self,
func: Callable[Concatenate[Self, P], T],
*args: P.args,
**kwargs: P.kwargs,
) -> T: ...
@overload
def pipe(
self,
func: tuple[Callable[..., T], str],
*args: Any,
**kwargs: Any,
) -> T: ...
@final
def pipe(
self,
func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str],
*args: Any,
**kwargs: Any,
) -> T:
"""
Apply a ``func`` with arguments to this Expanding object and return its result.
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy, Rolling, Expanding or
Resampler
objects.
Instead of writing
>>> h = lambda x, arg2, arg3: x + 1 - arg2 * arg3
>>> g = lambda x, arg1: x * 5 / arg1
>>> f = lambda x: x**4
>>> df = pd.DataFrame(
... {"A": [1, 2, 3, 4]}, index=pd.date_range("2012-08-02", periods=4)
... )
        >>> h(g(f(df.expanding()), arg1=1), arg2=2, arg3=3)  # doctest: +SKIP
You can write
>>> (
... df.rolling("2D").pipe(f).pipe(g, arg1=1).pipe(h, arg2=2, arg3=3)
... ) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this Expanding object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
Expanding object.
*args : iterable, optional
Positional arguments passed into `func`.
**kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
Expanding
The original object with the function `func` applied.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full Expanding object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
>>> df = pd.DataFrame(
... {"A": [1, 2, 3, 4]}, index=pd.date_range("2012-08-02", periods=4)
... )
>>> df
A
2012-08-02 1
2012-08-03 2
2012-08-04 3
2012-08-05 4
To get the difference between each expanding window's maximum and minimum
value in one pass, you can do
>>> df.expanding().pipe(lambda x: x.max() - x.min())
A
2012-08-02 0.0
2012-08-03 1.0
2012-08-04 2.0
2012-08-05 3.0
"""
return super().pipe(func, *args, **kwargs)
def sum(
self,
numeric_only: bool = False,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
"""
Calculate the expanding sum.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.sum : Aggregating sum for Series.
DataFrame.sum : Aggregating sum for DataFrame.
Notes
-----
See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for extended
documentation and performance considerations for the Numba engine.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser.expanding().sum()
a 1.0
b 3.0
c 6.0
d 10.0
dtype: float64
"""
return super().sum(
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
def max(
self,
numeric_only: bool = False,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
"""
Calculate the expanding maximum.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.max : Aggregating max for Series.
DataFrame.max : Aggregating max for DataFrame.
Notes
-----
See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for extended
documentation and performance considerations for the Numba engine.
Examples
--------
>>> ser = pd.Series([3, 2, 1, 4], index=["a", "b", "c", "d"])
>>> ser.expanding().max()
a 3.0
b 3.0
c 3.0
d 4.0
dtype: float64
"""
return super().max(
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
def min(
self,
numeric_only: bool = False,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
"""
Calculate the expanding minimum.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.min : Aggregating min for Series.
DataFrame.min : Aggregating min for DataFrame.
Notes
-----
See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for extended
documentation and performance considerations for the Numba engine.
Examples
--------
>>> ser = pd.Series([2, 3, 4, 1], index=["a", "b", "c", "d"])
>>> ser.expanding().min()
a 2.0
b 2.0
c 2.0
d 1.0
dtype: float64
"""
return super().min(
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
def mean(
self,
numeric_only: bool = False,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
"""
Calculate the expanding mean.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.mean : Aggregating mean for Series.
DataFrame.mean : Aggregating mean for DataFrame.
Notes
-----
See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for extended
documentation and performance considerations for the Numba engine.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser.expanding().mean()
a 1.0
b 1.5
c 2.0
d 2.5
dtype: float64
"""
return super().mean(
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
def median(
self,
numeric_only: bool = False,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
"""
Calculate the expanding median.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.median : Aggregating median for Series.
DataFrame.median : Aggregating median for DataFrame.
Notes
-----
See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for extended
documentation and performance considerations for the Numba engine.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser.expanding().median()
a 1.0
b 1.5
c 2.0
d 2.5
dtype: float64
"""
return super().median(
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
def std(
self,
ddof: int = 1,
numeric_only: bool = False,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
"""
Calculate the expanding standard deviation.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
numpy.std : Equivalent method for NumPy array.
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.std : Aggregating std for Series.
DataFrame.std : Aggregating std for DataFrame.
Notes
-----
The default ``ddof`` of 1 used in :meth:`Series.std` is different
than the default ``ddof`` of 0 in :func:`numpy.std`.
        A minimum of one period is required for the expanding calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
return super().std(
ddof=ddof,
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
def var(
self,
ddof: int = 1,
numeric_only: bool = False,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
"""
Calculate the expanding variance.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
numpy.var : Equivalent method for NumPy array.
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.var : Aggregating var for Series.
DataFrame.var : Aggregating var for DataFrame.
Notes
-----
The default ``ddof`` of 1 used in :meth:`Series.var` is different
than the default ``ddof`` of 0 in :func:`numpy.var`.
        A minimum of one period is required for the expanding calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
return super().var(
ddof=ddof,
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
def sem(self, ddof: int = 1, numeric_only: bool = False):
"""
Calculate the expanding standard error of mean.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.sem : Aggregating sem for Series.
DataFrame.sem : Aggregating sem for DataFrame.
Notes
-----
A minimum of one period is required for the calculation.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> s.expanding().sem()
0 NaN
1 0.707107
2 0.707107
3 0.745356
dtype: float64
"""
return super().sem(ddof=ddof, numeric_only=numeric_only)
def skew(self, numeric_only: bool = False):
"""
Calculate the expanding unbiased skewness.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
scipy.stats.skew : Third moment of a probability density.
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.skew : Aggregating skew for Series.
DataFrame.skew : Aggregating skew for DataFrame.
Notes
-----
        A minimum of three periods is required for the expanding calculation.
Examples
--------
>>> ser = pd.Series([-1, 0, 2, -1, 2], index=["a", "b", "c", "d", "e"])
>>> ser.expanding().skew()
a NaN
b NaN
c 0.935220
d 1.414214
e 0.315356
dtype: float64
"""
return super().skew(numeric_only=numeric_only)
def kurt(self, numeric_only: bool = False):
"""
Calculate the expanding Fisher's definition of kurtosis without bias.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
scipy.stats.kurtosis : Reference SciPy method.
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.kurt : Aggregating kurt for Series.
DataFrame.kurt : Aggregating kurt for DataFrame.
Notes
-----
A minimum of four periods is required for the calculation.
Examples
--------
        The example below will show an expanding calculation with a minimum of
        four periods matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> import scipy.stats
>>> print(f"{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}")
-1.200000
>>> print(f"{scipy.stats.kurtosis(arr, bias=False):.6f}")
4.999874
>>> s = pd.Series(arr)
>>> s.expanding(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 4.999874
dtype: float64
"""
return super().kurt(numeric_only=numeric_only)
def first(self, numeric_only: bool = False):
"""
Calculate the expanding First (left-most) element of the window.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
GroupBy.first : Similar method for GroupBy objects.
Expanding.last : Method to get the last element in each window.
Examples
--------
        The example below will show an expanding calculation with a minimum of
        three periods.
>>> s = pd.Series(range(5))
>>> s.expanding(3).first()
0 NaN
1 NaN
2 0.0
3 0.0
4 0.0
dtype: float64
"""
return super().first(numeric_only=numeric_only)
def last(self, numeric_only: bool = False):
"""
Calculate the expanding Last (right-most) element of the window.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
GroupBy.last : Similar method for GroupBy objects.
Expanding.first : Method to get the first element in each window.
Examples
--------
        The example below will show an expanding calculation with a minimum of
        three periods.
>>> s = pd.Series(range(5))
>>> s.expanding(3).last()
0 NaN
1 NaN
2 2.0
3 3.0
4 4.0
dtype: float64
"""
return super().last(numeric_only=numeric_only)
def quantile(
self,
q: float,
interpolation: QuantileInterpolation = "linear",
numeric_only: bool = False,
):
"""
Calculate the expanding quantile.
Parameters
----------
q : float
Quantile to compute. 0 <= quantile <= 1.
.. deprecated:: 2.1.0
This was renamed from 'quantile' to 'q' in version 2.1.0.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.quantile : Aggregating quantile for Series.
DataFrame.quantile : Aggregating quantile for DataFrame.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4, 5, 6], index=["a", "b", "c", "d", "e", "f"])
>>> ser.expanding(min_periods=4).quantile(0.25)
a NaN
b NaN
c NaN
d 1.75
e 2.00
f 2.25
dtype: float64
"""
return super().quantile(
q=q,
interpolation=interpolation,
numeric_only=numeric_only,
)
def rank(
self,
method: WindowingRankType = "average",
ascending: bool = True,
pct: bool = False,
numeric_only: bool = False,
):
"""
Calculate the expanding rank.
Parameters
----------
method : {'average', 'min', 'max'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.rank : Aggregating rank for Series.
DataFrame.rank : Aggregating rank for DataFrame.
Examples
--------
>>> s = pd.Series([1, 4, 2, 3, 5, 3])
>>> s.expanding().rank()
0 1.0
1 2.0
2 2.0
3 3.0
4 5.0
5 3.5
dtype: float64
>>> s.expanding().rank(method="max")
0 1.0
1 2.0
2 2.0
3 3.0
4 5.0
5 4.0
dtype: float64
>>> s.expanding().rank(method="min")
0 1.0
1 2.0
2 2.0
3 3.0
4 5.0
5 3.0
dtype: float64
"""
return super().rank(
method=method,
ascending=ascending,
pct=pct,
numeric_only=numeric_only,
)
def nunique(
self,
numeric_only: bool = False,
):
"""
Calculate the expanding nunique.
.. versionadded:: 3.0.0
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.nunique : Aggregating nunique for Series.
DataFrame.nunique : Aggregating nunique for DataFrame.
Examples
--------
>>> s = pd.Series([1, 4, 2, 3, 5, 3])
>>> s.expanding().nunique()
0 1.0
1 2.0
2 3.0
3 4.0
4 5.0
5 5.0
dtype: float64
"""
return super().nunique(
numeric_only=numeric_only,
)
def cov(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
ddof: int = 1,
numeric_only: bool = False,
):
"""
Calculate the expanding sample covariance.
Parameters
----------
other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.cov : Aggregating cov for Series.
DataFrame.cov : Aggregating cov for DataFrame.
Examples
--------
>>> ser1 = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser2 = pd.Series([10, 11, 13, 16], index=["a", "b", "c", "d"])
>>> ser1.expanding().cov(ser2)
a NaN
b 0.500000
c 1.500000
d 3.333333
dtype: float64
"""
return super().cov(
other=other,
pairwise=pairwise,
ddof=ddof,
numeric_only=numeric_only,
)
def corr(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
ddof: int = 1,
numeric_only: bool = False,
):
"""
Calculate the expanding correlation.
Parameters
----------
other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.corr : Aggregating corr for Series.
DataFrame.corr : Aggregating corr for DataFrame.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
>>> ser1 = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser2 = pd.Series([10, 11, 13, 16], index=["a", "b", "c", "d"])
>>> ser1.expanding().corr(ser2)
a NaN
b 1.000000
c 0.981981
d 0.975900
dtype: float64
"""
return super().corr(
other=other,
pairwise=pairwise,
ddof=ddof,
numeric_only=numeric_only,
)
@set_module("pandas.api.typing")
|
Expanding
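For context on the pairwise behaviour described in the ``cov``/``corr`` docstrings above, a minimal sketch (column names are illustrative):
import pandas as pd
df = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0], "y": [10.0, 11.0, 13.0, 16.0]})
# pairwise=True yields a MultiIndexed DataFrame: the original index on the first
# level and the other frame's columns on the second level.
pairwise_cov = df.expanding().cov(pairwise=True)
print(pairwise_cov.loc[3])  # 2x2 covariance block once the window spans all rows
# pairwise=False (with an explicit `other`) keeps only the matching columns.
print(df.expanding().corr(df, pairwise=False))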
|
python
|
django-extensions__django-extensions
|
tests/management/commands/test_reset_db.py
|
{
"start": 6180,
"end": 9436
}
|
class ____(TestCase):
"""Tests for reset_db command and sqlite3 engine."""
@mock.patch("sys.stdout", new_callable=StringIO)
@mock.patch("django_extensions.management.commands.reset_db.input")
def test_should_cancel_reset_db_if_input_is_different_than_yes(
self, m_input, m_stdout
):
m_input.return_value = "no"
call_command("reset_db")
self.assertEqual("Reset cancelled.\n", m_stdout.getvalue())
@mock.patch("sys.stdout", new_callable=StringIO)
def test_should_drop_and_create_database_and_print_success_messsage(self, m_stdout):
m_database = mock.MagicMock()
m_database.__spec__ = mock.Mock()
m_cursor = mock.Mock()
m_database.connect.return_value.cursor.return_value = m_cursor
expected_calls = [
mock.call('DROP DATABASE "test_db";'),
mock.call(
'CREATE DATABASE "test_db" WITH OWNER = "foo" ENCODING = \'UTF8\';'
),
]
mock_kwargs = {"psycopg2": m_database}
has_psycopg3 = importlib.util.find_spec("psycopg") is not None
if has_psycopg3:
mock_kwargs = {"psycopg": m_database}
with mock.patch.dict("sys.modules", **mock_kwargs):
call_command("reset_db", "--noinput", verbosity=2)
m_database.connect.assert_called_once_with(
dbname="template1",
host="127.0.0.1",
password="bar",
port="5432",
user="foo",
)
m_cursor.execute.assert_has_calls(expected_calls, any_order=False)
self.assertEqual("Reset successful.\n", m_stdout.getvalue())
@override_settings(DEFAULT_TABLESPACE="TEST_TABLESPACE")
@mock.patch("sys.stdout", new_callable=StringIO)
def test_should_drop_create_database_close_sessions_and_print_success_messsage(
self, m_stdout
):
m_database = mock.MagicMock()
m_database.__spec__ = mock.Mock()
m_cursor = mock.Mock()
m_database.connect.return_value.cursor.return_value = m_cursor
expected_calls = [
mock.call(
"\n SELECT pg_terminate_backend(pg_stat_activity.pid)\n FROM pg_stat_activity\n WHERE pg_stat_activity.datname = 'test_db';\n "
),
mock.call('DROP DATABASE "test_db";'),
mock.call(
'CREATE DATABASE "test_db" WITH OWNER = "foo" ENCODING = \'UTF8\' TABLESPACE = TEST_TABLESPACE;'
),
]
mock_kwargs = {"psycopg2": m_database}
has_psycopg3 = importlib.util.find_spec("psycopg") is not None
if has_psycopg3:
mock_kwargs = {"psycopg": m_database}
with mock.patch.dict("sys.modules", **mock_kwargs):
call_command("reset_db", "--noinput", "--close-sessions", verbosity=2)
m_database.connect.assert_called_once_with(
dbname="template1",
host="127.0.0.1",
password="bar",
port="5432",
user="foo",
)
m_cursor.execute.assert_has_calls(expected_calls, any_order=False)
self.assertEqual("Reset successful.\n", m_stdout.getvalue())
|
ResetDbPostgresqlTests
|
python
|
ipython__ipython
|
IPython/lib/demo.py
|
{
"start": 20371,
"end": 21642
}
|
class ____(Demo):
"""Demo where each line is executed as a separate block.
The input script should be valid Python code.
This class doesn't require any markup at all, and it's meant for simple
scripts (with no nesting or any kind of indentation) which consist of
multiple lines of input to be executed, one at a time, as if they had been
typed in the interactive prompt.
    Note: the input cannot have *any* indentation, which means that only
    single lines of input are accepted; not even function definitions are
    valid."""
def reload(self):
"""Reload source from disk and initialize state."""
# read data and parse into blocks
self.fload()
lines = self.fobj.readlines()
src_b = [l for l in lines if l.strip()]
nblocks = len(src_b)
self.src = ''.join(lines)
self._silent = [False]*nblocks
self._auto = [True]*nblocks
self.auto_all = True
self.nblocks = nblocks
self.src_blocks = src_b
# also build syntax-highlighted source
self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
# ensure clean namespace and seek offset
self.reset()
|
LineDemo
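A minimal usage sketch for the class above, assuming an active IPython session and an illustrative script file; each call on the demo object runs the next block:
# demo_script.py -- every non-blank, unindented line becomes its own block:
#     x = 40
#     print(x + 2)
from IPython.lib.demo import LineDemo
d = LineDemo("demo_script.py")
d()  # executes "x = 40"
d()  # executes "print(x + 2)"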
|
python
|
huggingface__transformers
|
tests/models/llava_next_video/test_video_processing_llava_next_video.py
|
{
"start": 1049,
"end": 3249
}
|
class ____:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=OPENAI_CLIP_MEAN,
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
):
size = size if size is not None else {"height": 20, "width": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_video_shape(self, images):
return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
|
LlavaNextVideoProcessingTester
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 216606,
"end": 216948
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "status")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
status = sgqlc.types.Field("UserStatus", graphql_name="status")
|
ChangeUserStatusPayload
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py
|
{
"start": 22941,
"end": 23528
}
|
class ____(MetadataValue[str]):
"""Container class for markdown metadata entry data.
Args:
md_str (Optional[str]): The markdown as a string.
"""
md_str: PublicAttr[Optional[str]] = ""
@public
@property
def value(self) -> str:
"""Optional[str]: The wrapped markdown as a string."""
return self.md_str if self.md_str is not None else ""
# This should be deprecated or fixed so that `value` does not return itself.
@public
@whitelist_for_serdes(storage_name="PythonArtifactMetadataEntryData")
@record(kw_only=False)
|
MarkdownMetadataValue
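A minimal sketch of how this value type is normally constructed, via the public ``MetadataValue.md`` helper:
from dagster import MetadataValue
md = MetadataValue.md("## Run report\n\n*All checks passed.*")
print(type(md).__name__)  # MarkdownMetadataValue
print(md.value)           # the wrapped markdown string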
|
python
|
pandas-dev__pandas
|
pandas/tests/test_expressions.py
|
{
"start": 2114,
"end": 14893
}
|
class ____:
@staticmethod
def call_op(df, other, flex: bool, opname: str):
if flex:
op = lambda x, y: getattr(x, opname)(y)
op.__name__ = opname
else:
op = getattr(operator, opname)
with option_context("compute.use_numexpr", False):
expected = op(df, other)
expr.get_test_result()
result = op(df, other)
return result, expected
@pytest.mark.parametrize(
"fixture",
[
"_integer",
"_integer2",
"_integer_integers",
"_frame",
"_frame2",
"_mixed",
"_mixed2",
],
)
@pytest.mark.parametrize("flex", [True, False])
@pytest.mark.parametrize(
"arith", ["add", "sub", "mul", "mod", "truediv", "floordiv"]
)
def test_run_arithmetic(self, request, fixture, flex, arith, monkeypatch):
df = request.getfixturevalue(fixture)
with monkeypatch.context() as m:
m.setattr(expr, "_MIN_ELEMENTS", 0)
result, expected = self.call_op(df, df, flex, arith)
if arith == "truediv":
assert all(x.kind == "f" for x in expected.dtypes.values)
tm.assert_equal(expected, result)
for i in range(len(df.columns)):
result, expected = self.call_op(
df.iloc[:, i], df.iloc[:, i], flex, arith
)
if arith == "truediv":
assert expected.dtype.kind == "f"
tm.assert_equal(expected, result)
@pytest.mark.parametrize(
"fixture",
[
"_integer",
"_integer2",
"_integer_integers",
"_frame",
"_frame2",
"_mixed",
"_mixed2",
],
)
@pytest.mark.parametrize("flex", [True, False])
def test_run_binary(self, request, fixture, flex, comparison_op, monkeypatch):
"""
        Tests solely that the result is the same whether or not numexpr is
        enabled; whether the function does the correct thing is tested
        elsewhere.
"""
df = request.getfixturevalue(fixture)
arith = comparison_op.__name__
with option_context("compute.use_numexpr", False):
other = df + 1
with monkeypatch.context() as m:
m.setattr(expr, "_MIN_ELEMENTS", 0)
expr.set_test_mode(True)
result, expected = self.call_op(df, other, flex, arith)
used_numexpr = expr.get_test_result()
assert used_numexpr, "Did not use numexpr as expected."
tm.assert_equal(expected, result)
for i in range(len(df.columns)):
binary_comp = other.iloc[:, i] + 1
self.call_op(df.iloc[:, i], binary_comp, flex, "add")
def test_invalid(self):
array = np.random.default_rng(2).standard_normal(1_000_001)
array2 = np.random.default_rng(2).standard_normal(100)
# no op
result = expr._can_use_numexpr(operator.add, None, array, array, "evaluate")
assert not result
# min elements
result = expr._can_use_numexpr(operator.add, "+", array2, array2, "evaluate")
assert not result
# ok, we only check on first part of expression
result = expr._can_use_numexpr(operator.add, "+", array, array2, "evaluate")
assert result
@pytest.mark.filterwarnings("ignore:invalid value encountered in:RuntimeWarning")
@pytest.mark.parametrize(
"opname,op_str",
[("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")],
)
@pytest.mark.parametrize(
"left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")]
)
def test_binary_ops(self, request, opname, op_str, left_fix, right_fix):
left = request.getfixturevalue(left_fix)
right = request.getfixturevalue(right_fix)
def testit(left, right, opname, op_str):
if opname == "pow":
left = np.abs(left)
op = getattr(operator, opname)
# array has 0s
result = expr.evaluate(op, left, left, use_numexpr=True)
expected = expr.evaluate(op, left, left, use_numexpr=False)
tm.assert_numpy_array_equal(result, expected)
result = expr._can_use_numexpr(op, op_str, right, right, "evaluate")
assert not result
with option_context("compute.use_numexpr", False):
testit(left, right, opname, op_str)
expr.set_numexpr_threads(1)
testit(left, right, opname, op_str)
expr.set_numexpr_threads()
testit(left, right, opname, op_str)
@pytest.mark.parametrize(
"left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")]
)
def test_comparison_ops(self, request, comparison_op, left_fix, right_fix):
left = request.getfixturevalue(left_fix)
right = request.getfixturevalue(right_fix)
def testit():
f12 = left + 1
f22 = right + 1
op = comparison_op
result = expr.evaluate(op, left, f12, use_numexpr=True)
expected = expr.evaluate(op, left, f12, use_numexpr=False)
tm.assert_numpy_array_equal(result, expected)
result = expr._can_use_numexpr(op, op, right, f22, "evaluate")
assert not result
with option_context("compute.use_numexpr", False):
testit()
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
@pytest.mark.parametrize("cond", [True, False])
@pytest.mark.parametrize("fixture", ["_frame", "_frame2", "_mixed", "_mixed2"])
def test_where(self, request, cond, fixture):
df = request.getfixturevalue(fixture)
def testit():
c = np.empty(df.shape, dtype=np.bool_)
c.fill(cond)
result = expr.where(c, df.values, df.values + 1)
expected = np.where(c, df.values, df.values + 1)
tm.assert_numpy_array_equal(result, expected)
with option_context("compute.use_numexpr", False):
testit()
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
@pytest.mark.parametrize(
"op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")]
)
def test_bool_ops_raise_on_arithmetic(self, op_str, opname):
df = DataFrame(
{
"a": np.random.default_rng(2).random(10) > 0.5,
"b": np.random.default_rng(2).random(10) > 0.5,
}
)
msg = f"operator '{opname}' not implemented for bool dtypes"
f = getattr(operator, opname)
err_msg = re.escape(msg)
with pytest.raises(NotImplementedError, match=err_msg):
f(df, df)
with pytest.raises(NotImplementedError, match=err_msg):
f(df.a, df.b)
with pytest.raises(NotImplementedError, match=err_msg):
f(df.a, True)
with pytest.raises(NotImplementedError, match=err_msg):
f(False, df.a)
with pytest.raises(NotImplementedError, match=err_msg):
f(False, df)
with pytest.raises(NotImplementedError, match=err_msg):
f(df, True)
@pytest.mark.parametrize(
"op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")]
)
def test_bool_ops_warn_on_arithmetic(self, op_str, opname, monkeypatch):
n = 10
df = DataFrame(
{
"a": np.random.default_rng(2).random(n) > 0.5,
"b": np.random.default_rng(2).random(n) > 0.5,
}
)
subs = {"+": "|", "*": "&", "-": "^"}
sub_funcs = {"|": "or_", "&": "and_", "^": "xor"}
f = getattr(operator, opname)
fe = getattr(operator, sub_funcs[subs[op_str]])
if op_str == "-":
# raises TypeError
return
msg = "operator is not supported by numexpr"
ne = import_optional_dependency("numexpr", errors="ignore")
warning = (
UserWarning
if ne
and op_str in {"+", "*"}
and Version(ne.__version__) < Version("2.13.1")
else None
)
with monkeypatch.context() as m:
m.setattr(expr, "_MIN_ELEMENTS", 5)
with option_context("compute.use_numexpr", True):
with tm.assert_produces_warning(warning, match=msg):
r = f(df, df)
e = fe(df, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning(warning, match=msg):
r = f(df.a, df.b)
e = fe(df.a, df.b)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning(warning, match=msg):
r = f(df.a, True)
e = fe(df.a, True)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning(warning, match=msg):
r = f(False, df.a)
e = fe(False, df.a)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning(warning, match=msg):
r = f(False, df)
e = fe(False, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning(warning, match=msg):
r = f(df, True)
e = fe(df, True)
tm.assert_frame_equal(r, e)
@pytest.mark.parametrize(
"test_input,expected",
[
(
DataFrame(
[[0, 1, 2, "aa"], [0, 1, 2, "aa"]], columns=["a", "b", "c", "dtype"]
),
DataFrame([[False, False], [False, False]], columns=["a", "dtype"]),
),
(
DataFrame(
[[0, 3, 2, "aa"], [0, 4, 2, "aa"], [0, 1, 1, "bb"]],
columns=["a", "b", "c", "dtype"],
),
DataFrame(
[[False, False], [False, False], [False, False]],
columns=["a", "dtype"],
),
),
],
)
def test_bool_ops_column_name_dtype(self, test_input, expected):
        # GH 22383 - .ne fails if columns contain the column name 'dtype'
result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"arith", ("add", "sub", "mul", "mod", "truediv", "floordiv")
)
@pytest.mark.parametrize("axis", (0, 1))
def test_frame_series_axis(self, axis, arith, _frame, monkeypatch):
# GH#26736 Dataframe.floordiv(Series, axis=1) fails
df = _frame
if axis == 1:
other = df.iloc[0, :]
else:
other = df.iloc[:, 0]
with monkeypatch.context() as m:
m.setattr(expr, "_MIN_ELEMENTS", 0)
op_func = getattr(df, arith)
with option_context("compute.use_numexpr", False):
expected = op_func(other, axis=axis)
result = op_func(other, axis=axis)
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize(
"op",
[
"__mod__",
"__rmod__",
"__floordiv__",
"__rfloordiv__",
],
)
@pytest.mark.parametrize("scalar", [-5, 5])
def test_python_semantics_with_numexpr_installed(
self, op, box_with_array, scalar, monkeypatch
):
# https://github.com/pandas-dev/pandas/issues/36047
with monkeypatch.context() as m:
m.setattr(expr, "_MIN_ELEMENTS", 0)
data = np.arange(-50, 50)
obj = box_with_array(data)
method = getattr(obj, op)
result = method(scalar)
# compare result with numpy
with option_context("compute.use_numexpr", False):
expected = method(scalar)
tm.assert_equal(result, expected)
# compare result element-wise with Python
for i, elem in enumerate(data):
if box_with_array == DataFrame:
scalar_result = result.iloc[i, 0]
else:
scalar_result = result[i]
try:
expected = getattr(int(elem), op)(scalar)
except ZeroDivisionError:
pass
else:
assert scalar_result == expected
|
TestExpressions
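The tests above toggle the same option that is available to users; a minimal sketch comparing results with and without numexpr acceleration (assumes numexpr is installed):
import numpy as np
import pandas as pd
df = pd.DataFrame(np.random.default_rng(0).standard_normal((100_000, 4)))
with pd.option_context("compute.use_numexpr", False):
    plain = df + df      # evaluated with plain NumPy
accelerated = df + df    # may be evaluated via numexpr when it is installed
pd.testing.assert_frame_equal(plain, accelerated)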
|
python
|
jazzband__django-oauth-toolkit
|
tests/test_token_view.py
|
{
"start": 4322,
"end": 8017
}
|
class ____(TestAuthorizedTokenViews):
"""
Tests for the Authorized Token DeleteView
"""
def test_delete_view_authorization_required(self):
"""
Test that the view redirects to login page if user is not logged-in.
"""
self.token = AccessToken.objects.create(
user=self.foo_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
url = reverse("oauth2_provider:authorized-token-delete", kwargs={"pk": self.token.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertTrue("/accounts/login/?next=" in response["Location"])
def test_delete_view_works(self):
"""
Test that a GET on this view returns 200 if the token belongs to the logged-in user.
"""
self.token = AccessToken.objects.create(
user=self.foo_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
self.client.login(username="foo_user", password="123456")
url = reverse("oauth2_provider:authorized-token-delete", kwargs={"pk": self.token.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_delete_view_token_belongs_to_user(self):
"""
        Test that a 404 is returned when trying to GET this view with someone else's tokens.
"""
self.token = AccessToken.objects.create(
user=self.foo_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
self.client.login(username="bar_user", password="123456")
url = reverse("oauth2_provider:authorized-token-delete", kwargs={"pk": self.token.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_delete_view_post_actually_deletes(self):
"""
Test that a POST on this view works if the token belongs to the logged-in user.
"""
self.token = AccessToken.objects.create(
user=self.foo_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
self.client.login(username="foo_user", password="123456")
url = reverse("oauth2_provider:authorized-token-delete", kwargs={"pk": self.token.pk})
response = self.client.post(url)
self.assertFalse(AccessToken.objects.exists())
self.assertRedirects(response, reverse("oauth2_provider:authorized-token-list"))
def test_delete_view_only_deletes_user_own_token(self):
"""
        Test that a 404 is returned when trying to POST on this view with someone else's tokens.
"""
self.token = AccessToken.objects.create(
user=self.foo_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
self.client.login(username="bar_user", password="123456")
url = reverse("oauth2_provider:authorized-token-delete", kwargs={"pk": self.token.pk})
response = self.client.post(url)
self.assertTrue(AccessToken.objects.exists())
self.assertEqual(response.status_code, 404)
|
TestAuthorizedTokenDeleteView
|
python
|
celery__celery
|
celery/app/registry.py
|
{
"start": 279,
"end": 2001
}
|
class ____(dict):
"""Map of registered tasks."""
NotRegistered = NotRegistered
def __missing__(self, key):
raise self.NotRegistered(key)
def register(self, task):
"""Register a task in the task registry.
The task will be automatically instantiated if not already an
instance. Name must be configured prior to registration.
"""
if task.name is None:
raise InvalidTaskError(
'Task class {!r} must specify .name attribute'.format(
type(task).__name__))
task = inspect.isclass(task) and task() or task
add_autoretry_behaviour(task)
self[task.name] = task
def unregister(self, name):
"""Unregister task by name.
Arguments:
name (str): name of the task to unregister, or a
:class:`celery.app.task.Task` with a valid `name` attribute.
Raises:
celery.exceptions.NotRegistered: if the task is not registered.
"""
try:
self.pop(getattr(name, 'name', name))
except KeyError:
raise self.NotRegistered(name)
# -- these methods are irrelevant now and will be removed in 4.0
def regular(self):
return self.filter_types('regular')
def periodic(self):
return self.filter_types('periodic')
def filter_types(self, type):
return {name: task for name, task in self.items()
if getattr(task, 'type', 'regular') == type}
def _unpickle_task(name):
return get_current_app().tasks[name]
def _unpickle_task_v2(name, module=None):
if module:
import_module(module)
return get_current_app().tasks[name]
|
TaskRegistry
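A minimal usage sketch (the app and task names are illustrative); ``app.tasks`` is an instance of the registry class above:
from celery import Celery
app = Celery("demo")
@app.task(name="demo.add")
def add(x, y):
    return x + y
assert "demo.add" in app.tasks    # registered under its configured name
app.tasks.unregister("demo.add")  # raises NotRegistered if the name is unknown
assert "demo.add" not in app.tasks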
|
python
|
kamyu104__LeetCode-Solutions
|
Python/replace-words.py
|
{
"start": 84,
"end": 783
}
|
class ____(object):
def replaceWords(self, dictionary, sentence):
"""
:type dictionary: List[str]
:type sentence: str
:rtype: str
"""
_trie = lambda: collections.defaultdict(_trie)
trie = _trie()
for word in dictionary:
reduce(dict.__getitem__, word, trie).setdefault("_end")
def replace(word):
curr = trie
for i, c in enumerate(word):
if c not in curr:
break
curr = curr[c]
if "_end" in curr:
return word[:i+1]
return word
return " ".join(map(replace, sentence.split()))
|
Solution
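A usage sketch for the snippet above (the original is Python 2 style; under Python 3 it additionally needs ``import collections`` and ``from functools import reduce`` in scope):
dictionary = ["cat", "bat", "rat"]
sentence = "the cattle was rattled by the battery"
print(Solution().replaceWords(dictionary, sentence))
# -> "the cat was rat by the bat"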
|
python
|
getsentry__sentry
|
src/sentry/incidents/utils/types.py
|
{
"start": 122,
"end": 258
}
|
class ____(TypedDict):
entity: str
subscription_id: str
values: Any
timestamp: datetime
@dataclass
|
QuerySubscriptionUpdate
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/linalg/linear_operator_householder.py
|
{
"start": 1446,
"end": 11311
}
|
class ____(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] of Householder transformations.
This operator acts like a [batch] of householder reflections with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorHouseholder` is initialized with a (batch) vector.
  A Householder reflection is defined via a vector `v`; it reflects points
  in `R^n` about the hyperplane orthogonal to `v` and through the origin.
```python
# Create a 2 x 2 householder transform.
vec = [1 / np.sqrt(2), 1. / np.sqrt(2)]
operator = LinearOperatorHouseholder(vec)
operator.to_dense()
==> [[0., -1.]
[-1., -0.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
reflection_axis,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorHouseholder"):
r"""Initialize a `LinearOperatorHouseholder`.
Args:
reflection_axis: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The vector defining the hyperplane to reflect about.
Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
`complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose. This is autoset to true.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
This is autoset to false.
is_square: Expect that this operator acts like square [batch] matrices.
This is autoset to true.
name: A name for this `LinearOperator`.
Raises:
ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is
not `False` or `is_square` is not `True`.
"""
parameters = dict(
reflection_axis=reflection_axis,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
with ops.name_scope(name, values=[reflection_axis]):
self._reflection_axis = linear_operator_util.convert_nonref_to_tensor(
reflection_axis, name="reflection_axis")
self._check_reflection_axis(self._reflection_axis)
# Check and auto-set hints.
if is_self_adjoint is False: # pylint:disable=g-bool-id-comparison
raise ValueError("A Householder operator is always self adjoint.")
else:
is_self_adjoint = True
if is_positive_definite is True: # pylint:disable=g-bool-id-comparison
raise ValueError(
"A Householder operator is always non-positive definite.")
else:
is_positive_definite = False
if is_square is False: # pylint:disable=g-bool-id-comparison
raise ValueError("A Householder operator is always square.")
is_square = True
super(LinearOperatorHouseholder, self).__init__(
dtype=self._reflection_axis.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
def _check_reflection_axis(self, reflection_axis):
"""Static check of reflection_axis."""
if (reflection_axis.shape.ndims is not None and
reflection_axis.shape.ndims < 1):
raise ValueError(
"Argument reflection_axis must have at least 1 dimension. "
"Found: %s" % reflection_axis)
def _shape(self):
# If d_shape = [5, 3], we return [5, 3, 3].
d_shape = self._reflection_axis.shape
return d_shape.concatenate(d_shape[-1:])
def _shape_tensor(self):
d_shape = array_ops.shape(self._reflection_axis)
k = d_shape[-1]
return array_ops.concat((d_shape, [k]), 0)
def _assert_non_singular(self):
return control_flow_ops.no_op("assert_non_singular")
def _assert_positive_definite(self):
raise errors.InvalidArgumentError(
node_def=None, op=None, message="Householder operators are always "
"non-positive definite.")
def _assert_self_adjoint(self):
return control_flow_ops.no_op("assert_self_adjoint")
def _linop_adjoint(self) -> "LinearOperatorHouseholder":
return self
def _linop_inverse(self) -> "LinearOperatorHouseholder":
return self
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Given a vector `v`, we would like to reflect `x` about the hyperplane
# orthogonal to `v` going through the origin. We first project `x` to `v`
# to get v * dot(v, x) / dot(v, v). After we project, we can reflect the
# projection about the hyperplane by flipping sign to get
# -v * dot(v, x) / dot(v, v). Finally, we can add back the component
# that is orthogonal to v. This is invariant under reflection, since the
# whole hyperplane is invariant. This component is equal to x - v * dot(v,
# x) / dot(v, v), giving the formula x - 2 * v * dot(v, x) / dot(v, v)
# for the reflection.
# Note that because this is a reflection, it lies in O(n) (for real vector
# spaces) or U(n) (for complex vector spaces), and thus is its own adjoint.
reflection_axis = tensor_conversion.convert_to_tensor_v2_with_dispatch(
self.reflection_axis
)
x = linalg.adjoint(x) if adjoint_arg else x
normalized_axis = nn.l2_normalize(reflection_axis, axis=-1)
mat = normalized_axis[..., array_ops.newaxis]
x_dot_normalized_v = math_ops.matmul(mat, x, adjoint_a=True)
return x - 2 * mat * x_dot_normalized_v
def _trace(self):
    # We have (n - 1) eigenvalues equal to +1 and a single -1 eigenvalue.
shape = self.shape_tensor()
return math_ops.cast(
self._domain_dimension_tensor(shape=shape) - 2,
self.dtype) * array_ops.ones(
shape=self._batch_shape_tensor(shape=shape), dtype=self.dtype)
def _determinant(self):
# For householder transformations, the determinant is -1.
return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype) # pylint: disable=invalid-unary-operand-type
def _log_abs_determinant(self):
# Orthogonal matrix -> log|Q| = 0.
return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# A householder reflection is a reflection, hence is idempotent. Thus we
# can just apply a matmul.
return self._matmul(rhs, adjoint, adjoint_arg)
def _to_dense(self):
reflection_axis = tensor_conversion.convert_to_tensor_v2_with_dispatch(
self.reflection_axis
)
normalized_axis = nn.l2_normalize(reflection_axis, axis=-1)
mat = normalized_axis[..., array_ops.newaxis]
matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True)
return array_ops.matrix_set_diag(
matrix, 1. + array_ops.matrix_diag_part(matrix))
def _diag_part(self):
reflection_axis = tensor_conversion.convert_to_tensor_v2_with_dispatch(
self.reflection_axis
)
normalized_axis = nn.l2_normalize(reflection_axis, axis=-1)
return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis)
def _eigvals(self):
    # We have (n - 1) eigenvalues equal to +1 and a single -1 eigenvalue.
result_shape = array_ops.shape(self.reflection_axis)
n = result_shape[-1]
ones_shape = array_ops.concat([result_shape[:-1], [n - 1]], axis=-1)
neg_shape = array_ops.concat([result_shape[:-1], [1]], axis=-1)
eigvals = array_ops.ones(shape=ones_shape, dtype=self.dtype)
eigvals = array_ops.concat(
[-array_ops.ones(shape=neg_shape, dtype=self.dtype), eigvals], axis=-1) # pylint: disable=invalid-unary-operand-type
return eigvals
def _cond(self):
    # Householder matrices are orthogonal reflections, which have condition number 1.
return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
@property
def reflection_axis(self):
return self._reflection_axis
@property
def _composite_tensor_fields(self):
return ("reflection_axis",)
@property
def _experimental_parameter_ndims_to_matrix_ndims(self):
return {"reflection_axis": 1}
|
LinearOperatorHouseholder
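The ``_matmul`` comment above derives the reflection formula ``x - 2 * v * dot(v, x) / dot(v, v)``; a small NumPy check of that formula and of the determinant (values are illustrative):
import numpy as np
v = np.array([1.0, 1.0]) / np.sqrt(2.0)   # unit-norm reflection axis
H = np.eye(2) - 2.0 * np.outer(v, v)      # dense Householder matrix
x = np.array([1.0, 0.0])
print(H @ x)                          # [ 0. -1.] -- x reflected about the hyperplane orthogonal to v
print(np.linalg.det(H))               # -1.0 -- a Householder reflection always has determinant -1
print(np.allclose(H @ H, np.eye(2)))  # True -- a reflection is its own inverse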
|
python
|
numba__numba
|
numba/tests/test_interproc.py
|
{
"start": 236,
"end": 1099
}
|
class ____(unittest.TestCase):
def test_bar_call_foo(self):
global cfoo
cfoo = jit((int32, int32), nopython=True)(foo)
cbar = jit((int32, int32), nopython=True)(bar)
self.assertEqual(cbar(1, 2), 1 + 2 + 2)
def test_bar_call_foo_compiled_twice(self):
# When a function is compiled twice, then called from another
# compiled function, check that the right target is called.
# (otherwise, LLVM would assert out or crash)
global cfoo
for i in range(2):
cfoo = jit((int32, int32), nopython=True)(foo)
gc.collect()
cbar = jit((int32, int32), nopython=True)(bar)
self.assertEqual(cbar(1, 2), 1 + 2 + 2)
def test_callsite_compilation(self):
self.assertEqual(outer(1, 2), 1 + 2)
if __name__ == '__main__':
unittest.main()
|
TestInterProc
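The behaviour under test (one jitted function calling another) in a minimal standalone sketch:
from numba import njit
@njit
def foo(a, b):
    return a + b
@njit
def bar(a, b):
    return foo(a, b) + b  # call into another compiled function
assert bar(1, 2) == 1 + 2 + 2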
|
python
|
huggingface__transformers
|
src/transformers/models/exaone4/modular_exaone4.py
|
{
"start": 20910,
"end": 20987
}
|
class ____(LlamaForTokenClassification):
pass
|
Exaone4ForTokenClassification
|
python
|
great-expectations__great_expectations
|
great_expectations/exceptions/exceptions.py
|
{
"start": 12339,
"end": 12428
}
|
class ____(DatasourceInitializationError):
pass
|
DatasourceKeyPairAuthBadPassphraseError
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_vendor/rich/tree.py
|
{
"start": 380,
"end": 8446
}
|
class ____(JupyterMixin):
"""A renderable for a tree structure.
Attributes:
ASCII_GUIDES (GuideType): Guide lines used when Console.ascii_only is True.
TREE_GUIDES (List[GuideType, GuideType, GuideType]): Default guide lines.
Args:
label (RenderableType): The renderable or str for the tree label.
style (StyleType, optional): Style of this tree. Defaults to "tree".
guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
expanded (bool, optional): Also display children. Defaults to True.
highlight (bool, optional): Highlight renderable (if str). Defaults to False.
hide_root (bool, optional): Hide the root node. Defaults to False.
"""
ASCII_GUIDES = (" ", "| ", "+-- ", "`-- ")
TREE_GUIDES = [
(" ", "│ ", "├── ", "└── "),
(" ", "┃ ", "┣━━ ", "┗━━ "),
(" ", "║ ", "╠══ ", "╚══ "),
]
def __init__(
self,
label: RenderableType,
*,
style: StyleType = "tree",
guide_style: StyleType = "tree.line",
expanded: bool = True,
highlight: bool = False,
hide_root: bool = False,
) -> None:
self.label = label
self.style = style
self.guide_style = guide_style
self.children: List[Tree] = []
self.expanded = expanded
self.highlight = highlight
self.hide_root = hide_root
def add(
self,
label: RenderableType,
*,
style: Optional[StyleType] = None,
guide_style: Optional[StyleType] = None,
expanded: bool = True,
highlight: Optional[bool] = False,
) -> "Tree":
"""Add a child tree.
Args:
label (RenderableType): The renderable or str for the tree label.
style (StyleType, optional): Style of this tree. Defaults to "tree".
guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
expanded (bool, optional): Also display children. Defaults to True.
highlight (Optional[bool], optional): Highlight renderable (if str). Defaults to False.
Returns:
Tree: A new child Tree, which may be further modified.
"""
node = Tree(
label,
style=self.style if style is None else style,
guide_style=self.guide_style if guide_style is None else guide_style,
expanded=expanded,
highlight=self.highlight if highlight is None else highlight,
)
self.children.append(node)
return node
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
stack: List[Iterator[Tuple[bool, Tree]]] = []
pop = stack.pop
push = stack.append
new_line = Segment.line()
get_style = console.get_style
null_style = Style.null()
guide_style = get_style(self.guide_style, default="") or null_style
SPACE, CONTINUE, FORK, END = range(4)
_Segment = Segment
def make_guide(index: int, style: Style) -> Segment:
"""Make a Segment for a level of the guide lines."""
if options.ascii_only:
line = self.ASCII_GUIDES[index]
else:
guide = 1 if style.bold else (2 if style.underline2 else 0)
line = self.TREE_GUIDES[0 if options.legacy_windows else guide][index]
return _Segment(line, style)
levels: List[Segment] = [make_guide(CONTINUE, guide_style)]
push(iter(loop_last([self])))
guide_style_stack = StyleStack(get_style(self.guide_style))
style_stack = StyleStack(get_style(self.style))
remove_guide_styles = Style(bold=False, underline2=False)
depth = 0
while stack:
stack_node = pop()
try:
last, node = next(stack_node)
except StopIteration:
levels.pop()
if levels:
guide_style = levels[-1].style or null_style
levels[-1] = make_guide(FORK, guide_style)
guide_style_stack.pop()
style_stack.pop()
continue
push(stack_node)
if last:
levels[-1] = make_guide(END, levels[-1].style or null_style)
guide_style = guide_style_stack.current + get_style(node.guide_style)
style = style_stack.current + get_style(node.style)
prefix = levels[(2 if self.hide_root else 1) :]
renderable_lines = console.render_lines(
Styled(node.label, style),
options.update(
width=options.max_width
- sum(level.cell_length for level in prefix),
highlight=self.highlight,
height=None,
),
pad=options.justify is not None,
)
if not (depth == 0 and self.hide_root):
for first, line in loop_first(renderable_lines):
if prefix:
yield from _Segment.apply_style(
prefix,
style.background_style,
post_style=remove_guide_styles,
)
yield from line
yield new_line
if first and prefix:
prefix[-1] = make_guide(
SPACE if last else CONTINUE, prefix[-1].style or null_style
)
if node.expanded and node.children:
levels[-1] = make_guide(
SPACE if last else CONTINUE, levels[-1].style or null_style
)
levels.append(
make_guide(END if len(node.children) == 1 else FORK, guide_style)
)
style_stack.push(get_style(node.style))
guide_style_stack.push(get_style(node.guide_style))
push(iter(loop_last(node.children)))
depth += 1
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
stack: List[Iterator[Tree]] = [iter([self])]
pop = stack.pop
push = stack.append
minimum = 0
maximum = 0
measure = Measurement.get
level = 0
while stack:
iter_tree = pop()
try:
tree = next(iter_tree)
except StopIteration:
level -= 1
continue
push(iter_tree)
min_measure, max_measure = measure(console, options, tree.label)
indent = level * 4
minimum = max(min_measure + indent, minimum)
maximum = max(max_measure + indent, maximum)
if tree.expanded and tree.children:
push(iter(tree.children))
level += 1
return Measurement(minimum, maximum)
if __name__ == "__main__": # pragma: no cover
from pipenv.patched.pip._vendor.rich.console import Group
from pipenv.patched.pip._vendor.rich.markdown import Markdown
from pipenv.patched.pip._vendor.rich.panel import Panel
from pipenv.patched.pip._vendor.rich.syntax import Syntax
from pipenv.patched.pip._vendor.rich.table import Table
table = Table(row_styles=["", "dim"])
table.add_column("Released", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
code = """\
|
Tree
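A minimal usage sketch of the class above, shown with the standalone ``rich`` package (the vendored ``pipenv.patched.pip._vendor.rich`` path behaves the same); labels and styles are illustrative:
from rich.console import Console
from rich.tree import Tree
tree = Tree("project")
src = tree.add("src", guide_style="bold bright_blue")
src.add("main.py")
src.add("utils.py")
tree.add("README.md")
Console().print(tree)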
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py
|
{
"start": 14778,
"end": 16180
}
|
class ____:
@mock.patch(MANAGED_KAFKA_PATH.format("types.ListConsumerGroupsResponse.to_dict"))
@mock.patch(MANAGED_KAFKA_PATH.format("types.ConsumerGroup.to_dict"))
@mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook"))
def test_execute(self, mock_hook, to_cluster_dict_mock, to_clusters_dict_mock):
page_token = "page_token"
page_size = 42
op = ManagedKafkaListConsumerGroupsOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_id=TEST_CLUSTER_ID,
page_size=page_size,
page_token=page_token,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.list_consumer_groups.assert_called_once_with(
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_id=TEST_CLUSTER_ID,
page_size=page_size,
page_token=page_token,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
|
TestManagedKafkaListConsumerGroupsOperator
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/ddl.py
|
{
"start": 33290,
"end": 34021
}
|
class ____(TableDropDDL):
"""Represent a DROP TABLE statement."""
__visit_name__ = "drop_table"
def __init__(self, element: Table, if_exists: bool = False) -> None:
"""Create a :class:`.DropTable` construct.
:param element: a :class:`_schema.Table` that's the subject
of the DROP.
:param on: See the description for 'on' in :class:`.DDL`.
:param if_exists: if True, an IF EXISTS operator will be applied to the
construct.
.. versionadded:: 1.4.0b2
"""
super().__init__(element, if_exists=if_exists)
def to_metadata(self, metadata: MetaData, table: Table) -> Self:
return self.__class__(table, if_exists=self.if_exists)
|
DropTable
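A minimal sketch of emitting the construct above (output shown for the default string compiler; table and column names are illustrative):
from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.schema import DropTable
metadata = MetaData()
users = Table("users", metadata, Column("id", Integer, primary_key=True))
print(DropTable(users))                  # DROP TABLE users
print(DropTable(users, if_exists=True))  # DROP TABLE IF EXISTS users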
|
python
|
encode__httpx
|
httpx/_client.py
|
{
"start": 19412,
"end": 42349
}
|
class ____(BaseClient):
"""
An HTTP client, with connection pooling, HTTP/2, redirects, cookie persistence, etc.
It can be shared between threads.
Usage:
```python
>>> client = httpx.Client()
>>> response = client.get('https://example.org')
```
**Parameters:**
* **auth** - *(optional)* An authentication class to use when sending
requests.
* **params** - *(optional)* Query parameters to include in request URLs, as
a string, dictionary, or sequence of two-tuples.
* **headers** - *(optional)* Dictionary of HTTP headers to include when
sending requests.
* **cookies** - *(optional)* Dictionary of Cookie items to include when
sending requests.
* **verify** - *(optional)* Either `True` to use an SSL context with the
default CA bundle, `False` to disable verification, or an instance of
`ssl.SSLContext` to use a custom context.
* **http2** - *(optional)* A boolean indicating if HTTP/2 support should be
enabled. Defaults to `False`.
* **proxy** - *(optional)* A proxy URL where all the traffic should be routed.
* **timeout** - *(optional)* The timeout configuration to use when sending
requests.
* **limits** - *(optional)* The limits configuration to use.
* **max_redirects** - *(optional)* The maximum number of redirect responses
that should be followed.
* **base_url** - *(optional)* A URL to use as the base when building
request URLs.
* **transport** - *(optional)* A transport class to use for sending requests
over the network.
* **trust_env** - *(optional)* Enables or disables usage of environment
variables for configuration.
* **default_encoding** - *(optional)* The default encoding to use for decoding
response text, if no charset information is included in a response Content-Type
header. Set to a callable for automatic character set detection. Default: "utf-8".
"""
def __init__(
self,
*,
auth: AuthTypes | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
verify: ssl.SSLContext | str | bool = True,
cert: CertTypes | None = None,
trust_env: bool = True,
http1: bool = True,
http2: bool = False,
proxy: ProxyTypes | None = None,
mounts: None | (typing.Mapping[str, BaseTransport | None]) = None,
timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
follow_redirects: bool = False,
limits: Limits = DEFAULT_LIMITS,
max_redirects: int = DEFAULT_MAX_REDIRECTS,
event_hooks: None | (typing.Mapping[str, list[EventHook]]) = None,
base_url: URL | str = "",
transport: BaseTransport | None = None,
default_encoding: str | typing.Callable[[bytes], str] = "utf-8",
) -> None:
super().__init__(
auth=auth,
params=params,
headers=headers,
cookies=cookies,
timeout=timeout,
follow_redirects=follow_redirects,
max_redirects=max_redirects,
event_hooks=event_hooks,
base_url=base_url,
trust_env=trust_env,
default_encoding=default_encoding,
)
if http2:
try:
import h2 # noqa
except ImportError: # pragma: no cover
raise ImportError(
"Using http2=True, but the 'h2' package is not installed. "
"Make sure to install httpx using `pip install httpx[http2]`."
) from None
allow_env_proxies = trust_env and transport is None
proxy_map = self._get_proxy_map(proxy, allow_env_proxies)
self._transport = self._init_transport(
verify=verify,
cert=cert,
trust_env=trust_env,
http1=http1,
http2=http2,
limits=limits,
transport=transport,
)
self._mounts: dict[URLPattern, BaseTransport | None] = {
URLPattern(key): None
if proxy is None
else self._init_proxy_transport(
proxy,
verify=verify,
cert=cert,
trust_env=trust_env,
http1=http1,
http2=http2,
limits=limits,
)
for key, proxy in proxy_map.items()
}
if mounts is not None:
self._mounts.update(
{URLPattern(key): transport for key, transport in mounts.items()}
)
self._mounts = dict(sorted(self._mounts.items()))
def _init_transport(
self,
verify: ssl.SSLContext | str | bool = True,
cert: CertTypes | None = None,
trust_env: bool = True,
http1: bool = True,
http2: bool = False,
limits: Limits = DEFAULT_LIMITS,
transport: BaseTransport | None = None,
) -> BaseTransport:
if transport is not None:
return transport
return HTTPTransport(
verify=verify,
cert=cert,
trust_env=trust_env,
http1=http1,
http2=http2,
limits=limits,
)
def _init_proxy_transport(
self,
proxy: Proxy,
verify: ssl.SSLContext | str | bool = True,
cert: CertTypes | None = None,
trust_env: bool = True,
http1: bool = True,
http2: bool = False,
limits: Limits = DEFAULT_LIMITS,
) -> BaseTransport:
return HTTPTransport(
verify=verify,
cert=cert,
trust_env=trust_env,
http1=http1,
http2=http2,
limits=limits,
proxy=proxy,
)
def _transport_for_url(self, url: URL) -> BaseTransport:
"""
Returns the transport instance that should be used for a given URL.
This will either be the standard connection pool, or a proxy.
"""
for pattern, transport in self._mounts.items():
if pattern.matches(url):
return self._transport if transport is None else transport
return self._transport
def request(
self,
method: str,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Build and send a request.
Equivalent to:
```python
request = client.build_request(...)
response = client.send(request, ...)
```
See `Client.build_request()`, `Client.send()` and
[Merging of configuration][0] for how the various parameters
are merged with client-level configuration.
[0]: /advanced/clients/#merging-of-configuration
"""
if cookies is not None:
message = (
"Setting per-request cookies=<...> is being deprecated, because "
"the expected behaviour on cookie persistence is ambiguous. Set "
"cookies directly on the client instance instead."
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
request = self.build_request(
method=method,
url=url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
timeout=timeout,
extensions=extensions,
)
return self.send(request, auth=auth, follow_redirects=follow_redirects)
@contextmanager
def stream(
self,
method: str,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> typing.Iterator[Response]:
"""
Alternative to `httpx.request()` that streams the response body
instead of loading it into memory at once.
**Parameters**: See `httpx.request`.
See also: [Streaming Responses][0]
[0]: /quickstart#streaming-responses
"""
request = self.build_request(
method=method,
url=url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
timeout=timeout,
extensions=extensions,
)
response = self.send(
request=request,
auth=auth,
follow_redirects=follow_redirects,
stream=True,
)
try:
yield response
finally:
response.close()
def send(
self,
request: Request,
*,
stream: bool = False,
auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
) -> Response:
"""
Send a request.
The request is sent as-is, unmodified.
Typically you'll want to build one with `Client.build_request()`
so that any client-level configuration is merged into the request,
but passing an explicit `httpx.Request()` is supported as well.
See also: [Request instances][0]
[0]: /advanced/clients/#request-instances
"""
if self._state == ClientState.CLOSED:
raise RuntimeError("Cannot send a request, as the client has been closed.")
self._state = ClientState.OPENED
follow_redirects = (
self.follow_redirects
if isinstance(follow_redirects, UseClientDefault)
else follow_redirects
)
self._set_timeout(request)
auth = self._build_request_auth(request, auth)
response = self._send_handling_auth(
request,
auth=auth,
follow_redirects=follow_redirects,
history=[],
)
try:
if not stream:
response.read()
return response
except BaseException as exc:
response.close()
raise exc
def _send_handling_auth(
self,
request: Request,
auth: Auth,
follow_redirects: bool,
history: list[Response],
) -> Response:
auth_flow = auth.sync_auth_flow(request)
try:
request = next(auth_flow)
while True:
response = self._send_handling_redirects(
request,
follow_redirects=follow_redirects,
history=history,
)
try:
try:
next_request = auth_flow.send(response)
except StopIteration:
return response
response.history = list(history)
response.read()
request = next_request
history.append(response)
except BaseException as exc:
response.close()
raise exc
finally:
auth_flow.close()
def _send_handling_redirects(
self,
request: Request,
follow_redirects: bool,
history: list[Response],
) -> Response:
while True:
if len(history) > self.max_redirects:
raise TooManyRedirects(
"Exceeded maximum allowed redirects.", request=request
)
for hook in self._event_hooks["request"]:
hook(request)
response = self._send_single_request(request)
try:
for hook in self._event_hooks["response"]:
hook(response)
response.history = list(history)
if not response.has_redirect_location:
return response
request = self._build_redirect_request(request, response)
history = history + [response]
if follow_redirects:
response.read()
else:
response.next_request = request
return response
except BaseException as exc:
response.close()
raise exc
def _send_single_request(self, request: Request) -> Response:
"""
Sends a single request, without handling any redirections.
"""
transport = self._transport_for_url(request.url)
start = time.perf_counter()
if not isinstance(request.stream, SyncByteStream):
raise RuntimeError(
"Attempted to send an async request with a sync Client instance."
)
with request_context(request=request):
response = transport.handle_request(request)
assert isinstance(response.stream, SyncByteStream)
response.request = request
response.stream = BoundSyncStream(
response.stream, response=response, start=start
)
self.cookies.extract_cookies(response)
response.default_encoding = self._default_encoding
logger.info(
'HTTP Request: %s %s "%s %d %s"',
request.method,
request.url,
response.http_version,
response.status_code,
response.reason_phrase,
)
return response
def get(
self,
url: URL | str,
*,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `GET` request.
**Parameters**: See `httpx.request`.
"""
return self.request(
"GET",
url,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
def options(
self,
url: URL | str,
*,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send an `OPTIONS` request.
**Parameters**: See `httpx.request`.
"""
return self.request(
"OPTIONS",
url,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
def head(
self,
url: URL | str,
*,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `HEAD` request.
**Parameters**: See `httpx.request`.
"""
return self.request(
"HEAD",
url,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
def post(
self,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `POST` request.
**Parameters**: See `httpx.request`.
"""
return self.request(
"POST",
url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
def put(
self,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `PUT` request.
**Parameters**: See `httpx.request`.
"""
return self.request(
"PUT",
url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
def patch(
self,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `PATCH` request.
**Parameters**: See `httpx.request`.
"""
return self.request(
"PATCH",
url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
def delete(
self,
url: URL | str,
*,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `DELETE` request.
**Parameters**: See `httpx.request`.
"""
return self.request(
"DELETE",
url,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
def close(self) -> None:
"""
Close transport and proxies.
"""
if self._state != ClientState.CLOSED:
self._state = ClientState.CLOSED
self._transport.close()
for transport in self._mounts.values():
if transport is not None:
transport.close()
def __enter__(self: T) -> T:
if self._state != ClientState.UNOPENED:
msg = {
ClientState.OPENED: "Cannot open a client instance more than once.",
ClientState.CLOSED: (
"Cannot reopen a client instance, once it has been closed."
),
}[self._state]
raise RuntimeError(msg)
self._state = ClientState.OPENED
self._transport.__enter__()
for transport in self._mounts.values():
if transport is not None:
transport.__enter__()
return self
def __exit__(
self,
exc_type: type[BaseException] | None = None,
exc_value: BaseException | None = None,
traceback: TracebackType | None = None,
) -> None:
self._state = ClientState.CLOSED
self._transport.__exit__(exc_type, exc_value, traceback)
for transport in self._mounts.values():
if transport is not None:
transport.__exit__(exc_type, exc_value, traceback)
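# A minimal usage sketch (not part of the class above), assuming this is the
# standard httpx sync Client: the client is used as a context manager, which
# opens the transport on __enter__ and closes it, plus any mounted transports,
# on __exit__. The URL and params below are illustrative only.
import httpx

with httpx.Client() as client:
    response = client.get("https://www.example.org/", params={"q": "demo"})
    print(response.status_code, response.http_version)
# Re-entering the client after the `with` block raises RuntimeError, per the
# state checks in __enter__.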
|
Client
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-lattice-points-inside-a-circle.py
|
{
"start": 80,
"end": 538
}
|
class ____(object):
def countLatticePoints(self, circles):
"""
:type circles: List[List[int]]
:rtype: int
"""
lookup = set()
for x, y, r in circles:
for i in xrange(-r, r+1):
for j in xrange(-r, r+1):
if i**2+j**2 <= r**2:
lookup.add(((x+i), (y+j)))
return len(lookup)
# Time: O(n * max_x * max_y)
# Space: O(1)
# math
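# Worked check of the same brute force (Python 3 `range` in place of `xrange`),
# using the LeetCode sample input [[2, 2, 1]]: the circle centred at (2, 2) with
# radius 1 covers the five lattice points (1,2), (2,1), (2,2), (2,3), (3,2).
points = {(x + i, y + j)
          for x, y, r in [[2, 2, 1]]
          for i in range(-r, r + 1)
          for j in range(-r, r + 1)
          if i * i + j * j <= r * r}
assert len(points) == 5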
|
Solution
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_organization_releases.py
|
{
"start": 94996,
"end": 98712
}
|
class ____(ReleaseCommitPatchTest):
@cached_property
def url(self):
return reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": self.org.slug}
)
def test_commits_with_patch_set(self) -> None:
response = self.client.post(
self.url,
data={
"version": "2d1ab93fe4bb42db80890f01f8358fc9f8fbff3b",
"projects": [self.project.slug],
"commits": [
{
"patch_set": [
{"path": "hello.py", "type": "M"},
{"path": "templates/hola.html", "type": "D"},
],
"repository": "laurynsentry/helloworld",
"author_email": "lauryndbrown@gmail.com",
"timestamp": "2018-11-29T18:50:28+03:00",
"author_name": "Lauryn Brown",
"message": "made changes to hello.",
"id": "2d1ab93fe4bb42db80890f01f8358fc9f8fbff3b",
},
{
"patch_set": [
{"path": "templates/hello.html", "type": "M"},
{"path": "templates/goodbye.html", "type": "A"},
],
"repository": "laurynsentry/helloworld",
"author_email": "lauryndbrown@gmail.com",
"timestamp": "2018-11-30T22:51:14+03:00",
"author_name": "Lauryn Brown",
"message": "Changed release",
"id": "be2fe070f6d1b8a572b67defc87af2582f9b0d78",
},
],
},
)
assert response.status_code == 201, (response.status_code, response.content)
assert response.data["version"]
release = Release.objects.get(organization_id=self.org.id, version=response.data["version"])
repo = Repository.objects.get(organization_id=self.org.id, name="laurynsentry/helloworld")
assert repo.provider is None
rc_list = list(
ReleaseCommit.objects.filter(release=release)
.select_related("commit", "commit__author")
.order_by("order")
)
assert len(rc_list) == 2
for rc in rc_list:
assert rc.organization_id
author = CommitAuthor.objects.get(
organization_id=self.org.id, email="lauryndbrown@gmail.com"
)
assert author.name == "Lauryn Brown"
commits = [rc.commit for rc in rc_list]
commits.sort(key=lambda c: c.date_added)
self.assert_commit(
commit=commits[0],
repo_id=repo.id,
key="2d1ab93fe4bb42db80890f01f8358fc9f8fbff3b",
author_id=author.id,
message="made changes to hello.",
)
self.assert_commit(
commit=commits[1],
repo_id=repo.id,
key="be2fe070f6d1b8a572b67defc87af2582f9b0d78",
author_id=author.id,
message="Changed release",
)
file_changes = CommitFileChange.objects.filter(organization_id=self.org.id).order_by(
"filename"
)
self.assert_file_change(file_changes[0], "M", "hello.py", commits[0].id)
self.assert_file_change(file_changes[1], "A", "templates/goodbye.html", commits[1].id)
self.assert_file_change(file_changes[2], "M", "templates/hello.html", commits[1].id)
self.assert_file_change(file_changes[3], "D", "templates/hola.html", commits[0].id)
|
OrganizationReleaseCreateCommitPatch
|
python
|
pytorch__pytorch
|
test/dynamo/test_higher_order_ops.py
|
{
"start": 158036,
"end": 160426
}
|
class ____(torch.nn.Module):
def forward(self, L_x_: "f32[5]"):
l_x_ = L_x_
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable("torch.func.{grad, vjp, jacrev, hessian} don't yet support saved tensor hooks. Please open an issue with your use case."); _saved_tensors_hooks_disable = None
_grad_increment_nesting = torch._C._functorch._grad_increment_nesting(); _grad_increment_nesting = None
aux: "f32[5]" = torch._C._functorch._wrap_for_grad(l_x_, 1); l_x_ = None
set_inplace_requires_grad_allowed = torch._C._functorch.set_inplace_requires_grad_allowed(True); set_inplace_requires_grad_allowed = None
child: "f32[5]" = torch._functorch.eager_transforms._set_tensor_requires_grad(aux); child = None
set_inplace_requires_grad_allowed_1 = torch._C._functorch.set_inplace_requires_grad_allowed(False); set_inplace_requires_grad_allowed_1 = None
sin: "f32[5]" = aux.sin()
primals_out: "f32[]" = sin.sum(); sin = None
aux_1: "f32[5]" = torch._C._functorch._unwrap_for_grad(aux, 1); aux = aux_1 = None
results: "f32[]" = torch._C._functorch._unwrap_for_grad(primals_out, 1); primals_out = None
_grad_decrement_nesting = torch._C._functorch._grad_decrement_nesting(); _grad_decrement_nesting = None
_saved_tensors_hooks_enable = torch._C._autograd._saved_tensors_hooks_enable(); _saved_tensors_hooks_enable = None
return (results,)
""",
)
@config.patch(inline_inbuilt_nn_modules=True)
def test_functional_call(self):
def wrapper_fn(model, params, inputs, targets):
prediction = torch.func.functional_call(model, params, (inputs,))
return torch.nn.functional.mse_loss(prediction, targets)
model = torch.nn.Linear(3, 3)
params = dict(model.named_parameters())
inputs = torch.randn(64, 3)
targets = torch.randn(64, 3)
wrapped_gm = self._compile_check(wrapper_fn, (model, params, inputs, targets))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
if torch._dynamo.config.inline_inbuilt_nn_modules:
self.assertExpectedInline(
actual,
"""\
|
GraphModule
|
python
|
huggingface__transformers
|
tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py
|
{
"start": 9398,
"end": 15196
}
|
class ____(unittest.TestCase):
checkpoint_path = "microsoft/Phi-4-multimodal-instruct"
revision = "refs/pr/70"
image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
audio_url = "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"
def setUp(self):
# Currently, the Phi-4 checkpoint on the hub is not working with the latest Phi-4 code, so the slow integration tests
# won't pass without using the correct revision (refs/pr/70)
self.processor = AutoProcessor.from_pretrained(self.checkpoint_path, revision=self.revision)
self.generation_config = GenerationConfig(max_new_tokens=20, do_sample=False)
self.user_token = "<|user|>"
self.assistant_token = "<|assistant|>"
self.end_token = "<|end|>"
self.image = Image.open(requests.get(self.image_url, stream=True).raw)
audio_bytes = requests.get(self.audio_url, stream=True).raw.data
samples = torchcodec.decoders.AudioDecoder(audio_bytes).get_all_samples()
self.audio, self.sampling_rate = samples.data, samples.sample_rate
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_text_only_generation(self):
model = AutoModelForCausalLM.from_pretrained(
self.checkpoint_path, revision=self.revision, dtype=torch.float16, device_map=torch_device
)
prompt = f"{self.user_token}What is the answer for 1+1? Explain it.{self.end_token}{self.assistant_token}"
inputs = self.processor(prompt, images=None, return_tensors="pt").to(torch_device)
output = model.generate(
**inputs,
generation_config=self.generation_config,
)
output = output[:, inputs["input_ids"].shape[1] :]
response = self.processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
EXPECTED_RESPONSE = "The answer for 1+1 is 2. This is because when you add one to another"
self.assertEqual(response, EXPECTED_RESPONSE)
def test_vision_text_generation(self):
model = AutoModelForCausalLM.from_pretrained(
self.checkpoint_path, revision=self.revision, dtype=torch.float16, device_map=torch_device
)
prompt = f"{self.user_token}<|image|>What is shown in this image?{self.end_token}{self.assistant_token}"
inputs = self.processor(prompt, images=self.image, return_tensors="pt").to(torch_device)
output = model.generate(
**inputs,
generation_config=self.generation_config,
)
output = output[:, inputs["input_ids"].shape[1] :]
response = self.processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
EXPECTED_RESPONSES = Expectations(
{
("cuda", 7): 'The image shows a vibrant scene at a traditional Chinese-style street entrance, known as a "gate"',
("cuda", 8): 'The image shows a vibrant scene at a street intersection in a city with a Chinese-influenced architectural',
}
) # fmt: skip
EXPECTED_RESPONSE = EXPECTED_RESPONSES.get_expectation()
self.assertEqual(response, EXPECTED_RESPONSE)
@require_torch_large_accelerator
def test_multi_image_vision_text_generation(self):
model = AutoModelForCausalLM.from_pretrained(
self.checkpoint_path, revision=self.revision, dtype=torch.float16, device_map=torch_device
)
images = []
placeholder = ""
for i in range(1, 5):
url = f"https://image.slidesharecdn.com/azureintroduction-191206101932/75/Introduction-to-Microsoft-Azure-Cloud-{i}-2048.jpg"
images.append(Image.open(requests.get(url, stream=True).raw))
placeholder += "<|image|>"
prompt = f"{self.user_token}{placeholder}Summarize the deck of slides.{self.end_token}{self.assistant_token}"
inputs = self.processor(prompt, images, return_tensors="pt").to(torch_device)
output = model.generate(
**inputs,
generation_config=self.generation_config,
)
output = output[:, inputs["input_ids"].shape[1] :]
response = self.processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
EXPECTED_RESPONSE = "The presentation provides an overview of Microsoft Azure, a cloud computing platform by Microsoft, and its various services"
self.assertEqual(response, EXPECTED_RESPONSE)
@require_torchcodec
def test_audio_text_generation(self):
model = AutoModelForCausalLM.from_pretrained(
self.checkpoint_path, revision=self.revision, dtype=torch.float16, device_map=torch_device
)
prompt = f"{self.user_token}<|audio|>What is happening in this audio?{self.end_token}{self.assistant_token}"
inputs = self.processor(prompt, audio=self.audio, sampling_rate=self.sampling_rate, return_tensors="pt").to(
torch_device
)
output = model.generate(
**inputs,
generation_config=self.generation_config,
)
output = output[:, inputs["input_ids"].shape[1] :]
response = self.processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
# Yes, it is truly the expected response, even though the model does receive and process the audio file
EXPECTED_RESPONSE = "I'm sorry, but I can't listen to audio. However, if you describe the audio to me,"
self.assertEqual(response, EXPECTED_RESPONSE)
|
Phi4MultimodalIntegrationTest
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/conditional_virtual_dependency/package.py
|
{
"start": 216,
"end": 573
}
|
class ____(Package):
"""Brings in a virtual dependency if certain conditions are met."""
homepage = "https://dev.null"
version("1.0")
variant("stuff", default=True, description="nope")
variant("mpi", default=False, description="nope")
depends_on("stuff", when="+stuff")
depends_on("mpi", when="+mpi")
|
ConditionalVirtualDependency
|
python
|
xlwings__xlwings
|
xlwings/conversion/standard.py
|
{
"start": 5309,
"end": 5716
}
|
class ____:
def __init__(self, options):
self.options = options
def __call__(self, ctx):
if Markdown and isinstance(ctx.source_value, Markdown):
markdown.format_text(
ctx.range, ctx.source_value.text, ctx.source_value.style
)
if "formatter" in self.options:
self.options["formatter"](ctx.range, ctx.source_value)
|
FormatStage
|
python
|
keras-team__keras
|
keras/src/layers/preprocessing/image_preprocessing/solarization.py
|
{
"start": 370,
"end": 8015
}
|
class ____(BaseImagePreprocessingLayer):
"""Applies `(max_value - pixel + min_value)` for each pixel in the image.
When created without a `threshold` parameter, the layer applies solarization
to all values. When created with a specified `threshold`, the layer only
augments pixels that are above the `threshold` value.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
addition_factor: (Optional) A tuple of two floats or a single float,
between 0 and 1.
For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, addition_factor)`. If specified, this value
(times the value range of input images, e.g. 255) is
added to each pixel before solarization and thresholding.
Defaults to 0.0.
threshold_factor: (Optional) A tuple of two floats or a single float.
For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, threshold_factor)`. If specified, only pixel
values above this threshold will be solarized.
value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in input images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`. Typical values to pass
are `(0, 255)` (RGB image) or `(0., 1.)` (scaled image).
seed: Integer. Used to create a random seed.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Example:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
print(images[0, 0, 0])
# [59 62 63]
# Note that images are Tensor with values in the range [0, 255]
solarization = Solarization(value_range=(0, 255))
images = solarization(images)
print(images[0, 0, 0])
# [196, 193, 192]
```
"""
_USE_BASE_FACTOR = False
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
_FACTOR_VALIDATION_ERROR = (
"The `addition_factor` and `threshold_factor` arguments "
"should be a number (or a list of two numbers) "
"in the range [0, 1]. "
)
def __init__(
self,
addition_factor=0.0,
threshold_factor=0.0,
value_range=(0, 255),
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.seed = seed
self.generator = SeedGenerator(seed)
self.addition_factor = self._set_factor(
addition_factor, "addition_factor"
)
self.threshold_factor = self._set_factor(
threshold_factor, "threshold_factor"
)
self._set_value_range(value_range)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def _set_factor(self, factor, factor_name):
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: {factor_name}={factor}"
)
self._check_factor_range(factor[0])
self._check_factor_range(factor[1])
lower, upper = sorted(factor)
elif isinstance(factor, (int, float)):
self._check_factor_range(factor)
lower, upper = [0, factor]
else:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: {factor_name}={factor}"
)
return lower, upper
def _check_factor_range(self, input_number):
if input_number > 1.0 or input_number < 0:
raise ValueError(
self._FACTOR_VALIDATION_ERROR
+ f"Received: input_number={input_number}"
)
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
if len(images_shape) == 4:
factor_shape = (images_shape[0], 1, 1, 1)
else:
factor_shape = (1, 1, 1)
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
return {
"additions": self.backend.random.uniform(
minval=self.addition_factor[0],
maxval=self.addition_factor[1] * 255,
shape=factor_shape,
seed=seed,
dtype=self.compute_dtype,
),
"thresholds": self.backend.random.uniform(
minval=self.threshold_factor[0],
maxval=self.threshold_factor[1] * 255,
shape=factor_shape,
seed=seed,
dtype=self.compute_dtype,
),
}
def transform_images(self, images, transformation, training=True):
images = self.backend.cast(images, self.compute_dtype)
if training:
if transformation is None:
return images
thresholds = transformation["thresholds"]
additions = transformation["additions"]
images = self._transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
results = images + additions
results = self.backend.numpy.clip(results, 0, 255)
results = self.backend.numpy.where(
results < thresholds, results, 255 - results
)
results = self._transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
if results.dtype == images.dtype:
return results
if backend.is_int_dtype(images.dtype):
results = self.backend.numpy.round(results)
return _saturate_cast(results, images.dtype, self.backend)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def get_config(self):
base_config = super().get_config()
config = {
"value_range": self.value_range,
"addition_factor": self.addition_factor,
"threshold_factor": self.threshold_factor,
"seed": self.seed,
}
return {**base_config, **config}
def compute_output_shape(self, input_shape):
return input_shape
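# Minimal sketch (assumes NumPy) of the core rule described in the docstring and
# implemented in transform_images: pixels at or above the sampled threshold are
# inverted within the (0, 255) working range, the rest are left untouched.
# The pixel values and threshold below are illustrative only.
import numpy as np

pixels = np.array([10.0, 100.0, 200.0, 250.0])
threshold = 128.0
solarized = np.where(pixels < threshold, pixels, 255.0 - pixels)
print(solarized)  # [ 10. 100.  55.   5.]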
|
Solarization
|
python
|
scrapy__scrapy
|
tests/test_downloader_handler_twisted_http2.py
|
{
"start": 6864,
"end": 6958
}
|
class ____(H2DownloadHandlerMixin, TestHttpsWrongHostnameBase):
pass
|
TestHttps2WrongHostname
|
python
|
astropy__astropy
|
astropy/io/votable/exceptions.py
|
{
"start": 32155,
"end": 32837
}
|
class ____(VOTableSpecWarning):
"""
``VALUES`` elements that reference another element should not have
their own content.
From the VOTable 1.2 spec:
The ``ref`` attribute of a ``VALUES`` element can be used to
avoid a repetition of the domain definition, by referring to a
previously defined ``VALUES`` element having the referenced
``ID`` attribute. When specified, the ``ref`` attribute
defines completely the domain without any other element or
attribute, as e.g. ``<VALUES ref="RAdomain"/>``
"""
message_template = "VALUES element with ref attribute has content ('{}')"
default_args = ("element",)
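# Illustrative (hypothetical) VOTable fragment that would trigger this warning:
# a VALUES element that both references another element and carries its own
# content, instead of the self-closing form `<VALUES ref="RAdomain"/>` that the
# spec expects.
offending = '<VALUES ref="RAdomain"><MIN value="0"/></VALUES>'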
|
W44
|
python
|
pytorch__pytorch
|
test/mobile/model_test/nn_ops.py
|
{
"start": 2874,
"end": 4077
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.input1d = torch.randn(1, 4, 50)
self.module1d = nn.ModuleList(
[
nn.ReflectionPad1d(2),
nn.ReplicationPad1d(2),
nn.ConstantPad1d(2, 3.5),
]
)
self.input2d = torch.randn(1, 4, 30, 10)
self.module2d = nn.ModuleList(
[
nn.ReflectionPad2d(2),
nn.ReplicationPad2d(2),
nn.ZeroPad2d(2),
nn.ConstantPad2d(2, 3.5),
]
)
self.input3d = torch.randn(1, 4, 10, 4, 4)
self.module3d = nn.ModuleList(
[
nn.ReflectionPad3d(1),
nn.ReplicationPad3d(3),
nn.ConstantPad3d(3, 3.5),
]
)
def forward(self):
return len(
(
[module(self.input1d) for i, module in enumerate(self.module1d)],
[module(self.input2d) for i, module in enumerate(self.module2d)],
[module(self.input3d) for i, module in enumerate(self.module3d)],
)
)
|
NNPaddingModule
|
python
|
ansible__ansible
|
lib/ansible/_internal/_ssh/_ssh_agent.py
|
{
"start": 4300,
"end": 4883
}
|
class ____(int, VariableSized):
def to_blob(self) -> bytes:
if self < 0:
raise ValueError("negative mpint not allowed")
if not self:
return b""
nbytes = (self.bit_length() + 8) // 8
ret = bytearray(self.to_bytes(length=nbytes, byteorder='big'))
ret[:0] = uint32(len(ret)).to_blob()
return ret
@classmethod
def from_blob(cls, blob: memoryview | bytes) -> t.Self:
if blob and blob[0] > 127:
raise ValueError("Invalid data")
return cls.from_bytes(blob, byteorder='big')
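# Hypothetical sketch of the wire format to_blob() produces, assuming
# uint32(...).to_blob() is a 4-byte big-endian length prefix: the body keeps a
# leading zero byte whenever the most significant bit would otherwise be set.
import struct

value = 255
body = value.to_bytes((value.bit_length() + 8) // 8, byteorder="big")  # b"\x00\xff"
blob = struct.pack(">I", len(body)) + body
assert blob == b"\x00\x00\x00\x02\x00\xff"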
|
mpint
|
python
|
wandb__wandb
|
wandb/automations/_generated/input_types.py
|
{
"start": 352,
"end": 819
}
|
class ____(GQLInput):
entity_name: str = Field(alias="entityName")
url_endpoint: str = Field(alias="urlEndpoint")
name: str = Field(max_length=64, pattern="^[-\\w]+([ ]+[-\\w]+)*$")
secret_ref: Optional[str] = Field(alias="secretRef", default=None)
access_token_ref: Optional[str] = Field(alias="accessTokenRef", default=None)
client_mutation_id: Optional[str] = Field(alias="clientMutationId", default=None)
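# Hypothetical instantiation with made-up values; fields are passed by their
# GraphQL aliases (entityName, urlEndpoint, ...) as declared above, and `name`
# must respect the 64-character limit and the declared pattern.
payload = CreateGenericWebhookIntegrationInput(
    entityName="my-team",
    urlEndpoint="https://example.com/hooks/wandb",
    name="my-webhook",
)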
|
CreateGenericWebhookIntegrationInput
|
python
|
pytorch__pytorch
|
torch/fx/experimental/proxy_tensor.py
|
{
"start": 43659,
"end": 43739
}
|
class ____:
proxy: _PySymProxyType
value: PySymType
|
_SympyExprTrackerValue
|
python
|
tensorflow__tensorflow
|
tensorflow/python/checkpoint/checkpoint.py
|
{
"start": 86102,
"end": 114922
}
|
class ____(autotrackable.AutoTrackable):
"""Manages saving/restoring trackable values to disk.
TensorFlow objects may contain trackable state, such as `tf.Variable`s,
`tf.keras.optimizers.Optimizer` implementations, `tf.data.Dataset` iterators,
`tf.keras.Layer` implementations, or `tf.keras.Model` implementations.
These are called **trackable objects**.
A `Checkpoint` object can be constructed to save either a single or group of
trackable objects to a checkpoint file. It maintains a `save_counter` for
numbering checkpoints.
Example:
```python
model = tf.keras.Model(...)
checkpoint = tf.train.Checkpoint(model)
# Save a checkpoint to /tmp/training_checkpoints-{save_counter}. Every time
# checkpoint.save is called, the save counter is increased.
save_path = checkpoint.save('/tmp/training_checkpoints')
# Restore the checkpointed values to the `model` object.
checkpoint.restore(save_path)
```
Example 2:
```python
import tensorflow as tf
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
# Create a Checkpoint that will manage two objects with trackable state,
# one we name "optimizer" and the other we name "model".
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... ) # Variables will be restored on creation.
status.assert_consumed() # Optional sanity checks.
checkpoint.save(file_prefix=checkpoint_prefix)
```
`Checkpoint.save()` and `Checkpoint.restore()` write and read object-based
checkpoints, in contrast to TensorFlow 1.x's `tf.compat.v1.train.Saver` which
writes and reads `variable.name` based checkpoints. Object-based checkpointing saves a
graph of dependencies between Python objects (`Layer`s, `Optimizer`s,
`Variable`s, etc.) with named edges, and this graph is used to match variables
when restoring a checkpoint. It can be more robust to changes in the Python
program, and helps to support restore-on-create for variables.
`Checkpoint` objects have dependencies on the objects passed as keyword
arguments to their constructors, and each dependency is given a name that is
identical to the name of the keyword argument for which it was created.
TensorFlow classes like `Layer`s and `Optimizer`s will automatically add
dependencies on their own variables (e.g. "kernel" and "bias" for
`tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing
dependencies easy in user-defined classes, since `Model` hooks into attribute
assignment. For example:
```python
class Regress(tf.keras.Model):
def __init__(self):
super().__init__()
self.input_transform = tf.keras.layers.Dense(10)
# ...
def call(self, inputs):
x = self.input_transform(inputs)
# ...
```
This `Model` has a dependency named "input_transform" on its `Dense` layer,
which in turn depends on its variables. As a result, saving an instance of
`Regress` using `tf.train.Checkpoint` will also save all the variables created
by the `Dense` layer.
When variables are assigned to multiple workers, each worker writes its own
section of the checkpoint. These sections are then merged/re-indexed to behave
as a single checkpoint. This avoids copying all variables to one worker, but
does require that all workers see a common filesystem.
This function differs slightly from the Keras Model `save_weights` function.
`tf.keras.Model.save_weights` creates a checkpoint file with the name
specified in `filepath`, while `tf.train.Checkpoint` numbers the checkpoints,
using `filepath` as the prefix for the checkpoint file names. Aside from this,
`model.save_weights()` and `tf.train.Checkpoint(model).save()` are equivalent.
See the [guide to training
checkpoints](https://www.tensorflow.org/guide/checkpoint) for
details.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, root=None, **kwargs):
"""Creates a training checkpoint for a single or group of objects.
Args:
root: The root object to checkpoint. `root` may be a trackable object or
`WeakRef` of a trackable object.
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. All `kwargs` must be trackable objects, or a
nested structure of trackable objects (`list`, `dict`, or `tuple`).
Raises:
ValueError: If `root` or the objects in `kwargs` are not trackable. A
`ValueError` is also raised if the `root` object tracks different
objects from the ones listed in attributes in kwargs (e.g.
`root.child = A` and `tf.train.Checkpoint(root, child=B)` are
incompatible).
"""
super().__init__()
global _END_TIME_OF_LAST_WRITE
with _END_TIME_OF_LAST_WRITE_LOCK:
if _END_TIME_OF_LAST_WRITE is None:
_END_TIME_OF_LAST_WRITE = time.time()
# Store a reference to root and kwargs if we need to instantiate an
# AsyncCheckpointer later.
self._root = root
self._kwargs = kwargs
self._delete_tracking("_kwargs")
# Don't instantiate the AsyncCheckpointer unless required.
self._async_checkpointer_impl = None
# Store checkpoint options during the save/write calls so that subsequent
# read/restore calls are done properly. This is only populated when
# async read/write is enabled.
self._checkpoint_options = None
attached_dependencies = None
self._save_counter = None # Created lazily for restore-on-create.
self._save_assign_op = None
if root:
trackable_root = root() if isinstance(root, weakref.ref) else root
_assert_trackable(trackable_root, "root")
attached_dependencies = []
# All keyword arguments (including root itself) are set as children
# of root.
kwargs["root"] = root
trackable_root._maybe_initialize_trackable()
self._save_counter = data_structures.NoDependency(
trackable_root._lookup_dependency("save_counter"))
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
setattr(self, k, v)
# Call getattr instead of directly using v because setattr converts
# v to a Trackable data structure when v is a list/dict/tuple.
converted_v = getattr(self, k)
if isinstance(converted_v, weakref.ref):
converted_v = converted_v()
_assert_trackable(converted_v, k)
if root:
# Make sure that root doesn't already have dependencies with these names
child = trackable_root._lookup_dependency(k)
if child is None:
attached_dependencies.append(
base.WeakTrackableReference(k, converted_v))
elif child != converted_v:
raise ValueError(
f"Cannot create a Checkpoint with keyword argument {k} if "
f"root.{k} already exists.")
self._saver = TrackableSaver(
graph_view_lib.ObjectGraphView(
root if root else self,
attached_dependencies=attached_dependencies))
self._attached_dependencies = data_structures.NoDependency(
attached_dependencies)
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
# add_variable creates a dependency named "save_counter"; NoDependency
# prevents creating a second dependency named "_save_counter".
self._save_counter = data_structures.NoDependency(
add_variable(
self,
name="save_counter",
initializer=0,
dtype=dtypes.int64,
trainable=False))
if self._attached_dependencies is not None:
self._attached_dependencies.append(
# Store a strong reference to the `save_counter`, so that if the
# `Checkpoint` object is deleted, the `save_counter` does not get
# deleted immediately. (The LoadStatus object needs to indirectly
# reference the counter through the ObjectGraphView).
base.TrackableReference("save_counter", self._save_counter))
# When loading a checkpoint, the save counter is created after
# the checkpoint has been loaded, so it must be handled in a deferred
# manner.
if isinstance(self.root, weakref.ref):
root = self.root()
else:
root = self.root
restore = root._deferred_dependencies.pop("save_counter", ()) # pylint: disable=protected-access
if restore:
restore[0].restore(self._save_counter)
def write(self, file_prefix, options=None):
"""Writes a training checkpoint.
The checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.write()` is
called.
`write` does not number checkpoints, increment `save_counter`, or update the
metadata used by `tf.train.latest_checkpoint`. It is primarily intended for
use by higher level checkpoint management utilities. `save` provides a very
basic implementation of these features.
Checkpoints written with `write` must be read with `read`.
Example usage:
```
step = tf.Variable(0, name="step")
checkpoint = tf.train.Checkpoint(step=step)
checkpoint.write("/tmp/ckpt")
# Later, read the checkpoint with read()
checkpoint.read("/tmp/ckpt")
# You can also pass options to write() and read(). For example this
# runs the IO ops on the localhost:
options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
checkpoint.write("/tmp/ckpt", options=options)
# Later, read the checkpoint with read()
checkpoint.read("/tmp/ckpt", options=options)
```
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix).
options: Optional `tf.train.CheckpointOptions` object.
Returns:
The full path to the checkpoint (i.e. `file_prefix`).
"""
if isinstance(file_prefix, os.PathLike):
file_prefix = os.fspath(file_prefix)
return self._write(file_prefix, options)
def _async_checkpointer(self):
"""Returns an instantiated AsyncCheckpointHelper."""
if self._async_checkpointer_impl is None:
self._async_checkpointer_impl = (
async_checkpoint_helper.AsyncCheckpointHelper(
Checkpoint,
**self._kwargs))
return self._async_checkpointer_impl
def _write(self, file_prefix, options=None):
"""Internal method that implements Checkpoint.write().
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix).
options: Optional `tf.train.CheckpointOptions` object.
Returns:
The full path to the checkpoint (i.e. `file_prefix`).
"""
# Triggers TF2 async checkpoint handling if:
# 1. async checkpoint is enabled in CheckpointOptions
# 2. running in eager mode
if options and options.experimental_enable_async_checkpoint:
self._checkpoint_options = options
if checkpoint_context.in_preemption_save_context():
# Make sure all in-progress writes have completed before saving the
# final preemption checkpoint.
if self._async_checkpointer_impl is not None:
self._async_checkpointer_impl.sync()
# Additional work done will not be saved in a future checkpoint, so
# we use regular sync checkpoint to avoid overhead of dispatching
# checkpoint write to a new thread.
logging.warning(
"Switching to regular sync checkpoint for preemption checkpoint."
)
elif context.executing_eagerly():
return self._async_checkpointer()._write( # pylint: disable=protected-access
file_prefix, options)
else:
logging.warning(
"Saving async checkpoint in graph mode is currently not supported;"
" switching to regular sync checkpoint instead.")
start_time = time.time()
options = options or checkpoint_options.CheckpointOptions()
output = self._saver.save(file_prefix=file_prefix, options=options)
output = _convert_file_name_tensor_to_string(output)
# Execute callbacks (the only place they are executed; i.e. all entry points
# for callbacks will ultimately be directed to here for execution)
if options.experimental_write_callbacks:
_execute_callbacks(options.experimental_write_callbacks, output)
# Ensure save operations have completed when running in eager runtime.
if context.executing_eagerly():
context.async_wait()
end_time = time.time()
if not checkpoint_context.in_async_metrics_context():
# This records the time checkpoint._write() blocks on the main thread.
metrics.AddCheckpointWriteDuration(
api_label=_CHECKPOINT_V2,
microseconds=_get_duration_microseconds(start_time, end_time),
)
global _END_TIME_OF_LAST_WRITE
with _END_TIME_OF_LAST_WRITE_LOCK:
if not checkpoint_context.in_async_metrics_context():
metrics.AddTrainingTimeSaved(
api_label=_CHECKPOINT_V2,
microseconds=_get_duration_microseconds(
_END_TIME_OF_LAST_WRITE, end_time)
)
if checkpoint_context.in_preemption_save_context():
_preemption_checkpoint_saved_time_usecs.get_cell().increase_by(
_get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time)
)
_END_TIME_OF_LAST_WRITE = end_time
metrics.RecordCheckpointSize(
api_label=_CHECKPOINT_V2, filesize=_get_checkpoint_size(output)
)
return output
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def sync(self):
"""Wait for any outstanding save or restore operations."""
# Subclasses of Checkpoint may not have `_async_checkpointer_impl` so use
# `getattr` for safer check.
if getattr(self, "_async_checkpointer_impl", None) is not None:
self._async_checkpointer_impl.sync()
def save(self, file_prefix, options=None):
# pylint:disable=line-too-long
"""Saves a training checkpoint and provides basic checkpoint management.
The saved checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.save()` is
called.
`save` is a basic convenience wrapper around the `write` method,
sequentially numbering checkpoints using `save_counter` and updating the
metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint
management, for example garbage collection and custom numbering, may be
provided by other utilities which also wrap `write` and `read`.
(`tf.train.CheckpointManager` for example).
```
step = tf.Variable(0, name="step")
checkpoint = tf.train.Checkpoint(step=step)
checkpoint.save("/tmp/ckpt")
# Later, read the checkpoint with restore()
checkpoint.restore("/tmp/ckpt-1")
# You can also pass options to save() and restore(). For example this
# runs the IO ops on the localhost:
options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
checkpoint.save("/tmp/ckpt", options=options)
# Later, read the checkpoint with restore()
checkpoint.restore("/tmp/ckpt-1", options=options)
```
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `Checkpoint.save_counter`.
options: Optional `tf.train.CheckpointOptions` object.
Returns:
The full path to the checkpoint.
"""
# Triggers TF2 async checkpoint handling if:
# 1. async checkpoint is enabled in CheckpointOptions
# 2. running in eager mode
if options and options.experimental_enable_async_checkpoint:
self._checkpoint_options = options
if checkpoint_context.in_preemption_save_context():
# Make sure all in-progress writes have completed before saving the
# final preemption checkpoint.
if self._async_checkpointer_impl is not None:
self._async_checkpointer_impl.sync()
# Additional work done will not be saved in a future checkpoint, so
# we use regular sync checkpoint to avoid overhead of dispatching
# checkpoint write to a new thread.
logging.warning(
"Switching to regular sync checkpoint for preemption checkpoint."
)
elif context.executing_eagerly():
return self._async_checkpointer().save(file_prefix, options)
else:
logging.warning(
"Saving async checkpoint in graph mode is currently not supported;"
" switching to regular sync checkpoint instead.")
if isinstance(file_prefix, os.PathLike):
file_prefix = os.fspath(file_prefix)
# pylint:enable=line-too-long
# We create a copy so that user's `options` instance would not be mutated
# by internal mechanisms.
options = copy.copy(options) or checkpoint_options.CheckpointOptions()
graph_building = not context.executing_eagerly()
if graph_building:
if ops.inside_function():
raise NotImplementedError(
"Calling tf.train.Checkpoint.save() from a function is not "
"supported, as save() modifies saving metadata in ways not "
"supported by TensorFlow Operations. Consider using "
"tf.train.Checkpoint.write(), a lower-level API which does not "
"update metadata. tf.train.latest_checkpoint and related APIs will "
"not see this checkpoint.")
session = get_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
if not graph_building or self._save_assign_op is None:
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1, read_value=True)
if graph_building:
self._save_assign_op = data_structures.NoDependency(assign_op)
if graph_building:
checkpoint_number = session.run(self._save_assign_op)
else:
checkpoint_number = assign_op.numpy()
if options.experimental_write_callbacks is None:
options.experimental_write_callbacks = [_update_checkpoint_state_internal]
else:
options.experimental_write_callbacks.append(
_update_checkpoint_state_internal
)
return self._write(
"%s-%d" % (file_prefix, checkpoint_number),
options=options)
def read(self, save_path, options=None):
"""Reads a training checkpoint written with `write`.
Reads this `Checkpoint` and any objects it depends on.
This method is just like `restore()` but does not expect the `save_counter`
variable in the checkpoint. It only restores the objects that the checkpoint
already depends on.
The method is primarily intended for use by higher level checkpoint
management utilities that use `write()` instead of `save()` and have their
own mechanisms to number and track checkpoints.
Example usage:
```python
# Create a checkpoint with write()
ckpt = tf.train.Checkpoint(v=tf.Variable(1.))
path = ckpt.write('/tmp/my_checkpoint')
# Later, load the checkpoint with read()
# With restore(), assert_consumed() would have failed.
ckpt.read(path).assert_consumed()
# You can also pass options to read(). For example this
# runs the IO ops on the localhost:
options = tf.train.CheckpointOptions(
experimental_io_device="/job:localhost")
ckpt.read(path, options=options)
```
Args:
save_path: The path to the checkpoint as returned by `write`.
options: Optional `tf.train.CheckpointOptions` object.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration. See `restore` for details.
"""
if options and options.experimental_enable_async_checkpoint:
self._checkpoint_options = options
# Triggers TF2 async checkpoint handling if:
# 1. async checkpoint is enabled in CheckpointOptions
# 2. there's a preceding async save/write
# 3. running in eager mode
if (self._checkpoint_options and
self._checkpoint_options.experimental_enable_async_checkpoint):
if context.executing_eagerly():
return self._async_checkpointer().read(save_path, options)
else:
logging.warning(
"Saving async checkpoint in graph mode is currently not supported;"
" switching to regular sync checkpoint instead.")
start_time = time.time()
if isinstance(save_path, os.PathLike):
save_path = os.fspath(save_path)
options = options or checkpoint_options.CheckpointOptions()
result = self._saver.restore(save_path=save_path, options=options)
metrics.AddCheckpointReadDuration(
api_label=_CHECKPOINT_V2,
microseconds=_get_duration_microseconds(start_time, time.time()))
return result
def restore(self, save_path, options=None):
"""Restores a training checkpoint.
Restores this `Checkpoint` and any objects it depends on.
This method is intended to be used to load checkpoints created by `save()`.
For checkpoints created by `write()` use the `read()` method which does not
expect the `save_counter` variable added by `save()`.
`restore()` either assigns values immediately if variables to restore have
been created already, or defers restoration until the variables are
created. Dependencies added after this call will be matched if they have a
corresponding object in the checkpoint (the restore request will queue in
any trackable object waiting for the expected dependency to be added).
```python
checkpoint = tf.train.Checkpoint( ... )
checkpoint.restore(path)
# You can additionally pass options to restore():
options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
checkpoint.restore(path, options=options)
```
To ensure that loading is complete and no more deferred restorations will
take place, use the `assert_consumed()` method of the status object returned
by `restore()`:
```python
checkpoint.restore(path, options=options).assert_consumed()
```
The assert will raise an error if any Python objects in the dependency graph
were not found in the checkpoint, or if any checkpointed values do not have
a matching Python object.
Name-based `tf.compat.v1.train.Saver` checkpoints from TensorFlow 1.x can be
loaded using this method. Names are used to match variables. Re-encode
name-based checkpoints using `tf.train.Checkpoint.save` as soon as possible.
**Loading from SavedModel checkpoints**
To load values from a SavedModel, just pass the SavedModel directory
to checkpoint.restore:
```python
model = tf.keras.Model(...)
tf.saved_model.save(model, path) # or model.save(path, save_format='tf')
checkpoint = tf.train.Checkpoint(model)
checkpoint.restore(path).expect_partial()
```
This example calls `expect_partial()` on the loaded status, since
SavedModels saved from Keras often generate extra keys in the checkpoint.
Otherwise, the program prints a lot of warnings about unused keys at exit
time.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If the checkpoint was written by the
name-based `tf.compat.v1.train.Saver`, names are used to match
variables. This path may also be a SavedModel directory.
options: Optional `tf.train.CheckpointOptions` object.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration.
The returned status object has the following methods:
* `assert_consumed()`:
Raises an exception if any variables are unmatched: either
checkpointed values which don't have a matching Python object or
Python objects in the dependency graph with no values in the
checkpoint. This method returns the status object, and so may be
chained with other assertions.
* `assert_existing_objects_matched()`:
Raises an exception if any existing Python objects in the dependency
graph are unmatched. Unlike `assert_consumed`, this assertion will
pass if values in the checkpoint have no corresponding Python
objects. For example a `tf.keras.Layer` object which has not yet been
built, and so has not created any variables, will pass this assertion
but fail `assert_consumed`. Useful when loading part of a larger
checkpoint into a new Python program, e.g. a training checkpoint with
a `tf.compat.v1.train.Optimizer` was saved but only the state required
for
inference is being loaded. This method returns the status object, and
so may be chained with other assertions.
* `assert_nontrivial_match()`: Asserts that something aside from the root
object was matched. This is a very weak assertion, but is useful for
sanity checking in library code where objects may exist in the
checkpoint which haven't been created in Python and some Python
objects may not have a checkpointed value.
* `expect_partial()`: Silence warnings about incomplete checkpoint
restores. Warnings are otherwise printed for unused parts of the
checkpoint file or object when the `Checkpoint` object is deleted
(often at program shutdown).
Raises:
NotFoundError: if a checkpoint or SavedModel cannot be found at
`save_path`.
"""
if options and options.experimental_enable_async_checkpoint:
self._checkpoint_options = options
# Triggers TF2 async checkpoint handling if:
# 1. async checkpoint is enabled in CheckpointOptions
# 2. there's a preceding async save/write
# 3. running in eager mode
if (self._checkpoint_options and
self._checkpoint_options.experimental_enable_async_checkpoint):
if context.executing_eagerly():
return self._async_checkpointer().restore(save_path, options)
else:
logging.warning(
"Saving async checkpoint in graph mode is currently not supported;"
" switching to regular sync checkpoint instead.")
orig_save_path = save_path
if isinstance(save_path, os.PathLike):
save_path = os.fspath(save_path)
if save_path is not None and gfile.IsDirectory(save_path) and (
(gfile.Exists(path_helpers.get_saved_model_pb_path(save_path)) or
gfile.Exists(path_helpers.get_saved_model_pbtxt_path(save_path)))):
save_path = path_helpers.get_variables_path(save_path)
try:
status = self.read(save_path, options=options)
if context.executing_eagerly():
context.async_wait() # Ensure restore operations have completed.
except errors_impl.NotFoundError as e:
raise errors_impl.NotFoundError(
None, None,
f"Error when restoring from checkpoint or SavedModel at "
f"{orig_save_path}: {e.message}"
f"\nPlease double-check that the path is correct. You may be missing "
"the checkpoint suffix (e.g. the '-1' in 'path/to/ckpt-1').")
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to errors when using,
# say, train.Saver() to save the model before initializing it.
self._maybe_create_save_counter()
if isinstance(status, NameBasedSaverStatus):
status.add_to_optionally_restored(self.save_counter)
return status
_preemption_checkpoint_saved_time_usecs = monitoring.Counter(
"/tensorflow/api/distribution_strategy/preemption_checkpoint_saved_time_usecs",
"Training time saved by PreemptionCheckpointHandler (us).")
|
Checkpoint
|
python
|
pytorch__pytorch
|
torch/fx/experimental/migrate_gradual_types/constraint.py
|
{
"start": 10451,
"end": 11986
}
|
class ____(Constraint):
def __init__(
self,
conv_result,
input_var,
c_out,
kernel,
padding,
stride,
dilation,
matching_constraint_vars,
):
"""
:param conv_result: the convolution result
:param input_var: input to convolution
:param c_out: output channel type
:param kernel: kernel tuple
"""
self.conv_result = conv_result
self.input_var = input_var
self.c_out = c_out
self.kernel = kernel
self.padding = padding
self.stride = stride
self.dilation = dilation
self.matching_constraint = matching_constraint_vars
def __repr__(self):
return (
f"{self.conv_result} ="
f" calc-conv({self.input_var},"
f" {self.c_out}, {self.kernel}, "
f"{self.padding}, {self.stride},"
f" {self.dilation})"
)
def __eq__(self, other):
if isinstance(other, CalcConv):
return (
self.conv_result == other.conv_result
and self.input_var == other.input_var
and self.c_out == other.c_out
and self.kernel == other.kernel
and self.padding == other.padding
and self.stride == other.stride
and self.dilation == other.dilation
and self.matching_constraint == other.matching_constraint
)
else:
return False
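# Hypothetical instantiation (argument values made up; real uses pass type
# variables from the gradual-typing migration pass) showing what __repr__
# renders for this constraint:
c = CalcConv(
    conv_result="t2", input_var="t1", c_out=64,
    kernel=(3, 3), padding=(1, 1), stride=(1, 1), dilation=(1, 1),
    matching_constraint_vars=[],
)
print(c)  # t2 = calc-conv(t1, 64, (3, 3), (1, 1), (1, 1), (1, 1))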
|
CalcConv
|
python
|
apache__airflow
|
providers/edge3/src/airflow/providers/edge3/worker_api/datamodels_ui.py
|
{
"start": 1534,
"end": 1669
}
|
class ____(BaseModel):
"""Worker Collection serializer."""
workers: list[Worker]
total_entries: int
|
WorkerCollectionResponse
|
python
|
jina-ai__jina
|
jina/proto/docarray_v1/pb/jina_pb2_grpc.py
|
{
"start": 9234,
"end": 10053
}
|
class ____(object):
"""*
jina streaming gRPC service.
"""
@staticmethod
def Call(
request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.stream_stream(
request_iterator,
target,
'/jina.JinaRPC/Call',
jina__pb2.DataRequestProto.SerializeToString,
jina__pb2.DataRequestProto.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
|
JinaRPC
|
python
|
huggingface__transformers
|
tests/models/bark/test_modeling_bark.py
|
{
"start": 36328,
"end": 48323
}
|
class ____(unittest.TestCase):
@cached_property
def model(self):
return BarkModel.from_pretrained("suno/bark", revision="refs/pr/25", trust_remote_code=True).to(torch_device)
@cached_property
def processor(self):
return BarkProcessor.from_pretrained("suno/bark")
@cached_property
def inputs(self):
input_ids = self.processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6")
for k, v in input_ids.items():
input_ids[k] = v.to(torch_device)
return input_ids
@cached_property
def semantic_generation_config(self):
semantic_generation_config = BarkSemanticGenerationConfig(**self.model.generation_config.semantic_config)
return semantic_generation_config
@cached_property
def coarse_generation_config(self):
coarse_generation_config = BarkCoarseGenerationConfig(**self.model.generation_config.coarse_acoustics_config)
return coarse_generation_config
@cached_property
def fine_generation_config(self):
fine_generation_config = BarkFineGenerationConfig(**self.model.generation_config.fine_acoustics_config)
return fine_generation_config
def test_model_can_generate(self):
# Bark has custom generate without inheriting GenerationMixin. This test could prevent regression.
self.assertTrue(self.model.can_generate())
def test_generate_semantic(self):
input_ids = self.inputs
# check first ids
expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip
# greedy decoding
with torch.no_grad():
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
)
self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids)
def test_generate_semantic_early_stop(self):
input_ids = self.inputs
min_eos_p = 0.01
# check first ids
expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip
# Should be able to read min_eos_p from kwargs
with torch.no_grad():
torch.manual_seed(0)
output_ids_without_min_eos_p = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=0.9,
semantic_generation_config=self.semantic_generation_config,
)
torch.manual_seed(0)
output_ids_kwargs = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=0.9,
semantic_generation_config=self.semantic_generation_config,
min_eos_p=min_eos_p,
)
self.assertListEqual(output_ids_without_min_eos_p[0, : len(expected_output_ids)].tolist(), expected_output_ids)
self.assertLess(len(output_ids_kwargs[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist()))
# Should be able to read min_eos_p from the semantic generation config
self.semantic_generation_config.min_eos_p = min_eos_p
with torch.no_grad():
torch.manual_seed(0)
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=0.9,
semantic_generation_config=self.semantic_generation_config,
)
self.assertEqual(output_ids.shape, output_ids_kwargs.shape)
self.assertLess(len(output_ids[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist()))
self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids)
def test_generate_coarse(self):
input_ids = self.inputs
history_prompt = input_ids["history_prompt"]
# check first ids
expected_output_ids = [11018, 11391, 10651, 11418, 10857, 11620, 10642, 11366, 10312, 11528, 10531, 11516, 10474, 11051, 10524, 11051, ] # fmt: skip
with torch.no_grad():
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
)
output_ids = self.model.coarse_acoustics.generate(
output_ids,
history_prompt=history_prompt,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
coarse_generation_config=self.coarse_generation_config,
codebook_size=self.model.generation_config.codebook_size,
)
self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids)
def test_generate_fine(self):
input_ids = self.inputs
history_prompt = input_ids["history_prompt"]
# fmt: off
expected_output_ids = [
[1018, 651, 857, 642, 312, 531, 474, 524, 524, 776,],
[367, 394, 596, 342, 504, 492, 27, 27, 822, 822,],
[961, 955, 221, 955, 955, 686, 939, 939, 479, 176,],
[638, 365, 218, 944, 853, 363, 639, 22, 884, 456,],
[302, 912, 524, 38, 174, 209, 879, 23, 910, 227,],
[440, 673, 861, 666, 372, 558, 49, 172, 232, 342,],
[244, 358, 123, 356, 586, 520, 499, 877, 542, 637,],
[806, 685, 905, 848, 803, 810, 921, 208, 625, 203,],
]
# fmt: on
with torch.no_grad():
output_ids = self.model.semantic.generate(
**input_ids,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
)
output_ids = self.model.coarse_acoustics.generate(
output_ids,
history_prompt=history_prompt,
do_sample=False,
temperature=1.0,
semantic_generation_config=self.semantic_generation_config,
coarse_generation_config=self.coarse_generation_config,
codebook_size=self.model.generation_config.codebook_size,
)
# greedy decoding
output_ids = self.model.fine_acoustics.generate(
output_ids,
history_prompt=history_prompt,
temperature=None,
semantic_generation_config=self.semantic_generation_config,
coarse_generation_config=self.coarse_generation_config,
fine_generation_config=self.fine_generation_config,
codebook_size=self.model.generation_config.codebook_size,
)
self.assertListEqual(output_ids[0, :, : len(expected_output_ids[0])].tolist(), expected_output_ids)
def test_generate_end_to_end(self):
input_ids = self.inputs
with torch.no_grad():
self.model.generate(**input_ids)
self.model.generate(**{key: val for (key, val) in input_ids.items() if key != "history_prompt"})
def test_generate_end_to_end_with_args(self):
input_ids = self.inputs
with torch.no_grad():
self.model.generate(**input_ids, do_sample=True, temperature=0.6, penalty_alpha=0.6)
self.model.generate(**input_ids, do_sample=True, temperature=0.6, num_beams=4)
def test_generate_batching(self):
args = {"do_sample": False, "temperature": None}
s1 = "I love HuggingFace"
s2 = "In the light of the moon, a little egg lay on a leaf"
voice_preset = "en_speaker_6"
input_ids = self.processor([s1, s2], voice_preset=voice_preset).to(torch_device)
# generate in batch
outputs, audio_lengths = self.model.generate(**input_ids, **args, return_output_lengths=True)
# generate one-by-one
s1 = self.processor(s1, voice_preset=voice_preset).to(torch_device)
s2 = self.processor(s2, voice_preset=voice_preset).to(torch_device)
output1 = self.model.generate(**s1, **args)
output2 = self.model.generate(**s2, **args)
# up until the coarse acoustic model (included), results are the same
# the fine acoustic model introduces small differences
# first verify if same length (should be the same because it's decided in the coarse model)
self.assertEqual(tuple(audio_lengths), (output1.shape[1], output2.shape[1]))
# then assert almost equal
torch.testing.assert_close(outputs[0, : audio_lengths[0]], output1.squeeze(), rtol=2e-3, atol=2e-3)
torch.testing.assert_close(outputs[1, : audio_lengths[1]], output2.squeeze(), rtol=2e-3, atol=2e-3)
# now test single input with return_output_lengths = True
outputs, _ = self.model.generate(**s1, **args, return_output_lengths=True)
self.assertTrue((outputs == output1).all().item())
def test_generate_end_to_end_with_sub_models_args(self):
input_ids = self.inputs
with torch.no_grad():
torch.manual_seed(0)
self.model.generate(
**input_ids, do_sample=False, temperature=1.0, coarse_do_sample=True, coarse_temperature=0.7
)
output_ids_without_min_eos_p = self.model.generate(
**input_ids,
do_sample=True,
temperature=0.9,
coarse_do_sample=True,
coarse_temperature=0.7,
fine_temperature=0.3,
)
output_ids_with_min_eos_p = self.model.generate(
**input_ids,
do_sample=True,
temperature=0.9,
coarse_temperature=0.7,
fine_temperature=0.3,
min_eos_p=0.1,
)
self.assertLess(
len(output_ids_with_min_eos_p[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist())
)
@require_torch_accelerator
def test_generate_end_to_end_with_offload(self):
input_ids = self.inputs
with torch.no_grad():
# standard generation
output_with_no_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0)
torch_accelerator_module = backend_torch_accelerator_module(torch_device)
torch_accelerator_module.empty_cache()
memory_before_offload = torch_accelerator_module.memory_allocated()
model_memory_footprint = self.model.get_memory_footprint()
# activate cpu offload
self.model.enable_cpu_offload()
memory_after_offload = torch_accelerator_module.memory_allocated()
            # checks if the model has been offloaded
            # CUDA memory usage after offload should be near 0, leaving room for small differences
room_for_difference = 1.1
self.assertGreater(
(memory_before_offload - model_memory_footprint) * room_for_difference, memory_after_offload
)
# checks if device is the correct one
self.assertEqual(self.model.device.type, torch_device)
# checks if hooks exist
self.assertTrue(hasattr(self.model.semantic, "_hf_hook"))
# output with cpu offload
output_with_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0)
# checks if same output
self.assertListAlmostEqual(output_with_no_offload.squeeze().tolist(), output_with_offload.squeeze().tolist())
def assertListAlmostEqual(self, list1, list2, tol=1e-6):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
|
BarkModelIntegrationTests
|
python
|
pypa__setuptools
|
setuptools/tests/test_manifest.py
|
{
"start": 3967,
"end": 9266
}
|
class ____(TempDirTestCase):
def setup_method(self, method):
super().setup_method(method)
f = open(os.path.join(self.temp_dir, 'setup.py'), 'w', encoding="utf-8")
f.write(SETUP_PY)
f.close()
"""
Create a file tree like:
- LICENSE
- README.rst
- testing.rst
- .hidden.rst
- app/
- __init__.py
- a.txt
- b.txt
- c.rst
- static/
- app.js
- app.js.map
- app.css
- app.css.map
"""
for fname in ['README.rst', '.hidden.rst', 'testing.rst', 'LICENSE']:
touch(os.path.join(self.temp_dir, fname))
# Set up the rest of the test package
test_pkg = os.path.join(self.temp_dir, 'app')
os.mkdir(test_pkg)
for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
touch(os.path.join(test_pkg, fname))
# Some compiled front-end assets to include
static = os.path.join(test_pkg, 'static')
os.mkdir(static)
for fname in ['app.js', 'app.js.map', 'app.css', 'app.css.map']:
touch(os.path.join(static, fname))
def make_manifest(self, contents):
"""Write a MANIFEST.in."""
manifest = os.path.join(self.temp_dir, 'MANIFEST.in')
with open(manifest, 'w', encoding="utf-8") as f:
f.write(DALS(contents))
def get_files(self):
"""Run egg_info and get all the files to include, as a set"""
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = egg_info(dist)
cmd.ensure_finalized()
cmd.run()
return set(cmd.filelist.files)
def test_no_manifest(self):
"""Check a missing MANIFEST.in includes only the standard files."""
assert (default_files - set(['MANIFEST.in'])) == self.get_files()
def test_empty_files(self):
"""Check an empty MANIFEST.in includes only the standard files."""
self.make_manifest("")
assert default_files == self.get_files()
def test_include(self):
"""Include extra rst files in the project root."""
self.make_manifest("include *.rst")
files = default_files | set(['testing.rst', '.hidden.rst'])
assert files == self.get_files()
def test_exclude(self):
"""Include everything in app/ except the text files"""
ml = make_local_path
self.make_manifest(
"""
include app/*
exclude app/*.txt
"""
)
files = default_files | set([ml('app/c.rst')])
assert files == self.get_files()
def test_include_multiple(self):
"""Include with multiple patterns."""
ml = make_local_path
self.make_manifest("include app/*.txt app/static/*")
files = default_files | set([
ml('app/a.txt'),
ml('app/b.txt'),
ml('app/static/app.js'),
ml('app/static/app.js.map'),
ml('app/static/app.css'),
ml('app/static/app.css.map'),
])
assert files == self.get_files()
def test_graft(self):
"""Include the whole app/static/ directory."""
ml = make_local_path
self.make_manifest("graft app/static")
files = default_files | set([
ml('app/static/app.js'),
ml('app/static/app.js.map'),
ml('app/static/app.css'),
ml('app/static/app.css.map'),
])
assert files == self.get_files()
def test_graft_glob_syntax(self):
"""Include the whole app/static/ directory."""
ml = make_local_path
self.make_manifest("graft */static")
files = default_files | set([
ml('app/static/app.js'),
ml('app/static/app.js.map'),
ml('app/static/app.css'),
ml('app/static/app.css.map'),
])
assert files == self.get_files()
def test_graft_global_exclude(self):
"""Exclude all *.map files in the project."""
ml = make_local_path
self.make_manifest(
"""
graft app/static
global-exclude *.map
"""
)
files = default_files | set([ml('app/static/app.js'), ml('app/static/app.css')])
assert files == self.get_files()
def test_global_include(self):
"""Include all *.rst, *.js, and *.css files in the whole tree."""
ml = make_local_path
self.make_manifest(
"""
global-include *.rst *.js *.css
"""
)
files = default_files | set([
'.hidden.rst',
'testing.rst',
ml('app/c.rst'),
ml('app/static/app.js'),
ml('app/static/app.css'),
])
assert files == self.get_files()
def test_graft_prune(self):
"""Include all files in app/, except for the whole app/static/ dir."""
ml = make_local_path
self.make_manifest(
"""
graft app
prune app/static
"""
)
files = default_files | set([ml('app/a.txt'), ml('app/b.txt'), ml('app/c.rst')])
assert files == self.get_files()
|
TestManifestTest
|
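The TestManifestTest fixture above composes a MANIFEST.in, runs egg_info, and compares the resulting file set. As a worked illustration of how those helpers combine, here is a hypothetical extra test, not part of the setuptools suite: it assumes the module-level `default_files` set and `make_local_path` helper that the tests above already use (defined elsewhere in the same test module), and it assumes the standard MANIFEST.in semantics where `recursive-include app *.js` picks up every `*.js` file at any depth under `app/`.

    def test_recursive_include_sketch(self):
        """Hypothetical: include every .js file anywhere under app/."""
        ml = make_local_path
        self.make_manifest("recursive-include app *.js")
        # app.js.map does not end in ".js", so only app/static/app.js is added
        files = default_files | set([ml('app/static/app.js')])
        assert files == self.get_files()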
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/util.py
|
{
"start": 21850,
"end": 29042
}
|
class ____(
inspection.Inspectable["AliasedInsp[_O]"], ORMColumnsClauseRole[_O]
):
r"""Represents an "aliased" form of a mapped class for usage with Query.
The ORM equivalent of a :func:`~sqlalchemy.sql.expression.alias`
construct, this object mimics the mapped class using a
``__getattr__`` scheme and maintains a reference to a
real :class:`~sqlalchemy.sql.expression.Alias` object.
A primary purpose of :class:`.AliasedClass` is to serve as an alternate
within a SQL statement generated by the ORM, such that an existing
mapped entity can be used in multiple contexts. A simple example::
# find all pairs of users with the same name
user_alias = aliased(User)
session.query(User, user_alias).join(
(user_alias, User.id > user_alias.id)
).filter(User.name == user_alias.name)
:class:`.AliasedClass` is also capable of mapping an existing mapped
class to an entirely new selectable, provided this selectable is column-
compatible with the existing mapped selectable, and it can also be
configured in a mapping as the target of a :func:`_orm.relationship`.
See the links below for examples.
The :class:`.AliasedClass` object is constructed typically using the
:func:`_orm.aliased` function. It also is produced with additional
configuration when using the :func:`_orm.with_polymorphic` function.
The resulting object is an instance of :class:`.AliasedClass`.
This object implements an attribute scheme which produces the
same attribute and method interface as the original mapped
class, allowing :class:`.AliasedClass` to be compatible
with any attribute technique which works on the original class,
including hybrid attributes (see :ref:`hybrids_toplevel`).
The :class:`.AliasedClass` can be inspected for its underlying
:class:`_orm.Mapper`, aliased selectable, and other information
using :func:`_sa.inspect`::
from sqlalchemy import inspect
my_alias = aliased(MyClass)
insp = inspect(my_alias)
The resulting inspection object is an instance of :class:`.AliasedInsp`.
.. seealso::
:func:`.aliased`
:func:`.with_polymorphic`
:ref:`relationship_aliased_class`
:ref:`relationship_to_window_function`
"""
__name__: str
def __init__(
self,
mapped_class_or_ac: _EntityType[_O],
alias: Optional[FromClause] = None,
name: Optional[str] = None,
flat: bool = False,
adapt_on_names: bool = False,
with_polymorphic_mappers: Optional[Sequence[Mapper[Any]]] = None,
with_polymorphic_discriminator: Optional[ColumnElement[Any]] = None,
base_alias: Optional[AliasedInsp[Any]] = None,
use_mapper_path: bool = False,
represents_outer_join: bool = False,
):
insp = cast(
"_InternalEntityType[_O]", inspection.inspect(mapped_class_or_ac)
)
mapper = insp.mapper
nest_adapters = False
if alias is None:
if insp.is_aliased_class and insp.selectable._is_subquery:
alias = insp.selectable.alias()
else:
alias = (
mapper._with_polymorphic_selectable._anonymous_fromclause(
name=name,
flat=flat,
)
)
elif insp.is_aliased_class:
nest_adapters = True
assert alias is not None
self._aliased_insp = AliasedInsp(
self,
insp,
alias,
name,
(
with_polymorphic_mappers
if with_polymorphic_mappers
else mapper.with_polymorphic_mappers
),
(
with_polymorphic_discriminator
if with_polymorphic_discriminator is not None
else mapper.polymorphic_on
),
base_alias,
use_mapper_path,
adapt_on_names,
represents_outer_join,
nest_adapters,
)
self.__name__ = f"aliased({mapper.class_.__name__})"
@classmethod
def _reconstitute_from_aliased_insp(
cls, aliased_insp: AliasedInsp[_O]
) -> AliasedClass[_O]:
obj = cls.__new__(cls)
obj.__name__ = f"aliased({aliased_insp.mapper.class_.__name__})"
obj._aliased_insp = aliased_insp
if aliased_insp._is_with_polymorphic:
for sub_aliased_insp in aliased_insp._with_polymorphic_entities:
if sub_aliased_insp is not aliased_insp:
ent = AliasedClass._reconstitute_from_aliased_insp(
sub_aliased_insp
)
setattr(obj, sub_aliased_insp.class_.__name__, ent)
return obj
def __getattr__(self, key: str) -> Any:
try:
_aliased_insp = self.__dict__["_aliased_insp"]
except KeyError:
raise AttributeError()
else:
target = _aliased_insp._target
# maintain all getattr mechanics
attr = getattr(target, key)
# attribute is a method, that will be invoked against a
# "self"; so just return a new method with the same function and
# new self
if hasattr(attr, "__call__") and hasattr(attr, "__self__"):
return types.MethodType(attr.__func__, self)
# attribute is a descriptor, that will be invoked against a
# "self"; so invoke the descriptor against this self
if hasattr(attr, "__get__"):
attr = attr.__get__(None, self)
# attributes within the QueryableAttribute system will want this
# to be invoked so the object can be adapted
if hasattr(attr, "adapt_to_entity"):
attr = attr.adapt_to_entity(_aliased_insp)
setattr(self, key, attr)
return attr
def _get_from_serialized(
self, key: str, mapped_class: _O, aliased_insp: AliasedInsp[_O]
) -> Any:
# this method is only used in terms of the
# sqlalchemy.ext.serializer extension
attr = getattr(mapped_class, key)
if hasattr(attr, "__call__") and hasattr(attr, "__self__"):
return types.MethodType(attr.__func__, self)
# attribute is a descriptor, that will be invoked against a
# "self"; so invoke the descriptor against this self
if hasattr(attr, "__get__"):
attr = attr.__get__(None, self)
# attributes within the QueryableAttribute system will want this
# to be invoked so the object can be adapted
if hasattr(attr, "adapt_to_entity"):
aliased_insp._weak_entity = weakref.ref(self)
attr = attr.adapt_to_entity(aliased_insp)
setattr(self, key, attr)
return attr
def __repr__(self) -> str:
return "<AliasedClass at 0x%x; %s>" % (
id(self),
self._aliased_insp._target.__name__,
)
def __str__(self) -> str:
return str(self._aliased_insp)
@inspection._self_inspects
|
AliasedClass
|
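The AliasedClass docstring above already sketches the self-join use case; the snippet below is a minimal, hedged restatement of that example in runnable form. `User` and `session` are assumed to be an existing mapped class and an active Session from the surrounding application, so they are not defined here; only `aliased()` and `inspect()` come from SQLAlchemy itself.

from sqlalchemy import inspect
from sqlalchemy.orm import aliased

# assumed to exist in the application: a mapped class `User` and a `session`
user_alias = aliased(User)

# find all pairs of users with the same name (same query as in the docstring,
# written with the non-tuple join() form)
pairs = (
    session.query(User, user_alias)
    .join(user_alias, User.id > user_alias.id)
    .filter(User.name == user_alias.name)
    .all()
)

# the alias can be inspected for its underlying AliasedInsp, as noted above
insp = inspect(user_alias)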
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/operators/trigger_dagrun.py
|
{
"start": 4034,
"end": 19358
}
|
class ____(BaseOperator):
"""
Triggers a DAG run for a specified DAG ID.
Note that if database isolation mode is enabled, not all features are supported.
:param trigger_dag_id: The ``dag_id`` of the DAG to trigger (templated).
:param trigger_run_id: The run ID to use for the triggered DAG run (templated).
If not provided, a run ID will be automatically generated.
:param conf: Configuration for the DAG run (templated).
:param logical_date: Logical date for the triggered DAG (templated).
    :param reset_dag_run: Whether to clear an existing DAG run if one already exists.
        This is useful when backfilling or rerunning an existing DAG run.
        This only resets (not recreates) the DAG run.
        DAG run conf is immutable and will not be reset on rerun of an existing DAG run.
        When reset_dag_run=False and the DAG run exists, DagRunAlreadyExists will be raised.
        When reset_dag_run=True and the DAG run exists, the existing DAG run will be cleared to rerun.
    :param wait_for_completion: Whether or not to wait for DAG run completion. (default: False)
:param poke_interval: Poke interval to check DAG run status when wait_for_completion=True.
(default: 60)
:param allowed_states: Optional list of allowed DAG run states of the triggered DAG. This is useful when
setting ``wait_for_completion`` to True. Must be a valid DagRunState.
Default is ``[DagRunState.SUCCESS]``.
:param failed_states: Optional list of failed or disallowed DAG run states of the triggered DAG. This is
useful when setting ``wait_for_completion`` to True. Must be a valid DagRunState.
Default is ``[DagRunState.FAILED]``.
:param skip_when_already_exists: Set to true to mark the task as SKIPPED if a DAG run of the triggered
DAG for the same logical date already exists.
:param fail_when_dag_is_paused: If the dag to trigger is paused, DagIsPaused will be raised.
:param deferrable: If waiting for completion, whether or not to defer the task until done,
default is ``False``.
:param openlineage_inject_parent_info: whether to include OpenLineage metadata about the parent task
in the triggered DAG run's conf, enabling improved lineage tracking. The metadata is only injected
if OpenLineage is enabled and running. This option does not modify any other part of the conf,
and existing OpenLineage-related settings in the conf will not be overwritten. The injection process
is safeguarded against exceptions - if any error occurs during metadata injection, it is gracefully
handled and the conf remains unchanged - so it's safe to use. Default is ``True``
"""
template_fields: Sequence[str] = (
"trigger_dag_id",
"trigger_run_id",
"logical_date",
"conf",
"wait_for_completion",
"skip_when_already_exists",
)
template_fields_renderers = {"conf": "py"}
ui_color = "#ffefeb"
operator_extra_links = [TriggerDagRunLink()]
def __init__(
self,
*,
trigger_dag_id: str,
trigger_run_id: str | None = None,
conf: dict | None = None,
logical_date: str | datetime.datetime | None | ArgNotSet = NOTSET,
reset_dag_run: bool = False,
wait_for_completion: bool = False,
poke_interval: int = 60,
allowed_states: list[str | DagRunState] | None = None,
failed_states: list[str | DagRunState] | None = None,
skip_when_already_exists: bool = False,
fail_when_dag_is_paused: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
openlineage_inject_parent_info: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_dag_id = trigger_dag_id
self.trigger_run_id = trigger_run_id
self.conf = conf
self.reset_dag_run = reset_dag_run
self.wait_for_completion = wait_for_completion
self.poke_interval = poke_interval
if allowed_states:
self.allowed_states = [DagRunState(s) for s in allowed_states]
else:
self.allowed_states = [DagRunState.SUCCESS]
if failed_states is not None:
self.failed_states = [DagRunState(s) for s in failed_states]
else:
self.failed_states = [DagRunState.FAILED]
self.skip_when_already_exists = skip_when_already_exists
self.fail_when_dag_is_paused = fail_when_dag_is_paused
self.openlineage_inject_parent_info = openlineage_inject_parent_info
self._defer = deferrable
self.logical_date = logical_date
if logical_date is NOTSET:
self.logical_date = NOTSET
elif logical_date is None or isinstance(logical_date, (str, datetime.datetime)):
self.logical_date = logical_date
else:
raise TypeError(
f"Expected str, datetime.datetime, or None for parameter 'logical_date'. Got {type(logical_date).__name__}"
)
if fail_when_dag_is_paused and AIRFLOW_V_3_0_PLUS:
raise NotImplementedError("Setting `fail_when_dag_is_paused` not yet supported for Airflow 3.x")
def execute(self, context: Context):
if self.logical_date is NOTSET:
# If no logical_date is provided we will set utcnow()
parsed_logical_date = timezone.utcnow()
elif self.logical_date is None or isinstance(self.logical_date, datetime.datetime):
parsed_logical_date = self.logical_date # type: ignore
elif isinstance(self.logical_date, str):
parsed_logical_date = timezone.parse(self.logical_date)
try:
if self.conf and isinstance(self.conf, str):
self.conf = json.loads(self.conf)
json.dumps(self.conf)
except (TypeError, JSONDecodeError):
raise ValueError("conf parameter should be JSON Serializable %s", self.conf)
if self.openlineage_inject_parent_info:
self.log.debug("Checking if OpenLineage information can be safely injected into dagrun conf.")
self.conf = safe_inject_openlineage_properties_into_dagrun_conf(
dr_conf=self.conf, ti=context.get("ti")
)
if self.trigger_run_id:
run_id = str(self.trigger_run_id)
else:
if AIRFLOW_V_3_0_PLUS:
run_id = DagRun.generate_run_id(
run_type=DagRunType.MANUAL,
logical_date=parsed_logical_date,
run_after=parsed_logical_date or timezone.utcnow(),
)
else:
run_id = DagRun.generate_run_id(DagRunType.MANUAL, parsed_logical_date or timezone.utcnow()) # type: ignore[misc,call-arg]
# Save run_id as task attribute - to be used by listeners
self.trigger_run_id = run_id
if self.fail_when_dag_is_paused:
dag_model = DagModel.get_current(self.trigger_dag_id)
if not dag_model:
raise ValueError(f"Dag {self.trigger_dag_id} is not found")
if dag_model.is_paused:
# TODO: enable this when dag state endpoint available from task sdk
# if AIRFLOW_V_3_0_PLUS:
# raise DagIsPaused(dag_id=self.trigger_dag_id)
raise AirflowException(f"Dag {self.trigger_dag_id} is paused")
if AIRFLOW_V_3_0_PLUS:
self._trigger_dag_af_3(
context=context, run_id=self.trigger_run_id, parsed_logical_date=parsed_logical_date
)
else:
self._trigger_dag_af_2(
context=context, run_id=self.trigger_run_id, parsed_logical_date=parsed_logical_date
)
def _trigger_dag_af_3(self, context, run_id, parsed_logical_date):
from airflow.providers.common.compat.sdk import DagRunTriggerException
raise DagRunTriggerException(
trigger_dag_id=self.trigger_dag_id,
dag_run_id=run_id,
conf=self.conf,
logical_date=parsed_logical_date,
reset_dag_run=self.reset_dag_run,
skip_when_already_exists=self.skip_when_already_exists,
wait_for_completion=self.wait_for_completion,
allowed_states=self.allowed_states,
failed_states=self.failed_states,
poke_interval=self.poke_interval,
deferrable=self._defer,
)
def _trigger_dag_af_2(self, context, run_id, parsed_logical_date):
try:
dag_run = trigger_dag(
dag_id=self.trigger_dag_id,
run_id=run_id,
conf=self.conf,
execution_date=parsed_logical_date,
replace_microseconds=False,
)
except DagRunAlreadyExists as e:
if self.reset_dag_run:
dag_run = e.dag_run
self.log.info("Clearing %s on %s", self.trigger_dag_id, dag_run.run_id)
# Get target dag object and call clear()
dag_model = DagModel.get_current(self.trigger_dag_id)
if dag_model is None:
raise DagNotFound(f"Dag id {self.trigger_dag_id} not found in DagModel")
# Note: here execution fails on database isolation mode. Needs structural changes for AIP-72
dag = SerializedDagModel.get_dag(self.trigger_dag_id)
dag.clear(start_date=dag_run.logical_date, end_date=dag_run.logical_date)
else:
if self.skip_when_already_exists:
raise AirflowSkipException(
"Skipping due to skip_when_already_exists is set to True and DagRunAlreadyExists"
)
raise e
if dag_run is None:
raise RuntimeError("The dag_run should be set here!")
# Store the run id from the dag run (either created or found above) to
# be used when creating the extra link on the webserver.
ti = context["task_instance"]
ti.xcom_push(key=XCOM_RUN_ID, value=dag_run.run_id)
if self.wait_for_completion:
# Kick off the deferral process
if self._defer:
self.defer(
trigger=DagStateTrigger(
dag_id=self.trigger_dag_id,
states=self.allowed_states + self.failed_states,
execution_dates=[dag_run.logical_date],
run_ids=[run_id],
poll_interval=self.poke_interval,
),
method_name="execute_complete",
)
# wait for dag to complete
while True:
self.log.info(
"Waiting for %s on %s to become allowed state %s ...",
self.trigger_dag_id,
run_id,
self.allowed_states,
)
time.sleep(self.poke_interval)
# Note: here execution fails on database isolation mode. Needs structural changes for AIP-72
dag_run.refresh_from_db()
state = dag_run.state
if state in self.failed_states:
raise AirflowException(f"{self.trigger_dag_id} failed with failed states {state}")
if state in self.allowed_states:
self.log.info("%s finished with allowed state %s", self.trigger_dag_id, state)
return
def execute_complete(self, context: Context, event: tuple[str, dict[str, Any]]):
"""
Handle task completion after returning from a deferral.
Args:
context: The Airflow context dictionary.
event: A tuple containing the class path of the trigger and the trigger event data.
"""
# Example event tuple content:
# (
# "airflow.providers.standard.triggers.external_task.DagStateTrigger",
# {
# 'dag_id': 'some_dag',
# 'states': ['success', 'failed'],
# 'poll_interval': 15,
# 'run_ids': ['manual__2025-11-19T17:49:20.907083+00:00'],
# 'execution_dates': [
# DateTime(2025, 11, 19, 17, 49, 20, 907083, tzinfo=Timezone('UTC'))
# ]
# }
# )
_, event_data = event
run_ids = event_data["run_ids"]
# Re-set as attribute after coming back from deferral - to be used by listeners.
# Just a safety check on length, we should always have single run_id here.
self.trigger_run_id = run_ids[0] if len(run_ids) == 1 else None
if AIRFLOW_V_3_0_PLUS:
self._trigger_dag_run_af_3_execute_complete(event_data=event_data)
else:
self._trigger_dag_run_af_2_execute_complete(event_data=event_data)
def _trigger_dag_run_af_3_execute_complete(self, event_data: dict[str, Any]):
failed_run_id_conditions = []
for run_id in event_data["run_ids"]:
state = event_data.get(run_id)
if state in self.failed_states:
failed_run_id_conditions.append(run_id)
continue
if state in self.allowed_states:
self.log.info(
"%s finished with allowed state %s for run_id %s",
self.trigger_dag_id,
state,
run_id,
)
if failed_run_id_conditions:
raise AirflowException(
f"{self.trigger_dag_id} failed with failed states {self.failed_states} for run_ids"
f" {failed_run_id_conditions}"
)
if not AIRFLOW_V_3_0_PLUS:
from airflow.utils.session import NEW_SESSION, provide_session # type: ignore[misc]
@provide_session
def _trigger_dag_run_af_2_execute_complete(
self, event_data: dict[str, Any], session: Session = NEW_SESSION
):
# This logical_date is parsed from the return trigger event
provided_logical_date = event_data["execution_dates"][0]
try:
# Note: here execution fails on database isolation mode. Needs structural changes for AIP-72
dag_run = session.execute(
select(DagRun).where(
DagRun.dag_id == self.trigger_dag_id, DagRun.execution_date == provided_logical_date
)
).scalar_one()
except NoResultFound:
raise AirflowException(
f"No DAG run found for DAG {self.trigger_dag_id} and logical date {self.logical_date}"
)
state = dag_run.state
if state in self.failed_states:
raise AirflowException(f"{self.trigger_dag_id} failed with failed state {state}")
if state in self.allowed_states:
self.log.info("%s finished with allowed state %s", self.trigger_dag_id, state)
return
raise AirflowException(
f"{self.trigger_dag_id} return {state} which is not in {self.failed_states}"
f" or {self.allowed_states}"
)
|
TriggerDagRunOperator
|
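As a usage illustration for the operator above, here is a hedged sketch of a DAG that triggers another DAG and waits for it to finish. The dag_ids, schedule, and conf payload are made up for the example, and depending on the Airflow version `DAG` may need to be imported from `airflow.sdk` instead of `airflow`.

from datetime import datetime

from airflow import DAG
from airflow.providers.standard.operators.trigger_dagrun import TriggerDagRunOperator

with DAG(dag_id="upstream_dag", start_date=datetime(2024, 1, 1), schedule=None) as dag:
    trigger = TriggerDagRunOperator(
        task_id="trigger_downstream",
        trigger_dag_id="downstream_dag",   # hypothetical target DAG
        conf={"source": "upstream_dag"},   # must be JSON-serializable
        reset_dag_run=True,                # rerun-friendly, see docstring above
        wait_for_completion=True,
        poke_interval=30,
    )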
python
|
explosion__spaCy
|
setup.py
|
{
"start": 3001,
"end": 3398
}
|
class ____:
def build_options(self):
for e in self.extensions:
e.extra_compile_args += COMPILE_OPTIONS.get(
self.compiler.compiler_type, COMPILE_OPTIONS["other"]
)
for e in self.extensions:
e.extra_link_args += LINK_OPTIONS.get(
self.compiler.compiler_type, LINK_OPTIONS["other"]
)
|
build_ext_options
|
python
|
lazyprogrammer__machine_learning_examples
|
unsupervised_class2/xwing.py
|
{
"start": 1238,
"end": 3879
}
|
class ____(object):
def __init__(self, hidden_layer_sizes):
self.hidden_layer_sizes = hidden_layer_sizes
def fit(self, X, learning_rate=0.5, mu=0.99, epochs=50, batch_sz=100, show_fig=False):
# cast hyperparams
learning_rate = np.float32(learning_rate)
mu = np.float32(mu)
N, D = X.shape
n_batches = N // batch_sz
mi = D
self.layers = []
self.params = []
for mo in self.hidden_layer_sizes:
layer = Layer(mi, mo)
self.layers.append(layer)
self.params += layer.params
mi = mo
X_in = T.matrix('X')
X_hat = self.forward(X_in)
cost = -(X_in * T.log(X_hat) + (1 - X_in) * T.log(1 - X_hat)).mean()
cost_op = theano.function(
inputs=[X_in],
outputs=cost,
)
updates = momentum_updates(cost, self.params, mu, learning_rate)
train_op = theano.function(
inputs=[X_in],
outputs=cost,
updates=updates,
)
costs = []
for i in range(epochs):
print("epoch:", i)
X = shuffle(X)
for j in range(n_batches):
batch = X[j*batch_sz:(j*batch_sz + batch_sz)]
c = train_op(batch)
if j % 100 == 0:
print("j / n_batches:", j, "/", n_batches, "cost:", c)
costs.append(c)
if show_fig:
plt.plot(costs)
plt.show()
def forward(self, X):
Z = X
for layer in self.layers:
Z = layer.forward(Z)
self.map2center = theano.function(
inputs=[X],
outputs=Z,
)
for i in range(len(self.layers)-1, -1, -1):
Z = self.layers[i].forwardT(Z)
return Z
def main():
Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
dae = DeepAutoEncoder([500, 300, 2])
dae.fit(Xtrain)
mapping = dae.map2center(Xtrain)
plt.scatter(mapping[:,0], mapping[:,1], c=Ytrain, s=100, alpha=0.5)
plt.show()
# purity measure from unsupervised machine learning pt 1
# NOTE: this will take a long time (i.e. just leave it overnight)
gmm = GaussianMixture(n_components=10)
gmm.fit(Xtrain)
print("Finished GMM training")
responsibilities_full = gmm.predict_proba(Xtrain)
print("full purity:", purity(Ytrain, responsibilities_full))
gmm.fit(mapping)
responsibilities_reduced = gmm.predict_proba(mapping)
print("reduced purity:", purity(Ytrain, responsibilities_reduced))
if __name__ == '__main__':
main()
|
DeepAutoEncoder
|
python
|
huggingface__transformers
|
src/transformers/models/clap/processing_clap.py
|
{
"start": 768,
"end": 1495
}
|
class ____(ProcessorMixin):
r"""
Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBerta tokenizer into a single processor.
[`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the
[`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.
Args:
feature_extractor ([`ClapFeatureExtractor`]):
The audio processor is a required input.
tokenizer ([`RobertaTokenizerFast`]):
The tokenizer is a required input.
"""
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
__all__ = ["ClapProcessor"]
|
ClapProcessor
|
python
|
scipy__scipy
|
scipy/optimize/_nonlin.py
|
{
"start": 31694,
"end": 35949
}
|
class ____(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
    The Jacobian is formed by seeking the 'best' solution in the space
    spanned by the last `M` vectors. As a result, only an MxM matrix
    inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method='anderson'`` in particular.
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
Examples
--------
The following functions define a system of nonlinear equations
>>> def fun(x):
... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
... 0.5 * (x[1] - x[0])**3 + x[1]]
A solution can be obtained as follows.
>>> from scipy import optimize
>>> sol = optimize.anderson(fun, [0, 0])
>>> sol
array([0.84116588, 0.15883789])
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in range(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in range(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in range(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in range(n):
for j in range(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in range(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in range(n):
for j in range(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
|
Anderson
|
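Building on the docstring example above, the sketch below passes the `M` and `w0` options through the functional `optimize.anderson` wrapper; the tolerance value is arbitrary and only for illustration.

from scipy import optimize

def fun(x):
    return [x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0,
            0.5 * (x[1] - x[0]) ** 3 + x[1]]

# retain the last 5 (dx, df) pairs and add a small regularization term
sol = optimize.anderson(fun, [0.0, 0.0], M=5, w0=0.01, f_tol=1e-10)
print(sol)  # roughly [0.84, 0.16], matching the docstring example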
python
|
ray-project__ray
|
python/ray/air/integrations/wandb.py
|
{
"start": 17129,
"end": 29081
}
|
class ____(LoggerCallback):
"""WandbLoggerCallback
Weights and biases (https://www.wandb.ai/) is a tool for experiment
tracking, model optimization, and dataset versioning. This Ray Tune
``LoggerCallback`` sends metrics to Wandb for automatic tracking and
visualization.
Example:
.. testcode::
import random
from ray import tune
from ray.air.integrations.wandb import WandbLoggerCallback
def train_func(config):
offset = random.random() / 5
for epoch in range(2, config["epochs"]):
acc = 1 - (2 + config["lr"]) ** -epoch - random.random() / epoch - offset
loss = (2 + config["lr"]) ** -epoch + random.random() / epoch + offset
train.report({"acc": acc, "loss": loss})
tuner = tune.Tuner(
train_func,
param_space={
"lr": tune.grid_search([0.001, 0.01, 0.1, 1.0]),
"epochs": 10,
},
run_config=tune.RunConfig(
callbacks=[WandbLoggerCallback(project="Optimization_Project")]
),
)
results = tuner.fit()
.. testoutput::
:hide:
...
Args:
project: Name of the Wandb project. Mandatory.
group: Name of the Wandb group. Defaults to the trainable
name.
api_key_file: Path to file containing the Wandb API KEY. This
file only needs to be present on the node running the Tune script
if using the WandbLogger.
api_key: Wandb API Key. Alternative to setting ``api_key_file``.
excludes: List of metrics and config that should be excluded from
the log.
log_config: Boolean indicating if the ``config`` parameter of
the ``results`` dict should be logged. This makes sense if
parameters will change during training, e.g. with
PopulationBasedTraining. Defaults to False.
upload_checkpoints: If ``True``, model checkpoints will be uploaded to
Wandb as artifacts. Defaults to ``False``.
video_kwargs: Dictionary of keyword arguments passed to wandb.Video()
when logging videos. Videos have to be logged as 5D numpy arrays
to be affected by this parameter. For valid keyword arguments, see
https://docs.wandb.ai/ref/python/data-types/video/. Defaults to ``None``.
image_kwargs: Dictionary of keyword arguments passed to wandb.Image()
when logging images. Images have to be logged as 3D or 4D numpy arrays
to be affected by this parameter. For valid keyword arguments, see
https://docs.wandb.ai/ref/python/data-types/image/. Defaults to ``None``.
**kwargs: The keyword arguments will be passed to ``wandb.init()``.
Wandb's ``group``, ``run_id`` and ``run_name`` are automatically selected
by Tune, but can be overwritten by filling out the respective configuration
values.
Please see here for all other valid configuration settings:
https://docs.wandb.ai/ref/python/init/
""" # noqa: E501
# Do not log these result keys
_exclude_results = ["done", "should_checkpoint"]
AUTO_CONFIG_KEYS = [
"trial_id",
"experiment_tag",
"node_ip",
"experiment_id",
"hostname",
"pid",
"date",
]
"""Results that are saved with `wandb.config` instead of `wandb.log`."""
_logger_actor_cls = _WandbLoggingActor
def __init__(
self,
project: Optional[str] = None,
group: Optional[str] = None,
api_key_file: Optional[str] = None,
api_key: Optional[str] = None,
excludes: Optional[List[str]] = None,
log_config: bool = False,
upload_checkpoints: bool = False,
save_checkpoints: bool = False,
upload_timeout: int = DEFAULT_SYNC_TIMEOUT,
video_kwargs: Optional[dict] = None,
image_kwargs: Optional[dict] = None,
**kwargs,
):
if not wandb:
raise RuntimeError(
"Wandb was not found - please install with `pip install wandb`"
)
if save_checkpoints:
warnings.warn(
"`save_checkpoints` is deprecated. Use `upload_checkpoints` instead.",
DeprecationWarning,
)
upload_checkpoints = save_checkpoints
self.project = project
self.group = group
self.api_key_path = api_key_file
self.api_key = api_key
self.excludes = excludes or []
self.log_config = log_config
self.upload_checkpoints = upload_checkpoints
self._upload_timeout = upload_timeout
self.video_kwargs = video_kwargs or {}
self.image_kwargs = image_kwargs or {}
self.kwargs = kwargs
self._remote_logger_class = None
self._trial_logging_actors: Dict[
"Trial", ray.actor.ActorHandle[_WandbLoggingActor]
] = {}
self._trial_logging_futures: Dict["Trial", ray.ObjectRef] = {}
self._logging_future_to_trial: Dict[ray.ObjectRef, "Trial"] = {}
self._trial_queues: Dict["Trial", Queue] = {}
def setup(self, *args, **kwargs):
self.api_key_file = (
os.path.expanduser(self.api_key_path) if self.api_key_path else None
)
_set_api_key(self.api_key_file, self.api_key)
self.project = _get_wandb_project(self.project)
if not self.project:
raise ValueError(
"Please pass the project name as argument or through "
f"the {WANDB_PROJECT_ENV_VAR} environment variable."
)
if not self.group and os.environ.get(WANDB_GROUP_ENV_VAR):
self.group = os.environ.get(WANDB_GROUP_ENV_VAR)
def log_trial_start(self, trial: "Trial"):
config = trial.config.copy()
config.pop("callbacks", None) # Remove callbacks
exclude_results = self._exclude_results.copy()
# Additional excludes
exclude_results += self.excludes
# Log config keys on each result?
if not self.log_config:
exclude_results += ["config"]
# Fill trial ID and name
trial_id = trial.trial_id if trial else None
trial_name = str(trial) if trial else None
# Project name for Wandb
wandb_project = self.project
# Grouping
wandb_group = self.group or trial.experiment_dir_name if trial else None
# remove unpickleable items!
config = _clean_log(config)
config = {
key: value for key, value in config.items() if key not in self.excludes
}
wandb_init_kwargs = dict(
id=trial_id,
name=trial_name,
resume=False,
reinit=True,
allow_val_change=True,
group=wandb_group,
project=wandb_project,
config=config,
)
wandb_init_kwargs.update(self.kwargs)
self._start_logging_actor(trial, exclude_results, **wandb_init_kwargs)
def _start_logging_actor(
self, trial: "Trial", exclude_results: List[str], **wandb_init_kwargs
):
# Reuse actor if one already exists.
# This can happen if the trial is restarted.
if trial in self._trial_logging_futures:
return
if not self._remote_logger_class:
env_vars = {}
# API key env variable is not set if authenticating through `wandb login`
if WANDB_ENV_VAR in os.environ:
env_vars[WANDB_ENV_VAR] = os.environ[WANDB_ENV_VAR]
self._remote_logger_class = ray.remote(
num_cpus=0,
**_force_on_current_node(),
runtime_env={"env_vars": env_vars},
max_restarts=-1,
max_task_retries=-1,
)(self._logger_actor_cls)
self._trial_queues[trial] = Queue(
actor_options={
"num_cpus": 0,
**_force_on_current_node(),
"max_restarts": -1,
"max_task_retries": -1,
}
)
self._trial_logging_actors[trial] = self._remote_logger_class.remote(
logdir=trial.local_path,
queue=self._trial_queues[trial],
exclude=exclude_results,
to_config=self.AUTO_CONFIG_KEYS,
**wandb_init_kwargs,
)
logging_future = self._trial_logging_actors[trial].run.remote()
self._trial_logging_futures[trial] = logging_future
self._logging_future_to_trial[logging_future] = trial
def _signal_logging_actor_stop(self, trial: "Trial"):
self._trial_queues[trial].put((_QueueItem.END, None))
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
if trial not in self._trial_logging_actors:
self.log_trial_start(trial)
result = _clean_log(
result, video_kwargs=self.video_kwargs, image_kwargs=self.image_kwargs
)
self._trial_queues[trial].put((_QueueItem.RESULT, result))
def log_trial_save(self, trial: "Trial"):
if self.upload_checkpoints and trial.checkpoint:
checkpoint_root = None
if isinstance(trial.checkpoint.filesystem, pyarrow.fs.LocalFileSystem):
checkpoint_root = trial.checkpoint.path
if checkpoint_root:
self._trial_queues[trial].put((_QueueItem.CHECKPOINT, checkpoint_root))
def log_trial_end(self, trial: "Trial", failed: bool = False):
self._signal_logging_actor_stop(trial=trial)
self._cleanup_logging_actors()
def _cleanup_logging_actor(self, trial: "Trial"):
del self._trial_queues[trial]
del self._trial_logging_futures[trial]
ray.kill(self._trial_logging_actors[trial])
del self._trial_logging_actors[trial]
def _cleanup_logging_actors(self, timeout: int = 0, kill_on_timeout: bool = False):
"""Clean up logging actors that have finished uploading to wandb.
Waits for `timeout` seconds to collect finished logging actors.
Args:
timeout: The number of seconds to wait. Defaults to 0 to clean up
any immediate logging actors during the run.
This is set to a timeout threshold to wait for pending uploads
on experiment end.
kill_on_timeout: Whether or not to kill and cleanup the logging actor if
it hasn't finished within the timeout.
"""
futures = list(self._trial_logging_futures.values())
done, remaining = ray.wait(futures, num_returns=len(futures), timeout=timeout)
for ready_future in done:
finished_trial = self._logging_future_to_trial.pop(ready_future)
self._cleanup_logging_actor(finished_trial)
if kill_on_timeout:
for remaining_future in remaining:
trial = self._logging_future_to_trial.pop(remaining_future)
self._cleanup_logging_actor(trial)
def on_experiment_end(self, trials: List["Trial"], **info):
"""Wait for the actors to finish their call to `wandb.finish`.
This includes uploading all logs + artifacts to wandb."""
self._cleanup_logging_actors(timeout=self._upload_timeout, kill_on_timeout=True)
def __del__(self):
if ray.is_initialized():
for trial in list(self._trial_logging_actors):
self._signal_logging_actor_stop(trial=trial)
self._cleanup_logging_actors(timeout=2, kill_on_timeout=True)
self._trial_logging_actors = {}
self._trial_logging_futures = {}
self._logging_future_to_trial = {}
self._trial_queues = {}
|
WandbLoggerCallback
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/auth/middlewares/refresh_token.py
|
{
"start": 1293,
"end": 3099
}
|
class ____(BaseHTTPMiddleware):
"""
Middleware to handle JWT token refresh.
This middleware:
1. Extracts JWT token from cookies and build the user from the token
2. Calls ``refresh_user`` method from auth manager with the user
3. If ``refresh_user`` returns a user, generate a JWT token based upon this user and send it in the
response as cookie
"""
async def dispatch(self, request: Request, call_next):
new_user = None
current_token = request.cookies.get(COOKIE_NAME_JWT_TOKEN)
try:
if current_token:
new_user = await self._refresh_user(current_token)
if new_user:
request.state.user = new_user
response = await call_next(request)
if new_user:
# If we created a new user, serialize it and set it as a cookie
new_token = get_auth_manager().generate_jwt(new_user)
secure = bool(conf.get("api", "ssl_cert", fallback=""))
response.set_cookie(
COOKIE_NAME_JWT_TOKEN,
new_token,
httponly=True,
secure=secure,
samesite="lax",
)
except HTTPException as exc:
# If any HTTPException is raised during user resolution or refresh, return it as response
return JSONResponse(status_code=exc.status_code, content={"detail": exc.detail})
return response
@staticmethod
async def _refresh_user(current_token: str) -> BaseUser | None:
try:
user = await resolve_user_from_token(current_token)
except HTTPException:
return None
return get_auth_manager().refresh_user(user=user)
|
JWTRefreshMiddleware
|
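For context, a BaseHTTPMiddleware subclass like the one above is normally attached to the application with Starlette's `add_middleware`; the sketch below shows that wiring. The `FastAPI()` app object is a stand-in for Airflow's real application factory, and the import path is inferred from the file path shown for this record.

from fastapi import FastAPI

from airflow.api_fastapi.auth.middlewares.refresh_token import JWTRefreshMiddleware

app = FastAPI()  # stand-in for the real Airflow API app
app.add_middleware(JWTRefreshMiddleware)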
python
|
joke2k__faker
|
faker/providers/person/es_CL/__init__.py
|
{
"start": 141,
"end": 58496
}
|
class ____(PersonProvider):
formats_male = OrderedDict(
[
("{{given_name_male}} {{last_name}} {{last_name}}", 0.55),
("{{first_name_male}} {{last_name}} {{last_name}}", 0.25),
("{{first_name_male}} {{last_name}}", 0.17),
("{{given_name_male}} {{last_name}}-{{last_name}} {{last_name}}", 0.01),
("{{first_name_male}} {{last_name}}-{{last_name}} {{last_name}}", 0.01),
("{{first_name_male}} {{last_name}}-{{last_name}}", 0.01),
]
)
formats_female = OrderedDict(
[
("{{given_name_female}} {{last_name}} {{last_name}}", 0.55),
("{{first_name_female}} {{last_name}} {{last_name}}", 0.25),
("{{first_name_female}} {{last_name}}", 0.17),
("{{given_name_female}} {{last_name}}-{{last_name}} {{last_name}}", 0.01),
("{{first_name_female}} {{last_name}}-{{last_name}} {{last_name}}", 0.01),
("{{first_name_female}} {{last_name}}-{{last_name}}", 0.01),
]
)
formats = OrderedDict(
[
("formats_male", 0.48),
("formats_female", 0.52),
]
)
# Sources for names data:
# Servicio de Registro Civil e Identificación
# Inquiry under Law of Transparency #AK002T0020771 for names and last names
# https://docs.google.com/spreadsheets/d/1yJ2wVnlttoBaCMS-xWyw7fbUqe6xdYpg/edit?usp=sharing&ouid=105306283136031380407
# Data was truncated to 500 items for each category
# 500 male first names, weighted
first_names_male: Dict[str, float] = OrderedDict(
[
("José", 0.05357441),
("Juan", 0.05188295),
("Luis", 0.04369026),
("Carlos", 0.02752294),
("Jorge", 0.02148181),
("Manuel", 0.01846196),
("Francisco", 0.01755076),
("Víctor", 0.01596373),
("Cristian", 0.01564751),
("Pedro", 0.01471588),
("Sebastián", 0.01369752),
("Matías", 0.01313522),
("Felipe", 0.01255329),
("Benjamín", 0.01251180),
("Miguel", 0.01246343),
("Diego", 0.01215592),
("Rodrigo", 0.01212607),
("Héctor", 0.01203257),
("Sergio", 0.01171687),
("Daniel", 0.01127892),
("Eduardo", 0.01096382),
("Pablo", 0.01053620),
("Patricio", 0.01010251),
("Claudio", 0.00996611),
("Ricardo", 0.00938327),
("Vicente", 0.00932498),
("Fernando", 0.00900068),
("Mario", 0.00887763),
("Alejandro", 0.00886620),
("Javier", 0.00854411),
("Oscar", 0.00834245),
("Jaime", 0.00819461),
("Roberto", 0.00812813),
("Mauricio", 0.00789297),
("Martín", 0.00782052),
("Joaquín", 0.00753223),
("David", 0.00750623),
("Gabriel", 0.00734777),
("Marcelo", 0.00727643),
("Ignacio", 0.00682999),
("Raúl", 0.00676584),
("Guillermo", 0.00675521),
("Cristóbal", 0.00644608),
("Julio", 0.00639650),
("Tomás", 0.00638588),
("Marco", 0.00621497),
("Andrés", 0.00616670),
("Maximiliano", 0.00584847),
("César", 0.00522869),
("Hugo", 0.00493089),
("Bastián", 0.00487160),
("Nelson", 0.00476677),
("Gonzalo", 0.00475513),
("Lucas", 0.00456965),
("Mateo", 0.00453424),
("Iván", 0.00451005),
("Álvaro", 0.00443902),
("Fabián", 0.00422673),
("Jonathan", 0.00415701),
("Esteban", 0.00414517),
("Hernán", 0.00410914),
("Alonso", 0.00409913),
("Ángel", 0.00405187),
("Leonardo", 0.00399470),
("Gustavo", 0.00399227),
("Ramón", 0.00398701),
("Santiago", 0.00396991),
("Rafael", 0.00378140),
("Enrique", 0.00360958),
("Camilo", 0.00356728),
("Alex", 0.00356607),
("Alexis", 0.00356172),
("Antonio", 0.00353500),
("Christian", 0.00353399),
("Franco", 0.00352286),
("René", 0.00335560),
("Rubén", 0.00330075),
("Alberto", 0.00309433),
("Samuel", 0.00303513),
("Emilio", 0.00299425),
("Marcos", 0.00285198),
("Renato", 0.00282477),
("Máximo", 0.00270364),
("Luciano", 0.00268897),
("Alfredo", 0.00264688),
("Jean", 0.00260822),
("Arturo", 0.00260529),
("Osvaldo", 0.00252191),
("Germán", 0.00252150),
("Kevin", 0.00250956),
("Gaspar", 0.00245138),
("Segundo", 0.00244025),
("Bruno", 0.00235060),
("Ernesto", 0.00232257),
("Elías", 0.00225062),
("Domingo", 0.00223383),
("Rodolfo", 0.00223342),
("Humberto", 0.00222290),
("Ariel", 0.00221673),
("Julián", 0.00219548),
("Gerardo", 0.00219072),
("Alexander", 0.00217655),
("Richard", 0.00216310),
("Omar", 0.00213557),
("Cristopher", 0.00212606),
("Alfonso", 0.00196720),
("Simón", 0.00195900),
("Moisés", 0.00194736),
("Bernardo", 0.00194210),
("Orlando", 0.00188382),
("John", 0.00173183),
("Jesús", 0.00171827),
("Michael", 0.00170411),
("Emiliano", 0.00156255),
("Damián", 0.00155516),
("Rolando", 0.00154747),
("Armando", 0.00154524),
("Alan", 0.00153917),
("Angelo", 0.00147067),
("Ismael", 0.00143454),
("Danilo", 0.00142665),
("Isaac", 0.00140581),
("Leandro", 0.00140439),
("Joel", 0.00140378),
("Dante", 0.00139913),
("Adolfo", 0.00137201),
("Amaro", 0.00136118),
("Félix", 0.00135673),
("Ian", 0.00134115),
("Lorenzo", 0.00133740),
("Abraham", 0.00133123),
("Bryan", 0.00132516),
("Thomas", 0.00131969),
("Christopher", 0.00128317),
("Facundo", 0.00127446),
("Erick", 0.00125453),
("Freddy", 0.00125392),
("Leonel", 0.00123996),
("Walter", 0.00121962),
("Eugenio", 0.00120859),
("Wilson", 0.00119958),
("Aníbal", 0.00119230),
("Nicolás", 0.00119088),
("León", 0.00117166),
("Salvador", 0.00116255),
("Edgardo", 0.00115972),
("Boris", 0.00114120),
("Adrián", 0.00112126),
("Robinson", 0.00112066),
("Brayan", 0.00108676),
("William", 0.00108544),
("Reinaldo", 0.00106288),
("Jesus", 0.00106187),
("Isaías", 0.00104578),
("Dylan", 0.00103870),
("Aldo", 0.00102959),
("Gastón", 0.00101087),
("Benjamin", 0.00100581),
("Eric", 0.00100409),
("Ronald", 0.00098709),
("Aarón", 0.00098254),
("Paulo", 0.00097039),
("Octavio", 0.00092577),
("Mariano", 0.00092243),
("Erwin", 0.00091636),
("Hans", 0.00090816),
("Enzo", 0.00090715),
("Abel", 0.00089723),
("Valentín", 0.00089400),
("Guido", 0.00089126),
("Augusto", 0.00086516),
("Heriberto", 0.00084694),
("Axel", 0.00084563),
("Cristofer", 0.00084350),
("Jordán", 0.00083177),
("Darío", 0.00082074),
("Israel", 0.00081760),
("Clemente", 0.00081163),
("Giovanni", 0.00079473),
("Johan", 0.00078937),
("Josue", 0.00078927),
("Jhon", 0.00078643),
("Rigoberto", 0.00077662),
("Néstor", 0.00076640),
("Edgar", 0.00075314),
("Yerko", 0.00074808),
("Robert", 0.00074596),
("Exequiel", 0.00074444),
("Waldo", 0.00073958),
("Brian", 0.00073260),
("Lukas", 0.00072683),
("Henry", 0.00069354),
("Brandon", 0.00069243),
("Fredy", 0.00068656),
("Williams", 0.00067968),
("Paul", 0.00067907),
("Cesar", 0.00067047),
("Gregorio", 0.00066066),
("Jairo", 0.00065823),
("Raimundo", 0.00063212),
("Liam", 0.00062231),
("Mathias", 0.00062008),
("Martin", 0.00061401),
("Jimmy", 0.00060774),
("Gilberto", 0.00060763),
("Federico", 0.00060237),
("Dagoberto", 0.00059782),
("Max", 0.00058922),
("Wladimir", 0.00058851),
("Milton", 0.00058001),
("Braulio", 0.00057586),
("Michel", 0.00057566),
("Edwin", 0.00057424),
("Edison", 0.00056089),
("Fidel", 0.00055360),
("Jeremy", 0.00055147),
("Benito", 0.00054975),
("Efraín", 0.00054814),
("Horacio", 0.00054743),
("Erik", 0.00054358),
("Mauro", 0.00054085),
("Ramiro", 0.00053164),
("Leopoldo", 0.00052931),
("Ítalo", 0.00052830),
("Joseph", 0.00051272),
("Pascual", 0.00051120),
("Marcelino", 0.00050877),
("Eliseo", 0.00050705),
("Byron", 0.00049845),
("Santino", 0.00049653),
("Oliver", 0.00049056),
("Gael", 0.00048894),
("Darwin", 0.00048074),
("Misael", 0.00047933),
("Adán", 0.00047781),
("Baltazar", 0.00047528),
("Edmundo", 0.00047326),
("Bayron", 0.00046840),
("Anthony", 0.00046759),
("Emanuel", 0.00046374),
("Paolo", 0.00046101),
("Arnoldo", 0.00045919),
("Emmanuel", 0.00045727),
("Ulises", 0.00044978),
("Dilan", 0.00044523),
("Rogelio", 0.00044442),
("Nibaldo", 0.00043531),
("Cristhian", 0.00043147),
("Jeremías", 0.00042732),
("Johnny", 0.00042469),
("Sandro", 0.00042297),
("Thiago", 0.00042256),
("Flavio", 0.00042206),
("Elvis", 0.00041882),
("James", 0.00041700),
("Jacob", 0.00041528),
("Vladimir", 0.00040576),
("Marcial", 0.00040222),
("Herman", 0.00039838),
("Aurelio", 0.00039342),
("Arnaldo", 0.00038532),
("Saúl", 0.00038330),
("Edward", 0.00038269),
("Franklin", 0.00037359),
("Santos", 0.00036913),
("Florencio", 0.00036579),
("Erasmo", 0.00036013),
("Roger", 0.00035446),
("Cristobal", 0.00035426),
("Juvenal", 0.00035315),
("Fermín", 0.00034819),
("Joshua", 0.00034697),
("Frank", 0.00034627),
("Ezequiel", 0.00034596),
("Benedicto", 0.00034535),
("Gerald", 0.00034455),
("Lautaro", 0.00033979),
("Wilfredo", 0.00033949),
("Abelardo", 0.00033797),
("Gerson", 0.00033665),
("Joan", 0.00033341),
("Leónidas", 0.00033271),
("Patrick", 0.00033038),
("Matteo", 0.00032916),
("Ruperto", 0.00032765),
("Emerson", 0.00032016),
("Danny", 0.00031773),
("Nolberto", 0.00031712),
("Gino", 0.00031611),
("Amador", 0.00031571),
("Bernardino", 0.00031378),
("Andy", 0.00031125),
("Demian", 0.00031055),
("Eladio", 0.00030994),
("Piero", 0.00030559),
("Yonathan", 0.00029274),
("Agustin", 0.00028990),
("Peter", 0.00028828),
("Tomas", 0.00028798),
("Borja", 0.00028748),
("Jonatan", 0.00028748),
("Jhonny", 0.00028059),
("Nicanor", 0.00028039),
("Genaro", 0.00028009),
("Jason", 0.00027948),
("Celso", 0.00027857),
("Sixto", 0.00027756),
("Eleodoro", 0.00027645),
("Evaristo", 0.00027604),
("Teodoro", 0.00027594),
("Maicol", 0.00027554),
("Washington", 0.00027493),
("Aquiles", 0.00027260),
("Román", 0.00026876),
("Rosendo", 0.00026532),
("Aliro", 0.00026461),
("Rosamel", 0.00026349),
("Harold", 0.00026279),
("Justo", 0.00025843),
("Florentino", 0.00024690),
("Anselmo", 0.00024488),
("Hipólito", 0.00024467),
("Allan", 0.00024245),
("Edgard", 0.00024214),
("Eusebio", 0.00024184),
("Eliecer", 0.00023810),
("Jacinto", 0.00023698),
("Froilán", 0.00023678),
("Steven", 0.00023668),
("George", 0.00023526),
("Charles", 0.00023162),
("Belisario", 0.00023121),
("Valentino", 0.00023071),
("Pierre", 0.00022858),
("Fabio", 0.00022636),
("Junior", 0.00022605),
("Tito", 0.00022605),
("Salomón", 0.00022494),
("Clodomiro", 0.00022393),
("Gary", 0.00022312),
("Dionisio", 0.00022282),
("Alamiro", 0.00022150),
("Edson", 0.00021938),
("Renzo", 0.00021927),
("Denis", 0.00021887),
("Noah", 0.00021877),
("Anderson", 0.00021836),
("Amaru", 0.00021614),
("Edinson", 0.00021371),
("Delfín", 0.00021361),
("Bernabé", 0.00021098),
("Iker", 0.00020956),
("Matheo", 0.00020865),
("Belarmino", 0.00020845),
("Douglas", 0.00020511),
("Desiderio", 0.00020450),
("Alexi", 0.00020308),
("Isidro", 0.00020288),
("Ethan", 0.00020268),
("Elian", 0.00019964),
("Mirko", 0.00019772),
("Américo", 0.00019701),
("Demetrio", 0.00019600),
("Gumercindo", 0.00019408),
("Andrew", 0.00019327),
("Ciro", 0.00019286),
("Milán", 0.00019256),
("Stefano", 0.00019256),
("Remigio", 0.00019226),
("Thomás", 0.00019216),
("Leoncio", 0.00018973),
("Neftalí", 0.00018770),
("Wilmer", 0.00018760),
("Heraldo", 0.00018669),
("Josué", 0.00018608),
("Eleazar", 0.00018568),
("Ronny", 0.00018447),
("Justin", 0.00018366),
("Nahuel", 0.00018204),
("Yordan", 0.00018163),
("Jhonatan", 0.00018113),
("Tránsito", 0.00017991),
("Silvio", 0.00017870),
("Artemio", 0.00017688),
("Lucio", 0.00017637),
("Galvarino", 0.00017576),
("Narciso", 0.00017516),
("Eloy", 0.00017435),
("Aladino", 0.00017303),
("Wenceslao", 0.00017232),
("Nestor", 0.00017202),
("Feliciano", 0.00017182),
("Lisandro", 0.00017091),
("Yonatan", 0.00017081),
("Ramon", 0.00017040),
("Rudy", 0.00017040),
("Yeison", 0.00017000),
("Maikol", 0.00016939),
("Bairon", 0.00016868),
("Albert", 0.00016858),
("Avelino", 0.00016706),
("Jerson", 0.00016625),
("Herminio", 0.00016473),
("Andre", 0.00016362),
("Modesto", 0.00016352),
("Armin", 0.00016342),
("Cristián", 0.00016210),
("Atilio", 0.00016200),
("Custodio", 0.00016200),
("Dennis", 0.00016190),
("Gregory", 0.00016129),
("Jefferson", 0.00016099),
("Teófilo", 0.00016079),
("Lionel", 0.00015978),
("Willy", 0.00015978),
("Rómulo", 0.00015967),
("Carlo", 0.00015765),
("Igor", 0.00015664),
("Reynaldo", 0.00015563),
("Lino", 0.00015522),
("Basilio", 0.00015492),
("Marcel", 0.00015431),
("Blas", 0.00015381),
("Johann", 0.00015330),
("Eulogio", 0.00015310),
("Eleuterio", 0.00015209),
("Lian", 0.00015148),
("Isidoro", 0.00015117),
("Xavier", 0.00014986),
("Ivo", 0.00014976),
("Abdón", 0.00014935),
("Harry", 0.00014885),
("Alessandro", 0.00014753),
("Simon", 0.00014662),
("Arsenio", 0.00014601),
("Bladimir", 0.00014359),
("Jonas", 0.00014318),
("Cristhofer", 0.00014257),
("Joao", 0.00014237),
("Franz", 0.00014207),
("Jeison", 0.00014197),
("Milovan", 0.00014176),
("Floridor", 0.00014136),
("Jerónimo", 0.00013944),
("Tulio", 0.00013893),
("Jair", 0.00013782),
("Marlon", 0.00013772),
("Samir", 0.00013772),
("Onofre", 0.00013660),
("Percy", 0.00013509),
("Rony", 0.00013438),
("Yuri", 0.00013418),
("Jerman", 0.00013367),
("Giovanny", 0.00013286),
("Matthew", 0.00013205),
("Gian", 0.00013134),
("Jordan", 0.00013094),
("Abner", 0.00013013),
("Alain", 0.00012942),
("Ceferino", 0.00012912),
("Yohan", 0.00012912),
("Roque", 0.00012891),
("Eithan", 0.00012770),
("Paulino", 0.00012760),
("Rudecindo", 0.00012750),
("Mark", 0.00012740),
("Norman", 0.00012568),
("Fabrizio", 0.00012446),
("Norberto", 0.00012244),
("Kurt", 0.00012203),
("Gianfranco", 0.00012193),
("Johans", 0.00012102),
("Olegario", 0.00012041),
("Christofer", 0.00011981),
("Maykol", 0.00011839),
("Hermes", 0.00011829),
("Celestino", 0.00011788),
("Albino", 0.00011768),
("Fabricio", 0.00011738),
("Giancarlo", 0.00011738),
("Derek", 0.00011718),
("Iñaki", 0.00011687),
("Jan", 0.00011687),
("Zacarías", 0.00011596),
("Said", 0.00011586),
("Hardy", 0.00011566),
("Ronaldo", 0.00011556),
("Aron", 0.00011414),
("Eydan", 0.00011323),
("Elio", 0.00011313),
("Lenin", 0.00011262),
("Victoriano", 0.00011232),
("Jhoan", 0.00011110),
("Dany", 0.00011070),
("Eduard", 0.00011040),
("Gerónimo", 0.00010989),
("Cipriano", 0.00010979),
("Victorino", 0.00010908),
("Cornelio", 0.00010807),
("Anyelo", 0.00010797),
]
)
# 500 female first names, weighted
first_names_female: Dict[str, float] = OrderedDict(
[
("María", 0.09500510),
("Ana", 0.02063161),
("Rosa", 0.01863127),
("Claudia", 0.01307437),
("Carolina", 0.01284289),
("Camila", 0.01283978),
("Patricia", 0.01267301),
("Catalina", 0.01188959),
("Javiera", 0.01138562),
("Sofía", 0.01127980),
("Daniela", 0.01091069),
("Constanza", 0.01049726),
("Francisca", 0.01047776),
("Valentina", 0.01038257),
("Carmen", 0.00923868),
("Margarita", 0.00852030),
("Juana", 0.00831674),
("Sandra", 0.00805135),
("Marcela", 0.00804935),
("Fernanda", 0.00779061),
("Elizabeth", 0.00749475),
("Verónica", 0.00723435),
("Martina", 0.00696652),
("Isidora", 0.00684806),
("Alejandra", 0.00682778),
("Cecilia", 0.00669337),
("Antonia", 0.00647906),
("Emilia", 0.00646743),
("Paola", 0.00644926),
("Marta", 0.00641635),
("Mónica", 0.00632094),
("Andrea", 0.00620359),
("Paula", 0.00598596),
("Gloria", 0.00587238),
("Isabel", 0.00583215),
("Pamela", 0.00573874),
("Florencia", 0.00561851),
("Katherine", 0.00555291),
("Laura", 0.00550238),
("Paulina", 0.00547535),
("Teresa", 0.00543800),
("Natalia", 0.00532886),
("Silvia", 0.00527810),
("Jessica", 0.00525306),
("Gabriela", 0.00523566),
("Gladys", 0.00515411),
("Bárbara", 0.00513106),
("Josefa", 0.00509771),
("Alicia", 0.00499510),
("Antonella", 0.00498789),
("Nicole", 0.00473403),
("Victoria", 0.00468760),
("Anahí", 0.00467751),
("Carla", 0.00463840),
("Agustina", 0.00455208),
("Karen", 0.00454133),
("Jacqueline", 0.00452925),
("Sara", 0.00451917),
("Luz", 0.00446099),
("Nancy", 0.00444426),
("Lorena", 0.00440536),
("Viviana", 0.00438287),
("Sonia", 0.00437256),
("Ximena", 0.00432957),
("Olga", 0.00431705),
("Amanda", 0.00416989),
("Elena", 0.00416524),
("Maite", 0.00408014),
("Luisa", 0.00407449),
("Susana", 0.00390373),
("Blanca", 0.00381785),
("Karina", 0.00380766),
("Macarena", 0.00380378),
("Ruth", 0.00376111),
("Marisol", 0.00360221),
("Eliana", 0.00359900),
("Ángela", 0.00356044),
("Angélica", 0.00356022),
("Cristina", 0.00355102),
("Julia", 0.00347921),
("Trinidad", 0.00343445),
("Valeria", 0.00338414),
("Evelyn", 0.00333128),
("Isabella", 0.00325449),
("Norma", 0.00320319),
("Tamara", 0.00317216),
("Adriana", 0.00311011),
("Ingrid", 0.00307764),
("Lucía", 0.00300461),
("Fabiola", 0.00299597),
("Lidia", 0.00294179),
("Belén", 0.00293359),
("Magdalena", 0.00291375),
("Romina", 0.00289048),
("Ignacia", 0.00286256),
("Erika", 0.00278266),
("Rocío", 0.00277291),
("Miriam", 0.00270354),
("Edith", 0.00266919),
("Elsa", 0.00266343),
("Graciela", 0.00265867),
("Karla", 0.00263407),
("Julieta", 0.00261091),
("Irma", 0.00259816),
("Berta", 0.00258276),
("Raquel", 0.00255539),
("Inés", 0.00255317),
("Mercedes", 0.00253755),
("Hilda", 0.00251306),
("Maritza", 0.00246818),
("Mariana", 0.00246364),
("Beatriz", 0.00236591),
("Roxana", 0.00232612),
("Vanessa", 0.00232081),
("Josefina", 0.00229687),
("Emma", 0.00227183),
("Renata", 0.00225942),
("Yolanda", 0.00224435),
("Clara", 0.00222451),
("Pía", 0.00218019),
("Flor", 0.00215260),
("Mariela", 0.00212600),
("Myriam", 0.00203758),
("Yasna", 0.00200090),
("Marcia", 0.00199669),
("Elisa", 0.00198904),
("Paz", 0.00194017),
("Emily", 0.00193962),
("Nelly", 0.00192488),
("Monserrat", 0.00192222),
("Leonor", 0.00191879),
("Jeannette", 0.00191757),
("Jocelyn", 0.00191502),
("Ema", 0.00191380),
("Soledad", 0.00191236),
("Elba", 0.00189751),
("Anaís", 0.00184055),
("Violeta", 0.00179800),
("Iris", 0.00178692),
("Génesis", 0.00177296),
("Fresia", 0.00176886),
("Diana", 0.00176775),
("Matilde", 0.00176520),
("Liliana", 0.00176066),
("Alexandra", 0.00174559),
("Jennifer", 0.00173451),
("Solange", 0.00170714),
("Aurora", 0.00170326),
("Loreto", 0.00169617),
("Amelia", 0.00168398),
("Johanna", 0.00166415),
("Mia", 0.00161240),
("Bernardita", 0.00160320),
("Denisse", 0.00159733),
("Rosario", 0.00159101),
("Amalia", 0.00158392),
("Eva", 0.00156874),
("Ester", 0.00154159),
("Nataly", 0.00152530),
("Ivonne", 0.00149826),
("Nora", 0.00149317),
("Lilian", 0.00149294),
("Irene", 0.00147322),
("Marina", 0.00147156),
("Valeska", 0.00145039),
("Maribel", 0.00143433),
("Sylvia", 0.00141926),
("Millaray", 0.00139299),
("Michelle", 0.00138103),
("Bernarda", 0.00137715),
("Pilar", 0.00135809),
("Virginia", 0.00135443),
("Marianela", 0.00133482),
("Noemí", 0.00131133),
("Aída", 0.00130257),
("Tania", 0.00129448),
("Eugenia", 0.00129304),
("Doris", 0.00129249),
("Catherine", 0.00129072),
("Consuelo", 0.00128385),
("Estefanía", 0.00128218),
("Matilda", 0.00128130),
("Dominga", 0.00128119),
("Judith", 0.00126933),
("Rebeca", 0.00126235),
("Carol", 0.00125082),
("Mirta", 0.00124949),
("Tatiana", 0.00120462),
("Amparo", 0.00119276),
("Cynthia", 0.00119165),
("Guillermina", 0.00118877),
("Olivia", 0.00118301),
("Rafaela", 0.00117791),
("Jenny", 0.00116251),
("Silvana", 0.00116007),
("Marjorie", 0.00114821),
("Paloma", 0.00114245),
("Magaly", 0.00113879),
("Marlene", 0.00113181),
("Mireya", 0.00113059),
("Krishna", 0.00110544),
("Nicol", 0.00110045),
("Leslie", 0.00109081),
("Yesenia", 0.00108915),
("Ámbar", 0.00107386),
("Elvira", 0.00106732),
("Georgina", 0.00106178),
("Leticia", 0.00106145),
("Jimena", 0.00103064),
("Noelia", 0.00102544),
("Adela", 0.00100870),
("Dominique", 0.00100760),
("Colomba", 0.00100649),
("Nadia", 0.00098277),
("Pascal", 0.00095119),
("Stephanie", 0.00094787),
("Erica", 0.00094111),
("Luciana", 0.00092726),
("Yessica", 0.00092682),
("Johana", 0.00092405),
("Melissa", 0.00092050),
("Lissette", 0.00091972),
("Celia", 0.00090355),
("Alondra", 0.00090199),
("Priscila", 0.00090199),
("Abigail", 0.00089667),
("Mabel", 0.00089656),
("Rita", 0.00089158),
("Karin", 0.00089113),
("Angelina", 0.00088980),
("Lucila", 0.00088172),
("Geraldine", 0.00087795),
("Priscilla", 0.00087562),
("Delia", 0.00086022),
("Carola", 0.00085324),
("Mayra", 0.00084072),
("Danitza", 0.00083916),
("Rossana", 0.00083861),
("Samantha", 0.00083673),
("Filomena", 0.00082819),
("Brenda", 0.00082387),
("Jazmín", 0.00081756),
("Scarlett", 0.00081745),
("Damaris", 0.00081257),
("Esperanza", 0.00080792),
("Lucy", 0.00079429),
("Vania", 0.00079074),
("Oriana", 0.00077456),
("Zoila", 0.00076891),
("Yessenia", 0.00076381),
("Rayén", 0.00076282),
("Tiare", 0.00074564),
("Danae", 0.00074121),
("Dayana", 0.00073966),
("Katalina", 0.00073766),
("Sophia", 0.00072658),
("Thiare", 0.00072459),
("Francesca", 0.00072248),
("Manuela", 0.00072104),
("Fanny", 0.00071672),
("Anita", 0.00071594),
("Mary", 0.00070520),
("Joselyn", 0.00069655),
("Marie", 0.00069001),
("Vilma", 0.00068846),
("Eloísa", 0.00068026),
("Jeanette", 0.00067882),
("Hortensia", 0.00067749),
("Ernestina", 0.00067727),
("Alba", 0.00067428),
("Dina", 0.00066896),
("Haydée", 0.00066342),
("Lía", 0.00066187),
("Montserrat", 0.00065433),
("Debora", 0.00064480),
("Dafne", 0.00064414),
("Herminia", 0.00064104),
("Corina", 0.00062464),
("Giovanna", 0.00062397),
("Rosalía", 0.00062187),
("Yaritza", 0.00061965),
("Guadalupe", 0.00061522),
("Alison", 0.00060480),
("Celeste", 0.00060214),
("Aylin", 0.00059970),
("Carmela", 0.00058619),
("Cindy", 0.00058441),
("Susan", 0.00058064),
("Zunilda", 0.00058031),
("Mirtha", 0.00057943),
("Almendra", 0.00057920),
("Kimberly", 0.00057776),
("Regina", 0.00057577),
("Martha", 0.00057444),
("Kiara", 0.00057355),
("Estela", 0.00056990),
("Maira", 0.00056923),
("Zulema", 0.00056868),
("Estrella", 0.00054895),
("Gisela", 0.00054873),
("Ida", 0.00054840),
("Pascuala", 0.00054541),
("Petronila", 0.00054053),
("Uberlinda", 0.00053998),
("Ayleen", 0.00053588),
("Allison", 0.00053111),
("Franchesca", 0.00053023),
("Mayte", 0.00052934),
("Aracely", 0.00052890),
("Gilda", 0.00052723),
("Pascale", 0.00052602),
("Clementina", 0.00052457),
("Luzmira", 0.00052336),
("Yenny", 0.00052302),
("Margot", 0.00051859),
("Natalie", 0.00051505),
("Mía", 0.00051482),
("Yenifer", 0.00051416),
("Bianca", 0.00050441),
("Cinthia", 0.00050341),
("Rafaella", 0.00050053),
("Maura", 0.00049898),
("Claudina", 0.00049599),
("Melanie", 0.00049222),
("Daisy", 0.00049100),
("Erna", 0.00048114),
("Sabina", 0.00047803),
("Scarlet", 0.00047205),
("Nathaly", 0.00046850),
("Mirna", 0.00046773),
("Nilda", 0.00046751),
("Lina", 0.00046673),
("Ada", 0.00046596),
("Makarena", 0.00045909),
("Astrid", 0.00045753),
("Gina", 0.00045720),
("Celinda", 0.00045676),
("Leontina", 0.00045388),
("Jenifer", 0.00045078),
("Marilyn", 0.00044834),
("Yohana", 0.00044701),
("Grace", 0.00044668),
("Ashley", 0.00044479),
("Janet", 0.00044479),
("Ninoska", 0.00044379),
("Anahis", 0.00044280),
("Teresita", 0.00044280),
("Adelina", 0.00044246),
("Elcira", 0.00044246),
("Pabla", 0.00044158),
("Maricel", 0.00044058),
("Elisabeth", 0.00043981),
("Jovita", 0.00043881),
("Caroline", 0.00043859),
("Nathalie", 0.00043792),
("Isolina", 0.00043061),
("Delfina", 0.00043016),
("Angie", 0.00042850),
("Fiorella", 0.00042130),
("Dora", 0.00041975),
("Giselle", 0.00041676),
("Yanet", 0.00041310),
("Yoselin", 0.00041299),
("Alice", 0.00041077),
("Edita", 0.00041044),
("Fabiana", 0.00041000),
("Nayareth", 0.00040933),
("Genoveva", 0.00040678),
("Helen", 0.00040590),
("Vivian", 0.00040390),
("Lucrecia", 0.00040246),
("Herminda", 0.00040213),
("Luna", 0.00040113),
("Scarleth", 0.00040113),
("Monica", 0.00040036),
("Marion", 0.00039969),
("Orfelina", 0.00039659),
("Digna", 0.00039426),
("Yasmín", 0.00039382),
("Marcelina", 0.00039127),
("Lisette", 0.00039061),
("Linda", 0.00038939),
("Katherinne", 0.00038928),
("Amy", 0.00038894),
("Nidia", 0.00038551),
("Ivette", 0.00038418),
("Yanira", 0.00038407),
("Milena", 0.00038096),
("Emelina", 0.00037897),
("Flora", 0.00037831),
("Estefany", 0.00037786),
("Esmeralda", 0.00037509),
("Francia", 0.00037487),
("Vanesa", 0.00036423),
("Araceli", 0.00036346),
("Edelmira", 0.00036335),
("Yanina", 0.00036324),
("Helena", 0.00036091),
("Darling", 0.00035936),
("Clorinda", 0.00035814),
("Betty", 0.00035747),
("Veronica", 0.00035747),
("Juliana", 0.00035603),
("Tabita", 0.00035348),
("Jeniffer", 0.00035171),
("Otilia", 0.00035094),
("Nieves", 0.00034938),
("Amaya", 0.00034916),
("Esther", 0.00034839),
("Leyla", 0.00034828),
("Maricela", 0.00034794),
("Alejandrina", 0.00034761),
("Jenniffer", 0.00034728),
("Rose", 0.00034584),
("Jacinta", 0.00034362),
("Albertina", 0.00033997),
("Lucinda", 0.00033808),
("Aurelia", 0.00033708),
("Juanita", 0.00033697),
("Rosalba", 0.00033664),
("Adelaida", 0.00033199),
("Denise", 0.00033154),
("Mery", 0.00033121),
("Alexia", 0.00033066),
("Enriqueta", 0.00032955),
("Katia", 0.00032933),
("Nélida", 0.00032922),
("Evelin", 0.00032722),
("Brígida", 0.00032645),
("Dolores", 0.00032545),
("Anna", 0.00032467),
("Florinda", 0.00032013),
("Gricelda", 0.00031836),
("América", 0.00031736),
("Doralisa", 0.00031703),
("Ramona", 0.00031603),
("Cinthya", 0.00031470),
("Gisselle", 0.00031381),
("Yesica", 0.00031381),
("Scarlette", 0.00031370),
("Úrsula", 0.00031326),
("Daniella", 0.00031248),
("Alma", 0.00031204),
("Clarisa", 0.00030916),
("Deyanira", 0.00030905),
("Amada", 0.00030872),
("Karol", 0.00030816),
("Kelly", 0.00030761),
("Leidy", 0.00030683),
("Yuliana", 0.00030650),
("Lourdes", 0.00030440),
("Flavia", 0.00030318),
("Natacha", 0.00030185),
("Lorenza", 0.00029830),
("Marisel", 0.00029819),
("Rocio", 0.00029764),
("Clotilde", 0.00029675),
("Ariela", 0.00029664),
("Marisa", 0.00029631),
("Nayaret", 0.00029608),
("Soraya", 0.00029608),
("Antonieta", 0.00029431),
("Ruby", 0.00029110),
("Melany", 0.00029065),
("Magali", 0.00028977),
("Barbara", 0.00028777),
("Yamilet", 0.00028556),
("Anastasia", 0.00028511),
("Elia", 0.00028434),
("Lesly", 0.00028412),
("Deisy", 0.00028367),
("Milagros", 0.00028013),
("Jael", 0.00027924),
("Florentina", 0.00027880),
("Katerine", 0.00027791),
("Madeleine", 0.00027758),
("Ayelén", 0.00027658),
("Francis", 0.00027547),
("Wilma", 0.00027525),
("Mariluz", 0.00027492),
("Natali", 0.00027381),
("Nury", 0.00027359),
("Giuliana", 0.00027337),
("Gema", 0.00027315),
("Massiel", 0.00027293),
("Rachel", 0.00027270),
("Paulette", 0.00027248),
("Micaela", 0.00027137),
("Dania", 0.00026905),
("Natividad", 0.00026849),
("Yocelyn", 0.00026783),
("Yanara", 0.00026528),
("Katherin", 0.00026473),
("Sarah", 0.00026461),
("Melania", 0.00026439),
("Sarai", 0.00026384),
("Perla", 0.00026207),
("Sabrina", 0.00026118),
("Muriel", 0.00026007),
("Cintia", 0.00025985),
]
)
@property
def first_names(self): # type: ignore[override]
"""Returns a list of weighted first names, male and female."""
if not hasattr(self, "_first_names"):
self._first_names = OrderedDict()
for a, b in zip_longest(self.first_names_male.items(), self.first_names_female.items()):
if a is not None:
name, weight = a
self._first_names[name] = weight / 2
if b is not None:
name, weight = b
self._first_names[name] = weight / 2
return self._first_names
# 500 last names, weighted
last_names = OrderedDict(
[
("González", 0.02683604),
("Muñoz", 0.02047480),
("Rojas", 0.01508949),
("Díaz", 0.01491392),
("Pérez", 0.01227842),
("Soto", 0.01044305),
("Rodríguez", 0.00997861),
("Contreras", 0.00993588),
("Silva", 0.00932900),
("López", 0.00920382),
("Morales", 0.00901722),
("Sepúlveda", 0.00880392),
("Martínez", 0.00870346),
("Hernández", 0.00867623),
("Torres", 0.00844247),
("Flores", 0.00836659),
("Ramírez", 0.00809392),
("Fuentes", 0.00808812),
("Castillo", 0.00801363),
("Espinoza", 0.00788287),
("Araya", 0.00787643),
("Reyes", 0.00758987),
("Gutiérrez", 0.00753243),
("Valenzuela", 0.00751303),
("Castro", 0.00732126),
("Vargas", 0.00724265),
("Sánchez", 0.00722920),
("Vásquez", 0.00699836),
("Fernández", 0.00677539),
("Álvarez", 0.00659731),
("Gómez", 0.00658808),
("Tapia", 0.00631937),
("Herrera", 0.00623804),
("Cortés", 0.00613157),
("García", 0.00612128),
("Carrasco", 0.00605067),
("Núñez", 0.00597788),
("Jara", 0.00568990),
("Vergara", 0.00543105),
("Rivera", 0.00538544),
("Figueroa", 0.00513368),
("Riquelme", 0.00501507),
("Bravo", 0.00496506),
("Miranda", 0.00492273),
("Vera", 0.00488902),
("Molina", 0.00478491),
("Vega", 0.00463878),
("Sandoval", 0.00456813),
("Campos", 0.00453386),
("Ortiz", 0.00437677),
("Orellana", 0.00435350),
("Salazar", 0.00429255),
("Zúñiga", 0.00426568),
("Olivares", 0.00425670),
("Romero", 0.00414512),
("Gallardo", 0.00413093),
("Garrido", 0.00407209),
("Alarcón", 0.00407085),
("Guzmán", 0.00403413),
("Parra", 0.00390092),
("Saavedra", 0.00387443),
("Peña", 0.00387328),
("Aguilera", 0.00384177),
("Navarro", 0.00382743),
("Henríquez", 0.00381134),
("Cáceres", 0.00371244),
("Pizarro", 0.00370441),
("Godoy", 0.00367051),
("Aravena", 0.00365821),
("Jiménez", 0.00359039),
("Escobar", 0.00355175),
("Ruiz", 0.00353889),
("Leiva", 0.00348804),
("Medina", 0.00344091),
("Vidal", 0.00337984),
("Cárdenas", 0.00335514),
("Yáñez", 0.00334424),
("Salinas", 0.00333792),
("Valdés", 0.00333438),
("Moreno", 0.00325766),
("Lagos", 0.00318407),
("Maldonado", 0.00318255),
("Bustos", 0.00308706),
("Pino", 0.00302189),
("Carvajal", 0.00294762),
("Palma", 0.00294040),
("Alvarado", 0.00291871),
("Ortega", 0.00289513),
("Sanhueza", 0.00287199),
("Navarrete", 0.00286994),
("Guerrero", 0.00285879),
("Ramos", 0.00285476),
("Paredes", 0.00283341),
("Sáez", 0.00282436),
("Bustamante", 0.00280019),
("Toro", 0.00279548),
("Poblete", 0.00277637),
("Mora", 0.00274113),
("Donoso", 0.00272059),
("Velásquez", 0.00271278),
("Venegas", 0.00270150),
("Acuña", 0.00267882),
("Pinto", 0.00267108),
("Acevedo", 0.00266916),
("Toledo", 0.00262872),
("Quezada", 0.00261595),
("Farías", 0.00260009),
("Aguilar", 0.00259665),
("San Martín", 0.00259182),
("Arriagada", 0.00259178),
("Rivas", 0.00255249),
("Cerda", 0.00253610),
("Salas", 0.00250877),
("Cornejo", 0.00250865),
("Arias", 0.00247106),
("Cabrera", 0.00245006),
("Durán", 0.00244504),
("Hidalgo", 0.00242676),
("Arancibia", 0.00242276),
("Marín", 0.00240593),
("Méndez", 0.00239469),
("Troncoso", 0.00234412),
("Osorio", 0.00234024),
("Ulloa", 0.00232537),
("Inostroza", 0.00231406),
("Villarroel", 0.00231381),
("Delgado", 0.00228236),
("Cuevas", 0.00227765),
("Ríos", 0.00226799),
("Pacheco", 0.00225965),
("Calderón", 0.00225919),
("Lara", 0.00224862),
("Ojeda", 0.00223799),
("León", 0.00220174),
("Correa", 0.00219774),
("Villalobos", 0.00215563),
("Ponce", 0.00212502),
("Barrera", 0.00209673),
("Burgos", 0.00209540),
("Chávez", 0.00209403),
("Cifuentes", 0.00208313),
("Catalán", 0.00208213),
("Moya", 0.00206590),
("Concha", 0.00201908),
("Ávila", 0.00200483),
("Zapata", 0.00199565),
("Guerra", 0.00197511),
("Salgado", 0.00195438),
("Barría", 0.00193901),
("Alfaro", 0.00191432),
("Gajardo", 0.00189681),
("Uribe", 0.00188327),
("Meza", 0.00185182),
("Astudillo", 0.00183289),
("Aguirre", 0.00182031),
("Cruz", 0.00181786),
("Becerra", 0.00180856),
("Retamal", 0.00180751),
("Mendoza", 0.00179192),
("Neira", 0.00178706),
("Pereira", 0.00178309),
("Ahumada", 0.00176419),
("Villegas", 0.00175511),
("Valdebenito", 0.00173854),
("Pavez", 0.00173026),
("Barrientos", 0.00170380),
("Jorquera", 0.00169141),
("Moraga", 0.00168413),
("Cárcamo", 0.00167957),
("Valencia", 0.00167161),
("Gálvez", 0.00166746),
("Lobos", 0.00166690),
("Barraza", 0.00165862),
("Canales", 0.00165701),
("Guajardo", 0.00165624),
("Araneda", 0.00164477),
("Mansilla", 0.00162051),
("Urrutia", 0.00160508),
("Mancilla", 0.00159963),
("Abarca", 0.00159944),
("Andrade", 0.00158767),
("Quiroz", 0.00158624),
("Valdivia", 0.00158485),
("Ibarra", 0.00158271),
("Mella", 0.00157726),
("Gatica", 0.00157255),
("Leal", 0.00156976),
("Cid", 0.00154797),
("Mardones", 0.00152328),
("Riveros", 0.00152269),
("Albornoz", 0.00151925),
("Cisternas", 0.00151761),
("Vallejos", 0.00151693),
("Solís", 0.00150807),
("Baeza", 0.00150525),
("Gaete", 0.00147643),
("Fuentealba", 0.00147544),
("Manríquez", 0.00147026),
("Córdova", 0.00146422),
("Rebolledo", 0.00145805),
("Caro", 0.00145344),
("Suárez", 0.00143779),
("Carrillo", 0.00142716),
("Carreño", 0.00140997),
("Cofré", 0.00140222),
("Oyarzún", 0.00140036),
("Varas", 0.00138394),
("Santibáñez", 0.00136064),
("Barra", 0.00136061),
("Márquez", 0.00135707),
("Fuenzalida", 0.00131692),
("Zamora", 0.00131596),
("Arenas", 0.00131267),
("Opazo", 0.00130920),
("Cabezas", 0.00130372),
("Pardo", 0.00127540),
("Vilches", 0.00126641),
("Santander", 0.00126170),
("Berríos", 0.00124955),
("Roa", 0.00124847),
("Véliz", 0.00123772),
("Arévalo", 0.00122129),
("Rubio", 0.00120847),
("Montecinos", 0.00120057),
("Robles", 0.00119641),
("Plaza", 0.00119366),
("Ibáñez", 0.00119093),
("Parada", 0.00117860),
("Meneses", 0.00117822),
("Briones", 0.00117429),
("Mena", 0.00117398),
("Huerta", 0.00116162),
("Román", 0.00115523),
("Zamorano", 0.00114932),
("Mamani", 0.00113704),
("Rosales", 0.00113646),
("Peralta", 0.00112319),
("Cancino", 0.00111678),
("Faúndez", 0.00111285),
("Maturana", 0.00111164),
("Beltrán", 0.00110835),
("Oyarzo", 0.00110764),
("Jaramillo", 0.00110631),
("Jofré", 0.00110141),
("Tobar", 0.00109837),
("Aguayo", 0.00109791),
("Palacios", 0.00109289),
("Avendaño", 0.00108908),
("Galaz", 0.00108412),
("Gallegos", 0.00107582),
("Urra", 0.00107492),
("Zambrano", 0.00106761),
("Ayala", 0.00106246),
("Cortez", 0.00105490),
("Santana", 0.00105177),
("Olguín", 0.00104610),
("Riffo", 0.00104121),
("Astorga", 0.00103681),
("Garcés", 0.00103603),
("Villanueva", 0.00103454),
("Hermosilla", 0.00102636),
("Marchant", 0.00102556),
("Arce", 0.00101592),
("Bastías", 0.00101118),
("Galleguillos", 0.00100511),
("Suazo", 0.00100378),
("Monsalve", 0.00099612),
("Rubilar", 0.00098757),
("Lillo", 0.00098546),
("Padilla", 0.00098472),
("Candia", 0.00098237),
("Quintana", 0.00098128),
("Almonacid", 0.00097657),
("Lizama", 0.00096650),
("Cabello", 0.00096566),
("Espinosa", 0.00096337),
("Duarte", 0.00095256),
("Osses", 0.00094444),
("Cartes", 0.00094150),
("Barrios", 0.00093806),
("Loyola", 0.00093697),
("Novoa", 0.00093524),
("Seguel", 0.00093452),
("Norambuena", 0.00093397),
("Mellado", 0.00093307),
("Serrano", 0.00092513),
("Leyton", 0.00091829),
("Carmona", 0.00091801),
("Montenegro", 0.00091004),
("Segovia", 0.00090726),
("Cea", 0.00088448),
("Benavides", 0.00088352),
("Hormazábal", 0.00088324),
("Verdugo", 0.00088157),
("Jerez", 0.00087726),
("Martinez", 0.00087525),
("Mondaca", 0.00087385),
("Segura", 0.00087376),
("Pastén", 0.00086416),
("Oliva", 0.00085762),
("Cordero", 0.00085374),
("Aranda", 0.00084897),
("Céspedes", 0.00084814),
("Urbina", 0.00084485),
("Briceño", 0.00084439),
("Luna", 0.00083924),
("Matus", 0.00083599),
("Cisterna", 0.00083484),
("Varela", 0.00083373),
("Echeverría", 0.00083342),
("Aedo", 0.00082765),
("Bahamondes", 0.00082669),
("Altamirano", 0.00082598),
("Merino", 0.00082487),
("Arellano", 0.00082462),
("Matamala", 0.00082121),
("Elgueta", 0.00081083),
("Hurtado", 0.00081043),
("Brito", 0.00080209),
("Barahona", 0.00079001),
("Valderrama", 0.00078669),
("Madrid", 0.00078592),
("Estay", 0.00078471),
("Aburto", 0.00078080),
("Bórquez", 0.00077910),
("Acosta", 0.00077774),
("Órdenes", 0.00077433),
("Fierro", 0.00077414),
("Domínguez", 0.00077262),
("Lizana", 0.00076764),
("Villagra", 0.00076584),
("Alegría", 0.00076534),
("Maureira", 0.00075208),
("Urzúa", 0.00075118),
("Oyarce", 0.00074914),
("Trujillo", 0.00074390),
("Olave", 0.00074362),
("Ferrada", 0.00074062),
("Rosas", 0.00073020),
("Bugueño", 0.00072636),
("Vivanco", 0.00072540),
("Lorca", 0.00072113),
("Rozas", 0.00072075),
("Montero", 0.00072035),
("Águila", 0.00071803),
("Montoya", 0.00071493),
("Zepeda", 0.00071261),
("Vicencio", 0.00071137),
("Garay", 0.00069454),
("Gamboa", 0.00069389),
("Lazo", 0.00069274),
("Aliaga", 0.00069215),
("Villagrán", 0.00068574),
("Aros", 0.00068193),
("Aránguiz", 0.00068044),
("Baez", 0.00067759),
("Pozo", 0.00067759),
("Belmar", 0.00067734),
("Casanova", 0.00066929),
("Bernal", 0.00066644),
("Machuca", 0.00066572),
("Escalona", 0.00066507),
("Ávalos", 0.00066461),
("Quinteros", 0.00066039),
("Collao", 0.00065640),
("Letelier", 0.00064540),
("Quispe", 0.00064078),
("Marambio", 0.00063951),
("Mejías", 0.00063561),
("Saldivia", 0.00063496),
("Armijo", 0.00063393),
("Orrego", 0.00063127),
("Piña", 0.00062780),
("Chacón", 0.00062674),
("Bello", 0.00062597),
("Rocha", 0.00062355),
("Pinilla", 0.00062318),
("Parraguez", 0.00061441),
("Oñate", 0.00060908),
("Iturra", 0.00060459),
("Arredondo", 0.00060270),
("Fredes", 0.00060217),
("Jaque", 0.00059945),
("Blanco", 0.00059935),
("Chamorro", 0.00059864),
("Quiroga", 0.00059483),
("Chandía", 0.00059424),
("Ceballos", 0.00059158),
("Saldías", 0.00059148),
("Barros", 0.00058888),
("Llanos", 0.00058866),
("Benítez", 0.00058522),
("Peñaloza", 0.00058491),
("Páez", 0.00058426),
("Pulgar", 0.00058302),
("Melo", 0.00058290),
("Ruz", 0.00057822),
("Medel", 0.00057689),
("Ampuero", 0.00057673),
("Avilés", 0.00057590),
("Pincheira", 0.00057351),
("Bascuñán", 0.00057302),
("Azócar", 0.00057168),
("Villa", 0.00057078),
("Tello", 0.00057047),
("Luengo", 0.00056787),
("Ovalle", 0.00056645),
("Madariaga", 0.00056164),
("Celis", 0.00056130),
("Cubillos", 0.00055932),
("Prado", 0.00055635),
("Angulo", 0.00055579),
("Estrada", 0.00055418),
("Arroyo", 0.00055303),
("Mercado", 0.00054947),
("Castañeda", 0.00054829),
("Barriga", 0.00054575),
("Lucero", 0.00054559),
("Valladares", 0.00054274),
("Coronado", 0.00053983),
("Pineda", 0.00053896),
("Rojo", 0.00053760),
("Ibacache", 0.00053747),
("Quijada", 0.00053639),
("Bahamonde", 0.00052744),
("Zurita", 0.00052424),
("Salamanca", 0.00051517),
("Galdames", 0.00051507),
("Ferreira", 0.00051433),
("Santos", 0.00051231),
("Labra", 0.00051173),
("Naranjo", 0.00051021),
("Badilla", 0.00051011),
("Veloso", 0.00050866),
("Prieto", 0.00050785),
("Villar", 0.00050785),
("Ormeño", 0.00050776),
("Ossandón", 0.00050754),
("Lira", 0.00050624),
("Bobadilla", 0.00050571),
("Apablaza", 0.00050395),
("Cepeda", 0.00050252),
("Paz", 0.00050252),
("Sierra", 0.00049617),
("Esparza", 0.00049574),
("Zavala", 0.00049530),
("Quintanilla", 0.00049459),
("Veas", 0.00049134),
("Sobarzo", 0.00048920),
("Videla", 0.00048811),
("Fonseca", 0.00047584),
("Toloza", 0.00047113),
("Agüero", 0.00046766),
("Olmos", 0.00046568),
("Arteaga", 0.00046562),
("Allende", 0.00046472),
("Montecino", 0.00046395),
("Quiñones", 0.00045976),
("Agurto", 0.00045958),
("Zárate", 0.00045933),
("Villablanca", 0.00045911),
("Guevara", 0.00045679),
("Solar", 0.00045577),
("Cruces", 0.00045391),
("Retamales", 0.00045140),
("Alvarez", 0.00045037),
("Astete", 0.00044954),
("De La Fuente", 0.00044650),
("Aracena", 0.00043996),
("Alvear", 0.00043910),
("Millán", 0.00043160),
("Zenteno", 0.00043135),
("Erices", 0.00043101),
("Meléndez", 0.00043064),
("Carrera", 0.00042884),
("Olea", 0.00042800),
("Cavieres", 0.00042779),
("Moncada", 0.00042583),
("Cares", 0.00042565),
("Vejar", 0.00042546),
("Arcos", 0.00042432),
("Montes", 0.00042150),
("Encina", 0.00041985),
("Fica", 0.00041784),
("Inzunza", 0.00041641),
("Droguett", 0.00041195),
("Caballero", 0.00041127),
("Lazcano", 0.00040950),
("Bruna", 0.00040805),
("Olmedo", 0.00040802),
("Corvalán", 0.00040634),
("Morán", 0.00040365),
("Olate", 0.00040114),
("Allendes", 0.00039928),
("Saldaña", 0.00039903),
("Viveros", 0.00039723),
("Moyano", 0.00039609),
("Choque", 0.00039550),
("Dinamarca", 0.00039107),
("Adasme", 0.00039098),
]
)
prefixes_male = ("Sr.", "Dr.", "Don")
prefixes_female = ("Srta.", "Sra.", "Dra.", "Doña")
def name(self) -> str:
# Select format, then generate name
format: str = self.random_element(self.formats)
pattern: str = self.random_element(getattr(self, format))
return self.generator.parse(pattern)
def given_name(self) -> str:
"""Generates a composite given name with two unique names"""
if self.random_int(0, 1) == 1:
source = self.first_names_female
else:
source = self.first_names_male
names = self.random_elements(source, length=2, unique=True) # type: ignore[var-annotated]
return " ".join(names)
def given_name_male(self) -> str:
"""Generates a composite male given name with two unique names"""
names = self.random_elements(self.first_names_male, length=2, unique=True) # type: ignore[var-annotated]
return " ".join(names)
def given_name_female(self) -> str:
"""Generates a composite female given name with two unique names"""
names = self.random_elements(self.first_names_female, length=2, unique=True) # type: ignore[var-annotated]
return " ".join(names)
|
Provider
|
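Aside: the weighted OrderedDicts above (names mapped to relative frequencies) are consumed by Faker's random_element/random_elements helpers, which sit outside this listing. A minimal sketch of the same idea using only the standard library, with a hypothetical three-name table standing in for the real ones:

import random
from collections import OrderedDict
# Hypothetical miniature table in the same shape as first_names_male above.
weighted_names = OrderedDict([("José", 0.6), ("Luis", 0.3), ("Juan", 0.1)])
def pick_weighted(table):
    # random.choices takes parallel sequences of values and weights;
    # the weights only need to be relative, they do not have to sum to 1.
    return random.choices(list(table), weights=list(table.values()), k=1)[0]
print(pick_weighted(weighted_names))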
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/grpc.py
|
{
"start": 19386,
"end": 19466
}
|
class ____(_QueryReference):
target_collection: str
|
_QueryReferenceMultiTarget
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/envs/phys2d/cartpole.py
|
{
"start": 8796,
"end": 9890
}
|
class ____(FunctionalJaxVectorEnv, EzPickle):
"""Jax-based implementation of the vectorized CartPole environment."""
metadata = {
"render_modes": ["rgb_array"],
"render_fps": 50,
"jax": True,
"autoreset_mode": AutoresetMode.NEXT_STEP,
}
def __init__(
self,
num_envs: int,
render_mode: str | None = None,
max_episode_steps: int = 200,
**kwargs: Any,
):
"""Constructor for the vectorized CartPole where the kwargs are applied to the functional environment."""
EzPickle.__init__(
self,
num_envs=num_envs,
render_mode=render_mode,
max_episode_steps=max_episode_steps,
**kwargs,
)
env = CartPoleFunctional(**kwargs)
env.transform(jax.jit)
FunctionalJaxVectorEnv.__init__(
self,
func_env=env,
num_envs=num_envs,
metadata=self.metadata,
render_mode=render_mode,
max_episode_steps=max_episode_steps,
)
|
CartPoleJaxVectorEnv
|
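A minimal interaction sketch for the vectorized environment above, assuming jax, gymnasium, and the functional CartPole dependencies are installed; the reset/step calls follow the standard Gymnasium vector API, and the seed and rollout length are arbitrary:

env = CartPoleJaxVectorEnv(num_envs=4, max_episode_steps=200)
obs, info = env.reset(seed=0)
for _ in range(10):
    actions = env.action_space.sample()  # one discrete action per sub-environment
    obs, reward, terminated, truncated, info = env.step(actions)
env.close()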
python
|
numba__numba
|
numba/core/types/misc.py
|
{
"start": 3455,
"end": 3825
}
|
class ____(Type):
"""
Pointer to a Numba "meminfo" (i.e. the information for a managed
piece of memory).
"""
mutable = True
def __init__(self, dtype):
self.dtype = dtype
name = "memory-managed *%s" % dtype
super(MemInfoPointer, self).__init__(name)
@property
def key(self):
return self.dtype
|
MemInfoPointer
|
python
|
walkccc__LeetCode
|
solutions/1659. Maximize Grid Happiness/1659.py
|
{
"start": 0,
"end": 2113
}
|
class ____:
def getMaxGridHappiness(
self,
m: int,
n: int,
introvertsCount: int,
extrovertsCount: int,
) -> int:
def getPlacementCost(
i: int,
j: int,
inMask: int,
exMask: int,
diff: int,
) -> int:
"""Calculates the cost based on left and up neighbors.
The `diff` parameter represents the happiness change due to the current
placed person in (i, j). We add `diff` each time we encounter a neighbor
(left or up) who is already placed.
1. If the neighbor is an introvert, we subtract 30 from cost.
      2. If the neighbor is an extrovert, we add 20 to cost.
"""
cost = 0
if i > 0:
if (1 << (n - 1)) & inMask:
cost += diff - 30
if (1 << (n - 1)) & exMask:
cost += diff + 20
if j > 0:
if 1 & inMask:
cost += diff - 30
if 1 & exMask:
cost += diff + 20
return cost
@functools.lru_cache(None)
def dp(
pos: int, inMask: int, exMask: int, inCount: int, exCount: int
) -> int:
# `inMask` is the placement of introvert people in the last n cells.
# e.g. if we have m = 2, n = 3, i = 1, j = 1, then inMask = 0b101 means
#
# ? 1 0
# 1 x ? (x := current position)
i, j = divmod(pos, n)
if i == m:
return 0
shiftedInMask = (inMask << 1) & ((1 << n) - 1)
shiftedExMask = (exMask << 1) & ((1 << n) - 1)
skip = dp(pos + 1, shiftedInMask, shiftedExMask, inCount, exCount)
placeIntrovert = (
120 + getPlacementCost(i, j, inMask, exMask, -30) +
dp(pos + 1, shiftedInMask + 1, shiftedExMask, inCount - 1, exCount)
if inCount > 0
else -math.inf)
placeExtrovert = (
40 + getPlacementCost(i, j, inMask, exMask, 20) +
dp(pos + 1, shiftedInMask, shiftedExMask + 1, inCount, exCount - 1)
if exCount > 0
else -math.inf)
return max(skip, placeIntrovert, placeExtrovert)
return dp(0, 0, 0, introvertsCount, extrovertsCount)
|
Solution
|
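The solution above uses functools.lru_cache and math.inf, whose imports fall outside the extracted span. A small driver under that assumption; for the classic 2x3 grid with one introvert and two extroverts the optimum is 240 (the introvert isolated in one corner, the two extroverts adjacent to each other):

import functools
import math
# ... class Solution as defined above ...
if __name__ == "__main__":
    print(Solution().getMaxGridHappiness(2, 3, 1, 2))  # expected: 240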
python
|
getsentry__sentry
|
src/sentry/api/bases/organization.py
|
{
"start": 6225,
"end": 6440
}
|
class ____(OrganizationPermission):
scope_map = {
"GET": ["org:read"],
"POST": ["org:admin"],
"PUT": ["org:admin"],
"DELETE": ["org:admin"],
}
|
OrganizationAuthProviderPermission
|
python
|
ray-project__ray
|
python/ray/llm/_internal/common/utils/cloud_utils.py
|
{
"start": 11289,
"end": 16164
}
|
class ____:
"""A cache that works with both sync and async fetch functions.
The purpose of this data structure is to cache the result of a function call
usually used to fetch a value from a cloud object store.
The idea is this:
- Cloud operations are expensive
- In LoRA specifically, we would fetch remote storage to download the model weights
at each request.
- If the same model is requested many times, we don't want to inflate the time to first token.
- We control the cache via not only the least recently used eviction policy, but also
by expiring cache entries after a certain time.
- If the object is missing, we cache the missing status for a small duration while if
the object exists, we cache the object for a longer duration.
"""
def __init__(
self,
max_size: int,
fetch_fn: Union[Callable[[str], Any], Callable[[str], Awaitable[Any]]],
missing_expire_seconds: Optional[int] = None,
exists_expire_seconds: Optional[int] = None,
missing_object_value: Any = object(),
):
"""Initialize the cache.
Args:
max_size: Maximum number of items to store in cache
fetch_fn: Function to fetch values (can be sync or async)
missing_expire_seconds: How long to cache missing objects (None for no expiration)
exists_expire_seconds: How long to cache existing objects (None for no expiration)
"""
self._cache: Dict[str, _CacheEntry] = {}
self._max_size = max_size
self._fetch_fn = fetch_fn
self._missing_expire_seconds = missing_expire_seconds
self._exists_expire_seconds = exists_expire_seconds
self._is_async = inspect.iscoroutinefunction(fetch_fn) or (
callable(fetch_fn) and inspect.iscoroutinefunction(fetch_fn.__call__)
)
self._missing_object_value = missing_object_value
# Lock for thread-safe cache access
self._lock = asyncio.Lock()
async def aget(self, key: str) -> Any:
"""Async get value from cache or fetch it if needed."""
if not self._is_async:
raise ValueError("Cannot use async get() with sync fetch function")
async with self._lock:
value, should_fetch = self._check_cache(key)
if not should_fetch:
return value
# Fetch new value
value = await self._fetch_fn(key)
self._update_cache(key, value)
return value
def get(self, key: str) -> Any:
"""Sync get value from cache or fetch it if needed."""
if self._is_async:
raise ValueError("Cannot use sync get() with async fetch function")
# For sync access, we use a simple check-then-act pattern
# This is safe because sync functions are not used in async context
value, should_fetch = self._check_cache(key)
if not should_fetch:
return value
# Fetch new value
value = self._fetch_fn(key)
self._update_cache(key, value)
return value
def _check_cache(self, key: str) -> tuple[Any, bool]:
"""Check if key exists in cache and is valid.
Returns:
Tuple of (value, should_fetch)
where should_fetch is True if we need to fetch a new value
"""
now = time.monotonic()
if key in self._cache:
value, expire_time = self._cache[key]
if expire_time is None or now < expire_time:
return value, False
return None, True
def _update_cache(self, key: str, value: Any) -> None:
"""Update cache with new value."""
now = time.monotonic()
# Calculate expiration
expire_time = None
if (
self._missing_expire_seconds is not None
or self._exists_expire_seconds is not None
):
if value is self._missing_object_value:
expire_time = (
now + self._missing_expire_seconds
if self._missing_expire_seconds
else None
)
else:
expire_time = (
now + self._exists_expire_seconds
if self._exists_expire_seconds
else None
)
# Enforce size limit by removing oldest entry if needed
# This is an O(n) operation but it's fine since the cache size is usually small.
if len(self._cache) >= self._max_size:
oldest_key = min(
self._cache, key=lambda k: self._cache[k].expire_time or float("inf")
)
del self._cache[oldest_key]
self._cache[key] = _CacheEntry(value, expire_time)
def __len__(self) -> int:
return len(self._cache)
|
CloudObjectCache
|
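A synchronous usage sketch for the cache above. It assumes the module-level _CacheEntry tuple the class stores internally is importable alongside it; the fetch function and sentinel below are hypothetical stand-ins for a real cloud lookup:

MISSING = object()  # sentinel for "object not found"
def fake_fetch(uri: str):
    # Pretend exactly one URI exists in the bucket.
    return {"s3://bucket/model-a": b"weights"}.get(uri, MISSING)
cache = CloudObjectCache(
    max_size=8,
    fetch_fn=fake_fetch,
    missing_expire_seconds=30,   # re-check missing objects fairly soon
    exists_expire_seconds=600,   # keep found objects much longer
    missing_object_value=MISSING,
)
print(cache.get("s3://bucket/model-a"))  # fetched once, then served from cache
print(cache.get("s3://bucket/model-b"))  # the MISSING sentinel, cached for 30 seconds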
python
|
pytorch__pytorch
|
torch/_inductor/pattern_matcher.py
|
{
"start": 28823,
"end": 28893
}
|
class ____(_TargetExprVarArgs):
op = "call_method"
|
CallMethodVarArgs
|
python
|
mahmoud__boltons
|
boltons/socketutils.py
|
{
"start": 25631,
"end": 26146
}
|
class ____(socket.timeout, Error):
"""Inheriting from :exc:`socket.timeout`, Timeout is used to indicate
when a socket operation did not complete within the time
specified. Raised from any of :class:`BufferedSocket`'s ``recv``
methods.
"""
def __init__(self, timeout, extra=""):
msg = 'socket operation timed out'
if timeout is not None:
msg += ' after %sms.' % (timeout * 1000)
if extra:
msg += ' ' + extra
super().__init__(msg)
|
Timeout
|
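Because the class above inherits from socket.timeout, existing handlers written for plain sockets keep catching it; the boltons-specific Error base in its signature comes from the same module. A small sketch:

import socket
from boltons.socketutils import Timeout  # the class shown above
try:
    raise Timeout(timeout=1.5, extra="while reading header")
except socket.timeout as exc:
    # Prints: socket operation timed out after 1500.0ms. while reading header
    print(exc)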
python
|
ApeWorX__ape
|
src/ape/api/providers.py
|
{
"start": 7160,
"end": 32211
}
|
class ____(BaseInterfaceModel):
"""
An abstraction of a connection to a network in an ecosystem. Example ``ProviderAPI``
implementations include the `ape-infura <https://github.com/ApeWorX/ape-infura>`__
plugin or the `ape-hardhat <https://github.com/ApeWorX/ape-hardhat>`__ plugin.
"""
# TODO: In 0.9, make not optional.
NAME: ClassVar[Optional[str]] = None
# TODO: Remove in 0.9 and have NAME be defined at the class-level (in plugins).
name: str
"""(deprecated: use NAME). The name of the provider (should be the plugin name)."""
network: NetworkAPI
"""A reference to the network this provider provides."""
provider_settings: dict = {}
"""The settings for the provider, as overrides to the configuration."""
# TODO: In 0.9, make @property that returns value from config,
# and use REQUEST_HEADER as plugin-defined constants.
request_header: dict = {}
"""A header to set on HTTP/RPC requests."""
block_page_size: int = 100
"""
The amount of blocks to fetch in a response, as a default.
This is particularly useful for querying logs across a block range.
"""
concurrency: int = 4
"""
How many parallel threads to use when fetching logs.
"""
@property
def data_folder(self) -> Path:
"""
The path to the provider's data,
e.g. ``$HOME/.api/{self.name}`` unless overridden.
"""
return self.config_manager.DATA_FOLDER / self.name
@property
@abstractmethod
def is_connected(self) -> bool:
"""
``True`` if currently connected to the provider. ``False`` otherwise.
"""
@property
def connection_str(self) -> str:
"""
The str representing how to connect
to the node, such as an HTTP URL
or an IPC path.
"""
return ""
@abstractmethod
def connect(self):
"""
Connect to a provider, such as start-up a process or create an HTTP connection.
"""
@abstractmethod
def disconnect(self):
"""
Disconnect from a provider, such as tear-down a process or quit an HTTP session.
"""
@property
def ipc_path(self) -> Optional[Path]:
"""
Return the IPC path for the provider, if supported.
"""
return None
@property
def http_uri(self) -> Optional[str]:
"""
Return the raw HTTP/HTTPS URI to connect to this provider, if supported.
"""
return None
@property
def ws_uri(self) -> Optional[str]:
"""
Return the raw WS/WSS URI to connect to this provider, if supported.
"""
return None
@property
def settings(self) -> "PluginConfig":
"""
The combination of settings from ``ape-config.yaml`` and ``.provider_settings``.
"""
CustomConfig = self.config.__class__
data = {**self.config.model_dump(), **self.provider_settings}
return CustomConfig.model_validate(data)
@property
def connection_id(self) -> Optional[str]:
"""
A connection ID to uniquely identify and manage multiple
connections to providers, especially when working with multiple
providers of the same type, like multiple Geth --dev nodes.
"""
try:
chain_id = self.chain_id
except Exception:
if chain_id := self.settings.get("chain_id"):
pass
else:
# A connection is required to obtain a chain ID for this provider.
return None
# NOTE: If other provider settings are different, ``.update_settings()``
# should be called.
return f"{self.network_choice}:{chain_id}"
@abstractmethod
def update_settings(self, new_settings: dict):
"""
Change a provider's setting, such as configure a new port to run on.
May require a reconnect.
Args:
new_settings (dict): The new provider settings.
"""
@property
@abstractmethod
def chain_id(self) -> int:
"""
The blockchain ID.
See `ChainList <https://chainlist.org/>`__ for a comprehensive list of IDs.
"""
@abstractmethod
def get_balance(self, address: "AddressType", block_id: Optional["BlockID"] = None) -> int:
"""
Get the balance of an account.
Args:
address (:class:`~ape.types.address.AddressType`): The address of the account.
block_id (:class:`~ape.types.BlockID`): Optionally specify a block
ID. Defaults to using the latest block.
Returns:
int: The account balance.
"""
@abstractmethod
def get_code(
self, address: "AddressType", block_id: Optional["BlockID"] = None
) -> "ContractCode":
"""
Get the bytes a contract.
Args:
address (:class:`~ape.types.address.AddressType`): The address of the contract.
block_id (Optional[:class:`~ape.types.BlockID`]): The block ID
for checking a previous account nonce.
Returns:
:class:`~ape.types.ContractCode`: The contract bytecode.
"""
@property
def network_choice(self) -> str:
"""
The connected network choice string.
"""
if self.network.is_adhoc and self.connection_str:
# `custom` is not a real network and is same
# as using raw connection str
return self.connection_str
elif self.network.is_adhoc:
raise ProviderError("Custom network provider missing `connection_str`.")
return f"{self.network.choice}:{self.name}"
@abstractmethod
def make_request(self, rpc: str, parameters: Optional[Iterable] = None) -> Any:
"""
Make a raw RPC request to the provider.
        Advanced features such as tracing may utilize this to bypass unnecessary
        class serializations.
"""
@raises_not_implemented
def stream_request( # type: ignore[empty-body]
self, method: str, params: Iterable, iter_path: str = "result.item"
) -> Iterator[Any]:
"""
Stream a request, great for large requests like events or traces.
Args:
method (str): The RPC method to call.
            params (Iterable): Parameters for the method.
iter_path (str): The response dict-path to the items.
Returns:
An iterator of items.
"""
# TODO: In 0.9, delete this method.
def get_storage_at(self, *args, **kwargs) -> HexBytes:
warnings.warn(
"'provider.get_storage_at()' is deprecated. Use 'provider.get_storage()'.",
DeprecationWarning,
)
return self.get_storage(*args, **kwargs)
@raises_not_implemented
def get_storage( # type: ignore[empty-body]
self, address: "AddressType", slot: int, block_id: Optional["BlockID"] = None
) -> HexBytes:
"""
Gets the raw value of a storage slot of a contract.
Args:
address (AddressType): The address of the contract.
slot (int): Storage slot to read the value of.
block_id (Optional[:class:`~ape.types.BlockID`]): The block ID
for checking a previous storage value.
Returns:
HexBytes: The value of the storage slot.
"""
@abstractmethod
def get_nonce(self, address: "AddressType", block_id: Optional["BlockID"] = None) -> int:
"""
Get the number of times an account has transacted.
Args:
address (AddressType): The address of the account.
block_id (Optional[:class:`~ape.types.BlockID`]): The block ID
for checking a previous account nonce.
Returns:
int
"""
@abstractmethod
def estimate_gas_cost(self, txn: TransactionAPI, block_id: Optional["BlockID"] = None) -> int:
"""
Estimate the cost of gas for a transaction.
Args:
txn (:class:`~ape.api.transactions.TransactionAPI`):
The transaction to estimate the gas for.
block_id (Optional[:class:`~ape.types.BlockID`]): The block ID
to use when estimating the transaction. Useful for checking a
past estimation cost of a transaction.
Returns:
int: The estimated cost of gas to execute the transaction
reported in the fee-currency's smallest unit, e.g. Wei. If the
provider's network has been configured with a gas limit override, it
will be returned. If the gas limit configuration is "max" this will
return the block maximum gas limit.
"""
@property
@abstractmethod
def gas_price(self) -> int:
"""
The price for what it costs to transact
(pre-`EIP-1559 <https://eips.ethereum.org/EIPS/eip-1559>`__).
"""
@property
@abstractmethod
def max_gas(self) -> int:
"""
The max gas limit value you can use.
"""
@property
def config(self) -> "PluginConfig":
"""
The provider's configuration.
"""
return self.config_manager.get_config(self.name)
@property
def priority_fee(self) -> int:
"""
A miner tip to incentivize them to include your transaction in a block.
Raises:
NotImplementedError: When the provider does not implement
`EIP-1559 <https://eips.ethereum.org/EIPS/eip-1559>`__ typed transactions.
"""
raise APINotImplementedError("priority_fee is not implemented by this provider")
@property
def supports_tracing(self) -> bool:
"""
``True`` when the provider can provide transaction traces.
"""
return False
@property
def base_fee(self) -> int:
"""
The minimum value required to get your transaction included on the next block.
Only providers that implement `EIP-1559 <https://eips.ethereum.org/EIPS/eip-1559>`__
will use this property.
Raises:
NotImplementedError: When this provider does not implement
`EIP-1559 <https://eips.ethereum.org/EIPS/eip-1559>`__.
"""
raise APINotImplementedError("base_fee is not implemented by this provider")
@abstractmethod
def get_block(self, block_id: "BlockID") -> BlockAPI:
"""
Get a block.
Args:
block_id (:class:`~ape.types.BlockID`): The ID of the block to get.
Can be ``"latest"``, ``"earliest"``, ``"pending"``, a block hash or a block number.
Raises:
:class:`~ape.exceptions.BlockNotFoundError`: Likely the exception raised when a block
is not found (depends on implementation).
Returns:
:class:`~ape.types.BlockID`: The block for the given ID.
"""
# TODO: In 0.9, change the return value to be `CallResult`
# (right now it does only when using raise_on_revert=False and it reverts).
@abstractmethod
def send_call(
self,
txn: TransactionAPI,
block_id: Optional["BlockID"] = None,
state: Optional[dict] = None,
**kwargs,
) -> HexBytes: # Return value of function
"""
Execute a new transaction call immediately without creating a
transaction on the blockchain.
Args:
txn: :class:`~ape.api.transactions.TransactionAPI`
block_id (Optional[:class:`~ape.types.BlockID`]): The block ID
to use to send a call at a historical point of a contract.
Useful for checking a past estimation cost of a transaction.
state (Optional[dict]): Modify the state of the blockchain
prior to sending the call, for testing purposes.
**kwargs: Provider-specific extra kwargs.
Returns:
HexBytes: The returndata of the transaction call. Even though it isn't
            mentioned in the type, you can also return a :class:`~ape.api.providers.CallResult`
here and Ape will handle it. In 0.9, it will be the primary return type.
"""
@abstractmethod
def get_receipt(self, txn_hash: str, **kwargs) -> ReceiptAPI:
"""
Get the information about a transaction from a transaction hash.
Args:
txn_hash (str): The hash of the transaction to retrieve.
kwargs: Any other kwargs that other providers might allow when fetching a receipt.
Returns:
:class:`~api.providers.ReceiptAPI`:
The receipt of the transaction with the given hash.
"""
@abstractmethod
def get_transactions_by_block(self, block_id: "BlockID") -> Iterator[TransactionAPI]:
"""
Get the information about a set of transactions from a block.
Args:
block_id (:class:`~ape.types.BlockID`): The ID of the block.
Returns:
Iterator[:class: `~ape.api.transactions.TransactionAPI`]
"""
@raises_not_implemented
def get_transactions_by_account_nonce( # type: ignore[empty-body]
self,
account: "AddressType",
start_nonce: int = 0,
stop_nonce: int = -1,
) -> Iterator[ReceiptAPI]:
"""
Get account history for the given account.
Args:
account (:class:`~ape.types.address.AddressType`): The address of the account.
start_nonce (int): The nonce of the account to start the search with.
stop_nonce (int): The nonce of the account to stop the search with.
Returns:
Iterator[:class:`~ape.api.transactions.ReceiptAPI`]
"""
@abstractmethod
def send_transaction(self, txn: TransactionAPI) -> ReceiptAPI:
"""
Send a transaction to the network.
Args:
txn (:class:`~ape.api.transactions.TransactionAPI`): The transaction to send.
Returns:
:class:`~ape.api.transactions.ReceiptAPI`
"""
@abstractmethod
def get_contract_logs(self, log_filter: "LogFilter") -> Iterator["ContractLog"]:
"""
Get logs from contracts.
Args:
log_filter (:class:`~ape.types.LogFilter`): A mapping of event ABIs to
topic filters. Defaults to getting all events.
Returns:
Iterator[:class:`~ape.types.ContractLog`]
"""
def send_private_transaction(self, txn: TransactionAPI, **kwargs) -> ReceiptAPI:
"""
Send a transaction through a private mempool (if supported by the Provider).
Raises:
:class:`~ape.exceptions.APINotImplementedError`: If using a non-local
network and not implemented by the provider.
Args:
txn (:class:`~ape.api.transactions.TransactionAPI`): The transaction
to privately publish.
**kwargs: Additional kwargs to be optionally handled by the provider.
Returns:
:class:`~ape.api.transactions.ReceiptAPI`
"""
if self.network.is_dev:
# Send the transaction as normal so testers can verify private=True
# and the txn still goes through.
logger.warning(
f"private=True is set but connected to network '{self.network.name}' ."
f"Using regular '{self.send_transaction.__name__}()' method (not private)."
)
return self.send_transaction(txn)
# What happens normally from `raises_not_implemented()` decorator.
raise _create_raises_not_implemented_error(self.send_private_transaction)
@raises_not_implemented
def snapshot(self) -> "SnapshotID": # type: ignore[empty-body]
"""
Defined to make the ``ProviderAPI`` interchangeable with a
:class:`~ape.api.providers.TestProviderAPI`, as in
:class:`ape.managers.chain.ChainManager`.
Raises:
:class:`~ape.exceptions.APINotImplementedError`: Unless overridden.
"""
@raises_not_implemented
def restore(self, snapshot_id: "SnapshotID"):
"""
Defined to make the ``ProviderAPI`` interchangeable with a
:class:`~ape.api.providers.TestProviderAPI`, as in
:class:`ape.managers.chain.ChainManager`.
Raises:
:class:`~ape.exceptions.APINotImplementedError`: Unless overridden.
"""
@raises_not_implemented
def set_timestamp(self, new_timestamp: int):
"""
Defined to make the ``ProviderAPI`` interchangeable with a
:class:`~ape.api.providers.TestProviderAPI`, as in
:class:`ape.managers.chain.ChainManager`.
Raises:
:class:`~ape.exceptions.APINotImplementedError`: Unless overridden.
"""
@raises_not_implemented
def mine(self, num_blocks: int = 1):
"""
Defined to make the ``ProviderAPI`` interchangeable with a
:class:`~ape.api.providers.TestProviderAPI`, as in
:class:`ape.managers.chain.ChainManager`.
Raises:
:class:`~ape.exceptions.APINotImplementedError`: Unless overridden.
"""
@raises_not_implemented
def set_balance(self, address: "AddressType", amount: int):
"""
Change the balance of an account.
Args:
address (AddressType): An address on the network.
amount (int): The balance to set in the address.
"""
@raises_not_implemented
def get_test_account(self, index: int) -> "TestAccountAPI": # type: ignore[empty-body]
"""
Retrieve one of the provider-generated test accounts.
Args:
index (int): The index of the test account in the HD-Path.
Returns:
:class:`~ape.api.accounts.TestAccountAPI`
"""
@log_instead_of_fail(default="<ProviderAPI>")
def __repr__(self) -> str:
return f"<{self.name.capitalize()} chain_id={self.chain_id}>"
@raises_not_implemented
def set_code( # type: ignore[empty-body]
self, address: "AddressType", code: "ContractCode"
) -> bool:
"""
Change the code of a smart contract, for development purposes.
Test providers implement this method when they support it.
Args:
address (AddressType): An address on the network.
code (:class:`~ape.types.ContractCode`): The new bytecode.
"""
@raises_not_implemented
def set_storage( # type: ignore[empty-body]
self, address: "AddressType", slot: int, value: HexBytes
):
"""
Sets the raw value of a storage slot of a contract.
Args:
address (str): The address of the contract.
slot (int): Storage slot to write the value to.
value: (HexBytes): The value to overwrite the raw storage slot with.
"""
@raises_not_implemented
def unlock_account(self, address: "AddressType") -> bool: # type: ignore[empty-body]
"""
Ask the provider to allow an address to submit transactions without validating
signatures. This feature is intended to be subclassed by a
:class:`~ape.api.providers.TestProviderAPI` so that during a fork-mode test,
a transaction can be submitted by an arbitrary account or contract without a private key.
Raises:
NotImplementedError: When this provider does not support unlocking an account.
Args:
address (:class:`~ape.types.address.AddressType`): The address to unlock.
Returns:
bool: ``True`` if successfully unlocked account and ``False`` otherwise.
"""
@raises_not_implemented
def relock_account(self, address: "AddressType"):
"""
Stop impersonating an account.
Args:
address (:class:`~ape.types.address.AddressType`): The address to relock.
"""
@raises_not_implemented
def get_transaction_trace( # type: ignore[empty-body]
self, txn_hash: Union[HexBytes, str]
) -> "TraceAPI":
"""
Provide a detailed description of opcodes.
Args:
txn_hash (Union[HexBytes, str]): The hash of a transaction
to trace.
Returns:
:class:`~ape.api.trace.TraceAPI`: A transaction trace.
"""
@raises_not_implemented
def poll_blocks( # type: ignore[empty-body]
self,
stop_block: Optional[int] = None,
required_confirmations: Optional[int] = None,
new_block_timeout: Optional[int] = None,
) -> Iterator[BlockAPI]:
"""
Poll new blocks.
**NOTE**: When a chain reorganization occurs, this method logs an error and
yields the missed blocks, even if they were previously yielded with different
block numbers.
**NOTE**: This is a daemon method; it does not terminate unless an exception occurs
or a ``stop_block`` is given.
Args:
stop_block (Optional[int]): Optionally set a future block number to stop at.
Defaults to never-ending.
required_confirmations (Optional[int]): The amount of confirmations to wait
before yielding the block. The more confirmations, the less likely a reorg will occur.
Defaults to the network's configured required confirmations.
            new_block_timeout (Optional[int]): The amount of time to wait for a new block before
timing out. Defaults to 10 seconds for local networks or ``50 * block_time`` for live
networks.
Returns:
Iterator[:class:`~ape.api.providers.BlockAPI`]
"""
@raises_not_implemented
def poll_logs( # type: ignore[empty-body]
self,
stop_block: Optional[int] = None,
address: Optional["AddressType"] = None,
topics: Optional[list[Union[str, list[str]]]] = None,
required_confirmations: Optional[int] = None,
new_block_timeout: Optional[int] = None,
events: Optional[list["EventABI"]] = None,
) -> Iterator["ContractLog"]:
"""
        Poll new contract logs.
**NOTE**: This is a daemon method; it does not terminate unless an exception occurs.
Usage example::
for new_log in contract.MyEvent.poll_logs():
print(f"New event log found: block_number={new_log.block_number}")
Args:
stop_block (Optional[int]): Optionally set a future block number to stop at.
Defaults to never-ending.
address (Optional[str]): The address of the contract to filter logs by.
Defaults to all addresses.
topics (Optional[list[Union[str, list[str]]]]): The topics to filter logs by.
Defaults to all topics.
required_confirmations (Optional[int]): The amount of confirmations to wait
before yielding the block. The more confirmations, the less likely a reorg will occur.
Defaults to the network's configured required confirmations.
new_block_timeout (Optional[int]): The amount of time to wait for a new block before
quitting. Defaults to 10 seconds for local networks or ``50 * block_time`` for live
networks.
events (Optional[list[``EventABI``]]): An optional list of events to listen on.
Returns:
Iterator[:class:`~ape.types.ContractLog`]
"""
def prepare_transaction(self, txn: TransactionAPI) -> TransactionAPI:
"""
Set default values on the transaction.
Raises:
:class:`~ape.exceptions.TransactionError`: When given negative required confirmations.
Args:
txn (:class:`~ape.api.transactions.TransactionAPI`): The transaction to prepare.
Returns:
:class:`~ape.api.transactions.TransactionAPI`
"""
return txn
def get_virtual_machine_error(self, exception: Exception, **kwargs) -> VirtualMachineError:
"""
Get a virtual machine error from an error returned from your RPC.
Args:
exception (Exception): The error returned from your RPC client.
Returns:
:class:`~ape.exceptions.VirtualMachineError`: An error representing what
went wrong in the call.
"""
return VirtualMachineError(base_err=exception, **kwargs)
def _get_request_headers(self) -> RPCHeaders:
# Internal helper method called by NetworkManager
headers = RPCHeaders(**self.request_header)
# Have to do it this way to avoid "multiple-keys" error.
configured_headers: dict = self.config.get("request_headers", {})
for key, value in configured_headers.items():
headers[key] = value
return headers
|
ProviderAPI
|
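For orientation, the abstract surface a provider plugin has to cover, collected from the @abstractmethod members above, is sketched below as a hypothetical skeleton; it is not a real plugin, is not instantiable as written (the ellipsis bodies and required pydantic fields are placeholders), and omits every optional hook:

class MySketchProvider(ProviderAPI):
    NAME = "sketch"
    @property
    def is_connected(self) -> bool: ...
    def connect(self): ...
    def disconnect(self): ...
    def update_settings(self, new_settings: dict): ...
    @property
    def chain_id(self) -> int: ...
    def get_balance(self, address, block_id=None) -> int: ...
    def get_code(self, address, block_id=None): ...
    def get_nonce(self, address, block_id=None) -> int: ...
    def estimate_gas_cost(self, txn, block_id=None) -> int: ...
    @property
    def gas_price(self) -> int: ...
    @property
    def max_gas(self) -> int: ...
    def get_block(self, block_id): ...
    def send_call(self, txn, block_id=None, state=None, **kwargs): ...
    def get_receipt(self, txn_hash, **kwargs): ...
    def get_transactions_by_block(self, block_id): ...
    def send_transaction(self, txn): ...
    def get_contract_logs(self, log_filter): ...
    def make_request(self, rpc, parameters=None): ...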
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1557053,
"end": 1557263
}
|
class ____(SingleTimeUnit):
"""UtcSingleTimeUnit schema wrapper."""
_schema = {"$ref": "#/definitions/UtcSingleTimeUnit"}
def __init__(self, *args):
super().__init__(*args)
|
UtcSingleTimeUnit
|
python
|
pytorch__pytorch
|
torch/_inductor/compiler_bisector.py
|
{
"start": 511,
"end": 2401
}
|
class ____(BinarySubsystem):
name: str = field(init=False)
config_name: str
config_field: str
config_value: object
def __post_init__(self) -> None:
self.name = f"{self.config_name}_{self.config_field}"
# Dictionary of backend -> subsystems
BACKENDS: dict[str, list[Subsystem]] = {
# run dynamo without aot_autograd
"eager": [],
# run dynamo with aot_autograd, but no partitioner or decomps
"aot_eager": [],
# run dynamo with aot autograd, decompositions and partitioner
"aot_eager_decomp_partition": [
ConfigChange("aot_eager_decomp_partition", "cse", False),
BisectSubsystem(
"decomposition"
), # number of decompositions we apply in tracing
], # TODO - add cse ?
# applies CrossRefFakeMode on invocation
"aot_eager_decomp_partition_crossref": [],
"inductor": [
BisectSubsystem("pre_grad_passes"), # passes applied on pre-grad IR
BisectSubsystem("joint_graph_passes"), # passes applied on joint graph
BisectSubsystem(
"post_grad_passes"
), # passes applied individually on forward, and backward in inductor
ConfigChange("inductor", "fallback_random", True),
ConfigChange("inductor", "emulate_precision_casts", True),
ConfigChange("inductor", "layout_optimization", False),
ConfigChange("inductor", "comprehensive_padding", False),
BisectSubsystem("lowerings"), # lowering aten operators to inductor
], # TODO - add more - fusions ?
}
subsystem_call_counter: dict[str, int] = collections.Counter()
call_counter_debug_info: dict[int, str] = {}
def reset_counters() -> None:
subsystem_call_counter.clear()
call_counter_debug_info.clear()
@functools.cache
def get_env_val(env_str: str) -> Optional[str]:
return os.environ.get(env_str, None)
@dataclasses.dataclass
|
ConfigChange
|
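The dataclass above relies on field(init=False) plus __post_init__ to derive name after construction; the @dataclasses.dataclass decorator and the BinarySubsystem base sit outside the extracted span. A self-contained sketch of the same pattern, without the pytorch-specific base:

import dataclasses
from dataclasses import field
@dataclasses.dataclass
class DemoChange:
    name: str = field(init=False)  # excluded from __init__, so field ordering stays legal
    config_name: str
    config_field: str
    def __post_init__(self) -> None:
        # Derived the same way ConfigChange above builds its name.
        self.name = f"{self.config_name}_{self.config_field}"
change = DemoChange("inductor", "fallback_random")
print(change.name)  # inductor_fallback_random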
python
|
getsentry__sentry
|
tests/sentry/middleware/test_access_log_middleware.py
|
{
"start": 12185,
"end": 12408
}
|
class ____(LogCaptureAPITestCase):
endpoint = "dummy-fail-endpoint"
def test_access_log_fail(self) -> None:
self.get_error_response(status_code=500)
self.assert_access_log_recorded()
|
TestAccessLogFail
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs.py
|
{
"start": 1232,
"end": 2161
}
|
class ____(object):
"""Abstraction for the state of the CFG walk for reaching definition analysis.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
their possible definitions
"""
def __init__(self, init_from=None):
if init_from:
self.value = set(init_from)
else:
self.value = set()
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
def __or__(self, other):
assert isinstance(other, _NodeState)
result = _NodeState(self.value)
result.value.update(other.value)
return result
def __add__(self, value):
result = _NodeState(self.value)
result.value.add(value)
return result
def __repr__(self):
return 'NodeState[%s]=%s' % (id(self), repr(self.value))
|
_NodeState
|
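The _NodeState entry above is a small set-based value type for the reaching-definitions CFG walk: "|" merges the states of two predecessor branches and "+" records one new definition. A standalone sketch of the same semantics, with plain strings standing in for the qual_names.QN / Definition objects used by the real analysis:

class NodeStateSketch:
    def __init__(self, init_from=None):
        self.value = set(init_from) if init_from else set()

    def __or__(self, other):
        merged = NodeStateSketch(self.value)
        merged.value.update(other.value)  # union: definitions reaching from either branch
        return merged

    def __add__(self, definition):
        extended = NodeStateSketch(self.value)
        extended.value.add(definition)  # one more definition reaches this node
        return extended

branch_a = NodeStateSketch() + "f@line3"
branch_b = NodeStateSketch() + "f@line7"
print(sorted((branch_a | branch_b).value))  # ['f@line3', 'f@line7']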
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance6.py
|
{
"start": 1097,
"end": 1137
}
|
class ____(Generic[_T2]):
pass
|
ParentB
|
python
|
keras-team__keras
|
keras/src/ops/image.py
|
{
"start": 20209,
"end": 25505
}
|
class ____(Operation):
def __init__(
self,
size,
strides=None,
dilation_rate=1,
padding="valid",
data_format=None,
*,
name=None,
):
super().__init__(name=name)
if isinstance(size, int):
size = (size, size)
self.size = size
self.strides = strides
self.dilation_rate = dilation_rate
self.padding = padding
self.data_format = backend.standardize_data_format(data_format)
def call(self, images):
return _extract_patches(
images=images,
size=self.size,
strides=self.strides,
dilation_rate=self.dilation_rate,
padding=self.padding,
data_format=self.data_format,
)
def compute_output_spec(self, images):
images_shape = list(images.shape)
original_ndim = len(images_shape)
if not self.strides:
strides = (self.size[0], self.size[1])
if self.data_format == "channels_last":
channels_in = images_shape[-1]
else:
channels_in = images_shape[-3]
if original_ndim == 3:
images_shape = [1] + images_shape
filters = self.size[0] * self.size[1] * channels_in
kernel_size = (self.size[0], self.size[1])
out_shape = compute_conv_output_shape(
images_shape,
filters,
kernel_size,
strides=strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
if original_ndim == 3:
out_shape = out_shape[1:]
return KerasTensor(shape=out_shape, dtype=images.dtype)
@keras_export("keras.ops.image.extract_patches")
def extract_patches(
images,
size,
strides=None,
dilation_rate=1,
padding="valid",
data_format=None,
):
"""Extracts patches from the image(s).
Args:
images: Input image or batch of images. Must be 3D or 4D.
size: Patch size int or tuple (patch_height, patch_width)
strides: strides along height and width. If not specified, or
if `None`, it defaults to the same value as `size`.
dilation_rate: This is the input stride, specifying how far two
consecutive patch samples are in the input. For value other than 1,
strides must be 1. NOTE: `strides > 1` is not supported in
conjunction with `dilation_rate > 1`
padding: The type of padding algorithm to use: `"same"` or `"valid"`.
data_format: A string specifying the data format of the input tensor.
It can be either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)`, while `"channels_first"`
corresponds to inputs with shape `(batch, channels, height, width)`.
If not specified, the value will default to
`keras.config.image_data_format`.
Returns:
Extracted patches 3D (if not batched) or 4D (if batched)
Examples:
>>> image = np.random.random(
... (2, 20, 20, 3)
... ).astype("float32") # batch of 2 RGB images
>>> patches = keras.ops.image.extract_patches(image, (5, 5))
>>> patches.shape
(2, 4, 4, 75)
>>> image = np.random.random((20, 20, 3)).astype("float32") # 1 RGB image
>>> patches = keras.ops.image.extract_patches(image, (3, 3), (1, 1))
>>> patches.shape
(18, 18, 27)
"""
if any_symbolic_tensors((images,)):
return ExtractPatches(
size=size,
strides=strides,
dilation_rate=dilation_rate,
padding=padding,
data_format=data_format,
).symbolic_call(images)
return _extract_patches(
images, size, strides, dilation_rate, padding, data_format=data_format
)
def _extract_patches(
images,
size,
strides=None,
dilation_rate=1,
padding="valid",
data_format=None,
):
if isinstance(size, int):
patch_h = patch_w = size
elif len(size) == 2:
patch_h, patch_w = size[0], size[1]
else:
raise TypeError(
"Invalid `size` argument. Expected an "
f"int or a tuple of length 2. Received: size={size}"
)
data_format = backend.standardize_data_format(data_format)
if data_format == "channels_last":
channels_in = images.shape[-1]
elif data_format == "channels_first":
channels_in = images.shape[-3]
if not strides:
strides = size
out_dim = patch_h * patch_w * channels_in
kernel = backend.numpy.eye(out_dim, dtype=images.dtype)
kernel = backend.numpy.reshape(
kernel, (patch_h, patch_w, channels_in, out_dim)
)
_unbatched = False
if len(images.shape) == 3:
_unbatched = True
images = backend.numpy.expand_dims(images, axis=0)
patches = backend.nn.conv(
inputs=images,
kernel=kernel,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
if _unbatched:
patches = backend.numpy.squeeze(patches, axis=0)
return patches
|
ExtractPatches
|
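The extract_patches docstring above quotes output shapes such as (2, 4, 4, 75) for 5x5 patches of a 20x20x3 image. A NumPy-only sketch of the non-overlapping case (strides equal to the patch size, "valid" padding) reproduces the per-image shape without going through backend.nn.conv; the helper name is invented for illustration and is not part of the Keras API:

import numpy as np

def extract_nonoverlapping_patches(image: np.ndarray, patch: int) -> np.ndarray:
    h, w, c = image.shape
    gh, gw = h // patch, w // patch
    x = image[: gh * patch, : gw * patch]        # crop to a multiple of the patch size
    x = x.reshape(gh, patch, gw, patch, c)       # split height and width into (grid, patch)
    x = x.transpose(0, 2, 1, 3, 4)               # bring the two grid axes together
    return x.reshape(gh, gw, patch * patch * c)  # flatten each patch into one vector

patches = extract_nonoverlapping_patches(np.random.random((20, 20, 3)).astype("float32"), 5)
print(patches.shape)  # (4, 4, 75) -- the per-image shape from the batched docstring example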
python
|
gevent__gevent
|
src/gevent/tests/test__greenlet.py
|
{
"start": 27555,
"end": 27673
}
|
class ____(TestKill):
def _start_greenlet(self, g):
g.start_later(timing.LARGE_TICK)
|
TestKillAfterStartLater
|